def do_nested_sampling(nreplicas=10, niter=200, mciter=1000, stepsize=.8,
                       estop=-.9, x0=[1, 1], r0=2, xlim=None, ylim=None,
                       circle=False):
    """Run nested sampling on a 2d potential and record each iteration."""
    path = []

    def mc_record_position_event(coords=None, **kwargs):
        # record the Monte Carlo walker's path, skipping repeated positions
        if len(path) == 0 or not np.all(path[-1] == coords):
            path.append(coords)

    p = Pot()
    print p.get_energy(np.array([1, 2.]))
    mc_walker = MonteCarloWalker(p, mciter=mciter,
                                 events=[mc_record_position_event])

    # initialize the replicas with random positions
    replicas = []
    for i in xrange(nreplicas):
        if circle:
            # choose points uniformly in a circle of radius r0 about x0
            coords = vector_random_uniform_hypersphere(2) * r0 + x0
        else:
            # choose points uniformly in the rectangle xlim x ylim
            coords = np.zeros(2)
            coords[0] = np.random.uniform(xlim[0], xlim[1])
            coords[1] = np.random.uniform(ylim[0], ylim[1])
        replicas.append(Replica(coords, p.get_energy(coords)))

    ns = NestedSampling(replicas, mc_walker, stepsize=stepsize)
    results = [Result()]
    results[0].replicas = [r.copy() for r in replicas]
    for i in xrange(niter):
        ns.one_iteration()
        new_res = Result()
        new_res.replicas = [r.copy() for r in replicas]
        new_res.starting_replica = ns.starting_replicas[0].copy()
        new_res.new_replica = ns.new_replicas[0].copy()
        path.insert(0, new_res.starting_replica.x)
        new_res.mc_path = path
        results.append(new_res)
        path = []
        if ns.replicas[-1].energy < estop:
            break
    return ns, results
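# A minimal driver sketch for do_nested_sampling (illustrative only: the
# rectangular sampling window below is an assumption, and Pot, Replica,
# MonteCarloWalker, and NestedSampling must be importable from the
# surrounding project for this to run).
ns, results = do_nested_sampling(nreplicas=10, niter=200,
                                 xlim=(-2.0, 3.0), ylim=(-2.0, 3.0))
# ns.max_energies holds one culled maximum energy per iteration; in nested
# sampling these bound a nested sequence of phase-space volumes.
print("%d iterations recorded" % (len(results) - 1))
print("final culling energy: %g" % ns.replicas[-1].energy)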
def get_exact_dos(self):
    N = len(self.Zlinear_sorted)
    V = N - 1 - self.sidebar_e_to_index(self.ns.max_energies[0])
    print self.ns.max_energies[0], self.Zlinear_sorted[V]
    K = float(len(self.ns.replicas))
    n = len(self.ns.max_energies)
    self.better_dos = Result()
    # deterministic estimate: after i iterations with K replicas the
    # accessible volume has shrunk by the mean compression factor (K/(K+1))**i
    self.better_dos.energies = [
        self.Zlinear_sorted[int(np.round(V * (K / (K + 1))**i))]
        for i in xrange(n)]
    self.better_dos.dos = self.compute_dos(len(self.better_dos.energies))

    # also make some random dos versions by sampling the compression
    # factors alpha_i from their exact Beta(K, 1) distribution
    self.better_dos.random_energies = []
    for i in xrange(20):
        alphas = np.random.beta(K, 1, size=n - 1)
        elist = [self.Zlinear_sorted[int(np.round(V * np.prod(alphas[:j])))]
                 for j in xrange(1, n)]
        elist.insert(0, self.Zlinear_sorted[int(np.round(V))])
        self.better_dos.random_energies.append(elist)
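# Standalone sketch of the compression statistics used above (no project
# classes needed).  With K replicas, each nested sampling iteration
# multiplies the accessible volume by a factor alpha ~ Beta(K, 1), whose
# mean is K/(K+1).
import numpy as np

K, n = 10.0, 50
mean_compression = (K / (K + 1.0)) ** np.arange(1, n + 1)
alphas = np.random.beta(K, 1, size=n)      # one stochastic realization
sampled_compression = np.cumprod(alphas)
print("volume fraction after %d steps: %g (mean) vs %g (sampled)"
      % (n, mean_compression[-1], sampled_compression[-1]))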
def lbfgs_scipy(coords, pot, iprint=-1, tol=1e-3, nsteps=1500):
    """a wrapper function for the lbfgs routine in scipy

    .. warning::
        The scipy version of lbfgs uses a linesearch based only on the
        energy, which can make the minimization stop early.  When the step
        size is so small that the energy doesn't change to within machine
        precision (times the parameter `factr`), the routine declares
        success and stops.  This sounds fine, but if the gradient is
        analytical it can still be unconverged, because in the vicinity of
        the minimum the gradient changes much more rapidly than the energy.
        Thus we want to make `factr` as small as possible.  Unfortunately,
        if we make it too small the routine decides the linesearch isn't
        working, declares failure, and exits.  Long story short: if your
        tolerance is very small (< 1e-6) this routine will probably stop
        before truly reaching that tolerance, and if you reduce `factr` too
        much to mitigate this, lbfgs will stop anyway but misleadingly
        declare failure.
    """
    assert hasattr(pot, "getEnergyGradient")
    from scipy.optimize import Result, fmin_l_bfgs_b
    res = Result()
    res.coords, res.energy, dictionary = fmin_l_bfgs_b(
        pot.getEnergyGradient, coords, iprint=iprint, pgtol=tol,
        maxfun=nsteps, factr=10.)
    res.grad = dictionary["grad"]
    res.nfev = dictionary["funcalls"]
    warnflag = dictionary['warnflag']
    #res.nsteps = dictionary['nit']  # new in scipy version 0.12
    res.nsteps = res.nfev
    res.message = dictionary['task']
    res.success = True
    if warnflag > 0:
        print "warning: problem with quench: ",
        res.success = False
        if warnflag == 1:
            res.message = "too many function evaluations"
        else:
            res.message = str(dictionary['task'])
        print res.message
    # note: if the linesearch fails, lbfgs may fail without setting
    # warnflag.  Check the tolerance exactly.
    if False:
        if res.success:
            maxV = np.max(np.abs(res.grad))
            if maxV > tol:
                print "warning: gradient seems too large", maxV, "tol =", tol, ". This is a known, but not understood, issue of scipy_lbfgs"
                print res.message
    res.rms = res.grad.std()
    return res
def lbfgs_scipy(coords, pot, iprint=-1, tol=1e-3, nsteps=10000):
    """a wrapper function for the lbfgs routine in scipy

    .. warning::
        The scipy version of lbfgs uses a linesearch based only on the
        energy, which can make the minimization stop early.  When the step
        size is so small that the energy doesn't change to within machine
        precision (times the parameter `factr`), the routine declares
        success and stops.  This sounds fine, but if the gradient is
        analytical it can still be unconverged, because in the vicinity of
        the minimum the gradient changes much more rapidly than the energy.
        Thus we want to make `factr` as small as possible.  Unfortunately,
        if we make it too small the routine decides the linesearch isn't
        working, declares failure, and exits.  Long story short: if your
        tolerance is very small (< 1e-6) this routine will probably stop
        before truly reaching that tolerance, and if you reduce `factr` too
        much to mitigate this, lbfgs will stop anyway but misleadingly
        declare failure.
    """
    assert hasattr(pot, "getEnergyGradient")
    from scipy.optimize import Result, fmin_l_bfgs_b
    res = Result()
    res.coords, res.energy, dictionary = fmin_l_bfgs_b(
        pot.getEnergyGradient, coords, iprint=iprint, pgtol=tol,
        maxfun=nsteps, factr=10.)
    res.grad = dictionary["grad"]
    res.nfev = dictionary["funcalls"]
    warnflag = dictionary['warnflag']
    #res.nsteps = dictionary['nit']  # new in scipy version 0.12
    res.nsteps = res.nfev
    res.message = dictionary['task']
    res.success = True
    if warnflag > 0:
        print "warning: problem with quench: ",
        res.success = False
        if warnflag == 1:
            res.message = "too many function evaluations"
        else:
            res.message = str(dictionary['task'])
        print res.message
    print "    the energy is", res.energy, "the rms gradient is", \
        np.linalg.norm(res.grad) / np.sqrt(res.grad.size), "nfev", res.nfev
    print "    X: ", res.coords
    # note: if the linesearch fails, lbfgs may fail without setting
    # warnflag.  Check the tolerance exactly.
    if False:
        if res.success:
            maxV = np.max(np.abs(res.grad))
            if maxV > tol:
                print "warning: gradient seems too large", maxV, "tol =", tol, ". This is a known, but not understood, issue of scipy_lbfgs"
                print res.message
    res.rms = res.grad.std()
    return res
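# A minimal sketch driving lbfgs_scipy with a toy quadratic potential.  The
# QuadraticPot class is hypothetical; the only contract lbfgs_scipy relies
# on is a getEnergyGradient(coords) method returning (energy, gradient).
# Note the wrapper needs an old scipy (~0.11-0.13) that still exposes
# scipy.optimize.Result (renamed OptimizeResult in later releases).
import numpy as np

class QuadraticPot(object):
    def getEnergyGradient(self, x):
        # E = |x|^2 / 2, so the gradient is x itself
        return 0.5 * np.dot(x, x), x

res = lbfgs_scipy(np.array([1.0, -2.0, 0.5]), QuadraticPot(), tol=1e-5)
print("E = %g after %d function calls, success = %s"
      % (res.energy, res.nfev, res.success))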
def _minimize_anneal(func, x0, args=(),
                     schedule='fast', T0=None, Tf=1e-12, maxfev=None,
                     maxaccept=None, maxiter=400, boltzmann=1.0,
                     learn_rate=0.5, ftol=1e-6, quench=1.0, m=1.0, n=1.0,
                     lower=-100, upper=100, dwell=50, disp=False,
                     **unknown_options):
    """
    Minimization of scalar function of one or more variables using the
    simulated annealing algorithm.

    Options for the simulated annealing algorithm are:
        disp : bool
            Set to True to print convergence messages.
        schedule : str
            Annealing schedule to use. One of: 'fast', 'cauchy' or
            'boltzmann'.
        T0 : float
            Initial Temperature (estimated as 1.2 times the largest
            cost-function deviation over random points in the range).
        Tf : float
            Final goal temperature.
        maxfev : int
            Maximum number of function evaluations to make.
        maxaccept : int
            Maximum changes to accept.
        maxiter : int
            Maximum number of iterations to perform.
        boltzmann : float
            Boltzmann constant in acceptance test (increase for less
            stringent test at each temperature).
        learn_rate : float
            Scale constant for adjusting guesses.
        ftol : float
            Relative error in ``fun(x)`` acceptable for convergence.
        quench, m, n : float
            Parameters to alter fast_sa schedule.
        lower, upper : float or ndarray
            Lower and upper bounds on `x`.
        dwell : int
            The number of times to search the space at each temperature.

    This function is called by the `minimize` function with
    `method=anneal`. It is not supposed to be called directly.
    """
    _check_unknown_options(unknown_options)
    maxeval = maxfev
    feps = ftol

    x0 = asarray(x0)
    lower = asarray(lower)
    upper = asarray(upper)

    schedule = eval(schedule + '_sa()')
    # initialize the schedule
    schedule.init(dims=shape(x0), func=func, args=args, boltzmann=boltzmann,
                  T0=T0, learn_rate=learn_rate, lower=lower, upper=upper,
                  m=m, n=n, quench=quench, dwell=dwell)

    current_state, last_state, best_state = _state(), _state(), _state()
    if T0 is None:
        x0 = schedule.getstart_temp(best_state)
    else:
        best_state.x = None
        best_state.cost = numpy.Inf

    last_state.x = asarray(x0).copy()
    fval = func(x0, *args)
    schedule.feval += 1
    last_state.cost = fval
    if last_state.cost < best_state.cost:
        best_state.cost = fval
        best_state.x = asarray(x0).copy()
    schedule.T = schedule.T0
    fqueue = [100, 300, 500, 700]
    iters = 0
    while 1:
        for n in xrange(dwell):
            current_state.x = schedule.update_guess(last_state.x)
            current_state.cost = func(current_state.x, *args)
            schedule.feval += 1
            dE = current_state.cost - last_state.cost
            if schedule.accept_test(dE):
                last_state.x = current_state.x.copy()
                last_state.cost = current_state.cost
                if last_state.cost < best_state.cost:
                    best_state.x = last_state.x.copy()
                    best_state.cost = last_state.cost
        schedule.update_temp()
        iters += 1
        # Stopping conditions
        # 0) last saved values of f from each cooling step
        #    are all very similar (effectively cooled)
        # 1) Tf is set and we are below it
        # 2) maxeval is set and we are past it
        # 3) maxiter is set and we are past it
        # 4) maxaccept is set and we are past it
        fqueue.append(squeeze(last_state.cost))
        fqueue.pop(0)
        af = asarray(fqueue) * 1.0
        if all(abs((af - af[0]) / af[0]) < feps):
            retval = 0
            if abs(af[-1] - best_state.cost) > feps * 10:
                retval = 5
                if disp:
                    print("Warning: Cooled to %f at %s but this is not"
                          % (squeeze(last_state.cost),
                             str(squeeze(last_state.x)))
                          + " the smallest point found.")
            break
        if (Tf is not None) and (schedule.T < Tf):
            retval = 1
            break
        if (maxeval is not None) and (schedule.feval > maxeval):
            retval = 2
            break
        if (iters > maxiter):
            if disp:
                print("Warning: Maximum number of iterations exceeded.")
            retval = 3
            break
        if (maxaccept is not None) and (schedule.accepted > maxaccept):
            retval = 4
            break

    res = Result(x=best_state.x, fun=best_state.cost,
                 T=schedule.T, nfev=schedule.feval, nit=iters,
                 accept=schedule.accepted, status=retval,
                 success=(retval <= 1),
                 message={0: 'Points no longer changing',
                          1: 'Cooled to final temperature',
                          2: 'Maximum function evaluations',
                          3: 'Maximum cooling iterations reached',
                          4: 'Maximum accepted query locations reached',
                          5: 'Final point not the minimum amongst '
                             'encountered points'}[retval])
    print(res['x'], res['fun'], res['T'], res['nfev'], res['nit'],
          res['accept'], res['status'])
    return res
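# Hedged usage sketch: _minimize_anneal is internal and is normally reached
# via scipy.optimize.minimize with method='anneal', which exists only in old
# scipy releases (deprecated in 0.14, removed in 0.15).
import numpy as np
from scipy.optimize import minimize

def f(x):
    return (np.cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1]
            + (x[0] + 0.2) * x[0])

res = minimize(f, x0=[1.0, 1.0], method='anneal',
               options={'maxiter': 400, 'dwell': 50, 'disp': False})
print(res.x, res.fun)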
def solve(self):
    """
    Runs the DifferentialEvolutionSolver.

    Returns
    -------
    res : Result
        The optimization result represented as a ``Result`` object.
        Important attributes are: ``x`` the solution array, ``success`` a
        Boolean flag indicating if the optimizer exited successfully and
        ``message`` which describes the cause of the termination. See
        `Result` for a description of other attributes.  If polish was
        employed, then Result also contains the ``hess_inv`` and ``jac``
        attributes.
    """
    nfev, nit, warning_flag = 0, 0, False
    status_message = _status_message['success']

    # calculate energies to start with
    for index, candidate in enumerate(self.population):
        parameters = self._scale_parameters(candidate)
        self.population_energies[index] = self.func(parameters, *self.args)
        nfev += 1

        if nfev > self.maxfun:
            warning_flag = True
            status_message = _status_message['maxfev']
            break

    minval = np.argmin(self.population_energies)

    # put the lowest energy into the best solution position.
    lowest_energy = self.population_energies[minval]
    self.population_energies[minval] = self.population_energies[0]
    self.population_energies[0] = lowest_energy
    self.population[[0, minval], :] = self.population[[minval, 0], :]

    if warning_flag:
        return Result(
            x=self.x,
            fun=self.population_energies[0],
            nfev=nfev,
            nit=nit,
            message=status_message,
            success=(warning_flag is not True))

    # do the optimisation.
    for nit in range(1, self.maxiter + 1):
        if self.dither is not None:
            self.scale = (self.random_number_generator.rand()
                          * (self.dither[1] - self.dither[0])
                          + self.dither[0])

        for candidate in range(np.size(self.population, 0)):
            if nfev > self.maxfun:
                warning_flag = True
                status_message = _status_message['maxfev']
                break

            trial = self._mutate(candidate)
            self._ensure_constraint(trial)
            parameters = self._scale_parameters(trial)

            energy = self.func(parameters, *self.args)
            nfev += 1

            if energy < self.population_energies[candidate]:
                self.population[candidate] = trial
                self.population_energies[candidate] = energy

                if energy < self.population_energies[0]:
                    self.population_energies[0] = energy
                    self.population[0] = trial

        # stop when the fractional s.d. of the population is less than tol
        # of the mean energy
        convergence = (np.std(self.population_energies)
                       / np.abs(np.mean(self.population_energies)
                                + _MACHEPS))

        if self.disp:
            print("differential_evolution step %d: f(x)= %g"
                  % (nit, self.population_energies[0]))

        if (self.callback and
                self.callback(self._scale_parameters(self.population[0]),
                              convergence=self.tol / convergence) is True):
            warning_flag = True
            status_message = ('callback function requested stop early '
                              'by returning True')
            break

        if convergence < self.tol or warning_flag:
            break
    else:
        status_message = _status_message['maxiter']
        warning_flag = True

    DE_result = Result(
        x=self.x,
        fun=self.population_energies[0],
        nfev=nfev,
        nit=nit,
        message=status_message,
        success=(warning_flag is not True))

    print("\n\n\nDE Result:\n")
    print(DE_result)
    print("\n\n\n\n")

    if self.polish:
        result = minimize(self.func,
                          np.copy(DE_result.x),
                          method=self.polishmethod,
                          bounds=self.limits.T,
                          args=self.args,
                          tol=self.polishtol,
                          options=self.polishoptions)

        print("\n\n\nPolish Result:\n")
        print(result)
        print("\n\n\n\n")

        nfev += result.nfev
        DE_result.nfev = nfev

        if result.fun < DE_result.fun:
            DE_result.fun = result.fun
            DE_result.x = result.x
            DE_result.jac = result.jac
            # to keep internal state consistent
            self.population_energies[0] = result.fun
            self.population[0] = self._unscale_parameters(result.x)

    return DE_result
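# Hedged usage sketch.  Upstream scipy drives this solver through the
# module-level differential_evolution() wrapper; the polishmethod /
# polishtol / polishoptions attributes used by this modified solve() are
# specific to this fork and assumed to be wired up by its constructor.
from scipy.optimize import differential_evolution

def rosen2d(x):
    return (1 - x[0]) ** 2 + 100 * (x[1] - x[0] ** 2) ** 2

result = differential_evolution(rosen2d, bounds=[(-5, 5), (-5, 5)], tol=1e-8)
print(result.x, result.fun)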