def run(self): """ the main loop of the algorithm """ res = Result() res.message = [] tol = self.tol iprint = self.iprint nsteps = self.nsteps #iprint =40 X = self.X sqrtN = np.sqrt(self.N) i = 1 self.funcalls += 1 e, G = self.pot.getEnergyGradient(X) res.success = False while i < nsteps: stp = self.getStep(X, G) try: X, e, G = self.adjustStepSize(X, e, G, stp) except LineSearchError: print "Warning: problem with adjustStepSize, ending quench" rms = np.linalg.norm(G) / sqrtN print " on failure: quench step", i, e, rms, self.funcalls res.message.append( "problem with adjustStepSize" ) break #e, G = self.pot.getEnergyGradient(X) rms = np.linalg.norm(G) / sqrtN if iprint > 0: if i % iprint == 0: print "lbfgs:", i, e, rms, self.funcalls, self.stepsize for event in self.events: event(coords=X, energy=e, rms=rms) if self.alternate_stop_criterion is None: i_am_done = rms < self.tol else: i_am_done = self.alternate_stop_criterion(energy=e, gradient=G, tol=self.tol) if i_am_done: res.success = True break i += 1 res.nsteps = i res.nfev = self.funcalls res.coords = X res.energy = e res.rms = rms res.grad = G res.H0 = self.H0 return res
def cg(coords, pot, iprint=-1, tol=1e-3, nsteps=5000, **kwargs):
    """ a wrapper function for the conjugate gradient routine in scipy """
    if not hasattr(pot, "getEnergyGradient"):
        # for compatibility with old quenchers.
        # assume pot is a getEnergyGradient function
        pot = _getEnergyGradientWrapper(pot)
    import scipy.optimize
    ret = scipy.optimize.fmin_cg(pot.getEnergy, coords, pot.getGradient,
                                 gtol=tol, full_output=True, disp=iprint > 0,
                                 maxiter=nsteps, **kwargs)
    res = Result()
    res.coords = ret[0]
    res.nfev = ret[2]
    res.nfev += ret[3]  # calls to gradient
    res.success = True
    warnflag = ret[4]
    if warnflag > 0:
        res.success = False
        if warnflag == 1:
            res.message = "Maximum number of iterations exceeded"
        if warnflag == 2:
            print "Gradient and/or function calls not changing"
    res.energy, res.grad = pot.getEnergyGradient(res.coords)
    g = res.grad
    res.rms = np.linalg.norm(g) / np.sqrt(len(g))
    return res
def bfgs_scipy(coords, pot, iprint=-1, tol=1e-3, nsteps=5000, **kwargs):
    """ a wrapper function for the scipy BFGS algorithm """
    if not hasattr(pot, "getEnergyGradient"):
        # for compatibility with old quenchers.
        # assume pot is a getEnergyGradient function
        pot = _getEnergyGradientWrapper(pot)
    import scipy.optimize
    ret = scipy.optimize.fmin_bfgs(pot.getEnergy, coords, fprime=pot.getGradient,
                                   gtol=tol, full_output=True, disp=iprint > 0,
                                   maxiter=nsteps, **kwargs)
    res = Result()
    res.coords = ret[0]
    res.energy = ret[1]
    res.grad = ret[2]
    res.rms = np.linalg.norm(res.grad) / np.sqrt(len(res.grad))
    res.nfev = ret[4] + ret[5]
    res.nsteps = res.nfev  # not correct, but no better information
    res.success = np.max(np.abs(res.grad)) < tol
    return res
def lbfgs_scipy(coords, pot, iprint=-1, tol=1e-3, nsteps=15000):
    """
    a wrapper function for the lbfgs routine in scipy

    .. warning::
        The scipy version of lbfgs uses a linesearch based only on the energy,
        which can make the minimization stop early.  When the step size is so
        small that the energy doesn't change to within machine precision (times
        the parameter `factr`), the routine declares success and stops.  This
        sounds fine, but even with an analytical gradient the gradient may still
        not be converged, because in the vicinity of the minimum the gradient
        changes much more rapidly than the energy.  Thus we want to make `factr`
        as small as possible.  Unfortunately, if we make it too small the routine
        notices that the linesearch isn't making progress and declares failure.
        Long story short: if your tolerance is very small (< 1e-6) this routine
        will probably stop before truly reaching that tolerance, and if you
        reduce `factr` too much to mitigate this, lbfgs will stop anyway but
        misleadingly declare failure.
    """
    if not hasattr(pot, "getEnergyGradient"):
        # for compatibility with old quenchers.
        # assume pot is a getEnergyGradient function
        pot = _getEnergyGradientWrapper(pot)
    import scipy.optimize
    res = Result()
    res.coords, res.energy, dictionary = scipy.optimize.fmin_l_bfgs_b(
        pot.getEnergyGradient, coords, iprint=iprint, pgtol=tol,
        maxfun=nsteps, factr=10.)
    res.grad = dictionary["grad"]
    res.nfev = dictionary["funcalls"]
    warnflag = dictionary["warnflag"]
    #res.nsteps = dictionary['nit']  # new in scipy version 0.12
    res.nsteps = res.nfev
    res.message = dictionary["task"]
    res.success = True
    if warnflag > 0:
        print "warning: problem with quench: ",
        res.success = False
        if warnflag == 1:
            res.message = "too many function evaluations"
        else:
            res.message = str(dictionary["task"])
        print res.message
    # note: if the linesearch fails, lbfgs may stop without setting warnflag,
    # so the tolerance is not checked exactly.
    if False:
        if res.success:
            maxV = np.max(np.abs(res.grad))
            if maxV > tol:
                print "warning: gradient seems too large", maxV, "tol =", tol, \
                    ". This is a known, but not understood issue of scipy_lbfgs"
                print res.message
    res.rms = np.linalg.norm(res.grad) / np.sqrt(len(res.grad))
    return res
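# Example usage (a minimal sketch, not part of the library): the toy
# _HarmonicPotential class below is hypothetical and only illustrates the
# interface the wrappers in this module expect (getEnergy, getGradient,
# getEnergyGradient).  It also demonstrates the caveat in the docstring above:
# after a quench with a tight tolerance, check the gradient yourself, because
# scipy's lbfgs may stop on the energy criterion first.
#
#   class _HarmonicPotential(object):
#       """E = 0.5 * |x|**2, gradient = x"""
#       def getEnergy(self, x):
#           return 0.5 * np.dot(x, x)
#       def getGradient(self, x):
#           return x.copy()
#       def getEnergyGradient(self, x):
#           return self.getEnergy(x), self.getGradient(x)
#
#   x0 = np.random.uniform(-1, 1, 9)
#   res = lbfgs_scipy(x0, _HarmonicPotential(), tol=1e-8)
#   if np.max(np.abs(res.grad)) > 1e-8:
#       print "gradient not fully converged:", np.max(np.abs(res.grad))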
def steepest_descent(x0, pot, iprint=-1, dx=1e-4, nsteps=100000,
                     tol=1e-3, maxstep=-1., event=None):
    """simple steepest descent minimization: step along the downhill gradient direction"""
    if not hasattr(pot, "getEnergyGradient"):
        # for compatibility with old quenchers.
        # assume pot is a getEnergyGradient function
        pot = _getEnergyGradientWrapper(pot)
    N = len(x0)
    x = x0.copy()
    E, V = pot.getEnergyGradient(x)
    funcalls = 1
    for k in xrange(nsteps):
        stp = -V * dx
        if maxstep > 0:
            stpsize = np.max(np.abs(V))
            if stpsize > maxstep:
                stp *= maxstep / stpsize
        x += stp
        E, V = pot.getEnergyGradient(x)
        funcalls += 1
        rms = np.linalg.norm(V) / np.sqrt(N)
        if iprint > 0:
            if funcalls % iprint == 0:
                print "step %8d energy %20.12g rms gradient %20.12g" % (funcalls, E, rms)
        if event is not None:
            event(E, x, rms)
        if rms < tol:
            break
    res = Result()
    res.coords = x
    res.energy = E
    res.rms = rms
    res.grad = V
    res.nfev = funcalls
    res.nsteps = k
    res.success = res.rms <= tol
    return res
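# The _getEnergyGradientWrapper branch above exists for backwards
# compatibility: a bare function returning (energy, gradient) can be passed
# in place of a potential object.  A minimal sketch (the quadratic test
# function below is hypothetical, for illustration only):
#
#   def my_energy_gradient(x):
#       # E = 0.5 * |x|**2, gradient = x
#       return 0.5 * np.dot(x, x), x.copy()
#
#   x0 = np.random.uniform(-1, 1, 30)
#   res = steepest_descent(x0, my_energy_gradient, dx=1e-2, tol=1e-4)
#   print res.nsteps, res.energy, res.rms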
def run(self, fmax=1e-3, steps=100000):
    """Run structure optimization algorithm.

    This method will return when the forces on all individual atoms are
    less than *fmax* or when the number of steps exceeds *steps*.
    """
    self.fmax = fmax
    step = 0
    res = Result()
    res.success = False
    while step < steps:
        E, f = self.potential.getEnergyGradient(self.coords)
        #self.call_observers()
        if self.alternate_stop_criterion is None:
            i_am_done = self.converged(f)
        else:
            i_am_done = self.alternate_stop_criterion(energy=E, gradient=f,
                                                      tol=self.fmax)
        if i_am_done:
            res.success = True
            break
        self.step(-f)
        self.nsteps += 1
        rms = np.linalg.norm(f) / np.sqrt(len(f))
        if self.iprint > 0:
            if step % self.iprint == 0:
                self.logger.info("fire: %s E %s rms %s", step, E, rms)
        for event in self.events:
            event(coords=self.coords, energy=E, rms=rms)
        step += 1
    res.nsteps = step
    res.nfev = step
    res.coords = self.coords
    res.energy = E
    res.grad = -f
    res.rms = np.linalg.norm(res.grad) / np.sqrt(len(res.grad))
    self.result = res
    return res
def run(self): """ the main loop of the algorithm """ res = Result() res.message = [] tol = self.tol iprint = self.iprint nsteps = self.nsteps #iprint =40 X = self.X sqrtN = np.sqrt(self.N) i = 1 self.funcalls += 1 e, G = self.pot.getEnergyGradient(X) rms = np.linalg.norm(G) / sqrtN res.success = False while i < nsteps: stp = self.getStep(X, G) try: X, e, G = self.adjustStepSize(X, e, G, stp) except LineSearchError: self.logger.error("problem with adjustStepSize, ending quench") rms = np.linalg.norm(G) / sqrtN self.logger.error(" on failure: quench step %s %s %s %s", i, e, rms, self.funcalls) res.message.append( "problem with adjustStepSize" ) break #e, G = self.pot.getEnergyGradient(X) rms = np.linalg.norm(G) / sqrtN if iprint > 0: if i % iprint == 0: self.logger.info("lbfgs: %s %s %s %s %s %s %s %s %s", i, "E", e, "rms", rms, "funcalls", self.funcalls, "stepsize", self.stepsize) for event in self.events: event(coords=X, energy=e, rms=rms) if self.alternate_stop_criterion is None: i_am_done = rms < self.tol else: i_am_done = self.alternate_stop_criterion(energy=e, gradient=G, tol=self.tol) if i_am_done: res.success = True break i += 1 res.nsteps = i res.nfev = self.funcalls res.coords = X res.energy = e res.rms = rms res.grad = G res.H0 = self.H0 return res