def make_result(self, coords, energy):
    """Package coordinates and energy into a Result with placeholder eigen-data."""
    from pygmin.optimize import Result
    result = Result()
    result.energy = energy
    result.coords = coords
    # dummy curvature information: unit eigenvalue, eigenvector copied from coords
    result.eigenval = 1.
    result.eigenvec = coords.copy()
    return result
def cg(coords, pot, iprint=-1, tol=1e-3, nsteps=5000, **kwargs):
    """Wrapper for the conjugate gradient minimizer in scipy.

    Parameters
    ----------
    coords : array
        starting coordinates
    pot :
        potential object (or an old-style getEnergyGradient function)
    iprint : int
        scipy printing is enabled when iprint > 0
    tol : float
        gradient tolerance, passed to scipy as gtol
    nsteps : int
        maximum number of iterations

    Returns
    -------
    res : Result
        holds coords, energy, grad, rms, nfev, success and (on failure) message
    """
    if not hasattr(pot, "getEnergyGradient"):
        # for compatibility with old quenchers.
        # assume pot is a getEnergyGradient function
        pot = _getEnergyGradientWrapper(pot)
    import scipy.optimize
    ret = scipy.optimize.fmin_cg(pot.getEnergy, coords, pot.getGradient,
                                 gtol=tol, full_output=True, disp=iprint > 0,
                                 maxiter=nsteps, **kwargs)
    res = Result()
    res.coords = ret[0]
    # ret[1] is the energy, but energy and gradient are recomputed together below
    res.nfev = ret[2]
    res.nfev += ret[3]  # calls to gradient
    res.success = True
    warnflag = ret[4]
    if warnflag > 0:
        res.success = False
        if warnflag == 1:
            res.message = "Maximum number of iterations exceeded"
        if warnflag == 2:
            # bug fix: this condition previously only printed, leaving
            # res.message unset on this failure mode
            res.message = "Gradient and/or function calls not changing"
            print("Gradient and/or function calls not changing")
    # scipy does not return the gradient, so evaluate it once more
    res.energy, res.grad = pot.getEnergyGradient(res.coords)
    g = res.grad
    res.rms = np.linalg.norm(g) / np.sqrt(len(g))
    return res
def bfgs_scipy(coords, pot, iprint=-1, tol=1e-3, nsteps=5000, **kwargs):
    """Quench with the BFGS implementation shipped in scipy.

    Returns a Result holding the minimized coordinates, energy, gradient,
    rms gradient and call counts.
    """
    if not hasattr(pot, "getEnergyGradient"):
        # backwards compatibility: treat pot as a bare getEnergyGradient function
        pot = _getEnergyGradientWrapper(pot)
    import scipy.optimize
    ret = scipy.optimize.fmin_bfgs(pot.getEnergy, coords,
                                   fprime=pot.getGradient, gtol=tol,
                                   full_output=True, disp=iprint > 0,
                                   maxiter=nsteps, **kwargs)
    res = Result()
    res.coords, res.energy, res.grad = ret[0], ret[1], ret[2]
    ngrad = len(res.grad)
    res.rms = np.linalg.norm(res.grad) / np.sqrt(ngrad)
    # energy calls plus gradient calls
    res.nfev = ret[4] + ret[5]
    res.nsteps = res.nfev  # not correct, but no better information
    res.success = np.max(np.abs(res.grad)) < tol
    return res
def cg(coords, pot, iprint=-1, tol=1e-3, nsteps=5000, **kwargs):
    """Wrapper for scipy's conjugate gradient minimizer.

    Parameters
    ----------
    coords : array
        starting coordinates
    pot :
        potential object (or old-style getEnergyGradient function)
    iprint : int
        scipy printing is enabled when iprint > 0
    tol : float
        gradient tolerance (scipy's gtol)
    nsteps : int
        maximum number of iterations

    Returns
    -------
    res : Result
    """
    if not hasattr(pot, "getEnergyGradient"):
        # for compatibility with old quenchers.
        # assume pot is a getEnergyGradient function
        pot = _getEnergyGradientWrapper(pot)
    import scipy.optimize
    ret = scipy.optimize.fmin_cg(pot.getEnergy, coords, pot.getGradient,
                                 gtol=tol, full_output=True, disp=iprint > 0,
                                 maxiter=nsteps, **kwargs)
    res = Result()
    res.coords = ret[0]
    # ret[1] (energy) is ignored here; energy + gradient are recomputed below
    res.nfev = ret[2]
    res.nfev += ret[3]  # calls to gradient
    res.success = True
    warnflag = ret[4]
    if warnflag > 0:
        res.success = False
        if warnflag == 1:
            res.message = "Maximum number of iterations exceeded"
        if warnflag == 2:
            # bug fix: previously only printed, so res.message stayed unset
            res.message = "Gradient and/or function calls not changing"
            print("Gradient and/or function calls not changing")
    res.energy, res.grad = pot.getEnergyGradient(res.coords)
    g = res.grad
    res.rms = np.linalg.norm(g) / np.sqrt(len(g))
    return res
def run(self, fmax=1e-3, steps=100000):
    """Run structure optimization algorithm.

    This method will return when the forces on all individual atoms are
    less than *fmax* or when the number of steps exceeds *steps*.
    """
    self.fmax = fmax
    res = Result()
    res.success = False
    niter = 0
    while niter < steps:
        E, f = self.potential.getEnergyGradient(self.coords)
        if self.alternate_stop_criterion is None:
            done = self.converged(f)
        else:
            done = self.alternate_stop_criterion(energy=E, gradient=f,
                                                 tol=self.fmax)
        if done:
            res.success = True
            break
        self.step(-f)
        self.nsteps += 1
        rms = np.linalg.norm(f) / np.sqrt(len(f))
        if self.iprint > 0 and niter % self.iprint == 0:
            self.logger.info("fire: %s E %s rms %s", niter, E, rms)
        for event in self.events:
            event(coords=self.coords, energy=E, rms=rms)
        niter += 1
    res.nsteps = niter
    res.nfev = niter
    res.coords = self.coords
    res.energy = E
    # store the force (negative gradient) of the last evaluation
    res.grad = -f
    res.rms = np.linalg.norm(res.grad) / np.sqrt(len(res.grad))
    self.result = res
    return res
def read_minimum(self, fin):
    """Parse one minimum / transition state record.

    Record layout:
        1 line with the energy
        1 line with point group info (skipped)
        natoms lines with eigenvalues
        natoms lines with coords
    """
    res = Result()
    energy_line = self.get_next_line()
    res.energy = float(energy_line.split()[0])
    # the point group line carries nothing we need
    self.get_next_line()
    res.eigenvalues = self.read_coords(fin)
    res.coords = self.read_coords(fin)
    return res
def steepest_descent(x0, pot, iprint=-1, dx=1e-4, nsteps=100000, tol=1e-3,
                     maxstep=-1., event=None):
    """Fixed-step steepest descent minimizer.

    Parameters
    ----------
    x0 : array
        initial coordinates
    pot :
        potential object (or old-style getEnergyGradient function)
    iprint : int
        print status every iprint function calls (disabled if <= 0)
    dx : float
        step length as a multiple of the gradient
    nsteps : int
        maximum number of iterations
    tol : float
        target rms gradient
    maxstep : float
        if positive, cap the largest step component at this value
    event : callable or None
        called as event(E, x, rms) after every step

    Returns
    -------
    res : Result
    """
    if not hasattr(pot, "getEnergyGradient"):
        # for compatibility with old quenchers.
        # assume pot is a getEnergyGradient function
        pot = _getEnergyGradientWrapper(pot)
    N = len(x0)
    x = x0.copy()
    E, V = pot.getEnergyGradient(x)
    funcalls = 1
    # robustness fix: k and rms were unbound if nsteps == 0 (loop skipped)
    k = 0
    rms = np.linalg.norm(V) / np.sqrt(N)
    for k in xrange(nsteps):
        stp = -V * dx
        if maxstep > 0:
            stpsize = np.max(np.abs(V))
            if stpsize > maxstep:
                stp *= maxstep / stpsize
        x += stp
        E, V = pot.getEnergyGradient(x)
        funcalls += 1
        rms = np.linalg.norm(V) / np.sqrt(N)
        if iprint > 0:
            if funcalls % iprint == 0:
                print("step %8d energy %20.12g rms gradient %20.12g" % (funcalls, E, rms))
        if event is not None:  # fix: identity comparison with None
            event(E, x, rms)
        if rms < tol:
            break
    res = Result()
    res.coords = x
    res.energy = E
    res.rms = rms
    res.grad = V
    res.nfev = funcalls
    res.nsteps = k
    res.success = res.rms <= tol
    return res
def run(self, fmax=1e-3, steps=100000):
    """Drive the optimizer until the gradient test passes or *steps* is hit.

    Returns a Result; res.success records whether convergence was reached.
    """
    self.fmax = fmax
    result = Result()
    result.success = False
    k = 0
    while k < steps:
        E, f = self.potential.getEnergyGradient(self.coords)
        if self.alternate_stop_criterion is not None:
            finished = self.alternate_stop_criterion(energy=E, gradient=f,
                                                     tol=self.fmax)
        else:
            finished = self.converged(f)
        if finished:
            result.success = True
            break
        self.step(-f)
        self.nsteps += 1
        rms = np.linalg.norm(f) / np.sqrt(len(f))
        if self.iprint > 0:
            if k % self.iprint == 0:
                self.logger.info("fire: %s E %s rms %s", k, E, rms)
        for event in self.events:
            event(coords=self.coords, energy=E, rms=rms)
        k += 1
    result.nsteps = k
    result.nfev = k
    result.coords = self.coords
    result.energy = E
    # negative gradient == force of the last evaluation
    result.grad = -f
    result.rms = np.linalg.norm(result.grad) / np.sqrt(len(result.grad))
    self.result = result
    return result
def __init__(self, coords, potential, takeStep, storage=None,
             event_after_step=None, acceptTest=None, temperature=1.0,
             confCheck=None, outstream=sys.stdout, store_initial=True,
             iprint=1):
    """Set up a Monte Carlo run and record the initial configuration.

    Parameters
    ----------
    coords : array
        initial configuration; a private copy is made
    potential :
        object providing getEnergy
    takeStep :
        generator of trial moves
    storage : callable or None
        called as storage(energy, coords) to save structures
    event_after_step : list of callables or None
        events run after every step; the list itself is copied
    acceptTest : callable or None
        acceptance test; defaults to Metropolis at *temperature*
    temperature : float
        temperature for the default Metropolis test
    confCheck : list of callables or None
        configuration checks for trial moves
    outstream : file-like
        stream used for printing
    store_initial : bool
        if True the initial structure is passed to storage
    iprint : int
        controls how often printing is done
    """
    # bug fix: [] default arguments are evaluated once and shared between
    # every instance; use None sentinels and build fresh lists instead
    if event_after_step is None:
        event_after_step = []
    if confCheck is None:
        confCheck = []
    #note: make a local copy of lists of events so that an inputted list is not modified.
    self.coords = np.copy(coords)
    self.storage = storage
    self.potential = potential
    self.takeStep = takeStep
    self.event_after_step = copy.copy(event_after_step)  #not deepcopy
    self.temperature = temperature
    self.naccepted = 0
    self.outstream = outstream
    self.printfrq = iprint  #controls how often printing is done
    self.confCheck = confCheck
    if acceptTest:
        self.acceptTest = acceptTest
    else:
        self.acceptTest = metropolis.Metropolis(self.temperature)
    self.stepnum = 0
    # store the initial structure so the Markov chain has a starting point
    energy = self.potential.getEnergy(self.coords)
    if (self.storage and store_initial):
        self.storage(energy, self.coords)
    self.markovE = energy
    self.result = Result()
    self.result.energy = self.markovE
    self.result.coords = self.coords.copy()
def bfgs_scipy(coords, pot, iprint=-1, tol=1e-3, nsteps=5000, **kwargs):
    """Minimize with scipy's BFGS and wrap the output in a Result."""
    if not hasattr(pot, "getEnergyGradient"):
        # for compatibility with old quenchers.
        # assume pot is a getEnergyGradient function
        pot = _getEnergyGradientWrapper(pot)
    import scipy.optimize
    output = scipy.optimize.fmin_bfgs(pot.getEnergy, coords,
                                      fprime=pot.getGradient, gtol=tol,
                                      full_output=True, disp=iprint > 0,
                                      maxiter=nsteps, **kwargs)
    xmin = output[0]
    emin = output[1]
    gmin = output[2]
    ncalls = output[4] + output[5]  # energy calls + gradient calls
    res = Result()
    res.coords = xmin
    res.energy = emin
    res.grad = gmin
    res.rms = np.linalg.norm(gmin) / np.sqrt(len(gmin))
    res.nfev = ncalls
    res.nsteps = res.nfev  # not correct, but no better information
    res.success = np.max(np.abs(gmin)) < tol
    return res
def steepest_descent(x0, pot, iprint=-1, dx=1e-4, nsteps=100000, tol=1e-3,
                     maxstep=-1., event=None):
    """Fixed-step steepest descent quench.

    Steps along -gradient * dx until the rms gradient drops below *tol*
    or *nsteps* iterations are exhausted.  If *maxstep* is positive the
    largest step component is capped at that value.  *event*, when
    given, is called as event(E, x, rms) after every step.

    Returns a Result with coords, energy, grad, rms, nfev, nsteps and
    success.
    """
    if not hasattr(pot, "getEnergyGradient"):
        # for compatibility with old quenchers.
        # assume pot is a getEnergyGradient function
        pot = _getEnergyGradientWrapper(pot)
    N = len(x0)
    x = x0.copy()
    E, V = pot.getEnergyGradient(x)
    funcalls = 1
    # robustness fix: ensure k and rms exist even when nsteps == 0
    k = 0
    rms = np.linalg.norm(V) / np.sqrt(N)
    for k in xrange(nsteps):
        stp = -V * dx
        if maxstep > 0:
            stpsize = np.max(np.abs(V))
            if stpsize > maxstep:
                stp *= maxstep / stpsize
        x += stp
        E, V = pot.getEnergyGradient(x)
        funcalls += 1
        rms = np.linalg.norm(V) / np.sqrt(N)
        if iprint > 0:
            if funcalls % iprint == 0:
                print("step %8d energy %20.12g rms gradient %20.12g" % (funcalls, E, rms))
        if event is not None:  # fix: compare to None by identity
            event(E, x, rms)
        if rms < tol:
            break
    res = Result()
    res.coords = x
    res.energy = E
    res.rms = rms
    res.grad = V
    res.nfev = funcalls
    res.nsteps = k
    res.success = res.rms <= tol
    return res
def __init__(self, pot, mindist, tsSearchParams=None, verbosity=1,
             NEBparams=None, nrefine_max=100, reoptimize_climbing=0,
             pushoff_params=None, create_neb=NEBDriver):
    """Store the settings for a double-ended transition state search.

    Parameters
    ----------
    pot :
        the potential object
    mindist : callable
        minimum-distance / alignment routine
    tsSearchParams : dict or None
        parameters for the transition state search
    verbosity : int
        how much status information to print
    NEBparams : dict or None
        parameters for the NEB routine
    nrefine_max : int
        maximum number of transition state candidates to refine
    reoptimize_climbing : int
        number of climbing-image reoptimization steps
    pushoff_params : dict or None
        parameters for stepping off a transition state
    create_neb : callable
        factory for the NEB driver
    """
    # bug fix: dict() defaults are created once at definition time and
    # shared by all instances; use None sentinels so each instance gets
    # its own fresh dict
    self.pot = pot
    self.mindist = mindist
    self.tsSearchParams = tsSearchParams if tsSearchParams is not None else dict()
    self.verbosity = int(verbosity)
    self.nrefine_max = nrefine_max
    self.NEBparams = NEBparams if NEBparams is not None else dict()
    self.reoptimize_climbing = reoptimize_climbing
    self.pushoff_params = pushoff_params if pushoff_params is not None else dict()
    self.res = Result()
    self.res.new_transition_states = []
    self.create_neb = create_neb
def optimize(self, quenchRoutine=None, **kwargs):
    """
    Optimize the band

    Notes
    -----
    the potential for the NEB optimization is not Hamiltonian.  This
    means that there is no meaningful energy associated with the
    potential.  Therefore, during the optimization, we can do gradient
    following, but we can't rely on the energy for, e.g. determining
    step size, which is the default behavior for many optimizers.  This
    can be worked around by choosing a small step size and a large
    maxErise, or by using an optimizer that uses only gradients.

    scipy.lbfgs_b seems to work with NEB pretty well, but lbfgs_py and
    mylbfgs tend to fail.  If you must use one of those try, e.g.
    maxErise = 1., maxstep=0.01, tol=1e-2

    :quenchRoutine: quench algorithm to use for optimization.
    :quenchParams: parameters for the quench
    """
    # pick the quencher: explicit argument wins, then the instance-level
    # default, then the module-level mylbfgs
    if quenchRoutine is None:
        if self.quenchRoutine is None:
            quenchRoutine = mylbfgs
        else:
            quenchRoutine = self.quenchRoutine
    #combine default and passed params. passed params will overwrite default
    quenchParams = dict([("nsteps", 300)] + self.quenchParams.items() + kwargs.items())
    if quenchParams.has_key("iprint"):
        self.iprint = quenchParams["iprint"]
    if not quenchParams.has_key("logger"):
        quenchParams["logger"] = logging.getLogger("pygmin.connect.neb.quench")
    if self.use_minimizer_callback:
        # have the minimizer call _step after every iteration
        quenchParams["events"] = [self._step]
    self.step = 0
    # quench the whole band at once; self serves as the potential object
    qres = quenchRoutine(self.active.reshape(self.active.size), self, **quenchParams)
    #    if isinstance(qres, tuple): # for compatability with old and new quenchers
    #        qres = qres[4]
    self.active[:,:] = qres.coords.reshape(self.active.shape)
    # recompute the true per-image energies after the quench
    if self.copy_potential:
        for i in xrange(0,self.nimages):
            pot = self.potential_list[i]
            self.energies[i] = pot.getEnergy(self.coords[i,:])
    else:
        for i in xrange(0,self.nimages):
            self.energies[i] = self.potential.getEnergy(self.coords[i,:])
    res = Result()
    res.path = self.coords
    res.nsteps = qres.nsteps
    res.energy = self.energies
    res.rms = qres.rms
    res.success = False
    # NOTE(review): this raises KeyError when neither self.quenchParams nor
    # kwargs supplies "tol" -- confirm callers always pass a tolerance
    if qres.rms < quenchParams["tol"]:
        res.success = True
    return res
def run(self):
    """The main loop of the LBFGS algorithm.

    Returns a Result with the final coordinates, energy, gradient, rms
    gradient, step count, function call count and H0.
    """
    res = Result()
    res.message = []
    iprint = self.iprint
    nsteps = self.nsteps
    X = self.X
    sqrtN = np.sqrt(self.N)
    i = 1
    self.funcalls += 1
    e, G = self.pot.getEnergyGradient(X)
    # bug fix: compute rms before the loop so it is defined even when
    # nsteps <= 1 and the loop body never executes (the logger-based
    # sibling implementation already does this)
    rms = np.linalg.norm(G) / sqrtN
    res.success = False
    while i < nsteps:
        stp = self.getStep(X, G)
        try:
            X, e, G = self.adjustStepSize(X, e, G, stp)
        except LineSearchError:
            print("Warning: problem with adjustStepSize, ending quench")
            rms = np.linalg.norm(G) / sqrtN
            print(" on failure: quench step %s %s %s %s" % (i, e, rms, self.funcalls))
            res.message.append("problem with adjustStepSize")
            break
        rms = np.linalg.norm(G) / sqrtN
        if iprint > 0:
            if i % iprint == 0:
                print("lbfgs: %s %s %s %s %s" % (i, e, rms, self.funcalls, self.stepsize))
        for event in self.events:
            event(coords=X, energy=e, rms=rms)
        # convergence: rms test by default, or a caller-supplied criterion
        if self.alternate_stop_criterion is None:
            i_am_done = rms < self.tol
        else:
            i_am_done = self.alternate_stop_criterion(energy=e, gradient=G,
                                                      tol=self.tol)
        if i_am_done:
            res.success = True
            break
        i += 1
    res.nsteps = i
    res.nfev = self.funcalls
    res.coords = X
    res.energy = e
    res.rms = rms
    res.grad = G
    res.H0 = self.H0
    return res
def run(self):
    """Main LBFGS loop; returns a Result describing the quench."""
    res = Result()
    res.message = []
    res.success = False
    iprint = self.iprint
    nsteps = self.nsteps
    X = self.X
    sqrtN = np.sqrt(self.N)
    self.funcalls += 1
    e, G = self.pot.getEnergyGradient(X)
    # rms is defined before the loop so it exists even if no step is taken
    rms = np.linalg.norm(G) / sqrtN
    i = 1
    while i < nsteps:
        stp = self.getStep(X, G)
        try:
            X, e, G = self.adjustStepSize(X, e, G, stp)
        except LineSearchError:
            self.logger.error("problem with adjustStepSize, ending quench")
            rms = np.linalg.norm(G) / sqrtN
            self.logger.error(" on failure: quench step %s %s %s %s", i, e, rms, self.funcalls)
            res.message.append("problem with adjustStepSize")
            break
        rms = np.linalg.norm(G) / sqrtN
        if iprint > 0 and i % iprint == 0:
            self.logger.info("lbfgs: %s %s %s %s %s %s %s %s %s", i, "E", e, "rms", rms, "funcalls", self.funcalls, "stepsize", self.stepsize)
        for event in self.events:
            event(coords=X, energy=e, rms=rms)
        if self.alternate_stop_criterion is None:
            done = rms < self.tol
        else:
            done = self.alternate_stop_criterion(energy=e, gradient=G,
                                                 tol=self.tol)
        if done:
            res.success = True
            break
        i += 1
    res.nsteps = i
    res.nfev = self.funcalls
    res.coords = X
    res.energy = e
    res.rms = rms
    res.grad = G
    res.H0 = self.H0
    return res
def run(self):
    """The main loop of the algorithm.

    Repeats: find the lowest Hessian eigenvector, step uphill along it,
    then minimize in the perpendicular subspace, until the rms gradient
    drops below self.tol or a failure condition triggers.  Returns a
    Result describing the candidate transition state.
    """
    coords = np.copy(self.coords)
    res = Result()  # return object
    res.message = []
    for i in xrange(self.nsteps):
        #get the lowest eigenvalue and eigenvector
        self.overlap = self._getLowestEigenVector(coords, i)
        overlap = self.overlap
        #check to make sure the eigenvector is ok
        if i == 0 or self.eigenval <= 0:
            # eigenvalue acceptable: remember this state so we can rewind to it
            self._saveState(coords)
            self.reduce_step = 0
        else:
            # NOTE(review): this counter is named nnegative but it is incremented
            # when the eigenvalue is positive (the sibling implementation calls
            # it npositive) -- confirm the intended naming
            self.nnegative += 1
            if self.nnegative > self.nnegative_max:
                print "warning: negative eigenvalue found too many times. ending", self.nnegative
                res.message.append( "negative eigenvalue found too many times %d" % self.nnegative )
                break
            if self.verbosity > 2:
                print "the eigenvalue turned positive.", self.eigenval, "Resetting last good values and taking smaller steps"
            coords = self._resetState(coords)
            self.reduce_step += 1
        #step uphill along the direction of the lowest eigenvector
        coords = self._stepUphill(coords)
        if False:
            #maybe we want to update the lowest eigenvector now that we've moved?
            #david thinks this is a bad idea
            overlap = self._getLowestEigenVector(coords, i)
        #minimize the coordinates in the space perpendicular to the lowest eigenvector
        coords, tangentrms = self._minimizeTangentSpace(coords)
        #check if we are done and print some stuff
        E, grad = self.pot.getEnergyGradient(coords)
        rms = np.linalg.norm(grad) * self.rmsnorm
        # component of the gradient along the (normalized) uphill eigenvector
        gradpar = np.dot(grad, self.eigenvec) / np.linalg.norm(self.eigenvec)
        if self.iprint > 0:
            if (i+1) % self.iprint == 0:
                ostring = "findTransitionState: %3d E %g rms %g eigenvalue %g rms perp %g grad par %g overlap %g" % (i, E, rms, self.eigenval, tangentrms, gradpar, overlap)
                extra = " Evec search: %d rms %g" % (self.leig_result.nfev, self.leig_result.rms)
                extra += " Tverse search: %d step %g" % (self.tangent_result.nfev, self.tangent_move_step)
                extra += " Uphill step:%g" % (self.uphill_step_size,)
                print ostring, extra
        if callable(self.event):
            self.event(E, coords, rms)
        if rms < self.tol:
            break
        if self.nfail >= self.nfail_max:
            print "stopping findTransitionState. too many failures in eigenvector search", self.nfail
            res.message.append( "too many failures in eigenvector search %d" % self.nfail )
            break
        if i == 0 and self.eigenval > 0.:
            print "WARNING *** initial eigenvalue is positive - increase NEB spring constant?"
            if self.demand_initial_negative_vec:
                print " aborting transition state search"
                res.message.append( "initial eigenvalue is positive %f" % self.eigenval )
                break
    #done. do one last eigenvector search because coords may have changed
    self._getLowestEigenVector(coords, i)
    #done, print some data
    print "findTransitionState done:", i, E, rms, "eigenvalue", self.eigenval
    success = True
    #check if results make sense
    if self.eigenval >= 0.:
        if self.verbosity > 2:
            print "warning: transition state is ending with positive eigenvalue", self.eigenval
        success = False
    if rms > self.tol:
        if self.verbosity > 2:
            print "warning: transition state search appears to have failed: rms", rms
        success = False
    if i >= self.nsteps:
        # NOTE(review): i is at most self.nsteps - 1 here (xrange), so this
        # branch appears unreachable -- confirm whether it was meant to
        # detect loop exhaustion
        res.message.append( "maximum iterations reached %d" % i )
    #return results
    res.coords = coords
    res.energy = E
    res.eigenval = self.eigenval
    res.eigenvec = self.eigenvec
    res.grad = grad
    res.rms = rms
    res.nsteps = i
    res.success = success
    return res
def lbfgs_scipy(coords, pot, iprint=-1, tol=1e-3, nsteps=15000):
    """A wrapper function for the lbfgs routine in scipy.

    .. warn::
        the scipy version of lbfgs uses linesearch based only on energy
        which can make the minimization stop early.  When the step size
        is so small that the energy doesn't change to within machine
        precision (times the parameter `factr`) the routine declares
        success and stops.  This sounds fine, but if the gradient is
        analytical the gradient can still be not converged.  This is
        because in the vicinity of the minimum the gradient changes much
        more rapidly then the energy.  Thus we want to make factr as
        small as possible.  Unfortunately, if we make it too small the
        routine realizes that the linesearch routine isn't working and
        declares failure and exits.

        So long story short, if your tolerance is very small (< 1e-6)
        this routine will probably stop before truly reaching that
        tolerance.  If you reduce `factr` too much to mitigate this
        lbfgs will stop anyway, but declare failure misleadingly.
    """
    if not hasattr(pot, "getEnergyGradient"):
        # for compatibility with old quenchers.
        # assume pot is a getEnergyGradient function
        pot = _getEnergyGradientWrapper(pot)
    import scipy.optimize
    res = Result()
    res.coords, res.energy, dictionary = scipy.optimize.fmin_l_bfgs_b(
        pot.getEnergyGradient, coords, iprint=iprint, pgtol=tol,
        maxfun=nsteps, factr=10.)
    res.grad = dictionary["grad"]
    res.nfev = dictionary["funcalls"]
    warnflag = dictionary['warnflag']
    #res.nsteps = dictionary['nit'] # new in scipy version 0.12
    res.nsteps = res.nfev
    res.message = dictionary['task']
    res.success = True
    if warnflag > 0:
        res.success = False
        if warnflag == 1:
            res.message = "too many function evaluations"
        else:
            res.message = str(dictionary['task'])
        print("warning: problem with quench: %s" % res.message)
    #note: if the linesearch fails the lbfgs may fail without setting warnflag.
    # bug fix: rms was previously computed as res.grad.std(), which subtracts
    # the mean of the gradient components; every other quencher in this module
    # uses |g| / sqrt(N), so do the same here for consistency
    res.rms = np.linalg.norm(res.grad) / np.sqrt(len(res.grad))
    return res
def run(self):
    """The main loop of the transition state search.

    Alternates three phases until convergence or failure:
    1. find the lowest eigenvalue/eigenvector of the Hessian
    2. step uphill along that eigenvector
    3. minimize in the subspace perpendicular to it

    Returns a Result with coords, energy, gradient, rms, eigenvalue,
    eigenvector, step count and a success flag.
    """
    coords = np.copy(self.coords)
    res = Result()  # return object
    res.message = []
    for i in xrange(self.nsteps):
        # get the lowest eigenvalue and eigenvector
        self.overlap = self._getLowestEigenVector(coords, i)
        overlap = self.overlap
        # check to make sure the eigenvector is ok
        if i == 0 or self.eigenval <= 0:
            self._saveState(coords)
            self.reduce_step = 0
        else:
            self.npositive += 1
            if self.npositive > self.npositive_max:
                logger.warning(
                    "positive eigenvalue found too many times. ending %s",
                    self.npositive)
                res.message.append(
                    "positive eigenvalue found too many times %d" %
                    self.npositive)
                break
            if self.verbosity > 2:
                # bug fix: the eigenvalue and trailing text were previously
                # passed as extra positional args to a message with no %
                # placeholders, which makes the logging module raise a
                # formatting error instead of emitting the record
                logger.info(
                    "the eigenvalue turned positive. %s "
                    "Resetting last good values and taking smaller steps",
                    self.eigenval)
            coords = self._resetState(coords)
            self.reduce_step += 1
        # step uphill along the direction of the lowest eigenvector
        coords = self._stepUphill(coords)
        if False:
            # maybe we want to update the lowest eigenvector now that we've moved?
            # david thinks this is a bad idea
            overlap = self._getLowestEigenVector(coords, i)
        # minimize the coordinates in the space perpendicular to the lowest eigenvector
        coords, tangentrms = self._minimizeTangentSpace(coords)
        # check if we are done and print some stuff
        E, grad = self.pot.getEnergyGradient(coords)
        rms = np.linalg.norm(grad) * self.rmsnorm
        gradpar = np.dot(grad, self.eigenvec) / np.linalg.norm(self.eigenvec)
        if self.iprint > 0:
            if (i + 1) % self.iprint == 0:
                ostring = "findTS: %3d E %9g rms %8g eigenvalue %9g rms perp %8g grad par %9g overlap %g" % (
                    i, E, rms, self.eigenval, tangentrms, gradpar, overlap)
                extra = " Evec search: %d rms %g" % (
                    self.leig_result.nfev, self.leig_result.rms)
                extra += " Tverse search: %d step %g" % (
                    self.tangent_result.nfev, self.tangent_move_step)
                extra += " Uphill step:%g" % (self.uphill_step_size, )
                logger.info("%s %s", ostring, extra)
        if callable(self.event):
            self.event(energy=E, coords=coords, rms=rms,
                       eigenval=self.eigenval, stepnum=i)
        if rms < self.tol:
            break
        if self.nfail >= self.nfail_max:
            logger.warning(
                "stopping findTransitionState. too many failures in eigenvector search %s",
                self.nfail)
            res.message.append(
                "too many failures in eigenvector search %d" % self.nfail)
            break
        if i == 0 and self.eigenval > 0.:
            logger.warning(
                "initial eigenvalue is positive - increase NEB spring constant?"
            )
            if self.demand_initial_negative_vec:
                logger.warning(" aborting transition state search")
                res.message.append("initial eigenvalue is positive %f" %
                                   self.eigenval)
                break
    # done. do one last eigenvector search because coords may have changed
    self._getLowestEigenVector(coords, i)
    # print some data
    logger.info("findTransitionState done: %s %s %s %s %s", i, E, rms,
                "eigenvalue", self.eigenval)
    success = True
    # check if results make sense
    if self.eigenval >= 0.:
        if self.verbosity > 2:
            logger.info(
                "warning: transition state is ending with positive eigenvalue %s",
                self.eigenval)
        success = False
    if rms > self.tol:
        if self.verbosity > 2:
            logger.info(
                "warning: transition state search appears to have failed: rms %s",
                rms)
        success = False
    if i >= self.nsteps:
        # NOTE(review): i comes from xrange(self.nsteps), so this condition
        # looks unreachable -- confirm whether it should detect loop exhaustion
        res.message.append("maximum iterations reached %d" % i)
    #return results
    res.coords = coords
    res.energy = E
    res.eigenval = self.eigenval
    res.eigenvec = self.eigenvec
    res.grad = grad
    res.rms = rms
    res.nsteps = i
    res.success = success
    return res
def optimize(self, quenchRoutine=None, **kwargs):
    """
    Optimize the band

    Notes
    -----
    the potential for the NEB optimization is not Hamiltonian.  This
    means that there is no meaningful energy associated with the
    potential.  Therefore, during the optimization, we can do gradient
    following, but we can't rely on the energy for, e.g. determining
    step size, which is the default behavior for many optimizers.  This
    can be worked around by choosing a small step size and a large
    maxErise, or by using an optimizer that uses only gradients.

    scipy.lbfgs_b seems to work with NEB pretty well, but lbfgs_py and
    mylbfgs tend to fail.  If you must use one of those try, e.g.
    maxErise = 1., maxstep=0.01, tol=1e-2

    :quenchRoutine: quench algorithm to use for optimization.
    :quenchParams: parameters for the quench
    """
    # choose the quencher: explicit argument, then instance default,
    # then module-level mylbfgs
    if quenchRoutine is None:
        if self.quenchRoutine is None:
            quenchRoutine = mylbfgs
        else:
            quenchRoutine = self.quenchRoutine
    #combine default and passed params. passed params will overwrite default
    quenchParams = dict([("nsteps", 300)] + self.quenchParams.items() +
                        kwargs.items())
    if quenchParams.has_key("iprint"):
        self.iprint = quenchParams["iprint"]
    if not quenchParams.has_key("logger"):
        quenchParams["logger"] = logging.getLogger(
            "pygmin.connect.neb.quench")
    if self.use_minimizer_callback:
        # register _step so the minimizer reports every iteration back to us
        quenchParams["events"] = [self._step]
    self.step = 0
    # quench the flattened band; self acts as the potential for the quencher
    qres = quenchRoutine(self.active.reshape(self.active.size), self,
                         **quenchParams)
    #    if isinstance(qres, tuple): # for compatability with old and new quenchers
    #        qres = qres[4]
    self.active[:, :] = qres.coords.reshape(self.active.shape)
    # refresh the per-image energies using the real potential(s)
    if self.copy_potential:
        for i in xrange(0, self.nimages):
            pot = self.potential_list[i]
            self.energies[i] = pot.getEnergy(self.coords[i, :])
    else:
        for i in xrange(0, self.nimages):
            self.energies[i] = self.potential.getEnergy(self.coords[i, :])
    res = Result()
    res.path = self.coords
    res.nsteps = qres.nsteps
    res.energy = self.energies
    res.rms = qres.rms
    res.success = False
    # NOTE(review): raises KeyError when "tol" was supplied by neither
    # self.quenchParams nor kwargs -- confirm callers always pass it
    if qres.rms < quenchParams["tol"]:
        res.success = True
    return res
def lbfgs_scipy(coords, pot, iprint=-1, tol=1e-3, nsteps=15000):
    """A wrapper function for the lbfgs routine in scipy.

    .. warn::
        the scipy version of lbfgs uses linesearch based only on energy
        which can make the minimization stop early.  When the step size
        is so small that the energy doesn't change to within machine
        precision (times the parameter `factr`) the routine declares
        success and stops.  This sounds fine, but if the gradient is
        analytical the gradient can still be not converged.  This is
        because in the vicinity of the minimum the gradient changes much
        more rapidly then the energy.  Thus we want to make factr as
        small as possible.  Unfortunately, if we make it too small the
        routine realizes that the linesearch routine isn't working and
        declares failure and exits.

        So long story short, if your tolerance is very small (< 1e-6)
        this routine will probably stop before truly reaching that
        tolerance.  If you reduce `factr` too much to mitigate this
        lbfgs will stop anyway, but declare failure misleadingly.
    """
    if not hasattr(pot, "getEnergyGradient"):
        # for compatibility with old quenchers.
        # assume pot is a getEnergyGradient function
        pot = _getEnergyGradientWrapper(pot)
    import scipy.optimize
    res = Result()
    res.coords, res.energy, dictionary = scipy.optimize.fmin_l_bfgs_b(
        pot.getEnergyGradient, coords, iprint=iprint, pgtol=tol,
        maxfun=nsteps, factr=10.)
    res.grad = dictionary["grad"]
    res.nfev = dictionary["funcalls"]
    warnflag = dictionary['warnflag']
    #res.nsteps = dictionary['nit'] # new in scipy version 0.12
    res.nsteps = res.nfev
    res.message = dictionary['task']
    res.success = True
    if warnflag > 0:
        res.success = False
        if warnflag == 1:
            res.message = "too many function evaluations"
        else:
            res.message = str(dictionary['task'])
        print("warning: problem with quench: %s" % res.message)
    #note: if the linesearch fails the lbfgs may fail without setting warnflag.
    # bug fix: res.grad.std() subtracts the mean of the gradient and is not
    # the rms gradient used everywhere else in this module; use |g|/sqrt(N)
    # like the sibling quenchers
    res.rms = np.linalg.norm(res.grad) / np.sqrt(len(res.grad))
    return res