def cg(coords, pot, iprint=-1, tol=1e-3, nsteps=5000, **kwargs):
    """Quench *coords* with scipy's nonlinear conjugate-gradient minimizer.

    Parameters
    ----------
    coords : array
        starting configuration
    pot : potential object
        must provide getEnergy, getGradient and getEnergyGradient
    iprint : int
        > 0 enables scipy's own convergence printout
    tol : float
        gradient tolerance handed to fmin_cg as gtol
    nsteps : int
        maximum number of iterations

    Returns
    -------
    res : Result
        container with coords, energy, grad, rms, nfev, success and,
        on failure, a message describing why
    """
    import scipy.optimize
    ret = scipy.optimize.fmin_cg(pot.getEnergy, coords, pot.getGradient,
                                 gtol=tol, full_output=True,
                                 disp=iprint > 0, maxiter=nsteps, **kwargs)
    res = Result()
    res.coords = ret[0]
    res.nfev = ret[2]
    res.nfev += ret[3]  # calls to gradient
    res.success = True
    warnflag = ret[4]
    if warnflag > 0:
        res.success = False
        if warnflag == 1:
            res.message = "Maximum number of iterations exceeded"
        if warnflag == 2:
            # bug fix: record the failure reason on the result (it was only a
            # Python 2 print statement before) so callers can inspect it the
            # same way as the warnflag == 1 case
            res.message = "Gradient and/or function calls not changing"
            print(res.message)
    # evaluate energy and gradient once more at the final geometry
    res.energy, res.grad = pot.getEnergyGradient(res.coords)
    res.nfev += 1
    g = res.grad
    res.rms = np.linalg.norm(g) / np.sqrt(len(g))
    return res
def cg(coords, pot, iprint=-1, tol=1e-3, nsteps=5000, **kwargs):
    """Quench *coords* with scipy's nonlinear conjugate-gradient minimizer.

    Parameters
    ----------
    coords : array
        starting configuration
    pot : potential object or bare getEnergyGradient callable
        old-style callables are wrapped for backward compatibility
    iprint : int
        > 0 enables scipy's own convergence printout
    tol : float
        gradient tolerance handed to fmin_cg as gtol
    nsteps : int
        maximum number of iterations

    Returns
    -------
    res : Result
        container with coords, energy, grad, rms, nfev, success and,
        on failure, a message describing why
    """
    if not hasattr(pot, "getEnergyGradient"):
        # for compatibility with old quenchers:
        # assume pot is a getEnergyGradient function
        pot = _getEnergyGradientWrapper(pot)
    import scipy.optimize
    ret = scipy.optimize.fmin_cg(pot.getEnergy, coords, pot.getGradient,
                                 gtol=tol, full_output=True,
                                 disp=iprint > 0, maxiter=nsteps, **kwargs)
    res = Result()
    res.coords = ret[0]
    res.nfev = ret[2]
    res.nfev += ret[3]  # calls to gradient
    res.success = True
    warnflag = ret[4]
    if warnflag > 0:
        res.success = False
        if warnflag == 1:
            res.message = "Maximum number of iterations exceeded"
        if warnflag == 2:
            # bug fix: store the reason on the result (previously only a
            # Python 2 print statement) so it matches the warnflag == 1 path
            res.message = "Gradient and/or function calls not changing"
            print(res.message)
    # evaluate energy and gradient once more at the final geometry
    res.energy, res.grad = pot.getEnergyGradient(res.coords)
    res.nfev += 1
    g = res.grad
    res.rms = np.linalg.norm(g) / np.sqrt(len(g))
    return res
def make_result(self, coords, energy):
    """Package *coords* and *energy* into a Result carrying a dummy
    positive eigenvalue and an eigenvector copied from the coordinates."""
    from pele.optimize import Result
    result = Result()
    result.energy = energy
    result.coords = coords
    result.eigenvec = coords.copy()
    result.eigenval = 1.
    return result
def run(self, Emax):
    """Record *Emax* on self and return a canned Monte-Carlo result.

    The iteration counters are fixed placeholder values; the configuration
    is drawn from the system below the energy ceiling and its energy is
    evaluated with the potential.
    """
    self.Emax = Emax
    out = Result()
    # placeholder MC statistics
    for name, value in (("mciter", 100), ("nsteps", 100), ("naccept", 70)):
        setattr(out, name, value)
    out.x = self.system.get_random_configuration_Emax(self.Emax)
    out.energy = self.pot.getEnergy(out.x)
    return out
def ode_julia_naive(coords, pot, tol=1e-4, nsteps=2000, convergence_check=None,
                    solver_type=de.ROCK2(), reltol=1e-5, abstol=1e-5, **kwargs):
    """Quench by integrating the gradient flow dx/dt = -grad E(x) with a
    Julia (DifferentialEquations.jl) ODE solver.

    Parameters
    ----------
    coords : array -- starting configuration
    pot : potential with getEnergy / getEnergyGradient
    tol : float -- convergence tolerance applied to the derivative du
    nsteps : int -- maximum number of integrator steps
    convergence_check : callable, optional -- custom test on du
    solver_type : julia solver object
        NOTE: the default de.ROCK2() is constructed once at import time and
        shared across calls -- confirm that is acceptable for this solver
    reltol, abstol : float -- integrator tolerances

    Returns
    -------
    res : Result with coords, energy, nfev, nsteps, success
    """
    class feval_pot:
        """Adapts the potential to the (u, p, t) call signature the ODE
        solver expects, while counting gradient evaluations."""
        def __init__(self):
            self.nfev = 0
        def get_negative_grad(self, x, p, t):
            self.nfev += 1
            return -pot.getEnergyGradient(x.copy())[1]
        def get_energy_gradient(self, x):
            self.nfev += 1
            return pot.getEnergyGradient(x.copy())
    function_evaluate_pot = feval_pot()
    converged = False
    n = 0
    if convergence_check is None:  # idiom fix: was "== None"
        # NOTE(review): np.max(g) ignores negative components; np.max(np.abs(g))
        # was probably intended -- confirm before changing behavior
        convergence_check = lambda g: np.max(g) < tol
    # initialize ode problem
    tspan = (0.0, 10.0)
    prob = de.ODEProblem(function_evaluate_pot.get_negative_grad, coords, tspan)
    integrator = de.init(prob, solver_type, reltol=reltol, abstol=abstol)
    x_ = np.full(len(coords), np.nan)
    while not converged and n < nsteps:
        # cleanup: removed the unused xold bookkeeping and the leftover
        # debug print of du after convergence
        de.step_b(integrator)
        x_ = integrator.u
        n += 1
        converged = convergence_check(de.get_du(integrator))
    res = Result()
    res.coords = x_
    res.energy = pot.getEnergy(x_)
    res.rms = 0
    res.grad = 0
    res.nfev = function_evaluate_pot.nfev
    res.nsteps = n
    res.success = converged
    return res
def __call__(self, x0, stepsize, Emax, energy, seed=None):
    """Run self.mciter Monte Carlo steps with the compiled LJ walker.

    Parameters
    ----------
    x0 : array -- starting configuration
    stepsize : float -- trial-move size
    Emax : float -- energy ceiling for acceptance
    energy : float -- starting energy; overwritten by the walker's output
    seed : int, optional -- RNG seed; drawn at random when omitted

    Returns
    -------
    res : Result with x0, x, nsteps, naccept and energy
    """
    if seed is None:
        # bug fix: sys.maxint was removed in Python 3; sys.maxsize exists
        # in both Python 2.6+ and Python 3 and serves the same purpose here
        seed = np.random.randint(0, sys.maxsize)
    x, energy, naccept = lj_mc_cython(x0, self.mciter, stepsize, Emax,
                                      self.radius, seed)
    res = Result()
    res.x0 = x0
    res.x = x
    res.nsteps = self.mciter
    res.naccept = naccept
    res.energy = energy
    return res
def get_result(self):
    """Collect the walker's current state into a Result container.

    The returned object carries the energy, gradient and coordinates,
    the iteration/function-evaluation counters, the current
    eigenvalue/eigenvector estimate, and the stop-criterion flag.
    """
    out = Result()
    out.coords = self.coords
    out.energy = self.energy
    out.gradient = self.gradient
    grad_norm = np.linalg.norm(out.gradient)
    out.rms = grad_norm / np.sqrt(len(out.gradient))
    out.nsteps = self.iter_number
    out.nfev = self.transverse_potential.nfev
    out.eigenval = self.eigenval
    out.eigenvec = self.get_eigenvector()
    out.success = self.stop_criterion_satisfied()
    return out
def get_results(self):
    """Evaluate the optimizer's tensors and pack them into a Result.

    All .eval() calls happen inside the graph/device/session contexts so
    they resolve against this optimizer's session.
    """
    res = Result()
    with self.g.as_default(), self.g.device(self.device):
        with self.session.as_default():
            grad_t, x_t = self.compute_gradients(self.model.gloss,
                                                 var_list=[self.model.x])[0]
            grad_t, x_t = self.process_grad(grad_t, x_t)
            res.energy = self.model.gloss.eval()
            res.coords = x_t.eval()
            res.grad = grad_t.eval()
            res.nfev = self.nfev.eval()
            res.rms = self.rms.eval()
            res.success = self.success.eval()
            # plain attribute, no eval needed
            res.nsteps = self.neval
    return res
def steepest_descent(x0, pot, iprint=-1, dx=1e-4, nsteps=100000, tol=1e-3,
                     maxstep=-1., events=None):
    """steepest descent minimization

    Notes
    -----
    this should never be used except for testing purposes. It is a bad
    implementation of a terrible minimization routine. It will be very slow.

    Parameters
    ----------
    x0 : array -- starting point
    pot : potential with getEnergyGradient
    iprint : int -- print progress every iprint function calls when > 0
    dx : float -- fixed step scale along the negative gradient
    nsteps : int -- maximum number of iterations
    tol : float -- rms-gradient convergence tolerance
    maxstep : float -- when > 0, rescale the step if the largest gradient
        component exceeds it
    events : list of callables, optional -- invoked every iteration
    """
    N = len(x0)
    x = x0.copy()
    E, V = pot.getEnergyGradient(x)
    funcalls = 1
    # bug fix: range and the print() function work on both Python 2 and 3;
    # xrange and the print statement are Python-2 only
    for k in range(nsteps):
        stp = -V * dx
        if maxstep > 0:
            stpsize = np.max(np.abs(V))
            if stpsize > maxstep:
                stp *= maxstep / stpsize
        x += stp
        E, V = pot.getEnergyGradient(x)
        funcalls += 1
        rms = np.linalg.norm(V) / np.sqrt(N)
        if iprint > 0:
            if funcalls % iprint == 0:
                print("step %8d energy %20.12g rms gradient %20.12g" %
                      (funcalls, E, rms))
        if events is not None:
            for event in events:
                event(energy=E, coords=x, rms=rms)
        if rms < tol:
            break
    res = Result()
    res.coords = x
    res.energy = E
    res.rms = rms
    res.grad = V
    res.nfev = funcalls
    res.nsteps = k
    res.success = res.rms <= tol
    return res
def bfgs_scipy(coords, pot, iprint=-1, tol=1e-3, nsteps=5000, **kwargs):
    """Minimize *pot* starting from *coords* with scipy's BFGS.

    *iprint* > 0 turns on scipy's progress output, *tol* is the gradient
    tolerance, *nsteps* caps the iteration count.  Returns a Result.
    """
    import scipy.optimize

    verbose = iprint > 0
    ret = scipy.optimize.fmin_bfgs(
        pot.getEnergy, coords, fprime=pot.getGradient, gtol=tol,
        full_output=True, disp=verbose, maxiter=nsteps, **kwargs)

    xopt, fopt, gopt = ret[0], ret[1], ret[2]
    res = Result()
    res.coords = xopt
    res.energy = fopt
    res.grad = gopt
    res.rms = np.linalg.norm(gopt) / np.sqrt(len(gopt))
    # total function calls plus gradient calls
    res.nfev = ret[4] + ret[5]
    res.nsteps = res.nfev  # not correct, but no better information
    res.success = np.max(np.abs(gopt)) < tol
    return res
def run(self, fmax=1e-3, steps=100000):
    """Run structure optimization algorithm.

    This method will return when the forces on all individual atoms are
    less than *fmax* or when the number of steps exceeds *steps*.
    """
    self.fmax = fmax
    step = 0
    res = Result()
    res.success = False
    while step < steps:
        # one energy/gradient evaluation per iteration
        E, f = self.potential.getEnergyGradient(self.coords)
        self.nfev += 1
        #self.call_observers()
        #print E
        if self.alternate_stop_criterion is None:
            i_am_done = self.converged(f)
        else:
            # user-supplied convergence test takes precedence
            i_am_done = self.alternate_stop_criterion(energy=E, gradient=f,
                                                      tol=self.fmax)
        if i_am_done:
            res.success = True
            break
        # self.step takes the force, i.e. the negative gradient
        self.step(-f)
        self.nsteps += 1
        rms = np.linalg.norm(f)/np.sqrt(len(f))
        if self.iprint > 0:
            if step % self.iprint == 0:
                self.logger.info("fire: %s E %s rms %s", step, E, rms)
        for event in self.events:
            event(coords=self.coords, energy=E, rms=rms)
        step += 1
    # NOTE(review): E and f are unbound here if steps == 0 -- confirm
    # callers never pass steps=0
    res.nsteps = step
    res.nfev = self.nfev
    res.coords = self.coords
    res.energy = E
    # NOTE(review): this stores the NEGATIVE gradient (the force) in
    # res.grad, while the other quenchers in this file store the gradient
    # itself -- confirm which convention callers expect
    res.grad = -f
    res.rms = np.linalg.norm(res.grad)/np.sqrt(len(res.grad))
    self.result = res
    return res
def run(self, fmax=1e-3, steps=100000):
    """Run structure optimization algorithm.

    This method will return when the forces on all individual atoms are
    less than *fmax* or when the number of steps exceeds *steps*.
    """
    self.fmax = fmax
    step = 0
    res = Result()
    res.success = False
    while step < steps:
        # one energy/gradient evaluation per iteration
        E, f = self.potential.getEnergyGradient(self.coords)
        self.nfev += 1
        if self.alternate_stop_criterion is None:
            i_am_done = self.converged(f)
        else:
            # user-supplied convergence test takes precedence
            i_am_done = self.alternate_stop_criterion(energy=E, gradient=f,
                                                      tol=self.fmax)
        if i_am_done:
            res.success = True
            break
        # self.step takes the force, i.e. the negative gradient
        self.step(-f)
        self.nsteps += 1
        rms = np.linalg.norm(f) / np.sqrt(len(f))
        if self.iprint > 0:
            if step % self.iprint == 0:
                self.logger.info("fire: %s E %s rms %s", step, E, rms)
        for event in self.events:
            event(coords=self.coords, energy=E, rms=rms)
        step += 1
    # NOTE(review): E and f are unbound here if steps == 0 -- confirm
    # callers never pass steps=0
    res.nsteps = step
    res.nfev = self.nfev
    res.coords = self.coords
    res.energy = E
    # NOTE(review): this stores the NEGATIVE gradient (the force) in
    # res.grad, while the other quenchers in this file store the gradient
    # itself -- confirm which convention callers expect
    res.grad = -f
    res.rms = np.linalg.norm(res.grad) / np.sqrt(len(res.grad))
    self.result = res
    return res
def get_result(self):
    """Merge the translator's and rotator's results into one Result.

    The combined result takes coordinates/rms from the translation walker,
    eigenvalue/eigenvector from the rotation walker, and recomputes the
    true energy and gradient at the final coordinates.
    """
    translation = self.translator.get_result()
    rotation = self.rotator.get_result()
    combined = Result()
    combined.coords = translation.coords
    combined.rms = translation.rms
    combined.eigenval = rotation.eigenval
    combined.eigenvec = rotation.eigenvec
    combined.energy, combined.grad = self.get_true_energy_gradient(combined.coords)
    combined.nfev = translation.nfev + rotation.nfev
    combined.nsteps = self.iter_number
    combined.success = self.stop_criterion_satisfied()
    if combined.eigenval > 0:
        # positive curvature means this is not a transition state
        combined.success = False
    return combined
def steepest_descent(x0, pot, iprint=-1, dx=1e-4, nsteps=100000, tol=1e-3,
                     maxstep=-1., events=None):
    """Minimize *pot* by naive fixed-scale steepest descent.

    Notes
    -----
    For testing only: this is a deliberately simple and very slow
    implementation of a poor minimization algorithm.
    """
    ndof = len(x0)
    x = x0.copy()
    energy, grad = pot.getEnergyGradient(x)
    ncalls = 1
    for iteration in range(nsteps):
        move = -dx * grad
        if maxstep > 0:
            # rescale when the largest gradient component exceeds maxstep
            biggest = np.max(np.abs(grad))
            if biggest > maxstep:
                move *= maxstep / biggest
        x += move
        energy, grad = pot.getEnergyGradient(x)
        ncalls += 1
        rms = np.linalg.norm(grad) / np.sqrt(ndof)
        if iprint > 0 and ncalls % iprint == 0:
            print("step %8d energy %20.12g rms gradient %20.12g" %
                  (ncalls, energy, rms))
        if events is not None:
            for event in events:
                event(energy=energy, coords=x, rms=rms)
        if rms < tol:
            break
    res = Result()
    res.coords = x
    res.energy = energy
    res.rms = rms
    res.grad = grad
    res.nfev = ncalls
    res.nsteps = iteration
    res.success = res.rms <= tol
    return res
def read_minimum(self, fin):
    """Parse one minimum / transition-state record.

    Record layout:
        1 line with the energy,
        1 line with point group info (skipped),
        natoms lines with eigenvalues,
        natoms lines with coords.
    """
    record = Result()
    # first line: the energy is the leading field
    energy_line = self.get_next_line()
    record.energy = float(energy_line.split()[0])
    # the point-group line is deliberately skipped
    self.get_next_line()
    # eigenvalues precede the coordinates in the file
    record.eigenvalues = self.read_coords(fin)
    record.coords = self.read_coords(fin)
    return record
def bfgs_scipy(coords, pot, iprint=-1, tol=1e-3, nsteps=5000, **kwargs):
    """Wrap scipy.optimize.fmin_bfgs as a pele-style quench routine.

    Returns a Result with the minimized coordinates, energy, gradient,
    rms gradient, evaluation counts and a success flag based on the
    largest gradient component.
    """
    import scipy.optimize

    ret = scipy.optimize.fmin_bfgs(pot.getEnergy, coords,
                                   fprime=pot.getGradient, gtol=tol,
                                   full_output=True, disp=iprint > 0,
                                   maxiter=nsteps, **kwargs)
    res = Result()
    res.coords, res.energy, res.grad = ret[0], ret[1], ret[2]
    ngrad = len(res.grad)
    res.rms = np.linalg.norm(res.grad) / np.sqrt(ngrad)
    res.nfev = ret[4] + ret[5]  # function calls + gradient calls
    res.nsteps = res.nfev  # not correct, but no better information
    res.success = np.max(np.abs(res.grad)) < tol
    return res
def steepest_descent(x0, pot, iprint=-1, dx=1e-4, nsteps=100000, tol=1e-3,
                     maxstep=-1., events=None):
    """Naive fixed-scale steepest-descent quench (testing only).

    Parameters
    ----------
    x0 : array -- starting point
    pot : potential with getEnergyGradient, or a bare getEnergyGradient
        callable (wrapped for backward compatibility)
    iprint : int -- print progress every iprint function calls when > 0
    dx : float -- fixed step scale along the negative gradient
    nsteps : int -- maximum number of iterations
    tol : float -- rms-gradient convergence tolerance
    maxstep : float -- when > 0, rescale the step if the largest gradient
        component exceeds it
    events : list of callables, optional -- invoked every iteration
    """
    if not hasattr(pot, "getEnergyGradient"):
        # for compatibility with old quenchers:
        # assume pot is a getEnergyGradient function
        pot = _getEnergyGradientWrapper(pot)
    N = len(x0)
    x = x0.copy()
    E, V = pot.getEnergyGradient(x)
    funcalls = 1
    # bug fix: range and the print() function work on both Python 2 and 3;
    # xrange and the print statement are Python-2 only
    for k in range(nsteps):
        stp = -V * dx
        if maxstep > 0:
            stpsize = np.max(np.abs(V))
            if stpsize > maxstep:
                stp *= maxstep / stpsize
        x += stp
        E, V = pot.getEnergyGradient(x)
        funcalls += 1
        rms = np.linalg.norm(V) / np.sqrt(N)
        if iprint > 0:
            if funcalls % iprint == 0:
                print("step %8d energy %20.12g rms gradient %20.12g" %
                      (funcalls, E, rms))
        if events is not None:
            for event in events:
                event(energy=E, coords=x, rms=rms)
        if rms < tol:
            break
    res = Result()
    res.coords = x
    res.energy = E
    res.rms = rms
    res.grad = V
    res.nfev = funcalls
    res.nsteps = k
    res.success = res.rms <= tol
    return res
def optimize(self, quenchRoutine=None, **kwargs):
    """Optimize the band.

    Notes
    -----
    the potential for the NEB optimization is not Hamiltonian.  This
    means there is no meaningful energy associated with the potential.
    During the optimization we can do gradient following, but we can't
    rely on the energy for, e.g., determining step size (the default
    behavior of many optimizers).  Work around this with a small step
    size and a large maxErise, or use an optimizer that uses only
    gradients.  scipy.lbfgs_b works well with NEB; lbfgs_py and mylbfgs
    tend to fail -- if you must use them try e.g.
    maxErise=1., maxstep=0.01, tol=1e-2.

    Parameters
    ----------
    quenchRoutine : callable, optional
        quench algorithm to use for optimization; defaults to
        self.quenchRoutine or mylbfgs
    kwargs :
        extra quench parameters, merged over the stored defaults
    """
    if quenchRoutine is None:
        if self.quenchRoutine is None:
            quenchRoutine = mylbfgs
        else:
            quenchRoutine = self.quenchRoutine
    # combine default and passed params. passed params will overwrite default
    # bug fix: wrap .items() in list() and use "in" instead of dict.has_key
    # -- has_key was removed in Python 3; these forms work on 2 and 3
    quenchParams = dict([("nsteps", 300)] + list(self.quenchParams.items()) +
                        list(kwargs.items()))
    if "iprint" in quenchParams:
        self.iprint = quenchParams["iprint"]
    if "logger" not in quenchParams:
        quenchParams["logger"] = logging.getLogger("pele.connect.neb.quench")
    if self.use_minimizer_callback:
        quenchParams["events"] = [self._step]
    self.step = 0
    qres = quenchRoutine(self.active.reshape(self.active.size), self,
                         **quenchParams)
    self.active[:, :] = qres.coords.reshape(self.active.shape)
    # refresh the image energies with the true potential(s)
    if self.copy_potential:
        for i in range(0, self.nimages):
            pot = self.potential_list[i]
            self.energies[i] = pot.getEnergy(self.coords[i, :])
    else:
        for i in range(0, self.nimages):
            self.energies[i] = self.potential.getEnergy(self.coords[i, :])
    res = Result()
    res.path = self.coords
    res.nsteps = qres.nsteps
    res.energy = self.energies
    res.rms = qres.rms
    res.success = False
    # NOTE(review): raises KeyError when no "tol" was supplied via
    # quenchParams/kwargs -- confirm all callers pass tol
    if qres.rms < quenchParams["tol"]:
        res.success = True
    return res
def optimize(self, quenchRoutine=None, **kwargs):
    """Optimize the band.

    Notes
    -----
    The NEB objective is not a true Hamiltonian: the "energy" seen by the
    optimizer is not meaningful, so only gradient information is reliable.
    Optimizers that pick step sizes from the energy (lbfgs_py, mylbfgs)
    tend to fail; scipy.lbfgs_b works well.  If you must use the former,
    try e.g. maxErise=1., maxstep=0.01, tol=1e-2.

    Parameters
    ----------
    quenchRoutine : callable, optional
        quench algorithm used for the optimization; defaults to
        self.quenchRoutine or mylbfgs
    kwargs :
        per-call quench parameters, merged over the stored defaults
    """
    if quenchRoutine is None:
        quenchRoutine = (mylbfgs if self.quenchRoutine is None
                         else self.quenchRoutine)
    # precedence: built-in default < stored params < per-call kwargs
    params = {"nsteps": 300}
    params.update(self.quenchParams)
    params.update(kwargs)
    if "iprint" in params:
        self.iprint = params["iprint"]
    if "logger" not in params:
        params["logger"] = logging.getLogger("pele.connect.neb.quench")
    if self.use_minimizer_callback:
        params["events"] = [self._step]
    self.step = 0
    qres = quenchRoutine(self.active.reshape(self.active.size), self, **params)
    self.active[:, :] = qres.coords.reshape(self.active.shape)
    # refresh the per-image energies with the true potential(s)
    if self.copy_potential:
        for img in range(0, self.nimages):
            self.energies[img] = self.potential_list[img].getEnergy(
                self.coords[img, :])
    else:
        for img in range(0, self.nimages):
            self.energies[img] = self.potential.getEnergy(self.coords[img, :])
    res = Result()
    res.path = self.coords
    res.nsteps = qres.nsteps
    res.energy = self.energies
    res.rms = qres.rms
    res.success = False
    if qres.rms < params["tol"]:
        res.success = True
    return res
def ode_julia_naive(coords, pot, tol=1e-4, nsteps=20000, convergence_check=None,
                    solver_type=de.CVODE_BDF(), rtol=1e-4, atol=1e-4, **kwargs):
    """Quench by integrating the gradient flow dx/dt = -grad E(x) with a
    Julia (DifferentialEquations.jl) CVODE_BDF integrator.

    Parameters
    ----------
    coords : array -- starting configuration
    pot : potential with getEnergy / getEnergyGradient (the unused jacobian
        hook additionally needs getEnergyGradientHessian)
    tol : float -- gradient-norm convergence tolerance applied to du
    nsteps : int -- maximum number of integrator steps
    convergence_check : callable, optional -- custom test on du
    solver_type : julia solver object
        NOTE(review): currently IGNORED -- the solver is hard-coded below
        via Main.eval to CVODE_BDF with a GMRES linear solver; confirm
        whether solver_type should be honored instead
    rtol, atol : float -- integrator tolerances

    Returns
    -------
    res : Result with coords, energy, nfev, nhev, nsteps, success
    """
    class feval_pot:
        """wrapper class that interfaces the base potential to the
        (u, p, t) call signatures the ode solver expects, counting
        gradient (nfev) and hessian (nhev) evaluations"""
        def __init__(self):
            self.nfev = 0
            self.nhev = 0
        def get_negative_grad(self, x, p, t):
            """negative grad is f(u, p, t)"""
            self.nfev += 1
            return -pot.getEnergyGradient(x.copy())[1]
        def get_energy_gradient(self, x):
            self.nfev += 1
            return pot.getEnergyGradient(x.copy())
        def get_jacobian(self, x, p, t):
            self.nhev += 1
            return -pot.getEnergyGradientHessian(x.copy())[2]
    function_evaluate_pot = feval_pot()
    converged = False
    n = 0
    if convergence_check is None:  # idiom fix: was "== None"
        convergence_check = lambda g: np.linalg.norm(g) < tol
    # initialize ode problem
    tspan = (0, 10000.0)
    f_bound = de.ODEFunction(function_evaluate_pot.get_negative_grad)
    prob = de.ODEProblem(f_bound, coords, tspan)
    solver = Main.eval("CVODE_BDF(linear_solver=:GMRES)")
    integrator = de.init(prob, solver, reltol=rtol, abstol=atol)
    x_ = np.full(len(coords), np.nan)
    while not converged and n < nsteps:
        # cleanup: removed the unused xold bookkeeping
        de.step_b(integrator)
        x_ = integrator.u
        n += 1
        converged = convergence_check(de.get_du(integrator))
    res = Result()
    res.coords = x_
    res.energy = pot.getEnergy(x_)
    res.grad = 0
    res.nfev = function_evaluate_pot.nfev
    res.nsteps = n
    res.nhev = function_evaluate_pot.nhev
    res.success = converged
    return res
def run(self):
    """The main loop of the algorithm.

    Repeatedly: find the lowest eigenvalue/eigenvector of the Hessian,
    step uphill along the eigenvector, then minimize in the perpendicular
    subspace, until the rms gradient drops below self.tol or a failure
    condition (too many positive eigenvalues, too many eigenvector-search
    failures, or a positive initial eigenvalue) ends the search.
    """
    coords = np.copy(self.coords)
    res = Result()  # return object
    res.message = []
    self._compute_gradients(coords)
    iend = 0
    for i in xrange(self.nsteps):
        iend = i
        # get the lowest eigenvalue and eigenvector
        self.overlap = self._getLowestEigenVector(coords, i)
        overlap = self.overlap
        if self.eigenval < 0:
            self.negatives_before_check -= 1
        # determine whether everything looks OK.
        all_ok = self.eigenval < 0 or not self.check_negative
        if not all_ok:
            if i == 0:
                # we need to accept because we haven't saved the state yet
                # Also, demand_initial_negative_vec will stop later if needed
                all_ok = True
        if not all_ok:
            if self.negatives_before_check > 0 and not self.demand_initial_negative_vec:
                print " positive before check. setting all ok"
                all_ok = True
        # if everything is OK, then continue, else revert the step
        if all_ok:
            self._saveState(coords)
            self.reduce_step = 0
        else:
            self.npositive += 1
            if self.npositive > self.npositive_max:
                logger.warning(
                    "positive eigenvalue found too many times. ending %s",
                    self.npositive)
                res.message.append(
                    "positive eigenvalue found too many times %d" %
                    self.npositive)
                break
            if self.verbosity > 2:
                logger.info(
                    "the eigenvalue turned positive. %s %s", self.eigenval,
                    "Resetting last good values and taking smaller steps")
            coords = self._resetState()
            self.reduce_step += 1
        # step uphill along the direction of the lowest eigenvector
        coords = self._stepUphill(coords)
        # minimize the coordinates in the space perpendicular to the lowest eigenvector
        tangent_ret = self._minimizeTangentSpace(
            coords, energy=self.get_energy(), gradient=self.get_gradient())
        coords = tangent_ret.coords
        tangentrms = tangent_ret.rms
        # check if we are done and print some stuff
        # self._compute_gradients(coords) # this is unnecessary
        E = self.get_energy()
        grad = self.get_gradient()
        rms = np.linalg.norm(grad) * self.rmsnorm
        gradpar = np.dot(grad, self.eigenvec) / np.linalg.norm(
            self.eigenvec)
        if self.iprint > 0:
            if (i + 1) % self.iprint == 0:
                ostring = "findTS: %3d E %9g rms %8g eigenvalue %9g rms perp %8g grad par %9g overlap %g" % (
                    i, E, rms, self.eigenval, tangentrms, gradpar, overlap)
                extra = " Evec search: %d rms %g" % (
                    self.leig_result.nfev, self.leig_result.rms)
                extra += " Tverse search: %d step %g" % (
                    self.tangent_result.nfev, self.tangent_move_step)
                extra += " Uphill step:%g" % (self.uphill_step_size, )
                logger.info("%s %s", ostring, extra)
        if callable(self.event):
            self.event(energy=E, coords=coords, rms=rms,
                       eigenval=self.eigenval, stepnum=i)
        if rms < self.tol:
            break
        if self.nfail >= self.nfail_max:
            logger.warning(
                "stopping findTransitionState. too many failures in eigenvector search %s",
                self.nfail)
            res.message.append(
                "too many failures in eigenvector search %d" % self.nfail)
            break
        if i == 0 and self.eigenval > 0.:
            if self.verbosity > 1:
                logger.warning(
                    "initial eigenvalue is positive - increase NEB spring constant?"
                )
            if self.demand_initial_negative_vec:
                logger.warning(" aborting transition state search")
                res.message.append("initial eigenvalue is positive %f" %
                                   self.eigenval)
                break
    # done. do one last eigenvector search because coords may have changed
    self._getLowestEigenVector(coords, iend)
    # print some data
    # NOTE(review): E and rms are unbound here if self.nsteps == 0 --
    # confirm the loop always runs at least once
    if self.verbosity > 0 or self.iprint > 0:
        logger.info("findTransitionState done: %s %s %s %s %s", iend, E,
                    rms, "eigenvalue", self.eigenval)
    success = True
    # check if results make sense
    if self.eigenval >= 0.:
        if self.verbosity > 2:
            logger.info(
                "warning: transition state is ending with positive eigenvalue %s",
                self.eigenval)
        success = False
    if rms > self.tol:
        if self.verbosity > 2:
            logger.info(
                "warning: transition state search appears to have failed: rms %s",
                rms)
        success = False
    # NOTE(review): iend is at most self.nsteps - 1 (loop index), so this
    # condition is never true when nsteps > 0 -- probably meant iend >= nsteps - 1
    if iend >= self.nsteps:
        res.message.append("maximum iterations reached %d" % iend)
    # update nfev with the number of calls from the transverse walker
    if self._transverse_walker is not None:
        twres = self._transverse_walker.get_result()
        self.nfev += twres.nfev
    res.coords = coords
    res.energy = E
    res.eigenval = self.eigenval
    res.eigenvec = self.eigenvec
    res.grad = grad
    res.rms = rms
    res.nsteps = iend
    res.success = success
    res.nfev = self.nfev
    return res
def ode_scipy_naive(coords, pot, t_bound=1000, tol=1e-4, nsteps=20000,
                    convergence_check=None, solver_type='rk45', **kwargs):
    """Quench by integrating the gradient flow dx/dt = -grad E(x) with
    scipy's Radau integrator.

    Parameters
    ----------
    coords : array
        coords input for the quench
    pot : potential
        class with getEnergy / getEnergyGradient used for stepping
    t_bound : float
        final integration time handed to the solver
    tol : float
        gradient-norm tolerance for declaring a minimum found
    nsteps : int
        maximum number of solver steps
    convergence_check : callable, optional
        custom test applied to the gradient each step
    solver_type : str
        NOTE(review): currently ignored -- Radau is always used; likewise
        **kwargs are unused and rtol/atol are hard-coded to 1e-3
    Returns
    -------
    res : Result
        container with coords, energy, nfev, nsteps and success
    """
    class feval_pot:
        """Counts potential evaluations and flips the gradient sign so the
        integrator follows the descent direction."""
        def __init__(self):
            self.nfev = 0
        def get_gradient(self, t, x):
            self.nfev += 1
            return -pot.getEnergyGradient(x.copy())[1]
        def get_energy_gradient(self, x):
            self.nfev += 1
            return pot.getEnergyGradient(x.copy())
    function_evaluate_pot = feval_pot()
    solver = Radau(function_evaluate_pot.get_gradient, 0, coords, t_bound,
                   rtol=1e-3, atol=1e-3)
    converged = False
    n = 0
    if convergence_check is None:  # idiom fix: was "== None"
        convergence_check = lambda g: np.linalg.norm(g) < tol
    x_ = np.full(len(coords), np.nan)
    while not converged and n < nsteps:
        # cleanup: removed the unused xold bookkeeping
        solver.step()
        # y_old is the accepted state at the start of the last step
        x_ = solver.dense_output().y_old
        n += 1
        converged = convergence_check(pot.getEnergyGradient(x_)[1])
    res = Result()
    # bug fix: removed a leftover debug print of nfev and a duplicate
    # res.coords assignment
    res.coords = x_
    res.energy = pot.getEnergy(x_)
    res.rms = 0
    res.grad = 0
    res.nfev = function_evaluate_pot.nfev
    res.nsteps = n
    res.success = converged
    return res
def run(self):
    """The main loop of the algorithm.

    Repeatedly: find the lowest eigenvalue/eigenvector of the Hessian,
    step uphill along the eigenvector, then minimize in the perpendicular
    subspace, until the rms gradient drops below self.tol or a failure
    condition (too many positive eigenvalues, too many eigenvector-search
    failures, or a positive initial eigenvalue) ends the search.
    """
    coords = np.copy(self.coords)
    res = Result()  # return object
    res.message = []
    # if starting with positive curvature, disable negative eigenvalue check
    # this will be reenabled as soon as the eigenvector becomes negative
    negative_before_check = 10
    self._compute_gradients(coords)
    for i in xrange(self.nsteps):
        # get the lowest eigenvalue and eigenvector
        self.overlap = self._getLowestEigenVector(coords, i)
        overlap = self.overlap
        #print self.eigenval
        if self.eigenval < 0:
            negative_before_check -= 1
        # check to make sure the eigenvector is ok; accept (save state) when
        # any of these hold, otherwise revert to the last good state
        if (i == 0 or self.eigenval <= 0
                or not self.check_negative
                or (negative_before_check > 0 and not self.demand_initial_negative_vec)):
            self._saveState(coords)
            self.reduce_step = 0
        else:
            self.npositive += 1
            if self.npositive > self.npositive_max:
                logger.warning(
                    "positive eigenvalue found too many times. ending %s",
                    self.npositive)
                res.message.append(
                    "positive eigenvalue found too many times %d" %
                    self.npositive)
                break
            if self.verbosity > 2:
                logger.info(
                    "the eigenvalue turned positive. %s %s", self.eigenval,
                    "Resetting last good values and taking smaller steps")
            coords = self._resetState()
            self.reduce_step += 1
        # step uphill along the direction of the lowest eigenvector
        coords = self._stepUphill(coords)
        # minimize the coordinates in the space perpendicular to the lowest eigenvector
        tangent_ret = self._minimizeTangentSpace(coords,
                                                 energy=self.get_energy(),
                                                 gradient=self.get_gradient())
        coords = tangent_ret.coords
        tangentrms = tangent_ret.rms
        # check if we are done and print some stuff
        # self._compute_gradients(coords) # this is unnecessary
        E = self.get_energy()
        grad = self.get_gradient()
        rms = np.linalg.norm(grad) * self.rmsnorm
        gradpar = np.dot(grad, self.eigenvec) / np.linalg.norm(self.eigenvec)
        if self.iprint > 0:
            if (i+1) % self.iprint == 0:
                ostring = "findTS: %3d E %9g rms %8g eigenvalue %9g rms perp %8g grad par %9g overlap %g" % (
                    i, E, rms, self.eigenval, tangentrms, gradpar, overlap)
                extra = " Evec search: %d rms %g" % (
                    self.leig_result.nfev, self.leig_result.rms)
                extra += " Tverse search: %d step %g" % (
                    self.tangent_result.nfev, self.tangent_move_step)
                extra += " Uphill step:%g" % (self.uphill_step_size,)
                logger.info("%s %s", ostring, extra)
        if callable(self.event):
            self.event(energy=E, coords=coords, rms=rms,
                       eigenval=self.eigenval, stepnum=i)
        if rms < self.tol:
            break
        if self.nfail >= self.nfail_max:
            logger.warning(
                "stopping findTransitionState. too many failures in eigenvector search %s",
                self.nfail)
            res.message.append(
                "too many failures in eigenvector search %d" % self.nfail)
            break
        if i == 0 and self.eigenval > 0.:
            if self.verbosity > 1:
                logger.warning(
                    "initial eigenvalue is positive - increase NEB spring constant?")
            if self.demand_initial_negative_vec:
                logger.warning(" aborting transition state search")
                res.message.append("initial eigenvalue is positive %f" %
                                   self.eigenval)
                break
    # done. do one last eigenvector search because coords may have changed
    # NOTE(review): i (and E, rms below) are unbound if self.nsteps == 0 --
    # confirm the loop always runs at least once
    self._getLowestEigenVector(coords, i)
    # print some data
    if self.verbosity > 0 or self.iprint > 0:
        logger.info("findTransitionState done: %s %s %s %s %s", i, E, rms,
                    "eigenvalue", self.eigenval)
    success = True
    # check if results make sense
    if self.eigenval >= 0.:
        if self.verbosity > 2:
            logger.info(
                "warning: transition state is ending with positive eigenvalue %s",
                self.eigenval)
        success = False
    if rms > self.tol:
        if self.verbosity > 2:
            logger.info(
                "warning: transition state search appears to have failed: rms %s",
                rms)
        success = False
    # NOTE(review): i is at most self.nsteps - 1 (loop index), so this
    # condition is never true when nsteps > 0 -- probably meant i >= nsteps - 1
    if i >= self.nsteps:
        res.message.append("maximum iterations reached %d" % i)
    # update nfev with the number of calls from the transverse walker
    if self._transverse_walker is not None:
        twres = self._transverse_walker.get_result()
        self.nfev += twres.nfev
    #return results
    res.coords = coords
    res.energy = E
    res.eigenval = self.eigenval
    res.eigenvec = self.eigenvec
    res.grad = grad
    res.rms = rms
    res.nsteps = i
    res.success = success
    res.nfev = self.nfev
    return res