def optimize(self):
    # Header
    print("\n---------------------------------------------" +
          "---------------------------------------------")
    print("Run_Name = %s" % str(self.name))

    target_function = self

    # Dispatch to the requested optimizer.  Each optimizer shares the same
    # call signature: initial coordinates, gradient callback, the NEB-like
    # object itself, and a dictionary of optimizer parameters.
    if self.opt == "sd":
        output = steepest_descent(np.array(self.coords_start),
                                  self.get_gradient,
                                  NEB_obj=target_function,
                                  new_opt_params=self.new_opt_params)
    elif self.opt == "bfgs":
        output = bfgs(np.array(self.coords_start),
                      self.get_gradient,
                      NEB_obj=target_function,
                      new_opt_params=self.new_opt_params)
    elif self.opt == "lbfgs":
        output = lbfgs(np.array(self.coords_start),
                       self.get_gradient,
                       NEB_obj=target_function,
                       new_opt_params=self.new_opt_params)
    elif self.opt == "qm":
        output = quick_min(np.array(self.coords_start),
                           self.get_gradient,
                           NEB_obj=target_function,
                           new_opt_params=self.new_opt_params)
    elif self.opt == "fire":
        output = fire(np.array(self.coords_start),
                      self.get_gradient,
                      NEB_obj=target_function,
                      new_opt_params=self.new_opt_params)
    elif self.opt == "cg":
        output = conjugate_gradient(np.array(self.coords_start),
                                    self.get_gradient,
                                    NEB_obj=target_function,
                                    new_opt_params=self.new_opt_params)
    else:
        print("\nERROR - %s optimization method does not exist! Choose "
              "from the following:" % str(self.opt))
        print("\t1. BFGS")
        print("\t2. LBFGS")
        print("\t3. QM")
        print("\t4. SD")
        print("\t5. FIRE")
        print("\t6. CG")
        sys.exit()

    # Unpack the optimizer output and report the convergence status.
    FINAL_PARAMS, CODE, ITERS = output

    if CODE == FAIL_CONVERGENCE:
        print("\nNEB failed to converge.")
    elif CODE == MAXITER_CONVERGENCE:
        print("\nNEB quit after reaching the specified maximum number "
              "of iterations.")
    elif CODE == G_MAX_CONVERGENCE:
        print("\nNEB converged the maximum force.")
    elif CODE == G_RMS_CONVERGENCE:
        print("\nNEB converged the RMS force.")
    elif CODE == STEP_SIZE_TOO_SMALL:
        print("\nNEB failed to converge. Step size either started too "
              "small, or was backtracked to being too small.")
    else:
        print("\nSomething unknown happened during NEB optimization, and "
              "no flag was returned.")

    print("---------------------------------------------" +
          "---------------------------------------------\n\n")

    return FINAL_PARAMS, ITERS
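# A minimal sketch (not part of the original code) of how the repeated
# if/elif dispatch above could be collapsed into a lookup table.  It assumes
# the optimizer functions (steepest_descent, bfgs, lbfgs, quick_min, fire,
# conjugate_gradient) are available in this module, that numpy is imported
# as np, and that they share the call signature used above.
OPTIMIZERS = {
    "sd": steepest_descent,
    "bfgs": bfgs,
    "lbfgs": lbfgs,
    "qm": quick_min,
    "fire": fire,
    "cg": conjugate_gradient,
}


def run_optimizer(neb_obj):
    # Hypothetical helper: look up the optimizer by name and call it with
    # the same arguments the optimize() method above passes.
    try:
        optimizer = OPTIMIZERS[neb_obj.opt]
    except KeyError:
        raise ValueError("Unknown optimization method %s; choose from %s"
                         % (neb_obj.opt, ", ".join(sorted(OPTIMIZERS))))
    return optimizer(np.array(neb_obj.coords_start),
                     neb_obj.get_gradient,
                     NEB_obj=neb_obj,
                     new_opt_params=neb_obj.new_opt_params)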
def optimize(self):
    # Catch the case where NEB was handed too few frames to be meaningful.
    if (isinstance(self.states, list) and
            not isinstance(self.states[0], list)):
        print("Error - Only one frame in NEB calculation. Did you mean to "
              "run an optimization instead?")
        sys.exit()
    elif (isinstance(self.states, type(self.states[0])) and
            len(self.states) <= 2):
        print("Error - NEB requires at least 3 frames to run. You have "
              "entered only %d frames." % len(self.states))
        sys.exit()

    # Set which atoms will be affected by virtual springs
    if not self.spring_atoms:
        self.spring_atoms = range(len(self.states[0]))
    elif isinstance(self.spring_atoms, str):
        # A space-separated list of element names
        elements = self.spring_atoms.split()
        self.spring_atoms = [i for i, a in enumerate(self.states[0])
                             if a.element in elements]

    # NEB Header
    print("\n---------------------------------------------" +
          "---------------------------------------------")
    print("Run_Name = %s" % str(self.name))
    print("DFT Package = %s" % self.DFT)
    print("Spring Constant for NEB: %lg Ha/Ang = %lg eV/Ang"
          % (self.k, units.convert_energy("Ha", "eV", self.k)))

    if self.no_energy:
        print("Running NEB with old tangent approximation")
    if self.ci_neb:
        print("Running Climbing Image, starting at iteration %d"
              % self.ci_N)

    # Dispatch to the requested optimizer.
    if self.opt == "sd":
        output = steepest_descent(np.array(self.coords_start),
                                  self.get_gradient,
                                  NEB_obj=self,
                                  new_opt_params=self.new_opt_params)
    elif self.opt == "bfgs":
        output = bfgs(np.array(self.coords_start),
                      self.get_gradient,
                      NEB_obj=self,
                      new_opt_params=self.new_opt_params)
    elif self.opt == "lbfgs":
        output = lbfgs(np.array(self.coords_start),
                       self.get_gradient,
                       NEB_obj=self,
                       new_opt_params=self.new_opt_params)
    elif self.opt == "qm":
        output = quick_min(np.array(self.coords_start),
                           self.get_gradient,
                           NEB_obj=self,
                           new_opt_params=self.new_opt_params)
    elif self.opt == "fire":
        output = fire(np.array(self.coords_start),
                      self.get_gradient,
                      NEB_obj=self,
                      new_opt_params=self.new_opt_params)
    elif self.opt == "cg":
        output = conjugate_gradient(np.array(self.coords_start),
                                    self.get_gradient,
                                    NEB_obj=self,
                                    new_opt_params=self.new_opt_params)
    elif self.opt.startswith("scipy"):
        print("\nRunning NEB with optimization method " + self.opt)
        params = np.array(self.coords_start)
        output = minimize(self.get_error,
                          params,
                          jac=self.get_gradient,
                          method=self.opt.split("_")[-1],
                          options=self.new_opt_params)
    else:
        print("\nERROR - %s optimization method does not exist! Choose "
              "from the following:" % str(self.opt))
        print("\t1. BFGS")
        print("\t2. LBFGS")
        print("\t3. QM")
        print("\t4. SD")
        print("\t5. FIRE")
        print("\t6. CG")
        print("\t7. scipy_X, where X is a valid scipy minimization method.")
        sys.exit()

    # The in-house optimizers return (params, convergence code, iterations);
    # scipy's minimize returns an OptimizeResult, which is passed through.
    if not self.opt.startswith("scipy"):
        FINAL_PARAMS, CODE, ITERS = output

        if CODE == FAIL_CONVERGENCE:
            print("\nNEB failed to converge.")
        elif CODE == MAXITER_CONVERGENCE:
            print("\nNEB quit after reaching the specified maximum number "
                  "of iterations.")
        elif CODE == G_MAX_CONVERGENCE:
            print("\nNEB converged the maximum force.")
        elif CODE == G_RMS_CONVERGENCE:
            print("\nNEB converged the RMS force.")
        elif CODE == STEP_SIZE_TOO_SMALL:
            print("\nNEB failed to converge. Step size either started too "
                  "small, or was backtracked to being too small.")
        else:
            print("\nSomething unknown happened during NEB optimization, "
                  "and no flag was returned.")

        print("---------------------------------------------" +
              "---------------------------------------------\n\n")

        return FINAL_PARAMS, ITERS, self.states
    else:
        return output, self.states
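# A minimal usage sketch (an assumption, not part of the original code):
# because the scipy branch above returns (OptimizeResult, states) while the
# in-house optimizers return (final_params, iterations, states), a caller
# has to handle both shapes.  `neb` is a hypothetical, already constructed
# NEB object whose opt attribute selects the optimizer.
def run_neb(neb):
    result = neb.optimize()
    if neb.opt.startswith("scipy"):
        scipy_result, states = result
        final_params = scipy_result.x
        # Not every scipy method reports an iteration count.
        iterations = getattr(scipy_result, "nit", None)
    else:
        final_params, iterations, states = result
    return final_params, iterations, states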
def optimize(self):
    # Catch the case where ANEB was handed too few frames to be meaningful.
    if (isinstance(self.states, list) and
            not isinstance(self.states[0], list)):
        print("Error - Only one frame in ANEB calculation. Did you mean to "
              "run an optimization instead?")
        sys.exit()
    elif (isinstance(self.states, type(self.states[0])) and
            len(self.states) <= 2):
        print("Error - ANEB requires at least 3 frames to run. You have "
              "entered only %d frames." % len(self.states))
        sys.exit()

    # Set which atoms will be affected by virtual springs
    set_spring_atoms(self)

    # ANEB Header
    print("\n---------------------------------------------" +
          "---------------------------------------------")
    print("Run_Name = %s" % str(self.name))
    print("DFT Package = %s" % self.DFT)
    print("Spring Constant for ANEB: %lg Ha/Ang = %lg eV/Ang"
          % (self.k_0, units.convert_energy("Ha", "eV", self.k_0)))

    if self.opt == "lbfgs":
        # Adaptive stage: relax the current band, then add frames until
        # ANEB_Nmax frames have been accumulated.
        self.ci_ANEB = False
        while len(self.all_states) < self.ANEB_Nmax:
            print("\nRunning for N_sim = %d of %d frames (N_max = %d)..."
                  % (self.ANEB_Nsim, len(self.all_states), self.ANEB_Nmax))
            _ = lbfgs(np.array(self.flattened_states),
                      self.get_gradient,
                      NEB_obj=self,
                      new_opt_params=self.new_auto_opt_params)
            add_frame(self)

            # Reset the optimizer state before relaxing the enlarged band.
            self.prv_RMS = None
            self.prv_MAX = None
            self.prv_MAX_E = None
            self.step = 0
            self.nframes = len(self.states)
            set_spring_atoms(self)

        # Final stage: climbing-image ANEB over all accumulated frames.
        self.ci_ANEB = True
        self.states = copy.deepcopy(self.all_states)
        self.flattened_states = flattened(self.states)
        self.nframes = len(self.states)
        self.k = [self.k_0 for i in self.all_states]
        print("\nRunning final CI-ANEB with %d frames"
              % len(self.all_states))
        output = lbfgs(np.array(self.flattened_states),
                       self.get_gradient,
                       NEB_obj=self,
                       new_opt_params=self.new_opt_params)
    else:
        print("\nERROR - %s optimization method does not exist! Choose "
              "from the following:" % str(self.opt))
        print("\t1. LBFGS")
        sys.exit()

    # Kept for symmetry with the NEB implementation; only lbfgs is accepted
    # here, so the scipy branch below is never taken.
    if not self.opt.startswith("scipy"):
        FINAL_PARAMS, CODE, ITERS = output

        if CODE == FAIL_CONVERGENCE:
            print("\nANEB failed to converge.")
        elif CODE == MAXITER_CONVERGENCE:
            print("\nANEB quit after reaching the specified maximum number "
                  "of iterations.")
        elif CODE == G_MAX_CONVERGENCE:
            print("\nANEB converged the maximum force.")
        elif CODE == G_RMS_CONVERGENCE:
            print("\nANEB converged the RMS force.")
        elif CODE == STEP_SIZE_TOO_SMALL:
            print("\nANEB failed to converge. Step size either started too "
                  "small, or was backtracked to being too small.")
        else:
            print("\nSomething unknown happened during ANEB optimization, "
                  "and no flag was returned.")

        print("---------------------------------------------" +
              "---------------------------------------------\n\n")

        return FINAL_PARAMS, ITERS, self.states
    else:
        return output, self.states
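# A minimal sketch (an assumption, not the original helper) of what the
# set_spring_atoms(self) call used above is expected to do, mirroring the
# inline logic from the NEB optimize() method earlier in this section:
# default to all atoms, or resolve a space-separated string of element
# names into atom indices.
def set_spring_atoms_sketch(obj):
    if not obj.spring_atoms:
        # No selection given: apply springs to every atom in a frame.
        obj.spring_atoms = range(len(obj.states[0]))
    elif isinstance(obj.spring_atoms, str):
        # A space-separated list of element names, e.g. "C H O".
        elements = obj.spring_atoms.split()
        obj.spring_atoms = [i for i, a in enumerate(obj.states[0])
                            if a.element in elements]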