def quench(atoms, name=None, fmax=0.05, method='QN'):
    """
    Quench given Atoms object with given attached calculator.

    Parameters:
    atoms  -- ASE Atoms object with a calculator attached.
    name   -- basename for the output files; defaults to the Hill
              chemical formula of the atoms.
    fmax   -- force convergence criterion passed to the optimizer.
    method -- 'QN' (QuasiNewton) or 'FIRE'.

    Writes name+'_quenching.trj' during the run and
    name+'_quenched.xyz' at the end, and returns the potential
    energy of the quenched structure.
    """
    if name is None:
        try:
            name = atoms.get_chemical_formula(mode="hill")
        except Exception:
            raise ValueError('name not specified')
    traj = ase.Trajectory(name + '_quenching.trj', 'w', atoms)
    if method == 'QN':
        qn = ase.QuasiNewton(atoms)
    elif method == 'FIRE':
        qn = ase.FIRE(atoms)
    else:
        # Fail early with a clear message instead of the confusing
        # UnboundLocalError on qn that an unknown method used to cause.
        raise ValueError('unknown method: %s' % method)
    qn.attach(traj.write)
    qn.run(fmax=fmax)
    e = atoms.get_potential_energy()
    ase.write(name + '_quenched.xyz', atoms)
    return e
# Build a benzene molecule in vacuum and relax it with a non-SCC Hotbit
# DFTB calculator using the Slater-Koster tables located in dir2.
a = ase.molecule('C6H6')
a.center(vacuum=6.0)
c = Hotbit(elements={'H': dir2 + 'H-H.skf',
                     'C': dir2 + 'C-C.skf'},
           tables={'HH': dir2 + 'H-H.skf',
                   'CH': dir2 + 'C-H.skf',
                   'CC': dir2 + 'C-C.skf'},
           SCC=False)
a.set_calculator(c)
ase.FIRE(a).run(fmax=0.01)
ase.write('C6H6.cfg', a)
###
# For every element present in the calculator, print the on-site energy
# of each valence orbital twice: once raw and once scaled by Hartree
# (presumably Hartree -> eV conversion; verify against the units of
# get_epsilon -- cannot be confirmed from this chunk).
for i in c.el.get_present():
    el = c.el.get_element(i)
    sym = el.get_symbol()
    orbs = el.get_valence_orbitals()
    for orb in orbs:
        print sym, orb, el.get_epsilon(orb), el.get_epsilon(orb) * Hartree
# Sample the C-C hopping table (value and derivative) on a distance grid.
x = np.linspace(1.0, 12.0, 1000)
y, dy = c.ia.h['CC'](x)
def runopt_inner(name, CoS, ftol, maxit, callback, maxstep=0.2, **kwargs):
    """
    Run one optimisation of the chain-of-states object CoS with the
    optimiser selected by name.

    name     -- optimiser identifier: 'scipy_lbfgsb', 'multiopt',
                'conj_grad', 'steep_des', 'fire' or one of the 'ase_*'
                variants.
    CoS      -- chain-of-states object exposing state_vec, obj_func,
                obj_func_grad, get_state_as_array and string.
    ftol     -- force/gradient tolerance used as convergence criterion.
    maxit    -- maximum number of iterations / optimiser steps.
    callback -- called once per iteration; for the non-scipy optimisers
                convergence is handled inside this callback.
    maxstep  -- maximum allowed step length.
    kwargs   -- passed through to the selected optimiser constructor.

    Returns the scipy info dictionary for 'scipy_lbfgsb', otherwise
    None.  Side effect: the module-global `opt` is bound to the
    optimiser instance so the caller can inspect it afterwards.
    """
    global opt

    if name == 'scipy_lbfgsb':
        def fun(x):
            CoS.state_vec = x
            return CoS.obj_func()

        def fprime(x):
            CoS.state_vec = x
            # Attention: here it is expected that only one
            # gradient call per iteration step is done
            return CoS.obj_func_grad()

        # Minimal stand-in exposing the step count with the same
        # interface as the ASE optimisers.
        class nums:
            def __init__(self):
                pass

            def get_number_of_steps(self):
                return lbfgs.n_function_evals

        opt = nums()

        # `info` was previously named `dict`, shadowing the builtin.
        opt2, energy, info = lbfgs.fmin_l_bfgs_b(
            fun,
            CoS.get_state_as_array(),
            fprime=fprime,
            callback=callback,
            maxfun=maxit,
            pgtol=ftol,
            factr=10,  # stops when step is < factr*machine_precision
            maxstep=maxstep)
        return info

    elif name == 'multiopt':
        from pts.cosopt.multiopt import MultiOpt
        opt = MultiOpt(CoS, maxstep=maxstep, **kwargs)
        opt.string = CoS.string
        opt.attach(lambda: callback(None), interval=1)
        # BUGFIX: was steps=max_it (undefined name, NameError); the
        # parameter is called maxit.
        opt.run(steps=maxit)  # convergence handled by callback
        return None
    elif name == 'conj_grad':
        from pts.cosopt.conj_grad import conj_grad_opt
        opt = conj_grad_opt(CoS, maxstep=maxstep, **kwargs)
        opt.attach(lambda: callback(None), interval=1)
        # BUGFIX: was steps=max_it (undefined name, NameError).
        opt.run(steps=maxit)  # convergence handled by callback
        return None
    elif name == 'steep_des':
        from pts.cosopt.conj_grad import conj_grad_opt
        opt = conj_grad_opt(CoS, maxstep=maxstep,
                            reduce_to_steepest_descent=True, **kwargs)
        opt.attach(lambda: callback(None), interval=1)
        opt.run()  # convergence handled by callback
        return None
    elif name == 'fire':
        from pts.cosopt.fire import fire_opt
        opt = fire_opt(CoS, maxstep=maxstep, **kwargs)
        opt.attach(lambda: callback(None), interval=1)
        opt.run(steps=maxit)  # convergence handled by callback
        return None
    elif name[0:4] == 'ase_':
        if name == 'ase_lbfgs':
            opt = ase.LBFGS(CoS, maxstep=maxstep, **kwargs)
        elif name == 'ase_bfgs':
            opt = ase.BFGS(CoS, maxstep=maxstep, **kwargs)
        elif name == 'ase_lbfgs_line':
            opt = ase.LineSearchLBFGS(CoS, maxstep=maxstep)
        elif name == 'ase_fire':
            # NOTE: ASE's FIRE calls its step-length cap 'maxmove'.
            opt = ase.FIRE(CoS, maxmove=maxstep)
        elif name == 'ase_scipy_cg':
            opt = ase.SciPyFminCG(CoS)
        elif name == 'ase_scipy_lbfgsb':
            opt = pts.cosopt.optimizers.SciPyFminLBFGSB(CoS, alpha=400)
        else:
            # `names` is expected to be a module-level list of the
            # supported optimiser identifiers.
            assert False, ' '.join(
                ["Unrecognised algorithm", name, "not in"] + names)

        opt.string = CoS.string

        # attach optimiser to print out each step in
        opt.attach(lambda: callback(None), interval=1)
        opt.run(fmax=ftol, steps=maxit)
        return None
    else:
        assert False, ' '.join(
            ["Unrecognised algorithm", name, "not in"] + names)