Example #1
    def __init__(self,
                 pot,
                 mindist,
                 tsSearchParams=None,
                 verbosity=1,
                 NEBparams=None,
                 nrefine_max=100,
                 reoptimize_climbing=0,
                 pushoff_params=None,
                 create_neb=NEBDriver):
        if pushoff_params is None: pushoff_params = dict()
        if NEBparams is None: NEBparams = dict()
        if tsSearchParams is None: tsSearchParams = dict()
        self.pot = pot
        self.mindist = mindist
        self.tsSearchParams = tsSearchParams
        self.verbosity = int(verbosity)
        self.nrefine_max = nrefine_max

        self.NEBparams = NEBparams
        self.reoptimize_climbing = reoptimize_climbing

        self.pushoff_params = pushoff_params

        self.res = Result()
        self.res.new_transition_states = []
        self.create_neb = create_neb
Example #2
def cg(coords, pot, iprint=-1, tol=1e-3, nsteps=5000, **kwargs):
    """
    a wrapper function for the conjugate gradient routine in scipy
    """
    import scipy.optimize

    ret = scipy.optimize.fmin_cg(pot.getEnergy,
                                 coords,
                                 pot.getGradient,
                                 gtol=tol,
                                 full_output=True,
                                 disp=iprint > 0,
                                 maxiter=nsteps,
                                 **kwargs)
    res = Result()
    res.coords = ret[0]
    res.nfev = ret[2]
    res.nfev += ret[3]  # calls to gradient
    res.success = True
    warnflag = ret[4]
    if warnflag > 0:
        # print "warning: problem with quench: ",
        res.success = False
        if warnflag == 1:
            res.message = "Maximum number of iterations exceeded"
        if warnflag == 2:
            print "Gradient and/or function calls not changing"
    res.energy, res.grad = pot.getEnergyGradient(res.coords)
    res.nfev += 1
    g = res.grad
    res.rms = np.linalg.norm(g) / np.sqrt(len(g))
    return res
Example #3
 def make_result(self, coords, energy):
     from pele.optimize import Result
     res = Result()
     res.coords = coords
     res.energy = energy
     res.eigenval = 1.
     res.eigenvec = coords.copy()
     return res
Example #4
def lbfgs_scipy(coords, pot, iprint=-1, tol=1e-3, nsteps=15000):
    """
    a wrapper function for the lbfgs routine in scipy
    
    .. warning::
        the scipy version of lbfgs uses a line search based only on the energy,
        which can make the minimization stop early.  When the step size is so
        small that the energy does not change to within machine precision (times
        the parameter `factr`), the routine declares success and stops.  This
        sounds fine, but if the gradient is analytical it can still be far from
        converged, because in the vicinity of the minimum the gradient changes
        much more rapidly than the energy.  We therefore want to make `factr` as
        small as possible; unfortunately, if it is made too small the routine
        decides the line search is not working and exits with a failure.

        In short: if your tolerance is very small (< 1e-6) this routine will
        probably stop before truly reaching that tolerance, and if you reduce
        `factr` too far to mitigate this, lbfgs will stop anyway but misleadingly
        declare failure.
    """
    import scipy.optimize

    res = Result()
    res.coords, res.energy, dictionary = scipy.optimize.fmin_l_bfgs_b(
        pot.getEnergyGradient,
        coords,
        iprint=iprint,
        pgtol=tol,
        maxfun=nsteps,
        factr=10.)
    res.grad = dictionary["grad"]
    res.nfev = dictionary["funcalls"]
    warnflag = dictionary['warnflag']
    # res.nsteps = dictionary['nit'] #  new in scipy version 0.12
    res.nsteps = res.nfev
    res.message = dictionary['task']
    res.success = True
    if warnflag > 0:
        print("warning: problem with quench: ", end=' ')
        res.success = False
        if warnflag == 1:
            res.message = "too many function evaluations"
        else:
            res.message = str(dictionary['task'])
        print(res.message)
    # note: if the linesearch fails the lbfgs may fail without setting warnflag.  Check
    # tolerance exactly
    if False:
        if res.success:
            maxV = np.max(np.abs(res.grad))
            if maxV > tol:
                print(
                    "warning: gradient seems too large", maxV, "tol =", tol,
                    ". This is a known, but not understood issue of scipy_lbfgs"
                )
                print(res.message)
    res.rms = res.grad.std()
    return res
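The wrapper above returns a Result whose `success`, `energy`, `rms`, and `nfev` fields mirror the scipy output. A minimal usage sketch, assuming `pot` is a pele-style potential exposing getEnergyGradient(coords) -> (energy, gradient); the potential and starting coordinates here are placeholders, not part of the original example.

import numpy as np

np.random.seed(0)
x0 = np.random.uniform(-1.0, 1.0, 3 * 10)   # placeholder coordinates for 10 "atoms"
res = lbfgs_scipy(x0, pot, tol=1e-5, nsteps=10000)
print(res.success, res.energy, res.rms, res.nfev)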
Example #5
File: hparticle.py Project: js850/sens
 def run(self, Emax):
     self.Emax = Emax
     res = Result()
     res.mciter = 100
     res.nsteps = 100
     res.naccept = 70
     res.x = self.system.get_random_configuration_Emax(self.Emax)
     res.energy = self.pot.getEnergy(res.x)
     return res
Example #6
def ode_julia_naive(coords,
                    pot,
                    tol=1e-4,
                    nsteps=2000,
                    convergence_check=None,
                    solver_type=de.ROCK2(),
                    reltol=1e-5,
                    abstol=1e-5,
                    **kwargs):
    class feval_pot:
        """ wrapper class that makes sure the function is right
        """
        def __init__(self):
            self.nfev = 0

        def get_negative_grad(self, x, p, t):
            self.nfev += 1
            return -pot.getEnergyGradient(x.copy())[1]

        def get_energy_gradient(self, x):
            self.nfev += 1
            return pot.getEnergyGradient(x.copy())

    function_evaluate_pot = feval_pot()
    converged = False
    n = 0
    if convergence_check is None:
        convergence_check = lambda g: np.max(g) < tol

    # initialize ode problem
    tspan = (0.0, 10.0)
    prob = de.ODEProblem(function_evaluate_pot.get_negative_grad, coords,
                         tspan)
    integrator = de.init(prob, solver_type, reltol=reltol, abstol=abstol)
    x_ = np.full(len(coords), np.nan)
    while not converged and n < nsteps:
        xold = x_
        de.step_b(integrator)
        x_ = integrator.u
        n += 1
        converged = convergence_check(de.get_du(integrator))
        if converged:
            print(de.get_du(integrator))
    res = Result()
    res.coords = x_
    res.energy = pot.getEnergy(x_)
    res.rms = 0
    res.grad = 0
    res.nfev = function_evaluate_pot.nfev
    res.nsteps = n
    res.success = converged
    return res
Example #7
File: _lj_tools.py Project: js850/sens
 def __call__(self, x0, stepsize, Emax, energy, seed=None):
     if seed is None:
         seed = np.random.randint(0, sys.maxsize)
     x, energy, naccept = lj_mc_cython(x0, self.mciter, stepsize, Emax,
                                       self.radius, seed)
     #        print ret
     res = Result()
     res.x0 = x0
     res.x = x
     res.nsteps = self.mciter
     res.naccept = naccept
     res.energy = energy
     return res
Example #8
    def get_result(self):
        res = Result()
        res.energy = self.energy
        res.gradient = self.gradient
        res.coords = self.coords
        res.nsteps = self.iter_number
        res.rms = np.linalg.norm(res.gradient) / np.sqrt(len(res.gradient))
        res.nfev = self.transverse_potential.nfev

        res.eigenval = self.eigenval
        res.eigenvec = self.get_eigenvector()

        res.success = self.stop_criterion_satisfied()
        return res
Example #9
File: mc.py Project: yangxi1209/pele
    def __init__(self,
                 coords,
                 potential,
                 takeStep,
                 storage=None,
                 event_after_step=None,
                 acceptTest=None,
                 temperature=1.0,
                 confCheck=None,
                 outstream=sys.stdout,
                 store_initial=True,
                 iprint=1):
        # note: make a local copy of the lists of events so that a passed-in list is not modified.
        if confCheck is None: confCheck = []
        if event_after_step is None: event_after_step = []
        self.coords = np.copy(coords)
        self.storage = storage
        self.potential = potential
        self.takeStep = takeStep
        self.event_after_step = copy.copy(event_after_step)  # not deepcopy
        self.temperature = temperature
        self.naccepted = 0

        self.result = Result()
        self.result.nfev = 0

        self.outstream = outstream
        self.printfrq = iprint  # controls how often printing is done
        self.confCheck = confCheck

        if acceptTest:
            self.acceptTest = acceptTest
        else:
            self.acceptTest = metropolis.Metropolis(self.temperature)

        self.stepnum = 0

        #########################################################################
        # store initial structure
        #########################################################################
        energy = self.potential.getEnergy(self.coords)
        self.result.nfev += 1
        if self.storage and store_initial:
            self.storage(energy, self.coords)

        self.markovE = energy

        self.result.energy = self.markovE
        self.result.coords = self.coords.copy()
Example #10
 def get_results(self):
     res = Result()
     with self.g.as_default(), self.g.device(self.device):
         with self.session.as_default():
             g, x = self.compute_gradients(self.model.gloss,
                                           var_list=[self.model.x])[0]
             g, x = self.process_grad(g, x)
             res.energy = self.model.gloss.eval()
             res.coords = x.eval()
             res.grad = g.eval()
             res.nfev = self.nfev.eval()
             res.rms = self.rms.eval()
             res.success = self.success.eval()
             res.nsteps = self.neval
     return res
Example #11
 def _get_lowest_eigenvector_diagonalization(self, coords, **kwargs):
     """compute the lowest eigenvector by diagonalizing the Hessian
     
     This scales as N**3, so can be very slow for large systems.
     """
     if self.verbosity > 3:
         print "computing the lowest eigenvector by diagonalizing the Hessian"
     hess = self.pot.getHessian(coords)
     eigenval, evec = get_smallest_eig(hess)
     res = Result()
     res.eigenval = eigenval
     res.eigenvec = evec
     res.nfev = 1
     res.success = True
     res.rms = 0.
     return res
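The helper `get_smallest_eig` is not shown in this example. A minimal sketch of what such a helper could look like, assuming the Hessian is symmetric so `numpy.linalg.eigh` (which returns eigenvalues in ascending order) applies:

import numpy as np

def get_smallest_eig_sketch(hess):
    # eigh is for symmetric/Hermitian matrices and returns eigenvalues in
    # ascending order, so index 0 is the lowest eigenpair
    eigenvalues, eigenvectors = np.linalg.eigh(hess)
    return eigenvalues[0], eigenvectors[:, 0]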
Example #12
def steepest_descent(x0,
                     pot,
                     iprint=-1,
                     dx=1e-4,
                     nsteps=100000,
                     tol=1e-3,
                     maxstep=-1.,
                     events=None):
    """steepest descent minimization
    
    Notes
    -----
    this should never be used except for testing purposes.  It is a bad implementation
    of a terrible minimization routine.  It will be very slow.
    """
    N = len(x0)
    x = x0.copy()
    E, V = pot.getEnergyGradient(x)
    funcalls = 1
    for k in range(nsteps):
        stp = -V * dx
        if maxstep > 0:
            stpsize = np.max(np.abs(V))
            if stpsize > maxstep:
                stp *= maxstep / stpsize
        x += stp
        E, V = pot.getEnergyGradient(x)
        funcalls += 1
        rms = np.linalg.norm(V) / np.sqrt(N)
        if iprint > 0:
            if funcalls % iprint == 0:
                print "step %8d energy %20.12g rms gradient %20.12g" % (
                    funcalls, E, rms)
        if events is not None:
            for event in events:
                event(energy=E, coords=x, rms=rms)
        if rms < tol:
            break
    res = Result()
    res.coords = x
    res.energy = E
    res.rms = rms
    res.grad = V
    res.nfev = funcalls
    res.nsteps = k
    res.success = res.rms <= tol
    return res
Example #13
File: ising_model.py Project: js850/sens
    def __call__(self, spins, mciter, stepsize, Emax, energy, seed=None):
        mciter = mciter * len(spins)
#        energy = self.pot.getEnergy(spins)
        newspins, Enew, naccept = mc_ising_c(spins,
                mciter, Emax, seed,
                self.neighbor_list,
                self.nbegin,
                self.nend,
                energy)
        
        if False:
            # test the returned energy
            etest = self.pot.getEnergy(newspins)
            if np.abs(etest - Enew) > 0.1:
                raise Exception("energy returned from c ising mc")
        res = Result(x=newspins, energy=Enew, nsteps=mciter, naccept=naccept)
        return res
Example #14
File: _fire.py Project: yangxi1209/pele
    def run(self, fmax=1e-3, steps=100000):
        """Run structure optimization algorithm.

        This method will return when the forces on all individual
        atoms are less than *fmax* or when the number of steps exceeds
        *steps*.
        """
        self.fmax = fmax
        step = 0
        res = Result()
        res.success = False
        while step < steps:
            E, f = self.potential.getEnergyGradient(self.coords)
            self.nfev += 1
            if self.alternate_stop_criterion is None:
                i_am_done = self.converged(f)
            else:
                i_am_done = self.alternate_stop_criterion(energy=E,
                                                          gradient=f,
                                                          tol=self.fmax)
            if i_am_done:
                res.success = True
                break
            self.step(-f)
            self.nsteps += 1
            rms = np.linalg.norm(f) / np.sqrt(len(f))
            if self.iprint > 0:
                if step % self.iprint == 0:
                    self.logger.info("fire: %s E %s rms %s", step, E, rms)
            for event in self.events:
                event(coords=self.coords, energy=E, rms=rms)

            step += 1

        res.nsteps = step
        res.nfev = self.nfev
        res.coords = self.coords
        res.energy = E
        res.grad = -f
        res.rms = np.linalg.norm(res.grad) / np.sqrt(len(res.grad))
        self.result = res
        return res
Example #15
    def get_result(self):
        """return a results object"""
        trans_res = self.translator.get_result()

        rot_result = self.rotator.get_result()

        res = Result()
        res.eigenval = rot_result.eigenval
        res.eigenvec = rot_result.eigenvec
        res.coords = trans_res.coords
        res.energy, res.grad = self.get_true_energy_gradient(res.coords)
        res.rms = trans_res.rms
        res.nfev = rot_result.nfev + trans_res.nfev
        res.nsteps = self.iter_number
        res.success = self.stop_criterion_satisfied()

        if res.eigenval > 0:
            res.success = False

        return res
Example #16
    def read_minimum(self, fin):
        """
        the minima and transition states are stored as
        
        1 line with energy
        1 line with point group info
        natoms lines with eigenvalues
        natoms lines with coords
        """
        res = Result()
        # read energy
        line = self.get_next_line()
        # print line
        res.energy = float(line.split()[0])

        # ignore the line with the point group
        line = self.get_next_line()

        res.eigenvalues = self.read_coords(fin)
        res.coords = self.read_coords(fin)

        return res
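For reference, the block layout described in the docstring looks like the following for a hypothetical system with natoms = 2 (all values and the point-group line are placeholders; only the first field of the energy line is actually parsed):

-12.302928 ...              <- 1 line: energy (only the first field is read)
<point group line>          <- 1 line: ignored by read_minimum
<eigenvalue line 1>         <- natoms lines: eigenvalues, read via read_coords
<eigenvalue line 2>
<coordinate line 1>         <- natoms lines: coordinates, read via read_coords
<coordinate line 2>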
Example #17
def bfgs_scipy(coords, pot, iprint=-1, tol=1e-3, nsteps=5000, **kwargs):
    """
    a wrapper function for the scipy BFGS algorithm
    """
    import scipy.optimize

    ret = scipy.optimize.fmin_bfgs(pot.getEnergy,
                                   coords,
                                   fprime=pot.getGradient,
                                   gtol=tol,
                                   full_output=True,
                                   disp=iprint > 0,
                                   maxiter=nsteps,
                                   **kwargs)
    res = Result()
    res.coords = ret[0]
    res.energy = ret[1]
    res.grad = ret[2]
    res.rms = np.linalg.norm(res.grad) / np.sqrt(len(res.grad))
    res.nfev = ret[4] + ret[5]
    res.nsteps = res.nfev  # not correct, but no better information
    res.success = np.max(np.abs(res.grad)) < tol
    return res
Example #18
def ode_julia_naive(coords,
                    pot,
                    tol=1e-4,
                    nsteps=20000,
                    convergence_check=None,
                    solver_type=de.CVODE_BDF(),
                    rtol=1e-4,
                    atol=1e-4,
                    **kwargs):
    class feval_pot:
        """ wrapper class that interfaces base potential to functions for ode solver
        """
        def __init__(self):
            self.nfev = 0
            self.nhev = 0

        def get_negative_grad(self, x, p, t):
            """
            negative grad is f(u, p, t)
            """
            self.nfev += 1
            return -pot.getEnergyGradient(x.copy())[1]

        def get_energy_gradient(self, x):
            self.nfev += 1
            return pot.getEnergyGradient(x.copy())

        def get_jacobian(self, x, p, t):
            self.nhev += 1
            return -pot.getEnergyGradientHessian(x.copy())[2]

    function_evaluate_pot = feval_pot()
    converged = False
    n = 0
    if convergence_check is None:
        convergence_check = lambda g: np.linalg.norm(g) < tol
    # odefunc = de.ODEFunction(function_evaluate_pot.get_negative_grad, function_evaluate_pot.get_jacobian)
    # initialize ode problem
    tspan = (0, 10000.0)
    f_bound = de.ODEFunction(function_evaluate_pot.get_negative_grad)
    # f_free = de.ODEFunction(get_negative_grad,jac = get_jacobian)
    prob = de.ODEProblem(f_bound, coords, tspan)
    solver = Main.eval("CVODE_BDF(linear_solver=:GMRES)")
    integrator = de.init(prob, solver, reltol=rtol, abstol=atol)
    x_ = np.full(len(coords), np.nan)
    while not converged and n < nsteps:
        xold = x_
        de.step_b(integrator)
        x_ = integrator.u
        n += 1
        converged = convergence_check(de.get_du(integrator))
    res = Result()
    res.coords = x_
    res.energy = pot.getEnergy(x_)
    res.grad = 0
    res.nfev = function_evaluate_pot.nfev
    res.nsteps = n
    res.nhev = function_evaluate_pot.nhev
    res.success = converged
    # res.nhev = function_evaluate_pot.nhev
    return res
Example #19
    def run(self):
        """The main loop of the algorithm"""
        coords = np.copy(self.coords)
        res = Result()  # return object
        res.message = []

        self._compute_gradients(coords)
        iend = 0
        for i in range(self.nsteps):
            iend = i
            # get the lowest eigenvalue and eigenvector
            self.overlap = self._getLowestEigenVector(coords, i)
            overlap = self.overlap

            if self.eigenval < 0:
                self.negatives_before_check -= 1

            # determine whether everything looks OK.
            all_ok = self.eigenval < 0 or not self.check_negative
            if not all_ok:
                if i == 0:
                    # we need to accept because we haven't saved the state yet
                    # Also, demand_initial_negative_vec will stop later if needed
                    all_ok = True
            if not all_ok:
                if self.negatives_before_check > 0 and not self.demand_initial_negative_vec:
                    print "  positive before check. setting all ok"
                    all_ok = True

            # if everything is OK, then continue, else revert the step
            if all_ok:
                self._saveState(coords)
                self.reduce_step = 0
            else:
                self.npositive += 1
                if self.npositive > self.npositive_max:
                    logger.warning(
                        "positive eigenvalue found too many times. ending %s",
                        self.npositive)
                    res.message.append(
                        "positive eigenvalue found too many times %d" %
                        self.npositive)
                    break
                if self.verbosity > 2:
                    logger.info(
                        "the eigenvalue turned positive. %s %s", self.eigenval,
                        "Resetting last good values and taking smaller steps")
                coords = self._resetState()
                self.reduce_step += 1

            # step uphill along the direction of the lowest eigenvector
            coords = self._stepUphill(coords)

            # minimize the coordinates in the space perpendicular to the lowest eigenvector
            tangent_ret = self._minimizeTangentSpace(
                coords, energy=self.get_energy(), gradient=self.get_gradient())
            coords = tangent_ret.coords
            tangentrms = tangent_ret.rms

            # check if we are done and print some stuff
            # self._compute_gradients(coords) # this is unnecessary
            E = self.get_energy()
            grad = self.get_gradient()
            rms = np.linalg.norm(grad) * self.rmsnorm
            gradpar = np.dot(grad, self.eigenvec) / np.linalg.norm(
                self.eigenvec)

            if self.iprint > 0:
                if (i + 1) % self.iprint == 0:
                    ostring = "findTS: %3d E %9g rms %8g eigenvalue %9g rms perp %8g grad par %9g overlap %g" % (
                        i, E, rms, self.eigenval, tangentrms, gradpar, overlap)
                    extra = "  Evec search: %d rms %g" % (
                        self.leig_result.nfev, self.leig_result.rms)
                    extra += "  Tverse search: %d step %g" % (
                        self.tangent_result.nfev, self.tangent_move_step)
                    extra += "  Uphill step:%g" % (self.uphill_step_size, )
                    logger.info("%s %s", ostring, extra)

            if callable(self.event):
                self.event(energy=E,
                           coords=coords,
                           rms=rms,
                           eigenval=self.eigenval,
                           stepnum=i)
            if rms < self.tol:
                break
            if self.nfail >= self.nfail_max:
                logger.warning(
                    "stopping findTransitionState.  too many failures in eigenvector search %s",
                    self.nfail)
                res.message.append(
                    "too many failures in eigenvector search %d" % self.nfail)
                break

            if i == 0 and self.eigenval > 0.:
                if self.verbosity > 1:
                    logger.warning(
                        "initial eigenvalue is positive - increase NEB spring constant?"
                    )
                if self.demand_initial_negative_vec:
                    logger.warning(
                        "            aborting transition state search")
                    res.message.append("initial eigenvalue is positive %f" %
                                       self.eigenval)
                    break

        # done.  do one last eigenvector search because coords may have changed
        self._getLowestEigenVector(coords, iend)

        # print some data
        if self.verbosity > 0 or self.iprint > 0:
            logger.info("findTransitionState done: %s %s %s %s %s", iend, E,
                        rms, "eigenvalue", self.eigenval)

        success = True
        # check if results make sense
        if self.eigenval >= 0.:
            if self.verbosity > 2:
                logger.info(
                    "warning: transition state is ending with positive eigenvalue %s",
                    self.eigenval)
            success = False
        if rms > self.tol:
            if self.verbosity > 2:
                logger.info(
                    "warning: transition state search appears to have failed: rms %s",
                    rms)
            success = False
        if iend >= self.nsteps:
            res.message.append("maximum iterations reached %d" % iend)

        # update nfev with the number of calls from the transverse walker
        if self._transverse_walker is not None:
            twres = self._transverse_walker.get_result()
            self.nfev += twres.nfev

        res.coords = coords
        res.energy = E
        res.eigenval = self.eigenval
        res.eigenvec = self.eigenvec
        res.grad = grad
        res.rms = rms
        res.nsteps = iend
        res.success = success
        res.nfev = self.nfev
        return res
Example #20
    def __init__(self,
                 X,
                 pot,
                 maxstep=0.1,
                 maxErise=1e-4,
                 M=4,
                 rel_energy=False,
                 H0=0.1,
                 events=None,
                 alternate_stop_criterion=None,
                 debug=False,
                 iprint=-1,
                 nsteps=10000,
                 tol=1e-5,
                 logger=None,
                 energy=None,
                 gradient=None,
                 armijo=False,
                 armijo_c=1e-4,
                 fortran=False):
        X = X.copy()
        self.X = X
        self.N = len(X)
        self.M = M
        self.pot = pot
        self._use_wolfe = False  # this didn't work very well.  should probably remove
        self._armijo = bool(armijo)
        self._wolfe1 = armijo_c
        self._wolfe2 = 0.99
        self._cython = False  # we could make this passable
        self._fortran = bool(fortran)
        self.funcalls = 0
        if energy is not None and gradient is not None:
            self.energy = energy
            self.G = gradient
        else:
            self.energy, self.G = self.pot.getEnergyGradient(self.X)
            self.funcalls += 1
        self.rms = np.linalg.norm(self.G) / np.sqrt(self.N)

        self.maxstep = maxstep
        self.maxErise = maxErise
        self.rel_energy = rel_energy  # use relative energy comparison for maxErise
        self.events = events  # a list of events to run during the optimization
        if self.events is None: self.events = []
        self.iprint = iprint
        self.nsteps = nsteps
        self.tol = tol
        if logger is None:
            self.logger = _logger
        else:
            self.logger = logger

        self.alternate_stop_criterion = alternate_stop_criterion
        self.debug = debug  # print debug messages

        self.s = np.zeros([self.M, self.N])  # position updates
        self.y = np.zeros([self.M, self.N])  # gradient updates

        if H0 is None:
            self.H0 = 0.1
        else:
            self.H0 = H0
        if self.H0 < 1e-10:
            self.logger.warning(
                "initial guess for inverse Hessian diagonal is negative or too small %s %s",
                self.H0, "resetting it to 0.1")
            self.H0 = 0.1
        self.rho = np.zeros(M)
        self.k = 0

        self.s[0, :] = self.X
        self.y[0, :] = self.G
        self.rho[0] = 0.  # 1. / np.dot(X,G)

        self.dXold = np.zeros(self.X.size)
        self.dGold = np.zeros(self.X.size)
        self._have_dXold = False

        self.nfailed = 0

        self.iter_number = 0
        self.result = Result()
        self.result.message = []
Example #21
File: _quench.py Project: spraharsh/pele
def ode_scipy_naive(coords,
                    pot,
                    t_bound=1000,
                    tol=1e-4,
                    nsteps=20000,
                    convergence_check=None,
                    solver_type='rk45',
                    **kwargs):
    """ This uses rk45 in a minimizer like approach to find the ode
        The idea is to solve for the path
         dx/dt = - \\grad{E}
    Parameters
    ----------
    coords: array
        coords input for the quench
    pot: array
        potential class that has a
        getenergygradient function that we need for stepping
    nsteps: int
        maximum number of steps
    tol: float
        gives a tolerance for making sure that we have found the minima
    kwargs: ...
        extra arguments for the integrator
    Return
    ----------
    result container with steps and function evaluations
    """
    class feval_pot:
        """ wrapper class that calculates function evaluations
            and makes sure to point the gradient in the right direction
        """
        def __init__(self):
            self.nfev = 0

        def get_gradient(self, t, x):
            self.nfev += 1
            return -pot.getEnergyGradient(x.copy())[1]

        def get_energy_gradient(self, x):
            self.nfev += 1
            return pot.getEnergyGradient(x.copy())

    # This code seems to work for these two solvers
    # if solver_type == 'RK23':
    #     solver = RK23_LS
    # else:
    #     solver = RK23_LS
    from scipy.integrate import Radau  # solver used below (solver_type is currently ignored)
    solver = Radau
    # def get_gradient(t, x):
    #     # gets just the gradient from the potential
    #     return pot.getEnergyGradient(x)[1]
    function_evaluate_pot = feval_pot()
    solver = solver(
        function_evaluate_pot.get_gradient,
        # function_evaluate_pot.get_energy_gradient,
        0,
        coords,
        t_bound,
        rtol=1e-3,
        atol=1e-3)
    # min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)
    converged = False
    n = 0
    if convergence_check is None:
        convergence_check = lambda g: np.linalg.norm(g) < tol

    x_ = np.full(len(coords), np.nan)
    while not converged and n < nsteps:
        xold = x_
        solver.step()
        x_ = solver.dense_output().y_old
        n += 1
        converged = convergence_check(pot.getEnergyGradient(x_)[1])
    res = Result()
    res.coords = x_
    res.nfev = function_evaluate_pot.nfev
    res.energy = pot.getEnergy(x_)
    res.rms = 0
    res.grad = 0
    res.nsteps = n
    res.success = converged
    return res
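The gradient-flow idea in isolation, on a toy quadratic E(x) = 0.5*|x|^2 whose gradient is x, so the ODE dx/dt = -x relaxes toward the minimum at the origin. This is only a sketch of the approach using scipy's Radau integrator, not pele code.

import numpy as np
from scipy.integrate import Radau

def negative_gradient(t, x):
    # -grad E for the toy quadratic E(x) = 0.5 * |x|^2
    return -x

x0 = np.array([1.0, -2.0, 0.5])
solver = Radau(negative_gradient, 0.0, x0, t_bound=1000.0, rtol=1e-3, atol=1e-3)
while solver.status == 'running' and np.linalg.norm(solver.y) > 1e-4:
    solver.step()
print(solver.y)  # close to the origin, i.e. the minimum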
Example #22
File: _NEB.py Project: spraharsh/pele
    def optimize(self, quenchRoutine=None, **kwargs):
        """
        Optimize the band

        Notes
        -----
        the potential for the NEB optimization is not Hamiltonian.  This
        means that there is no meaningful energy associated with the
        potential.  Therefore, during the optimization, we can do gradient
        following, but we can't rely on the energy for, e.g. determining
        step size, which is the default behavior for many optimizers.  This
        can be worked around by choosing a small step size and a large
        maxErise, or by using an optimizer that uses only gradients.

        scipy.lbfgs_b seems to work with NEB pretty well, but lbfgs_py and
        mylbfgs tend to fail.  If you must use one of those try, e.g.
        maxErise = 1., maxstep=0.01, tol=1e-2

        :quenchRoutine: quench algorithm to use for the optimization.

        :kwargs: extra parameters merged into the quench parameters.
        """
        if quenchRoutine is None:
            if self.quenchRoutine is None:
                quenchRoutine = mylbfgs
            else:
                quenchRoutine = self.quenchRoutine
        # combine default and passed params; passed params will overwrite the defaults
        quenchParams = dict([("nsteps", 300)] +
                            list(self.quenchParams.items()) +
                            list(kwargs.items()))

        if "iprint" in quenchParams:
            self.iprint = quenchParams["iprint"]
        if "logger" not in quenchParams:
            quenchParams["logger"] = logging.getLogger(
                "pele.connect.neb.quench")

        if self.use_minimizer_callback:
            quenchParams["events"] = [self._step]

        self.step = 0
        qres = quenchRoutine(self.active.reshape(self.active.size), self,
                             **quenchParams)
        # if isinstance(qres, tuple):  # for compatibility with old and new quenchers
        #     qres = qres[4]

        self.active[:, :] = qres.coords.reshape(self.active.shape)
        if self.copy_potential:
            for i in range(0, self.nimages):
                pot = self.potential_list[i]
                self.energies[i] = pot.getEnergy(self.coords[i, :])
        else:
            for i in range(0, self.nimages):
                self.energies[i] = self.potential.getEnergy(self.coords[i, :])

        res = Result()
        res.path = self.coords
        res.nsteps = qres.nsteps
        res.energy = self.energies
        res.rms = qres.rms
        res.success = False
        if qres.rms < quenchParams["tol"]:
            res.success = True

        return res
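A hedged usage sketch of the parameter workaround suggested in the docstring: the keyword arguments are merged into the quench parameters (see the dict(...) construction above), so the suggested settings can be passed straight to optimize. `neb` and `lbfgs_py` are assumed to exist elsewhere (an already-constructed NEB object and the pele minimizer mentioned in the docstring).

neb_result = neb.optimize(quenchRoutine=lbfgs_py,
                          maxErise=1.0,
                          maxstep=0.01,
                          tol=1e-2,
                          iprint=50)
print(neb_result.success, neb_result.nsteps)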