Example 1
def lbfgs_scipy(coords, pot, iprint=-1, tol=1e-3, nsteps=15000):
    """
    A wrapper function for the L-BFGS routine in scipy.

    .. warning::
        The scipy version of L-BFGS uses a line search based only on the energy,
        which can make the minimization stop early.  When the step size is so
        small that the energy does not change to within machine precision (times
        the parameter `factr`), the routine declares success and stops.  This
        sounds fine, but even with an analytical gradient the gradient may still
        not be converged, because near the minimum the gradient changes much more
        rapidly than the energy.  We therefore want `factr` to be as small as
        possible.  Unfortunately, if it is made too small the routine decides the
        line search is not working, declares failure, and exits.

        In short: if your tolerance is very small (< 1e-6) this routine will
        probably stop before truly reaching that tolerance, and if you reduce
        `factr` too far to compensate, L-BFGS will stop anyway but misleadingly
        declare failure.
    """
    import scipy.optimize

    res = Result()
    res.coords, res.energy, dictionary = scipy.optimize.fmin_l_bfgs_b(
        pot.getEnergyGradient,
        coords,
        iprint=iprint,
        pgtol=tol,
        maxfun=nsteps,
        factr=10.)
    res.grad = dictionary["grad"]
    res.nfev = dictionary["funcalls"]
    warnflag = dictionary['warnflag']
    # res.nsteps = dictionary['nit'] #  new in scipy version 0.12
    res.nsteps = res.nfev
    res.message = dictionary['task']
    res.success = True
    if warnflag > 0:
        print("warning: problem with quench: ", end=' ')
        res.success = False
        if warnflag == 1:
            res.message = "too many function evaluations"
        else:
            res.message = str(dictionary['task'])
        print(res.message)
    # note: if the linesearch fails the lbfgs may fail without setting warnflag.  Check
    # tolerance exactly
    if False:
        if res.success:
            maxV = np.max(np.abs(res.grad))
            if maxV > tol:
                print(
                    "warning: gradient seems too large", maxV, "tol =", tol,
                    ". This is a known, but not understood issue of scipy_lbfgs"
                )
                print(res.message)
    res.rms = res.grad.std()
    return res
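A minimal usage sketch (not from the original source): it assumes only a potential
object exposing getEnergyGradient(coords) -> (energy, gradient), which is all the
wrapper requires.  The HarmonicPotential class here is a hypothetical stand-in for
a real pele potential.

import numpy as np

class HarmonicPotential(object):
    """Hypothetical test potential: E = 0.5*|x|^2, so grad = x."""
    def getEnergyGradient(self, coords):
        coords = np.asarray(coords, dtype=float)
        return 0.5 * np.dot(coords, coords), coords

pot = HarmonicPotential()
x0 = np.random.uniform(-1.0, 1.0, 6)      # random starting coordinates
res = lbfgs_scipy(x0, pot, tol=1e-5)      # quench to the minimum
print(res.success, res.energy, res.rms)   # inspect the returned Result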
Example 2
def lbfgs_scipy(coords, pot, iprint=-1, tol=1e-3, nsteps=15000):
    """
    A wrapper function for the L-BFGS routine in scipy.

    .. warning::
        The scipy version of L-BFGS uses a line search based only on the energy,
        which can make the minimization stop early.  When the step size is so
        small that the energy does not change to within machine precision (times
        the parameter `factr`), the routine declares success and stops.  This
        sounds fine, but even with an analytical gradient the gradient may still
        not be converged, because near the minimum the gradient changes much more
        rapidly than the energy.  We therefore want `factr` to be as small as
        possible.  Unfortunately, if it is made too small the routine decides the
        line search is not working, declares failure, and exits.

        In short: if your tolerance is very small (< 1e-6) this routine will
        probably stop before truly reaching that tolerance, and if you reduce
        `factr` too far to compensate, L-BFGS will stop anyway but misleadingly
        declare failure.
    """
    if not hasattr(pot, "getEnergyGradient"):
        # for compatibility with old quenchers.
        # assume pot is a getEnergyGradient function
        pot = _getEnergyGradientWrapper(pot)
    import scipy.optimize
    res = Result()
    res.coords, res.energy, dictionary = scipy.optimize.fmin_l_bfgs_b(pot.getEnergyGradient, 
            coords, iprint=iprint, pgtol=tol, maxfun=nsteps, factr=10.)
    res.grad = dictionary["grad"]
    res.nfev = dictionary["funcalls"]
    warnflag = dictionary['warnflag']
    #res.nsteps = dictionary['nit'] #  new in scipy version 0.12
    res.nsteps = res.nfev
    res.message = dictionary['task']
    res.success = True
    if warnflag > 0:
        print "warning: problem with quench: ",
        res.success = False
        if warnflag == 1:
            res.message = "too many function evaluations"
        else:
            res.message = str(dictionary['task'])
        print(res.message)
    #note: if the linesearch fails the lbfgs may fail without setting warnflag.  Check
    #tolerance exactly
    if False:
        if res.success:
            maxV = np.max( np.abs(res.grad) )
            if maxV > tol:
                print "warning: gradient seems too large", maxV, "tol =", tol, ". This is a known, but not understood issue of scipy_lbfgs"
                print res.message
    res.rms = res.grad.std()
    return res
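This older variant (and Example 4 below) falls back to _getEnergyGradientWrapper
when passed a bare function instead of a potential object.  The helper itself is
not shown in these examples; a plausible sketch, consistent with how pot is used
in the wrappers, might be:

class _getEnergyGradientWrapper(object):
    """Assumed adapter: wraps a plain f(coords) -> (energy, gradient) callable
    so it looks like a potential object with the methods the wrappers call."""
    def __init__(self, getEnergyGradient):
        self.getEnergyGradient = getEnergyGradient

    def getEnergy(self, coords):
        return self.getEnergyGradient(coords)[0]

    def getGradient(self, coords):
        return self.getEnergyGradient(coords)[1]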
Example 3
def cg(coords, pot, iprint=-1, tol=1e-3, nsteps=5000, **kwargs):
    """
    A wrapper function for the conjugate-gradient routine in scipy.
    """
    import scipy.optimize

    ret = scipy.optimize.fmin_cg(pot.getEnergy,
                                 coords,
                                 pot.getGradient,
                                 gtol=tol,
                                 full_output=True,
                                 disp=iprint > 0,
                                 maxiter=nsteps,
                                 **kwargs)
    res = Result()
    res.coords = ret[0]
    res.nfev = ret[2]
    res.nfev += ret[3]  # calls to gradient
    res.success = True
    warnflag = ret[4]
    if warnflag > 0:
        # print "warning: problem with quench: ",
        res.success = False
        if warnflag == 1:
            res.message = "Maximum number of iterations exceeded"
        if warnflag == 2:
            print "Gradient and/or function calls not changing"
    res.energy, res.grad = pot.getEnergyGradient(res.coords)
    res.nfev += 1
    g = res.grad
    res.rms = np.linalg.norm(g) / np.sqrt(len(g))
    return res
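The final line computes the root-mean-square gradient component.  The identity
behind it is easy to verify with a short, self-contained check (illustrative
only, plain numpy):

import numpy as np

g = np.random.uniform(-1.0, 1.0, 12)
rms_a = np.linalg.norm(g) / np.sqrt(len(g))   # formula used in cg()
rms_b = np.sqrt(np.mean(g ** 2))              # explicit root mean square
assert np.allclose(rms_a, rms_b)

Note that the L-BFGS wrappers above use res.grad.std() instead, which agrees with
this only when the mean gradient component is zero.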
Example 4
def cg(coords, pot, iprint=-1, tol=1e-3, nsteps=5000, **kwargs):
    """
    A wrapper function for the conjugate-gradient routine in scipy.
    """
    if not hasattr(pot, "getEnergyGradient"):
        # for compatibility with old quenchers.
        # assume pot is a getEnergyGradient function
        pot = _getEnergyGradientWrapper(pot)
    import scipy.optimize
    ret = scipy.optimize.fmin_cg(pot.getEnergy, coords, pot.getGradient, 
                                 gtol=tol, full_output=True, disp=iprint>0, 
                                 maxiter=nsteps, **kwargs)
    res = Result()
    res.coords = ret[0]
    #e = ret[1]
    res.nfev = ret[2]
    res.nfev += ret[3] #calls to gradient
    res.success = True
    warnflag = ret[4]
    if warnflag > 0:
#        print "warning: problem with quench: ",
        res.success = False
        if warnflag == 1:
            res.message = "Maximum number of iterations exceeded"
        if warnflag == 2:
            print "Gradient and/or function calls not changing"
    res.energy, res.grad = pot.getEnergyGradient(res.coords)
    res.nfev += 1
    g = res.grad
    res.rms = np.linalg.norm(g)/np.sqrt(len(g))
    return res 
Example 5
    def run(self):
        """The main loop of the algorithm"""
        coords = np.copy(self.coords)
        res = Result()  # return object
        res.message = []

        self._compute_gradients(coords)
        iend = 0
        for i in range(self.nsteps):
            iend = i
            # get the lowest eigenvalue and eigenvector
            self.overlap = self._getLowestEigenVector(coords, i)
            overlap = self.overlap

            if self.eigenval < 0:
                self.negatives_before_check -= 1

            # determine whether everything looks OK.
            all_ok = self.eigenval < 0 or not self.check_negative
            if not all_ok:
                if i == 0:
                    # we need to accept because we haven't saved the state yet
                    # Also, demand_initial_negative_vec will stop later if needed
                    all_ok = True
            if not all_ok:
                if self.negatives_before_check > 0 and not self.demand_initial_negative_vec:
                    print "  positive before check. setting all ok"
                    all_ok = True

            # if everything is OK, then continue, else revert the step
            if all_ok:
                self._saveState(coords)
                self.reduce_step = 0
            else:
                self.npositive += 1
                if self.npositive > self.npositive_max:
                    logger.warning(
                        "positive eigenvalue found too many times. ending %s",
                        self.npositive)
                    res.message.append(
                        "positive eigenvalue found too many times %d" %
                        self.npositive)
                    break
                if self.verbosity > 2:
                    logger.info(
                        "the eigenvalue turned positive. %s %s", self.eigenval,
                        "Resetting last good values and taking smaller steps")
                coords = self._resetState()
                self.reduce_step += 1

            # step uphill along the direction of the lowest eigenvector
            coords = self._stepUphill(coords)

            # minimize the coordinates in the space perpendicular to the lowest eigenvector
            tangent_ret = self._minimizeTangentSpace(
                coords, energy=self.get_energy(), gradient=self.get_gradient())
            coords = tangent_ret.coords
            tangentrms = tangent_ret.rms

            # check if we are done and print some stuff
            # self._compute_gradients(coords) # this is unnecessary
            E = self.get_energy()
            grad = self.get_gradient()
            rms = np.linalg.norm(grad) * self.rmsnorm
            gradpar = np.dot(grad, self.eigenvec) / np.linalg.norm(
                self.eigenvec)

            if self.iprint > 0:
                if (i + 1) % self.iprint == 0:
                    ostring = "findTS: %3d E %9g rms %8g eigenvalue %9g rms perp %8g grad par %9g overlap %g" % (
                        i, E, rms, self.eigenval, tangentrms, gradpar, overlap)
                    extra = "  Evec search: %d rms %g" % (
                        self.leig_result.nfev, self.leig_result.rms)
                    extra += "  Tverse search: %d step %g" % (
                        self.tangent_result.nfev, self.tangent_move_step)
                    extra += "  Uphill step:%g" % (self.uphill_step_size, )
                    logger.info("%s %s", ostring, extra)

            if callable(self.event):
                self.event(energy=E,
                           coords=coords,
                           rms=rms,
                           eigenval=self.eigenval,
                           stepnum=i)
            if rms < self.tol:
                break
            if self.nfail >= self.nfail_max:
                logger.warning(
                    "stopping findTransitionState.  too many failures in eigenvector search %s",
                    self.nfail)
                res.message.append(
                    "too many failures in eigenvector search %d" % self.nfail)
                break

            if i == 0 and self.eigenval > 0.:
                if self.verbosity > 1:
                    logger.warning(
                        "initial eigenvalue is positive - increase NEB spring constant?"
                    )
                if self.demand_initial_negative_vec:
                    logger.warning(
                        "            aborting transition state search")
                    res.message.append("initial eigenvalue is positive %f" %
                                       self.eigenval)
                    break

        # done.  do one last eigenvector search because coords may have changed
        self._getLowestEigenVector(coords, iend)

        # print some data
        if self.verbosity > 0 or self.iprint > 0:
            logger.info("findTransitionState done: %s %s %s %s %s", iend, E,
                        rms, "eigenvalue", self.eigenval)

        success = True
        # check if results make sense
        if self.eigenval >= 0.:
            if self.verbosity > 2:
                logger.info(
                    "warning: transition state is ending with positive eigenvalue %s",
                    self.eigenval)
            success = False
        if rms > self.tol:
            if self.verbosity > 2:
                logger.info(
                    "warning: transition state search appears to have failed: rms %s",
                    rms)
            success = False
        if iend >= self.nsteps:
            res.message.append("maximum iterations reached %d" % iend)

        # update nfev with the number of calls from the transverse walker
        if self._transverse_walker is not None:
            twres = self._transverse_walker.get_result()
            self.nfev += twres.nfev

        res.coords = coords
        res.energy = E
        res.eigenval = self.eigenval
        res.eigenvec = self.eigenvec
        res.grad = grad
        res.rms = rms
        res.nsteps = iend
        res.success = success
        res.nfev = self.nfev
        return res
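The event hook is invoked once per iteration with exactly the keyword arguments
used in the call above.  A minimal sketch of a callback that records the
convergence history (the callback and the wiring are illustrative, not part of
the source):

history = []

def record_step(energy=None, coords=None, rms=None, eigenval=None, stepnum=None):
    """Illustrative event callback: store (step, energy, rms, eigenvalue)."""
    history.append((stepnum, energy, rms, eigenval))

# hypothetical wiring; the search object is assumed to expose an `event` attribute
# searcher.event = record_step
# res = searcher.run()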
Example 6
    def run(self):
        """The main loop of the algorithm"""
        coords = np.copy(self.coords)
        res = Result() #  return object
        res.message = []
        
        # if starting with positive curvature, disable negative eigenvalue check
        # this will be reenabled as soon as the eigenvector becomes negative
        negative_before_check = 10

        self._compute_gradients(coords)
        for i in range(self.nsteps):
            
            # get the lowest eigenvalue and eigenvector
            self.overlap = self._getLowestEigenVector(coords, i)
            overlap = self.overlap
            #print self.eigenval
            
            if self.eigenval < 0:
                negative_before_check -= 1
            
            # check to make sure the eigenvector is ok
            if (i == 0 or self.eigenval <= 0 or not self.check_negative or 
                (negative_before_check > 0 and not self.demand_initial_negative_vec)):
                self._saveState(coords)
                self.reduce_step = 0
            else:
                self.npositive += 1
                if self.npositive > self.npositive_max:
                    logger.warning( "positive eigenvalue found too many times. ending %s", self.npositive)
                    res.message.append( "positive eigenvalue found too many times %d" % self.npositive )
                    break
                if self.verbosity > 2:
                    logger.info("the eigenvalue turned positive. %s %s", self.eigenval, "Resetting last good values and taking smaller steps")
                coords = self._resetState()
                self.reduce_step += 1
            
            # step uphill along the direction of the lowest eigenvector
            coords = self._stepUphill(coords)

            # minimize the coordinates in the space perpendicular to the lowest eigenvector
            tangent_ret = self._minimizeTangentSpace(coords, energy=self.get_energy(), gradient=self.get_gradient())
            coords = tangent_ret.coords
            tangentrms = tangent_ret.rms


            # check if we are done and print some stuff
#            self._compute_gradients(coords) # this is unnecessary
            E = self.get_energy()
            grad = self.get_gradient()
            rms = np.linalg.norm(grad) * self.rmsnorm
            gradpar = np.dot(grad, self.eigenvec) / np.linalg.norm(self.eigenvec)
            
            if self.iprint > 0:
                if (i+1) % self.iprint == 0:
                    ostring = "findTS: %3d E %9g rms %8g eigenvalue %9g rms perp %8g grad par %9g overlap %g" % (
                                    i, E, rms, self.eigenval, tangentrms, gradpar, overlap)
                    extra = "  Evec search: %d rms %g" % (self.leig_result.nfev, self.leig_result.rms)
                    extra += "  Tverse search: %d step %g" % (self.tangent_result.nfev, 
                                                                    self.tangent_move_step)
                    extra += "  Uphill step:%g" % (self.uphill_step_size,)
                    logger.info("%s %s", ostring, extra)
            
            if callable(self.event):
                self.event(energy=E, coords=coords, rms=rms, eigenval=self.eigenval, stepnum=i)
            if rms < self.tol:
                break
            if self.nfail >= self.nfail_max:
                logger.warning("stopping findTransitionState.  too many failures in eigenvector search %s", self.nfail)
                res.message.append( "too many failures in eigenvector search %d" % self.nfail )
                break

            if i == 0 and self.eigenval > 0.:
                if self.verbosity > 1:
                    logger.warning("initial eigenvalue is positive - increase NEB spring constant?")
                if self.demand_initial_negative_vec:
                    logger.warning("            aborting transition state search")
                    res.message.append( "initial eigenvalue is positive %f" % self.eigenval )
                    break

        # done.  do one last eigenvector search because coords may have changed
        self._getLowestEigenVector(coords, i)

        # print some data
        if self.verbosity > 0 or self.iprint > 0:
            logger.info("findTransitionState done: %s %s %s %s %s", i, E, rms, "eigenvalue", self.eigenval)
    
        success = True
        # check if results make sense
        if self.eigenval >= 0.:
            if self.verbosity > 2:
                logger.info( "warning: transition state is ending with positive eigenvalue %s", self.eigenval)
            success = False
        if rms > self.tol:
            if self.verbosity > 2:
                logger.info("warning: transition state search appears to have failed: rms %s", rms)
            success = False
        if i >= self.nsteps:
            res.message.append( "maximum iterations reached %d" % i )
            
        # update nfev with the number of calls from the transverse walker
        if self._transverse_walker is not None:
            twres = self._transverse_walker.get_result()
            self.nfev += twres.nfev 

        #return results
        res.coords = coords
        res.energy = E
        res.eigenval = self.eigenval
        res.eigenvec = self.eigenvec
        res.grad = grad
        res.rms = rms
        res.nsteps = i
        res.success = success
        res.nfev = self.nfev
        return res
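A short sketch of how the returned Result might be inspected, based only on the
fields populated in run() above; the helper function is illustrative and not part
of the source:

def report(res):
    """Illustrative helper: summarize the Result returned by run()."""
    if res.success:
        print("transition state found: E = %g, eigenvalue = %g, rms = %g"
              % (res.energy, res.eigenval, res.rms))
    else:
        print("search failed after %d steps (%d function calls)"
              % (res.nsteps, res.nfev))
        for msg in res.message:
            print("  reason:", msg)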