Code Example #1
File: bfgslinesearch.py  Project: lqcata/ase
    def step(self, f):
        atoms = self.atoms
        from ase.neb import NEB
        # BFGSLineSearch does not support NEB objects (Example #2 raises
        # a TypeError with an explanatory message instead).
        assert not isinstance(atoms, NEB)
        r = atoms.get_positions()
        r = r.reshape(-1)
        g = -f.reshape(-1) / self.alpha
        p0 = self.p
        self.update(r, g, self.r0, self.g0, p0)
        #o,v = np.linalg.eigh(self.B)
        e = self.func(r)

        self.p = -np.dot(self.H, g)
        p_size = np.sqrt((self.p**2).sum())
        if self.nsteps != 0:
            p0_size = np.sqrt((p0**2).sum())
            delta_p = self.p / p_size + p0 / p0_size  # computed but unused here
        if p_size <= np.sqrt(len(atoms) * 1e-10):
            self.p /= (p_size / np.sqrt(len(atoms) * 1e-10))
        ls = LineSearch()
        self.alpha_k, e, self.e0, self.no_update = \
           ls._line_search(self.func, self.fprime, r, self.p, g, e, self.e0,
                           maxstep=self.maxstep, c1=self.c1,
                           c2=self.c2, stpmax=self.stpmax)
        if self.alpha_k is None:
            raise RuntimeError("LineSearch failed!")

        dr = self.alpha_k * self.p
        atoms.set_positions((r + dr).reshape(len(atoms), -1))
        self.r0 = r
        self.g0 = g
        self.dump((self.r0, self.g0, self.e0, self.task, self.H))
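A minimal usage sketch for context: the step() method above is normally driven by the optimizer's run() loop rather than called directly. This assumes a current ASE installation; the EMT calculator and the water molecule are illustrative choices only, not part of the project code above.

from ase.build import molecule
from ase.calculators.emt import EMT
from ase.optimize import BFGSLineSearch

atoms = molecule('H2O')
atoms.calc = EMT()           # cheap calculator, for illustration only
opt = BFGSLineSearch(atoms)
opt.run(fmax=0.05)           # run() calls step() once per iteration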
Code Example #2
    def step(self, f):
        atoms = self.atoms
        from ase.neb import NEB
        if isinstance(atoms, NEB):
            raise TypeError('NEB calculations cannot use the BFGSLineSearch'
                            ' optimizer. Use BFGS or another optimizer.')
        r = atoms.get_positions()
        r = r.reshape(-1)
        g = -f.reshape(-1) / self.alpha
        p0 = self.p
        self.update(r, g, self.r0, self.g0, p0)
        #o,v = np.linalg.eigh(self.B)
        e = self.func(r)

        self.p = -np.dot(self.H, g)
        p_size = np.sqrt((self.p**2).sum())
        if self.nsteps != 0:
            p0_size = np.sqrt((p0**2).sum())
            delta_p = self.p / p_size + p0 / p0_size
        if p_size <= np.sqrt(len(atoms) * 1e-10):
            self.p /= (p_size / np.sqrt(len(atoms) * 1e-10))
        ls = LineSearch()
        self.alpha_k, e, self.e0, self.no_update = \
           ls._line_search(self.func, self.fprime, r, self.p, g, e, self.e0,
                           maxstep=self.maxstep, c1=self.c1,
                           c2=self.c2, stpmax=self.stpmax)
        if self.alpha_k is None:
            raise RuntimeError("LineSearch failed!")

        dr = self.alpha_k * self.p
        atoms.set_positions((r + dr).reshape(len(atoms), -1))
        self.r0 = r
        self.g0 = g
        self.dump((self.r0, self.g0, self.e0, self.task, self.H))
Code Example #3
File: bfgslinesearch.py  Project: mattaadams/ase
    def step(self, f=None):
        atoms = self.atoms

        if f is None:
            f = atoms.get_forces()

        from ase.neb import NEB
        if isinstance(atoms, NEB):
            raise TypeError('NEB calculations cannot use the BFGSLineSearch'
                            ' optimizer. Use BFGS or another optimizer.')
        r = atoms.get_positions()
        r = r.reshape(-1)
        g = -f.reshape(-1) / self.alpha
        p0 = self.p
        self.update(r, g, self.r0, self.g0, p0)
        # o,v = np.linalg.eigh(self.B)
        e = self.func(r)

        self.p = -np.dot(self.H, g)
        p_size = np.sqrt((self.p**2).sum())
        if p_size <= np.sqrt(len(atoms) * 1e-10):
            self.p /= (p_size / np.sqrt(len(atoms)*1e-10))
        ls = LineSearch()
        self.alpha_k, e, self.e0, self.no_update = \
            ls._line_search(self.func, self.fprime, r, self.p, g, e, self.e0,
                            maxstep=self.maxstep, c1=self.c1,
                            c2=self.c2, stpmax=self.stpmax)
        if self.alpha_k is None:
            raise RuntimeError("LineSearch failed!")

        dr = self.alpha_k * self.p
        atoms.set_positions((r + dr).reshape(len(atoms), -1))
        self.r0 = r
        self.g0 = g
        self.dump((self.r0, self.g0, self.e0, self.task, self.H))
Code Example #4
File: lbfgs.py  Project: slabanja/ase
    def line_search(self, r, g, e):
        self.p = self.p.ravel()
        p_size = np.sqrt((self.p**2).sum())
        if p_size <= np.sqrt(len(self.atoms) * 1e-10):
            self.p /= (p_size / np.sqrt(len(self.atoms) * 1e-10))
        g = g.ravel()
        r = r.ravel()
        ls = LineSearch()
        self.alpha_k, e, self.e0, self.no_update = \
            ls._line_search(self.func, self.fprime, r, self.p, g, e, self.e0,
                            maxstep=self.maxstep, c1=.23,
                            c2=.46, stpmax=50.)
Code Example #5
    def line_search(self, r, g, e):
        self.p = self.p.ravel()
        p_size = np.sqrt((self.p**2).sum())
        if p_size <= np.sqrt(len(self.atoms) * 1e-10):
            self.p /= (p_size / np.sqrt(len(self.atoms) * 1e-10))
        g = g.ravel()
        r = r.ravel()
        ls = LineSearch()
        self.alpha_k, e, self.e0, self.no_update = \
            ls._line_search(self.func, self.fprime, r, self.p, g, e, self.e0,
                            maxstep=self.maxstep, c1=.23,
                            c2=.46, stpmax=50.)
        if self.alpha_k is None:
            raise RuntimeError('LineSearch failed!')
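The line_search() helper above is likewise driven internally, by the L-BFGS optimizer's step() method. A minimal sketch of the public entry point, again assuming a current ASE installation with EMT as a stand-in calculator:

from ase.build import bulk
from ase.calculators.emt import EMT
from ase.optimize import LBFGSLineSearch

atoms = bulk('Cu', cubic=True)
atoms.rattle(0.05)            # perturb positions so relaxation has work to do
atoms.calc = EMT()
opt = LBFGSLineSearch(atoms)  # LBFGS variant whose step() calls line_search()
opt.run(fmax=0.05)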
Code Example #6
File: lbfgs.py  Project: wes-amat/ase
    def line_search(self, r, g, e, previously_reset_hessian):
        self.p = self.p.ravel()
        p_size = np.sqrt((self.p**2).sum())
        if p_size <= np.sqrt(len(self.atoms) * 1e-10):
            self.p /= (p_size / np.sqrt(len(self.atoms) * 1e-10))
        g = g.ravel()
        r = r.ravel()

        if self.use_armijo:
            try:
                # CO: modified call to ls.run
                # TODO: pass also the old slope to the linesearch
                #    so that the RumPath can extract a better starting guess?
                #    alternatively: we can adjust the rotation_factors
                #    out using some extrapolation tricks?
                ls = LineSearchArmijo(self.func, c1=self.c1, tol=1e-14)
                step, func_val, no_update = ls.run(
                    r,
                    self.p,
                    a_min=self.a_min,
                    func_start=e,
                    func_prime_start=g,
                    func_old=self.e0,
                    rigid_units=self.rigid_units,
                    rotation_factors=self.rotation_factors,
                    maxstep=self.maxstep)
                self.e0 = e
                self.e1 = func_val
                self.alpha_k = step
            except (ValueError, RuntimeError):
                if not previously_reset_hessian:
                    warnings.warn(
                        'Armijo linesearch failed, resetting Hessian and '
                        'trying again')
                    self.reset_hessian()
                    self.alpha_k = 0.0
                else:
                    raise RuntimeError(
                        'Armijo linesearch failed after reset of Hessian, '
                        'aborting')

        else:
            ls = LineSearch()
            self.alpha_k, e, self.e0, self.no_update = \
                ls._line_search(self.func, self.fprime, r, self.p, g,
                                e, self.e0, stpmin=self.a_min,
                                maxstep=self.maxstep, c1=self.c1,
                                c2=self.c2, stpmax=50.)
            self.e1 = e
            if self.alpha_k is None:
                raise RuntimeError('Wolfe line search failed!')
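The use_armijo branch above comes from ASE's preconditioned L-BFGS optimizer. A hedged sketch of how that branch is typically selected, assuming the current ase.optimize.precon module (EMT and the aluminium cell are illustrative only):

from ase.build import bulk
from ase.calculators.emt import EMT
from ase.optimize.precon import PreconLBFGS

atoms = bulk('Al', cubic=True) * (2, 2, 2)
atoms.rattle(0.05)
atoms.calc = EMT()
opt = PreconLBFGS(atoms, use_armijo=True)  # takes the Armijo branch above
opt.run(fmax=0.05)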
Code Example #7
File: fmin_bfgs.py  Project: JConwayAWT/PGSS14CC
def fmin_bfgs(
    f,
    x0,
    fprime=None,
    args=(),
    gtol=1e-5,
    norm=Inf,
    epsilon=_epsilon,
    maxiter=None,
    full_output=0,
    disp=1,
    retall=0,
    callback=None,
    maxstep=0.2,
):
    """Minimize a function using the BFGS algorithm.

    Parameters:

      f : callable f(x,*args)
          Objective function to be minimized.
      x0 : ndarray
          Initial guess.
      fprime : callable f'(x,*args)
          Gradient of f.
      args : tuple
          Extra arguments passed to f and fprime.
      gtol : float
          Gradient norm must be less than gtol before successful termination.
      norm : float
          Order of norm (Inf is max, -Inf is min)
      epsilon : int or ndarray
          If fprime is approximated, use this value for the step size.
      callback : callable
          An optional user-supplied function to call after each
          iteration.  Called as callback(xk), where xk is the
          current parameter vector.

    Returns: (xopt, {fopt, gopt, Bopt, func_calls, grad_calls, warnflag}, <allvecs>)

        xopt : ndarray
            Parameters which minimize f, i.e. f(xopt) == fopt.
        fopt : float
            Minimum value.
        gopt : ndarray
            Value of gradient at minimum, f'(xopt), which should be near 0.
        Bopt : ndarray
            Value of 1/f''(xopt), i.e. the inverse hessian matrix.
        func_calls : int
            Number of function_calls made.
        grad_calls : int
            Number of gradient calls made.
        warnflag : integer
            1 : Maximum number of iterations exceeded.
            2 : Gradient and/or function calls not changing.
        allvecs  :  list
            Results at each iteration.  Only returned if retall is True.

    *Other Parameters*:
        maxiter : int
            Maximum number of iterations to perform.
        full_output : bool
            If True, return fopt, func_calls, grad_calls, and warnflag
            in addition to xopt.
        disp : bool
            Print convergence message if True.
        retall : bool
            Return a list of results at each iteration if True.

    Notes:

        Optimize the function, f, whose gradient is given by fprime,
        using the quasi-Newton method of Broyden, Fletcher, Goldfarb,
        and Shanno (BFGS). See Wright and Nocedal, 'Numerical
        Optimization', 1999, p. 198.

    *See Also*:

      scikits.openopt : SciKit which offers a unified syntax to call
                        this and other solvers.

    """
    x0 = asarray(x0).squeeze()
    if x0.ndim == 0:
        x0.shape = (1,)
    if maxiter is None:
        maxiter = len(x0) * 200
    func_calls, f = wrap_function(f, args)
    if fprime is None:
        grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon))
    else:
        grad_calls, myfprime = wrap_function(fprime, args)
    gfk = myfprime(x0)
    k = 0
    N = len(x0)
    I = numpy.eye(N, dtype=int)
    Hk = I
    old_fval = f(x0)
    old_old_fval = old_fval + 5000
    xk = x0
    if retall:
        allvecs = [x0]
    sk = [2 * gtol]
    warnflag = 0
    gnorm = vecnorm(gfk, ord=norm)
    while (gnorm > gtol) and (k < maxiter):
        pk = -numpy.dot(Hk, gfk)
        ls = LineSearch()
        alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = ls._line_search(
            f, myfprime, xk, pk, gfk, old_fval, old_old_fval, maxstep=maxstep
        )
        if alpha_k is None:  # line search failed; try a different one.
            alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = line_search(
                f, myfprime, xk, pk, gfk, old_fval, old_old_fval
            )
            if alpha_k is None:
                # This line search also failed to find a better solution.
                warnflag = 2
                break
        xkp1 = xk + alpha_k * pk
        if retall:
            allvecs.append(xkp1)
        sk = xkp1 - xk
        xk = xkp1
        if gfkp1 is None:
            gfkp1 = myfprime(xkp1)

        yk = gfkp1 - gfk
        gfk = gfkp1
        if callback is not None:
            callback(xk)
        k += 1
        gnorm = vecnorm(gfk, ord=norm)
        if gnorm <= gtol:
            break

        try:  # this was handled in Numeric; keep it for extra safety
            rhok = 1.0 / (numpy.dot(yk, sk))
        except ZeroDivisionError:
            rhok = 1000.0
            print("Divide-by-zero encountered: rhok assumed large")
        if isinf(rhok):  # this is a patch for numpy
            rhok = 1000.0
            print("Divide-by-zero encountered: rhok assumed large")
        A1 = I - sk[:, numpy.newaxis] * yk[numpy.newaxis, :] * rhok
        A2 = I - yk[:, numpy.newaxis] * sk[numpy.newaxis, :] * rhok
        Hk = numpy.dot(A1, numpy.dot(Hk, A2)) + rhok * sk[:, numpy.newaxis] * sk[numpy.newaxis, :]

    if disp or full_output:
        fval = old_fval
    if warnflag == 2:
        if disp:
            print("Warning: Desired error not necessarily achieved "
                  "due to precision loss")
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % k)
            print("         Function evaluations: %d" % func_calls[0])
            print("         Gradient evaluations: %d" % grad_calls[0])

    elif k >= maxiter:
        warnflag = 1
        if disp:
            print("Warning: Maximum number of iterations has been exceeded")
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % k)
            print("         Function evaluations: %d" % func_calls[0])
            print("         Gradient evaluations: %d" % grad_calls[0])
    else:
        if disp:
            print("Optimization terminated successfully.")
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % k)
            print("         Function evaluations: %d" % func_calls[0])
            print("         Gradient evaluations: %d" % grad_calls[0])

    if full_output:
        retlist = xk, fval, gfk, Hk, func_calls[0], grad_calls[0], warnflag
        if retall:
            retlist += (allvecs,)
    else:
        retlist = xk
        if retall:
            retlist = (xk, allvecs)

    return retlist
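The heart of the loop above is the standard BFGS update of the inverse Hessian, H_{k+1} = (I - rho s y^T) H_k (I - rho y s^T) + rho s s^T with rho = 1/(y^T s), computed via A1 and A2. A self-contained NumPy sketch on a toy quadratic isolates that update; the exact line search used here is valid only for quadratics and stands in for the Wolfe search:

import numpy as np

# Minimize f(x) = 0.5 x^T A x - b^T x, whose gradient is A x - b.
A = np.array([[3.0, 1.0],
              [1.0, 2.0]])
b = np.array([1.0, 1.0])

x = np.zeros(2)
H = np.eye(2)                        # initial inverse-Hessian guess, as Hk = I
g = A @ x - b
for k in range(20):
    p = -H @ g                       # search direction, as pk above
    alpha = -(g @ p) / (p @ A @ p)   # exact minimizer along p (quadratic only)
    s = alpha * p                    # sk
    x = x + s
    g_new = A @ x - b
    y = g_new - g                    # yk
    g = g_new
    if np.linalg.norm(g) <= 1e-10:
        break
    rho = 1.0 / (y @ s)
    I2 = np.eye(2)
    # The A1/A2 update from the loop above, written with outer products:
    H = (I2 - rho * np.outer(s, y)) @ H @ (I2 - rho * np.outer(y, s)) \
        + rho * np.outer(s, s)

# x now matches np.linalg.solve(A, b) to the tolerance above.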
Code Example #8
File: fmin_bfgs.py  Project: essil1/ase-laser
def fmin_bfgs(f,
              x0,
              fprime=None,
              args=(),
              gtol=1e-5,
              norm=Inf,
              epsilon=_epsilon,
              maxiter=None,
              full_output=0,
              disp=1,
              retall=0,
              callback=None,
              maxstep=0.2):
    """Minimize a function using the BFGS algorithm.

    Parameters:

      f : callable f(x,*args)
          Objective function to be minimized.
      x0 : ndarray
          Initial guess.
      fprime : callable f'(x,*args)
          Gradient of f.
      args : tuple
          Extra arguments passed to f and fprime.
      gtol : float
          Gradient norm must be less than gtol before successful termination.
      norm : float
          Order of norm (Inf is max, -Inf is min)
      epsilon : int or ndarray
          If fprime is approximated, use this value for the step size.
      callback : callable
          An optional user-supplied function to call after each
          iteration.  Called as callback(xk), where xk is the
          current parameter vector.

    Returns: (xopt, {fopt, gopt, Bopt, func_calls, grad_calls, warnflag}, <allvecs>)

        xopt : ndarray
            Parameters which minimize f, i.e. f(xopt) == fopt.
        fopt : float
            Minimum value.
        gopt : ndarray
            Value of gradient at minimum, f'(xopt), which should be near 0.
        Bopt : ndarray
            Value of 1/f''(xopt), i.e. the inverse hessian matrix.
        func_calls : int
            Number of function_calls made.
        grad_calls : int
            Number of gradient calls made.
        warnflag : integer
            1 : Maximum number of iterations exceeded.
            2 : Gradient and/or function calls not changing.
        allvecs  :  list
            Results at each iteration.  Only returned if retall is True.

    *Other Parameters*:
        maxiter : int
            Maximum number of iterations to perform.
        full_output : bool
            If True, return fopt, func_calls, grad_calls, and warnflag
            in addition to xopt.
        disp : bool
            Print convergence message if True.
        retall : bool
            Return a list of results at each iteration if True.

    Notes:

        Optimize the function, f, whose gradient is given by fprime,
        using the quasi-Newton method of Broyden, Fletcher, Goldfarb,
        and Shanno (BFGS). See Wright and Nocedal, 'Numerical
        Optimization', 1999, p. 198.

    *See Also*:

      scikits.openopt : SciKit which offers a unified syntax to call
                        this and other solvers.

    """
    x0 = asarray(x0).squeeze()
    if x0.ndim == 0:
        x0.shape = (1, )
    if maxiter is None:
        maxiter = len(x0) * 200
    func_calls, f = wrap_function(f, args)
    if fprime is None:
        grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon))
    else:
        grad_calls, myfprime = wrap_function(fprime, args)
    gfk = myfprime(x0)
    k = 0
    N = len(x0)
    I = numpy.eye(N, dtype=int)
    Hk = I
    old_fval = f(x0)
    old_old_fval = old_fval + 5000
    xk = x0
    if retall:
        allvecs = [x0]
    sk = [2 * gtol]
    warnflag = 0
    gnorm = vecnorm(gfk, ord=norm)
    while (gnorm > gtol) and (k < maxiter):
        pk = -numpy.dot(Hk, gfk)
        ls = LineSearch()
        alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
            ls._line_search(f, myfprime, xk, pk, gfk,
                            old_fval, old_old_fval, maxstep=maxstep)
        if alpha_k is None:  # line search failed; try a different one.
            alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
                line_search(f, myfprime, xk, pk, gfk,
                            old_fval, old_old_fval)
            if alpha_k is None:
                # This line search also failed to find a better solution.
                warnflag = 2
                break
        xkp1 = xk + alpha_k * pk
        if retall:
            allvecs.append(xkp1)
        sk = xkp1 - xk
        xk = xkp1
        if gfkp1 is None:
            gfkp1 = myfprime(xkp1)

        yk = gfkp1 - gfk
        gfk = gfkp1
        if callback is not None:
            callback(xk)
        k += 1
        gnorm = vecnorm(gfk, ord=norm)
        if gnorm <= gtol:
            break

        try:  # this was handled in Numeric; keep it for extra safety
            rhok = 1.0 / (numpy.dot(yk, sk))
        except ZeroDivisionError:
            rhok = 1000.0
            print("Divide-by-zero encountered: rhok assumed large")
        if isinf(rhok):  # this is a patch for numpy
            rhok = 1000.0
            print("Divide-by-zero encountered: rhok assumed large")
        A1 = I - sk[:, numpy.newaxis] * yk[numpy.newaxis, :] * rhok
        A2 = I - yk[:, numpy.newaxis] * sk[numpy.newaxis, :] * rhok
        Hk = numpy.dot(A1, numpy.dot(Hk, A2)) + \
            rhok * sk[:, numpy.newaxis] * sk[numpy.newaxis, :]

    if disp or full_output:
        fval = old_fval
    if warnflag == 2:
        if disp:
            print("Warning: Desired error not necessarily achieved" \
                  "due to precision loss")
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % k)
            print("         Function evaluations: %d" % func_calls[0])
            print("         Gradient evaluations: %d" % grad_calls[0])

    elif k >= maxiter:
        warnflag = 1
        if disp:
            print("Warning: Maximum number of iterations has been exceeded")
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % k)
            print("         Function evaluations: %d" % func_calls[0])
            print("         Gradient evaluations: %d" % grad_calls[0])
    else:
        if disp:
            print("Optimization terminated successfully.")
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % k)
            print("         Function evaluations: %d" % func_calls[0])
            print("         Gradient evaluations: %d" % grad_calls[0])

    if full_output:
        retlist = xk, fval, gfk, Hk, func_calls[0], grad_calls[0], warnflag
        if retall:
            retlist += (allvecs, )
    else:
        retlist = xk
        if retall:
            retlist = (xk, allvecs)

    return retlist