Example #1
def SymOrth(S):
    """Symmetric orthogonalization of the real symmetric matrix S.
    This is given by Ut(1/sqrt(lambda))U, where lambda,U are the
    eigenvalues/vectors."""
    val,vec = eigh(S)
    n = vec.shape[0]
    shalf = identity(n,'d')
    for i in xrange(n):
        shalf[i,i] /= sqrt(val[i])
    X = simx(shalf,vec,'T')
    return X
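Here eigh, identity, sqrt, and simx are supplied by the surrounding module, so the snippet is not self-contained. A minimal sketch of the same Löwdin (symmetric) orthogonalization in plain NumPy, with a hypothetical function name and assuming S is symmetric positive definite, might look like this:

import numpy as np

def sym_orth_sketch(S):
    """Sketch: symmetric (Loewdin) orthogonalization, X = U diag(1/sqrt(val)) U^T."""
    val, U = np.linalg.eigh(S)                      # eigenvalues/eigenvectors of S
    return U @ np.diag(1.0 / np.sqrt(val)) @ U.T    # i.e. S**(-1/2)

# Hypothetical check: X^T S X should be (numerically) the identity.
S = np.array([[1.0, 0.2], [0.2, 1.0]])
X = sym_orth_sketch(S)
print(np.allclose(X.T @ S @ X, np.eye(2)))          # True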
Example #2
def SymOrth(S):
    """Symmetric orthogonalization of the real symmetric matrix S.
    This is given by Ut(1/sqrt(lambda))U, where lambda,U are the
    eigenvalues/vectors."""
    val, vec = eigh(S)
    n = vec.shape[0]
    shalf = identity(n, 'd')
    for i in xrange(n):
        shalf[i, i] /= sqrt(val[i])
    X = simx(shalf, vec, 'T')
    return X
Example #3
def SymOrthCutoff(S,scut=1e-5):
    """Symmetric orthogonalization of the real symmetric matrix S.
    This is given by Ut(1/sqrt(lambda))U, where lambda,U are the
    eigenvalues/vectors.

    Only eigenvectors with eigenvalues greater than a cutoff are kept.
    This approximation is useful in cases where the basis set has
    linear dependencies.
    """
    val,vec = eigh(S)
    n = vec.shape[0]
    shalf = identity(n,'d')
    for i in xrange(n):
        if val[i] > scut:
            shalf[i,i] /= sqrt(val[i])
        else:
            shalf[i,i] = 0.
    X = simx(shalf,vec,'T')
    return X
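A corresponding self-contained NumPy sketch of the cutoff variant (hypothetical name; np.maximum only guards the discarded branch against taking the square root of a non-positive eigenvalue):

import numpy as np

def sym_orth_cutoff_sketch(S, scut=1e-5):
    """Sketch: as above, but directions with eigenvalue <= scut contribute zero."""
    val, U = np.linalg.eigh(S)
    inv_sqrt = np.where(val > scut, 1.0 / np.sqrt(np.maximum(val, scut)), 0.0)
    return U @ np.diag(inv_sqrt) @ U.T

# Hypothetical usage on a nearly linearly dependent overlap matrix:
S = np.array([[1.0, 1.0 - 1e-9], [1.0 - 1e-9, 1.0]])
X = sym_orth_cutoff_sketch(S)
print(np.linalg.matrix_rank(X))    # 1: the near-singular direction is projected out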
Example #4
def SymOrthCutoff(S, scut=1e-5):
    """Symmetric orthogonalization of the real symmetric matrix S.
    This is given by Ut(1/sqrt(lambda))U, where lambda,U are the
    eigenvalues/vectors.

    Only eigenvectors with eigenvalues greater than a cutoff are kept.
    This approximation is useful in cases where the basis set has
    linear dependencies.
    """
    val, vec = eigh(S)
    n = vec.shape[0]
    shalf = identity(n, 'd')
    for i in xrange(n):
        if val[i] > scut:
            shalf[i, i] /= sqrt(val[i])
        else:
            shalf[i, i] = 0.
    X = simx(shalf, vec, 'T')
    return X
Example #5
def fminBFGS(f, x0, fprime=None, args=(), avegtol=1e-5, maxiter=None, fulloutput=0):
    """xopt = fminBFGS(f, x0, fprime=None, args=(), avegtol=1e-5,
                       maxiter=None, fulloutput=0)

    Optimize the function, f, whose gradient is given by fprime, using the
    quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS).
    See Wright and Nocedal, 'Numerical Optimization', 1999, pg. 198.
    """

    app_fprime = 0
    if fprime is None:
        app_fprime = 1

    x0 = Num.asarray(x0)
    if maxiter is None:
        maxiter = len(x0)*200
    func_calls = 0
    grad_calls = 0
    k = 0
    N = len(x0)
    gtol = N*avegtol
    #I = Numeric.eye(N)
    I = identity(N,'d')
    Hk = I

    if app_fprime:
        gfk = apply(approx_fprime,(x0,f)+args)
        func_calls = func_calls + len(x0) + 1
    else:
        gfk = apply(fprime,(x0,)+args)
        grad_calls = grad_calls + 1
    xk = x0
    sk = [2*gtol]
    while (Num.add.reduce(abs(gfk)) > gtol) and (k < maxiter):
        #print "BFGS Convergence: ",Num.add.reduce(abs(gfk)),gtol,k
        pk = -Num.dot(Hk,gfk)
        alpha_k, fc, gc = line_search_BFGS(f,xk,pk,gfk,args)
        func_calls = func_calls + fc
        xkp1 = xk + alpha_k * pk
        sk = xkp1 - xk
        xk = xkp1
        if app_fprime:
            gfkp1 = apply(approx_fprime,(xkp1,f)+args)
            func_calls = func_calls + gc + len(x0) + 1
        else:
            gfkp1 = apply(fprime,(xkp1,)+args)
            grad_calls = grad_calls + gc + 1

        yk = gfkp1 - gfk
        k = k + 1

        rhok = 1 / Num.dot(yk,sk)
        A1 = I - sk[:,NewAxis] * yk[NewAxis,:] * rhok
        A2 = I - yk[:,NewAxis] * sk[NewAxis,:] * rhok
        Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,NewAxis] *\
             sk[NewAxis,:]
        gfk = gfkp1


    fval = apply(f,(xk,)+args)
    if k >= maxiter:
        warnflag = 1
        logger.info("Warning: Maximum number of iterations has been exceeded")
        logger.info("         Current function value: %f" % fval)
        logger.info("         Iterations: %d" % k)
        logger.info("         Function evaluations: %d" % func_calls)
        logger.info("         Gradient evaluations: %d" % grad_calls)
    else:
        warnflag = 0
        logger.info("Optimization terminated successfully.")
        logger.info("         Current function value: %f" % fval)
        logger.info("         Iterations: %d" % k)
        logger.info("         Function evaluations: %d" % func_calls)
        logger.info("         Gradient evaluations: %d" % grad_calls)

    if fulloutput:
        return xk, fval, func_calls, grad_calls, warnflag
    else:        
        return xk
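The loop above applies the standard BFGS inverse-Hessian update H <- (I - rho*s*y^T) H (I - rho*y*s^T) + rho*s*s^T with rho = 1/(y^T s); Num and NewAxis refer to the old Numeric package, and approx_fprime / line_search_BFGS to companion helpers, so the snippet is not runnable on its own. A minimal, self-contained sketch of the same iteration in modern NumPy, with hypothetical names and a simple backtracking line search standing in for line_search_BFGS:

import numpy as np

def bfgs_sketch(f, grad, x0, gtol=1e-5, maxiter=200):
    """Sketch: BFGS with a basic Armijo backtracking line search."""
    x = np.asarray(x0, dtype=float)
    n = len(x)
    H = np.eye(n)                               # initial inverse-Hessian approximation
    g = grad(x)
    for _ in range(maxiter):
        if np.sum(np.abs(g)) <= gtol:           # same convergence test as above
            break
        p = -H @ g                              # quasi-Newton search direction
        alpha, fx = 1.0, f(x)
        while f(x + alpha * p) > fx + 1e-4 * alpha * (g @ p):
            alpha *= 0.5                        # backtrack until sufficient decrease
        x_new = x + alpha * p
        g_new = grad(x_new)
        s, y = x_new - x, g_new - g
        rho = 1.0 / (y @ s)
        I = np.eye(n)
        # Rank-two update of the inverse Hessian, as in the loop above.
        H = (I - rho * np.outer(s, y)) @ H @ (I - rho * np.outer(y, s)) \
            + rho * np.outer(s, s)
        x, g = x_new, g_new
    return x

# Hypothetical usage on a simple quadratic with minimum at (1, 2):
f = lambda x: (x[0] - 1.0) ** 2 + 10.0 * (x[1] - 2.0) ** 2
grad = lambda x: np.array([2.0 * (x[0] - 1.0), 20.0 * (x[1] - 2.0)])
print(bfgs_sketch(f, grad, [0.0, 0.0]))         # approximately [1. 2.]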
Example #6
def fminBFGS(f,
             x0,
             fprime=None,
             args=(),
             avegtol=1e-5,
             maxiter=None,
             fulloutput=0):
    """xopt = fminBFGS(f, x0, fprime=None, args=(), avegtol=1e-5,
                       maxiter=None, fulloutput=0)

    Optimize the function, f, whose gradient is given by fprime, using the
    quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS).
    See Wright and Nocedal, 'Numerical Optimization', 1999, pg. 198.
    """

    app_fprime = 0
    if fprime is None:
        app_fprime = 1

    x0 = Num.asarray(x0)
    if maxiter is None:
        maxiter = len(x0) * 200
    func_calls = 0
    grad_calls = 0
    k = 0
    N = len(x0)
    gtol = N * avegtol
    #I = MLab.eye(N)
    I = identity(N, 'd')
    Hk = I

    if app_fprime:
        gfk = apply(approx_fprime, (x0, f) + args)
        func_calls = func_calls + len(x0) + 1
    else:
        gfk = apply(fprime, (x0, ) + args)
        grad_calls = grad_calls + 1
    xk = x0
    sk = [2 * gtol]
    while (Num.add.reduce(abs(gfk)) > gtol) and (k < maxiter):
        #print "BFGS Convergence: ",Num.add.reduce(abs(gfk)),gtol,k
        pk = -Num.dot(Hk, gfk)
        alpha_k, fc, gc = line_search_BFGS(f, xk, pk, gfk, args)
        func_calls = func_calls + fc
        xkp1 = xk + alpha_k * pk
        sk = xkp1 - xk
        xk = xkp1
        if app_fprime:
            gfkp1 = apply(approx_fprime, (xkp1, f) + args)
            func_calls = func_calls + gc + len(x0) + 1
        else:
            gfkp1 = apply(fprime, (xkp1, ) + args)
            grad_calls = grad_calls + gc + 1

        yk = gfkp1 - gfk
        k = k + 1

        rhok = 1 / Num.dot(yk, sk)
        A1 = I - sk[:, NewAxis] * yk[NewAxis, :] * rhok
        A2 = I - yk[:, NewAxis] * sk[NewAxis, :] * rhok
        Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,NewAxis] *\
             sk[NewAxis,:]
        gfk = gfkp1

    fval = apply(f, (xk, ) + args)
    if k >= maxiter:
        warnflag = 1
        logger.info("Warning: Maximum number of iterations has been exceeded")
        logger.info("         Current function value: %f" % fval)
        logger.info("         Iterations: %d" % k)
        logger.info("         Function evaluations: %d" % func_calls)
        logger.info("         Gradient evaluations: %d" % grad_calls)
    else:
        warnflag = 0
        logger.info("Optimization terminated successfully.")
        logger.info("         Current function value: %f" % fval)
        logger.info("         Iterations: %d" % k)
        logger.info("         Function evaluations: %d" % func_calls)
        logger.info("         Gradient evaluations: %d" % grad_calls)

    if fulloutput:
        return xk, fval, func_calls, grad_calls, warnflag
    else:
        return xk