Example #1
    def _diagonalize(self):
        if self.sparse is not None:
            from MMTK_sparseev import sparseMatrixEV
            eigenvalues, eigenvectors = sparseMatrixEV(self.fc, self.nmodes)
            self.array = eigenvectors[:self.nmodes]
            return eigenvalues

        # Calculate eigenvalues and eigenvectors of self.array
        if symeig is not None:
            _symmetrize(self.array)
            ev, modes = symeig(self.array, overwrite=True)
            self.array = N.transpose(modes)
        elif dsyevd is None:
            ev, modes = Heigenvectors(self.array)
            ev = ev.real
            modes = modes.real
            self.array = modes
        else:
            ev = N.zeros((self.nmodes,), N.Float)
            work = N.zeros((1,), N.Float)
            iwork = N.zeros((1,), int_type)
            # Workspace query: lwork = liwork = -1 makes dsyevd report the
            # optimal workspace sizes in work[0] and iwork[0].
            results = dsyevd('V', 'L', self.nmodes, self.array, self.nmodes,
                             ev, work, -1, iwork, -1, 0)
            lwork = int(work[0])
            liwork = iwork[0]
            work = N.zeros((lwork,), N.Float)
            iwork = N.zeros((liwork,), int_type)
            # Actual run: eigenvalues are returned in ev (ascending order);
            # the eigenvectors are written back into self.array.
            results = dsyevd('V', 'L', self.nmodes, self.array, self.nmodes,
                             ev, work, lwork, iwork, liwork, 0)
            if results['info'] > 0:
                raise ValueError('Eigenvalue calculation did not converge')

        if self.basis is not None:
            self.array = N.dot(self.array, self.basis)
        return ev
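
For reference, the dense branch above amounts to a symmetric eigendecomposition followed by an optional change of basis. A minimal sketch of the same step with plain NumPy (not MMTK code; the function name is illustrative):

import numpy as np

def diagonalize_dense(array, basis=None):
    # Symmetric eigendecomposition: eigenvalues come back in ascending
    # order, eigenvectors as the columns of `modes`.
    ev, modes = np.linalg.eigh(array)
    modes = modes.T                    # store the modes as rows, as MMTK does
    if basis is not None:
        modes = np.dot(modes, basis)   # express the modes in the reduced basis
    return ev, modes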
Example #2
    tt = time.clock()
    (LorU, lower) = linalg.cho_factor(A, lower=0, overwrite_a=0)
    eigs, vecs = lobpcg.lobpcg(X,
                               A,
                               B,
                               operatorT=precond,
                               residualTolerance=1e-4,
                               maxIterations=40)
    data1.append(time.clock() - tt)
    eigs = sort(eigs)
    print
    print 'Results by LOBPCG'
    print
    print n, eigs

    tt = time.clock()
    w, v = symeig(A, B, range=(1, m))
    data2.append(time.clock() - tt)
    print
    print 'Results by symeig'
    print
    print n, w

xlabel(r'Size $n$')
ylabel(r'Elapsed time $t$')
plot(N, data1, label='LOBPCG')
plot(N, data2, label='SYMEIG')
legend()
show()
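
The snippet assumes a preconditioner precond built from the Cholesky factor returned by cho_factor; its definition is not shown above, but a plausible version (an assumption, not the project's actual code) is:

from scipy.linalg import cho_solve

def precond(x):
    # Apply A^-1 to a block of vectors via the Cholesky factor (LorU, lower)
    # computed above; this is what gets passed as operatorT to lobpcg.
    return cho_solve((LorU, lower), x)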
Example #3
def test2(n):
  x = arange(1,n+1)
  B = diag(1./x)
  y = arange(n-1,0,-1)
  z = arange(2*n-1,0,-2)
  A = diag(z)-diag(y,-1)-diag(y,1)
  return A,B

n = 100 # Dimension

A,B = test1(n) # Fixed-free elastic rod (test1 is defined elsewhere in the script)
A,B = test2(n) # Mikota pair: a nice test, since the eigenvalues of (A, B) are the squares of the integers 1, 2, ... (this overwrites the elastic-rod choice)

m = 20
V = rand(n,m)
X = linalg.orth(V)

eigs,vecs = lobpcg.lobpcg(X,A,B)
eigs = sort(eigs)

w,v=symeig(A,B)


plot(arange(0,len(w[:m])),w[:m],'bx',label='Results by symeig')
plot(arange(0,len(eigs)),eigs,'r+',label='Results by lobpcg')
legend()
xlabel(r'Eigenvalue $i$')
ylabel(r'$\lambda_i$')
show()
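
For comparison, a self-contained sketch of the same Mikota-pair experiment against the current SciPy API (scipy.sparse.linalg.lobpcg takes the matrix first and the block of initial vectors second). This is an illustration, not code from the project above; names such as mikota_pair are mine.

import numpy as np
from scipy.linalg import eigh, orth
from scipy.sparse.linalg import lobpcg

def mikota_pair(n):
    # Same construction as test2 above; the eigenvalues of (A, B) are 1, 4, 9, ...
    x = np.arange(1, n + 1)
    y = np.arange(n - 1, 0, -1)
    z = np.arange(2 * n - 1, 0, -2)
    A = np.diag(z) - np.diag(y, -1) - np.diag(y, 1)
    B = np.diag(1.0 / x)
    return A, B

n, m = 100, 20
A, B = mikota_pair(n)
X = orth(np.random.rand(n, m))

# largest=False requests the smallest eigenvalues, matching the plot above.
eigs, vecs = lobpcg(A, X, B=B, tol=1e-8, maxiter=200, largest=False)
w = eigh(A, B, eigvals_only=True)[:m]
print(np.sort(eigs))
print(w)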
Example #4
    print('******', n)
    A,B = test(n)  # Mikota pair
    X = rand(n,m)
    X = linalg.orth(X)

    tt = time.clock()
    (LorU, lower) = linalg.cho_factor(A, lower=0, overwrite_a=0)
    eigs,vecs = lobpcg.lobpcg(X,A,B,operatorT=precond,
                              residualTolerance=1e-4, maxIterations=40)
    data1.append(time.clock()-tt)
    eigs = sort(eigs)
    print()
    print('Results by LOBPCG')
    print()
    print(n,eigs)

    tt = time.clock()
    w,v = symeig(A,B,range=(1,m))
    data2.append(time.clock()-tt)
    print()
    print('Results by symeig')
    print()
    print(n, w)

xlabel(r'Size $n$')
ylabel(r'Elapsed time $t$')
plot(N,data1,label='LOBPCG')
plot(N,data2,label='SYMEIG')
legend()
show()
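
A modern equivalent of this timing loop, for orientation only: time.clock is gone in Python 3.8+, and the operatorT-style lobpcg signature used above predates scipy.sparse.linalg.lobpcg, where the preconditioner is passed as M. The sketch reuses the mikota_pair helper shown after Example #3 and is an illustration, not the project's code.

import time
import numpy as np
from scipy.linalg import cho_factor, cho_solve, eigh, orth
from scipy.sparse.linalg import LinearOperator, lobpcg

m = 10
sizes = [128, 256, 512]
data1, data2 = [], []

for n in sizes:
    A, B = mikota_pair(n)                 # Mikota pair, see the sketch after Example #3
    X = orth(np.random.rand(n, m))

    tt = time.perf_counter()
    c = cho_factor(A)                     # preconditioner: apply A^-1 via Cholesky
    M = LinearOperator((n, n), matvec=lambda v: cho_solve(c, v))
    eigs, vecs = lobpcg(A, X, B=B, M=M, tol=1e-4, maxiter=40, largest=False)
    data1.append(time.perf_counter() - tt)
    print(n, np.sort(eigs))

    tt = time.perf_counter()
    # subset_by_index needs SciPy >= 1.5; it returns the m smallest eigenvalues.
    w = eigh(A, B, eigvals_only=True, subset_by_index=[0, m - 1])
    data2.append(time.perf_counter() - tt)
    print(n, w)

# data1 and data2 can then be plotted against sizes, as in the original script.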
Example #5
File: H2.py    Project: mike4999/phython
def Eigensystem(Heff, Olap):
    """ General eigenvalue problem solved"""
    w,Z = symeig.symeig(Heff, Olap, type=1) # symmetric generalized eigenvalue problem
    Energy = w[:2]
    return (Energy, Z[:,:2])
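
The standalone symeig package is long unmaintained; scipy.linalg.eigh handles the same type-1 symmetric generalized problem, so an equivalent (illustrative, not the project's) version would be:

from scipy.linalg import eigh

def Eigensystem(Heff, Olap):
    """Solve Heff Z = w Olap Z and return the two lowest eigenpairs."""
    w, Z = eigh(Heff, Olap, type=1)   # eigenvalues in ascending order
    return (w[:2], Z[:, :2])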
Example #6
def lobpcg( blockVectorX, operatorA,
            operatorB = None, operatorT = None, blockVectorY = None,
            residualTolerance = None, maxIterations = 20,
            largest = True, verbosityLevel = 0,
            retLambdaHistory = False, retResidualNormsHistory = False ):
    """
    LOBPCG solves symmetric partial eigenproblems using preconditioning.

    Required input: 

    blockVectorX - initial approximation to eigenvectors, full or sparse matrix
    n-by-blockSize

    operatorA - the operator of the problem; can be given as a matrix or as a
    function (callable)


    Optional input:

    operatorB - the second operator, if solving a generalized eigenproblem; by
    default, or if empty, operatorB = I.

    operatorT - preconditioner; by default, operatorT = I.


    Optional constraints input:

    blockVectorY - n-by-sizeY matrix of constraints, sizeY < n.  The iterations
    will be performed in the (operatorB-) orthogonal complement of the
    column-space of blockVectorY. blockVectorY must be full rank.


    Optional scalar input parameters:

    residualTolerance - tolerance, by default, residualTolerance=n*sqrt(eps)

    maxIterations - max number of iterations, by default, maxIterations =
    min(n,20)

    largest - when true, solve for the largest eigenvalues, otherwise for the
    smallest

    verbosityLevel - by default, verbosityLevel = 0.

    retLambdaHistory - return eigenvalue history

    retResidualNormsHistory - return history of residual norms

    Output:

    blockVectorX and lambda are computed blockSize eigenpairs, where
    blockSize=size(blockVectorX,2) for the initial guess blockVectorX if it is
    full rank.

    If both retLambdaHistory and retResidualNormsHistory are True, the
    return tuple has the following order:

    lambda, blockVectorX, lambda history, residual norms history
    """
    failureFlag = True

    if blockVectorY is not None:
        sizeY = blockVectorY.shape[1]
    else:
        sizeY = 0

    # Block size.
    n, sizeX = blockVectorX.shape
    if sizeX > n:
        raise ValueError,\
              'the first input argument blockVectorX must be tall, not fat' +\
              ' (%d, %d)' % blockVectorX.shape

    if n < 1:
        raise ValueError,\
              'the matrix size is wrong (%d)' % n
        
    operatorA = makeOperator( operatorA, (n, n) )

    if operatorB is not None:
        operatorB = makeOperator( operatorB, (n, n) )

    if (n - sizeY) < (5 * sizeX):
        print 'The problem size is too small, compared to the block size,  for LOBPCG to run.'
        print 'Trying to use symeig instead, without preconditioning.'
        if blockVectorY is not None:
            print 'symeig does not support constraints'
            raise ValueError

        if largest:
            lohi = (n - sizeX, n)
        else:
            lohi = (1, sizeX)

        if operatorA.kind == 'function':
            print 'symeig does not support matrix A given by function'

        if operatorB is not None:
            if operatorB.kind == 'function':
                print 'symeig does not support matrix B given by function'

            _lambda, eigBlockVector = symeig( operatorA.asMatrix(),
                                              operatorB.asMatrix(),
                                              range = lohi )
        else:
            _lambda, eigBlockVector = symeig( operatorA.asMatrix(),
                                              range = lohi )
        return _lambda, eigBlockVector

    if operatorT is not None:
        operatorT = makeOperator( operatorT, (n, n) )
##     if n != operatorA.shape[0]:
##         aux = 'The size (%d, %d) of operatorA is not the same as\n'+\
##               '%d - the number of rows of blockVectorX' % operatorA.shape + (n,)
##         raise ValueError, aux

##     if operatorA.shape[0] != operatorA.shape[1]:
##         raise ValueError, 'operatorA must be a square matrix (%d, %d)' %\
##               operatorA.shape

    if residualTolerance is None:
        residualTolerance = nm.sqrt( 1e-15 ) * n

    maxIterations = min( n, maxIterations )

    if verbosityLevel:
        aux = "Solving "
        if operatorB is None:
            aux += "standard"
        else:
            aux += "generalized"
        aux += " eigenvalue problem with"
        if operatorT is None:
            aux += "out"
        aux += " preconditioning\n\n"
        aux += "matrix size %d\n" % n
        aux += "block size %d\n\n" % sizeX
        if blockVectorY is None:
            aux += "No constraints\n\n"
        else:
            if sizeY > 1:
                aux += "%d constraints\n\n" % sizeY
            else:
                aux += "%d constraint\n\n" % sizeY
        print aux

    ##
    # Apply constraints to X.
    if blockVectorY is not None:

        if operatorB is not None:
            blockVectorBY = operatorB( blockVectorY )
        else:
            blockVectorBY = blockVectorY
    
        # gramYBY is a dense array.
        gramYBY = sc.dot( blockVectorY.T, blockVectorBY )
        try:
            # gramYBY is a Cholesky factor from now on...
            gramYBY = la.cho_factor( gramYBY )
        except:
            print 'cannot handle linear dependent constraints'
            raise

        applyConstraints( blockVectorX, gramYBY, blockVectorBY, blockVectorY )

    ##
    # B-orthonormalize X.
    blockVectorX, blockVectorBX = b_orthonormalize( operatorB, blockVectorX )

    ##
    # Compute the initial Ritz vectors: solve the eigenproblem.
    blockVectorAX = operatorA( blockVectorX )
    gramXAX = sc.dot( blockVectorX.T, blockVectorAX )
    # gramXBX is X^T * X.
    gramXBX = sc.dot( blockVectorX.T, blockVectorX )
    _lambda, eigBlockVector = symeig( gramXAX )
    ii = nm.argsort( _lambda )[:sizeX]
    if largest:
        ii = ii[::-1]
    _lambda = _lambda[ii]
    eigBlockVector = nm.asarray( eigBlockVector[:,ii] )
#    pause()
    blockVectorX = sc.dot( blockVectorX, eigBlockVector )
    blockVectorAX = sc.dot( blockVectorAX, eigBlockVector )
    if operatorB is not None:
        blockVectorBX = sc.dot( blockVectorBX, eigBlockVector )
    
    ##
    # Active index set.
    activeMask = nm.ones( (sizeX,), dtype = nm.bool )

    lambdaHistory = [_lambda]
    residualNormsHistory = []

    previousBlockSize = sizeX
    ident = nm.eye( sizeX, dtype = operatorA.dtype )
    ident0 = nm.eye( sizeX, dtype = operatorA.dtype )
    
    ##
    # Main iteration loop.
    for iterationNumber in xrange( maxIterations ):
        if verbosityLevel > 0:
            print 'iteration %d' %  iterationNumber

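        # Residuals of the current Ritz pairs: R = A X - B X diag(lambda).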
        aux = blockVectorBX * _lambda[nm.newaxis,:]
        blockVectorR = blockVectorAX - aux

        aux = nm.sum( blockVectorR.conjugate() * blockVectorR, 0 )
        residualNorms = nm.sqrt( aux )

        
##         if iterationNumber == 2:
##             print blockVectorAX
##             print blockVectorBX
##             print blockVectorR
##             pause()

        residualNormsHistory.append( residualNorms )

        ii = nm.where( residualNorms > residualTolerance, True, False )
        activeMask = activeMask & ii
        if verbosityLevel > 2:
            print activeMask

        currentBlockSize = activeMask.sum()
        if currentBlockSize != previousBlockSize:
            previousBlockSize = currentBlockSize
            ident = nm.eye( currentBlockSize, dtype = operatorA.dtype )

        if currentBlockSize == 0:
            failureFlag = False # All eigenpairs converged.
            break

        if verbosityLevel > 0:
            print 'current block size:', currentBlockSize
            print 'eigenvalue:', _lambda
            print 'residual norms:', residualNorms
        if verbosityLevel > 10:
            print eigBlockVector

        activeBlockVectorR = as2d( blockVectorR[:,activeMask] )
        
        if iterationNumber > 0:
            activeBlockVectorP = as2d( blockVectorP[:,activeMask] )
            activeBlockVectorAP = as2d( blockVectorAP[:,activeMask] )
            activeBlockVectorBP = as2d( blockVectorBP[:,activeMask] )

#        print activeBlockVectorR
        if operatorT is not None:
            ##
            # Apply preconditioner T to the active residuals.
            activeBlockVectorR = operatorT( activeBlockVectorR )

#        assert nm.all( blockVectorR == activeBlockVectorR )

        ##
        # Apply constraints to the preconditioned residuals.
        if blockVectorY is not None:
            applyConstraints( activeBlockVectorR,
                              gramYBY, blockVectorBY, blockVectorY )

#        assert nm.all( blockVectorR == activeBlockVectorR )

        ##
        # B-orthonormalize the preconditioned residuals.
#        print activeBlockVectorR

        aux = b_orthonormalize( operatorB, activeBlockVectorR )
        activeBlockVectorR, activeBlockVectorBR = aux
#        print activeBlockVectorR

        activeBlockVectorAR = operatorA( activeBlockVectorR )

        if iterationNumber > 0:
            aux = b_orthonormalize( operatorB, activeBlockVectorP,
                                    activeBlockVectorBP, retInvR = True )
            activeBlockVectorP, activeBlockVectorBP, invR = aux
            activeBlockVectorAP = sc.dot( activeBlockVectorAP, invR )

        ##
        # Perform the Rayleigh Ritz Procedure:
        # Compute symmetric Gram matrices:
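        # The trial subspace is [X, W, P]: current Ritz vectors X, preconditioned
        # residuals W and previous search directions P. gramA collects the
        # A-inner products and gramB the B-inner products; the X-X blocks reduce
        # to diag(lambda) and the identity because X is B-orthonormal.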

        xaw = sc.dot( blockVectorX.T, activeBlockVectorAR )
        waw = sc.dot( activeBlockVectorR.T, activeBlockVectorAR )
        xbw = sc.dot( blockVectorX.T, activeBlockVectorBR )
        
        if iterationNumber > 0:
            xap = sc.dot( blockVectorX.T, activeBlockVectorAP )
            wap = sc.dot( activeBlockVectorR.T, activeBlockVectorAP )
            pap = sc.dot( activeBlockVectorP.T, activeBlockVectorAP )
            xbp = sc.dot( blockVectorX.T, activeBlockVectorBP )
            wbp = sc.dot( activeBlockVectorR.T, activeBlockVectorBP )
            
            gramA = nm.bmat( [[nm.diag( _lambda ), xaw, xap],
                              [xaw.T, waw, wap],
                              [xap.T, wap.T, pap]] )
            try:
                gramB = nm.bmat( [[ident0, xbw, xbp],
                                  [xbw.T, ident, wbp],
                                  [xbp.T, wbp.T, ident]] )
            except:
                print ident
                print xbw
                raise
        else:
            gramA = nm.bmat( [[nm.diag( _lambda ), xaw],
                              [xaw.T, waw]] )
            gramB = nm.bmat( [[ident0, xbw],
                              [xbw.T, ident0]] )
        try:
            assert nm.allclose( gramA.T, gramA )
        except:
            print gramA.T - gramA
            raise

        try:
            assert nm.allclose( gramB.T, gramB )
        except:
            print gramB.T - gramB
            raise

##         print nm.diag( _lambda )
##         print xaw
##         print waw
##         print xbw
##         try:
##             print xap
##             print wap
##             print pap
##             print xbp
##             print wbp
##         except:
##             pass
##         pause()

        if verbosityLevel > 10:
            save( gramA, 'gramA' )
            save( gramB, 'gramB' )
        ##
        # Solve the generalized eigenvalue problem.
#        _lambda, eigBlockVector = la.eig( gramA, gramB )
        _lambda, eigBlockVector = symeig( gramA, gramB )
        ii = nm.argsort( _lambda )[:sizeX]
        if largest:
            ii = ii[::-1]
        if verbosityLevel > 10:
            print ii
        
        _lambda = _lambda[ii].astype( nm.float64 )
        eigBlockVector = nm.asarray( eigBlockVector[:,ii].astype( nm.float64 ) )

        lambdaHistory.append( _lambda )

        if verbosityLevel > 10:
            print 'lambda:', _lambda
##         # Normalize eigenvectors!
##         aux = nm.sum( eigBlockVector.conjugate() * eigBlockVector, 0 )
##         eigVecNorms = nm.sqrt( aux )
##         eigBlockVector = eigBlockVector / eigVecNorms[nm.newaxis,:]
#        eigBlockVector, aux = b_orthonormalize( operatorB, eigBlockVector )

        if verbosityLevel > 10:
            print eigBlockVector
            pause()
        ##
        # Compute Ritz vectors.
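        # The new block X is a linear combination of the old X, the active
        # residuals W and the previous directions P, with the rows of
        # eigBlockVector supplying the coefficients.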
        if iterationNumber > 0:
            eigBlockVectorX = eigBlockVector[:sizeX]
            eigBlockVectorR = eigBlockVector[sizeX:sizeX+currentBlockSize]
            eigBlockVectorP = eigBlockVector[sizeX+currentBlockSize:]

            pp = sc.dot( activeBlockVectorR, eigBlockVectorR )\
                 + sc.dot( activeBlockVectorP, eigBlockVectorP )

            app = sc.dot( activeBlockVectorAR, eigBlockVectorR )\
                  + sc.dot( activeBlockVectorAP, eigBlockVectorP )

            bpp = sc.dot( activeBlockVectorBR, eigBlockVectorR )\
                  + sc.dot( activeBlockVectorBP, eigBlockVectorP )
        else:
            eigBlockVectorX = eigBlockVector[:sizeX]
            eigBlockVectorR = eigBlockVector[sizeX:]

            pp = sc.dot( activeBlockVectorR, eigBlockVectorR )

            app = sc.dot( activeBlockVectorAR, eigBlockVectorR )

            bpp = sc.dot( activeBlockVectorBR, eigBlockVectorR )

        if verbosityLevel > 10:
            print pp
            print app
            print bpp
            pause()
#        print pp.shape, app.shape, bpp.shape

        blockVectorX = sc.dot( blockVectorX, eigBlockVectorX ) + pp
        blockVectorAX = sc.dot( blockVectorAX, eigBlockVectorX ) + app
        blockVectorBX = sc.dot( blockVectorBX, eigBlockVectorX ) + bpp

        blockVectorP, blockVectorAP, blockVectorBP = pp, app, bpp
        
    aux = blockVectorBX * _lambda[nm.newaxis,:]
    blockVectorR = blockVectorAX - aux

    aux = nm.sum( blockVectorR.conjugate() * blockVectorR, 0 )
    residualNorms = nm.sqrt( aux )


    if verbosityLevel > 0:
        print 'final eigenvalue:', _lambda
        print 'final residual norms:', residualNorms

    if retLambdaHistory:
        if retResidualNormsHistory:
            return _lambda, blockVectorX, lambdaHistory, residualNormsHistory
        else:
            return _lambda, blockVectorX, lambdaHistory
    else:
        if retResidualNormsHistory:
            return _lambda, blockVectorX, residualNormsHistory
        else:
            return _lambda, blockVectorX
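
A usage sketch for the function above (illustrative: it assumes the module-level helpers this implementation relies on, such as makeOperator, b_orthonormalize and symeig, and borrows the Mikota pair from Example #3 as a test problem):

import numpy as nm
from scipy.linalg import orth

n, blockSize = 200, 10
A, B = mikota_pair(n)                        # any symmetric positive definite pair will do
X = orth(nm.random.rand(n, blockSize))       # orthonormal initial block, n-by-blockSize

w, V = lobpcg(X, A, B,
              residualTolerance=1e-6, maxIterations=60,
              largest=False, verbosityLevel=1)
print(nm.sort(w))                            # approximations to the 10 smallest eigenvalues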