def generalized_inverse(a, rcond = 1.e-10):
    """Return the Moore-Penrose pseudo-inverse of the 2-d array a.

    Computed from the singular value decomposition a = u * diag(s) * vt.
    Singular values not exceeding rcond times the largest singular value
    are treated as zero (their reciprocal is replaced by 0), which keeps
    the result stable for rank-deficient input.
    """
    u, sigma, vt = singular_value_decomposition(a, 0)
    n_rows = u.getshape()[0]
    n_cols = vt.getshape()[1]
    # Relative threshold below which a singular value counts as zero.
    threshold = rcond*num.maximum.reduce(sigma)
    for k in range(min(n_cols, n_rows)):
        if sigma[k] > threshold:
            sigma[k] = 1./sigma[k]
        else:
            sigma[k] = 0.
    # pinv(a) = v * diag(1/s) * u^T, with the zeroed reciprocals above.
    return num.dot(num.transpose(vt), sigma[:, num.NewAxis]*num.transpose(u))
def cholesky_decomposition(a):
    """Return the Cholesky factor of the matrix a.

    a must be a square, positive-definite 2-d array; the factor is
    computed by the LAPACK ?potrf routines (zpotrf for complex input,
    dpotrf otherwise).

    Raises LinAlgError if a is not 2-d, not square, or not positive
    definite (LAPACK reports info > 0).
    """
    _assertRank2(a)
    _assertSquareness(a)
    t =_commonType(a)
    # LAPACK expects Fortran (column-major) storage, hence the
    # cast-copy-and-transpose; 'a' is then factored in place.
    a = _castCopyAndTranspose(t, a)
    m = a.getshape()[0]
    n = a.getshape()[1]
    if _array_kind[t] == 1:
        lapack_routine = lapack_lite2.zpotrf
    else:
        lapack_routine = lapack_lite2.dpotrf
    results = lapack_routine('L', n, a, m, 0)
    # info > 0 means the leading minor of that order is not positive
    # definite, so no Cholesky factorization exists.
    if results['info'] > 0:
        raise LinAlgError('Matrix is not positive definite - Cholesky decomposition cannot be computed')
    # Keep only the computed triangle and transpose back to num
    # (row-major) order before returning a fresh copy.
    return copy.copy(num.transpose(mlab.triu(a,k=0)))
def linear_least_squares(a, b, rcond=1.e-10):
    """solveLinearLeastSquares(a,b) returns x,resids,rank,s
    where x minimizes 2-norm(|b - Ax|)
          resids is the sum of squared residuals
          rank is the rank of A
          s holds the singular values of A in descending order

    If b is a matrix then x is also a matrix with corresponding columns.
    If the rank of A is less than the number of columns of A or greater
    than the number of rows, then residuals will be returned as an empty
    array, otherwise resids = sum((b-dot(A,x))**2).
    Singular values less than s[0]*rcond are treated as zero.
    """
    # A 1-d right-hand side is promoted to a single-column matrix and
    # flattened again before returning.
    one_eq = len(b.getshape()) == 1
    if one_eq:
        b = b[:, num.NewAxis]
    _assertRank2(a, b)
    m = a.getshape()[0]
    n = a.getshape()[1]
    n_rhs = b.getshape()[1]
    # ?gelsd overwrites b with the solution, so the RHS buffer must have
    # max(m, n) rows.
    ldb = max(n,m)
    if m != b.getshape()[0]:
        raise LinAlgError, 'Incompatible dimensions'
    t =_commonType(a, b)
    real_t = _array_type[0][_array_precision[t]]
    bstar = num.zeros((ldb,n_rhs),t)
    bstar[:b.getshape()[0],:n_rhs] = copy.copy(b)
    # LAPACK wants Fortran (column-major) order.
    a,bstar = _castCopyAndTranspose(t, a, bstar)
    s = num.zeros((min(m,n),),real_t)
    # Integer workspace sized per the ?gelsd documentation
    # (divide-and-conquer level count nlvl).
    nlvl = max( 0, int( math.log( float(min( m,n ))/2. ) ) + 1 )
    iwork = num.zeros((3*min(m,n)*nlvl+11*min(m,n),), 'l')
    if _array_kind[t] == 1: # Complex routines take different arguments
        lapack_routine = lapack_lite2.zgelsd
        lwork = 1
        rwork = num.zeros((lwork,), real_t)
        work = num.zeros((lwork,),t)
        # Workspace-size query: lwork=-1 makes zgelsd store the optimal
        # complex workspace size in work[0] without solving.
        results = lapack_routine( m, n, n_rhs, a, m, bstar,ldb , s, rcond,
                        0,work,-1,rwork,iwork,0 )
        lwork = int(abs(work[0]))
        rwork = num.zeros((lwork,),real_t)
        a_real = num.zeros((m,n),real_t)
        bstar_real = num.zeros((ldb,n_rhs,),real_t)
        # NOTE(review): the real workspace size for zgelsd is obtained
        # here by running a dgelsd size query on dummy real arrays —
        # looks like a workaround; confirm it yields a sufficient rwork.
        results = lapack_lite2.dgelsd( m, n, n_rhs, a_real, m, bstar_real,ldb , s, rcond,
                        0,rwork,-1,iwork,0 )
        lrwork = int(rwork[0])
        work = num.zeros((lwork,), t)
        rwork = num.zeros((lrwork,), real_t)
        # Actual solve with the queried workspace sizes.
        results = lapack_routine( m, n, n_rhs, a, m, bstar,ldb , s, rcond,
                        0,work,lwork,rwork,iwork,0 )
    else:
        lapack_routine = lapack_lite2.dgelsd
        lwork = 1
        work = num.zeros((lwork,), t)
        # Workspace-size query (lwork=-1), then the actual solve.
        results = lapack_routine( m, n, n_rhs, a, m, bstar,ldb , s, rcond,
                        0,work,-1,iwork,0 )
        lwork = int(work[0])
        work = num.zeros((lwork,), t)
        results = lapack_routine( m, n, n_rhs, a, m, bstar,ldb , s, rcond,
                        0,work,lwork,iwork,0 )
    if results['info'] > 0:
        raise LinAlgError, 'SVD did not converge in Linear Least Squares'
    resids = num.array([],type=t)
    if one_eq:
        # bstar's first n entries hold the solution; for full-rank
        # overdetermined systems the tail holds the residual vector.
        x = copy.copy(num.ravel(bstar)[:n])
        if (results['rank']==n) and (m>n):
            resids = num.array([num.sum((num.ravel(bstar)[n:])**2)])
    else:
        x = copy.copy(num.transpose(bstar)[:n,:])
        if (results['rank']==n) and (m>n):
            resids = copy.copy(num.sum((num.transpose(bstar)[n:,:])**2))
    return x,resids,results['rank'],copy.copy(s[:min(n,m)])
def qr_decomposition(a, mode='full'):
    """Calculates A=QR, Q orthonormal, R upper triangular matrix.

    mode: 'full'     ==> (Q,R) as return value
          'r'        ==> (None, R) as return value
          'economic' ==> (None, A') where the diagonal + upper triangle
                         part of A' is R. This is faster if one only
                         requires R.

    Uses the LAPACK ?geqrf routines for the factorization and
    ?ungqr/?orgqr to form Q explicitly in 'full' mode.
    Raises LinAlgError if a is not 2-d or a LAPACK call reports failure.
    """
    _assertRank2(a)
    t=_commonType(a)
    m = a.getshape()[0]
    n = a.getshape()[1]
    mn = min(m,n)
    # tau receives the elementary-reflector scalars produced by ?geqrf.
    tau = num.zeros((mn,), t)
    # a: convert num storing order to fortran storing order
    a = _castCopyAndTranspose(t, a)
    if _array_kind[t] == 1:
        lapack_routine = lapack_lite2.zgeqrf
        routine_name='ZGEQRF'
    else:
        lapack_routine = lapack_lite2.dgeqrf
        routine_name='DGEQRF'
    # calculate optimal size of work data 'work'
    # (lwork=-1 is the LAPACK workspace-size query; the optimal size is
    # returned in work[0])
    lwork = 1
    work = num.zeros((lwork,), t)
    results=lapack_routine(m, n, a, m, tau, work, -1, 0)
    if results['info'] > 0:
        raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
    # do qr decomposition (a is overwritten with R in its upper triangle
    # and the reflectors below the diagonal)
    lwork = int(abs(work[0]))
    work = num.zeros((lwork,),t)
    results=lapack_routine(m, n, a, m, tau, work, lwork, 0)
    if results['info'] > 0:
        raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
    # atemp: convert fortran storing order to num storing order
    atemp = num.transpose(a)
    # economic mode
    if mode[0]=='e':
        return None, atemp
    # generate r: copy only the upper-triangular part, row by row
    r = num.zeros((mn,n), t)
    for i in range(mn):
        r[i, i:] = atemp[i, i:]
    # 'r'-mode, that is, calculate only r
    if mode[0]=='r':
        return None, r
    # from here on: build orthonormal matrix q from a
    if _array_kind[t] == 1:
        lapack_routine = lapack_lite2.zungqr
        routine_name = "ZUNGQR"
    else:
        lapack_routine = lapack_lite2.dorgqr
        routine_name = "DORGQR"
    # determine optimal lwork (workspace-size query again, lwork=-1)
    lwork = 1
    work=num.zeros((lwork,), t)
    results=lapack_routine(m,mn,mn, a, m, tau, work, -1, 0)
    if results['info'] > 0:
        raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
    # compute q (a is overwritten with the first mn columns of Q)
    lwork = int(abs(work[0]))
    work=num.zeros((lwork,), t)
    results=lapack_routine(m,mn,mn, a, m, tau, work, lwork, 0)
    if results['info'] > 0:
        raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
    # transpose back from fortran order; q is m x mn
    q = num.transpose(a[:mn,:])
    return q,r