Code Example #1
def random_SVD_test(k, q):
	m = 100000
	n = 200

	U0, R = la.qr(np.random.randn(m, n))
	V, R = la.qr(np.random.randn(n, n))
	U = U0[:, 0:n]

	S = np.ones((n))
	t = 2.0

	j = 1
	while j < n:
		S[j] = 1/t
		t *= 2
		j += 1

	A = np.dot(U, np.dot(np.diag(S), V))

	U, S, V = random_SVD_iterated(A, k, k, q)
	dNormSigma = (la.norm(A-np.dot(U, np.dot(np.diag(S), V))), S[k+1])
	print(dNormSigma)

	U, S, V = random_SVD_fast(A, k, k)
	dNormSigma = (la.norm(A-np.dot(U, np.dot(np.diag(S), V))), S[k+1])
	print(dNormSigma)
Code Example #2
def principal_angle(A,B):
    """
    Find the principal angle between two subspaces
    spanned by columns of A and B
    """
    from numpy.linalg import qr, svd
    qA, _ = qr(A)
    qB, _ = qr(B)
    U,S,V = svd(qA.T.dot(qB))
    return np.arccos(min(S.min(), 1.0))
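A quick sanity check of principal_angle (a sketch; it assumes numpy is imported as np at module level, as the function's use of np.arccos implies). Note that arccos is applied to the smallest singular value, so the function returns the largest principal angle:

import numpy as np

# span{e1, e2} versus a copy with e1 rotated by 0.3 rad out of the plane
A = np.array([[1., 0.], [0., 1.], [0., 0.]])
B = np.array([[np.cos(0.3), 0.], [0., 1.], [np.sin(0.3), 0.]])
print(principal_angle(A, B))  # ~0.3 (radians)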
Code Example #3
def random_rot_matrix(D, c):
    A = npr.randn(D, D)
    P, R = qr(A)
    A = npr.randn(D, D)
    Q, R = qr(A)
    u = npr.rand(D)
    # singular values spread geometrically between 1 and c
    S = diag(c**((u - np.min(u))/(np.max(u) - np.min(u))))
    M = dot(P, dot(S, Q))
    return M
Code Example #4
    def blockpower(self, ell, eps=1):
        n, d = self.getShape()
        init_mat = randn(d, ell)
        num_of_iter = int(10 * ceil(log(d / eps) / eps))

        for i in xrange(num_of_iter):
            [init_mat, _] = qr(init_mat)
            init_mat = self.covarianceMult(init_mat)

        K = self.leftMult(init_mat)
        [Q, _] = qr(K)
        del K
        del init_mat
        return Q
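For reference, a self-contained sketch of the same block power scheme on an explicit dense matrix; the covarianceMult and leftMult calls above are taken here to stand for products with A^T A and A (an assumption about the class's interface):

import numpy as np
from numpy.linalg import qr

def block_power_dense(A, ell, num_of_iter=30):
    # orthonormal basis approximating the top-ell left singular directions
    n, d = A.shape
    X = np.random.randn(d, ell)
    for _ in range(num_of_iter):
        X, _ = qr(X)
        X = A.T @ (A @ X)   # covariance multiply: (A^T A) X
    Q, _ = qr(A @ X)        # left multiply, then orthonormalize
    return Q

A = np.random.randn(200, 50)
Q = block_power_dense(A, 5)
print(Q.shape, np.allclose(Q.T @ Q, np.eye(5)))  # (200, 5) True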
Code Example #5
File: krank.py  Project: TPNguyen/libskylark
 def __subspace_iteration(self):
      A = self.A
      s = self.kwargs['s']
      q = self.kwargs['q']
      
      m, n = A.shape
      S = random.randn(n, s)
      Y = np.dot(A, S)
      Q, R = linalg.qr(Y)
      for i in range(q):
          Y = np.dot(A.T, Q)
          Q, R = linalg.qr(Y)
          Y = np.dot(A, Q)
          Q, R = linalg.qr(Y)
      return Q
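The Q returned above approximates the dominant range of A, which is exactly what a randomized low-rank approximation A ≈ Q(QᵀA) needs. A standalone sketch of the same subspace iteration (names s and q as in the snippet):

import numpy as np

def subspace_iteration(A, s, q):
    Y = A @ np.random.randn(A.shape[1], s)
    Q, _ = np.linalg.qr(Y)
    for _ in range(q):
        Q, _ = np.linalg.qr(A.T @ Q)
        Q, _ = np.linalg.qr(A @ Q)
    return Q

A = np.random.randn(500, 80) @ np.random.randn(80, 300)  # rank 80
Q = subspace_iteration(A, s=100, q=2)
print(np.linalg.norm(A - Q @ (Q.T @ A)) / np.linalg.norm(A))  # ~1e-15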
Code Example #6
File: utilities.py  Project: wjw12/emc
def randomMatrices(n):
    """
    Save and return two lists of random rotation matrices and their inverse
    """
    from numpy.linalg import qr
    import cPickle
    rot = []
    inv = []
    count = 0
    while count < n:
        q, r = qr(np.random.randn(3,3))
        d = np.diagonal(r)
        d = d/np.abs(d)
        q = np.multiply(q,d) # rescale columns by the sign of r's diagonal
        if np.linalg.det(q) < 0:
            q = np.fliplr(q) # make sure det > 0
        try:
            iq = np.linalg.inv(q)
        except np.linalg.LinAlgError: # in case q is singular, redraw
            continue
        rot.append(q)
        inv.append(iq)
        count += 1
    t = (rot,inv)
    with open('_'.join(['rotation', str(n)]), 'wb') as f:
            cPickle.dump(t, f)
    return t
Code Example #7
def main():
    from matplotlib.pyplot import figure,plot, close
    from numpy.random import standard_normal,choice
    from numpy.linalg import qr, norm
    from numpy import dot, sqrt, real, imag
    import CAMP_C
    #from myOmp import omp_naive as omp
    N=2000
    M=900
    K=100
    sigma_n=0.001
    A=standard_normal((N,N))+1j*standard_normal((N,N))
    (Q,R)=qr(A)
    i=choice(N,M,False)  
    A=Q[i,:]

    x=(standard_normal((N,1))+1j*standard_normal((N,1)))/sqrt(2)
    j=choice(N,N-K,False)
    x[j,:]=0
    
    y=dot(A,x)+sigma_n*standard_normal((M,1))
    xhat=CAMP_C.CAMP(A,y,1,True)
    print(norm(x-xhat)/N)
    close('all')
    plot(real(x))
    plot(real(xhat))
    figure()
    plot(imag(x))
    plot(imag(xhat))
Code Example #8
File: test_nystrom.py  Project: dvbhagavathi/megaman
def test_nystrom_extension(seed=123):
    """ Test Nystrom Extension: low rank approximation is exact when
    G is itself low rank
    """
    n = 5
    s = 2
    rng = np.random.RandomState(seed)
    X = rng.randn(n, s)
    G = np.dot(X, X.T) # has rank s

    # find the linearly independent columns of G
    q = qr(G)[1] 
    q = absolute(q)
    sums = np.sum(q,axis=1)
    i = 0
    dims = list()
    while( i < n ): #dim is the matrix dimension
        if(sums[i] > 1.e-10):
            dims.append(i)
        i += 1
    
    # Find the eigendecomposition of the full rank portion:
    W = G[dims,:]
    W = W[:,dims]
    eval, evec = np.linalg.eigh(W)
    
    # pass the dims columns of G 
    C = G[:,dims]
    # Find the estimated eigendecomposition using Nystrom 
    eval_nystrom, evec_nystrom = nystrom_extension(C, evec, eval)
        
    # reconstruct G using Nystrom Approximation
    G_nystrom = np.dot(np.dot(evec_nystrom, np.diag(eval_nystrom)),evec_nystrom.T)
    # since rank(W) = rank(G) = s the nystrom approximation of G is exact:
    assert_array_almost_equal(G_nystrom, G)
Code Example #9
def left_qr_maxvol(nd):
    cr = nd.core.copy()
    r1, n1, r2 = cr.shape
    cr = np.tensordot(nd.edges[0].Ru, cr, 1)
    #nd.edges[0].Ru = np.ones((1, 1))
    r1 = cr.shape[0]
    cr = reshape(cr, (r1 * n1, r2))
    q, Ru = qr(cr)
    ind, c = maxvol(q)
    Ru = np.dot(q[ind, :], Ru)
    q = c.copy()
    nd.core = reshape(q, (r1, n1, r2)).copy()
    nd.edges[1].Ru = Ru.copy()
    nd.maxvol_left = np.unravel_index(ind, (r1, n1), order='F')
    #The philosophical question if this index should be stored on the edge or in the node
    #The typical recomputation:
    #Take left index somewhere and update. For the first node it comes from the left edge
    #So, we can store ind_left on an edge, whereas ind_left_add in the node
    """ This is a logically separate function """
    i_left = nd.edges[0].ind_left
    #Update index
    w1 = mkron(np.ones((n1, 1), dtype=np.int32), i_left)
    w2 = mkron(reshape(np.arange(n1, dtype=np.int32),(-1, 1)), np.ones((r1, 1), dtype=np.int32))
    i_next = np.hstack((w1, w2))
    i_next = reshape(i_next, (r1 * n1, -1))
    i_next = i_next[ind, :]
    
    nd.edges[1].ind_left = i_next.copy()
    nd.edges[1].ind_left_add = i_next.copy()
Code Example #10
File: misc.py  Project: ghorvath78/butools
def Linsolve(A,b):
    """
    Solves the linear system A*x=b (if b is a column vector), or x*A=b (if b is 
    a row vector).
    
    Matrix "A" does not need to be square, this function uses rank-revealing
    QR decomposition to solve the system.
    
    Parameters
    ----------
    A : matrix, shape (M,N)
        The coefficient matrix of the linear system.
    b : matrix, shape (M,1) or (1,N)
        The right hand side of the linear system
        
    Returns
    -------
    x : matrix, shape (M,1) or (1,N)
        If b is a column vector, then x is the solution of A*x=b.       
        If b is a row vector, it returns the solution of x*A=b.
    """
    if b.shape[0]==1:
        x = Linsolve(np.conj(A.T), np.conj(b.T))
        return np.conj(x.T)
    elif b.shape[1]==1:
        Q,R = la.qr(A)
        N = A.shape[1]
        return ml.matrix(la.solve(R[0:N,0:N], np.array(np.conj(Q.T)*b).flatten()[0:N])).T
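A usage sketch for Linsolve (hedged: la and ml in misc.py are taken to be numpy.linalg and numpy.matlib, as the calls suggest):

import numpy as np
import numpy.matlib as ml

A = ml.matrix(np.random.randn(5, 3))      # overdetermined, full column rank
x_true = ml.matrix(np.random.randn(3, 1))
b = A * x_true                            # consistent right-hand side
print(np.allclose(Linsolve(A, b), x_true))  # True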
Code Example #11
def nullSpaceMethod(A, b, C, d):
    """
    Solves the constrained least squares problem min ||Ax-b||_2 subject to Cx=d via the null space method.
    :param numpy ndarray A: an m-by-n A matrix
    :param numpy ndarray b: an m-by-1 b matrix

    :return: x, the coefficients of the least squares problem.
    :rtype: ndarray
    :return: cond, the condition number of the final matrix on which least squares is performed
    :rtype: float
    """
    m, n = A.shape
    p, n = C.shape
    Q, R = qr(C.T, 'complete')
    #Q, R = qr_Householder(C.T)
    Q1 = Q[0:n, 0:p]
    Q2 = Q[0:n, p:n]
    # Lower triangular matrix!
    L = R.T
    L = L[0:p, 0:p]
    y1, not_required = solveLSQ(L, d)
    c = b - (A * Q1) * y1
    AQ2 = A * Q2
    y2, not_required = solveLSQ(AQ2 , np.mat(c) )
    x = (Q1 * y1) + (Q2 * y2)
    cond = np.linalg.cond(AQ2)
    return x, cond
Code Example #12
File: krylov.py  Project: nepstad/iterativemethods
def GMRES(A, b, krylovSize=10, useQR = True):
    def MultiplyMatrix(x):
        return dot(A, x)

    arnoldi = ArnoldiIterations(A, MultiplyMatrix, krylovSize)
    arnoldi.Setup(startVector = b)
    arnoldi.ArnoldiIterations()

    #converged = False
    #while not converged:
        #arnoldi step

        #check residual

    #Solve least square problem
    x = None
    bdStep = arnoldi.BreakdownStep
    if useQR:
        Q,R = linalg.qr(arnoldi.Hessenberg[:bdStep+1,:bdStep])
        Qb = dot(transpose(arnoldi.ArnoldiVectors[:,:bdStep+1]), b)
        Qbb = dot(transpose(Q), Qb)
        y = linalg.solve(R[:bdStep+1,:bdStep], Qbb)
        x = dot(arnoldi.ArnoldiVectors[:,:bdStep], y)
    else:
        HH = dot(transpose(arnoldi.Hessenberg), arnoldi.Hessenberg)
        bb = dot(transpose(arnoldi.Hessenberg), dot(transpose(arnoldi.ArnoldiVectors), b))
        y = linalg.solve(HH, bb)
        x = dot(arnoldi.ArnoldiVectors[:,:-1], y)

    return x
Code Example #13
def solve1():
    x = np.array([-1.02494, -0.949898, -0.866114,-0.773392, -0.671372,
                  -0.559524,-0.437067,-0.302909, -0.155493, -0.007464], dtype = "float_")
    y = np.array([-0.389269, -0.322894, -0.265256, -0.216557, -0.177152,
                  -0.147582, -0.128618, -0.121353, -0.127348,-0.148885], dtype = "float_")
    rhs = x**2
    col1 = y*y
    col2 = x*y
    col3 = x
    col4 = y
    col5 = np.ones(len(x))
    A = np.array([col1,col2,col3,col4,col5]).T
    Q, R = la.qr(A)
    Qt = Q.T
    c1 = np.dot(Qt, rhs)
    result = la.solve(R, c1)
    residual = rhs - np.dot(A, result)
    norm_residual = la.norm(residual, np.inf)
    print "Coefficients:"
    print result
    print "Residual vector: "
    print residual
    print "Residual inf norm:"
    print norm_residual
    return result
Code Example #14
File: GLRTest.py  Project: SenthilKumarasamy/Rage
    def ComputeProjectionMatrix(self,B,Bcol,Glen):
        q,r=qr(B,mode='complete')
        P=q[:,Bcol:Glen]
#         if (not P):
#             print 'Projection matrix is empty'
#             exit()
        return P.T
Code Example #15
File: util.py  Project: zdelrosario/pyutil
def comp(M):
    """Returns a basis for the space orthogonal
    to the range of M
    """
    I = eye(M.shape[0])
    Q,R = qr(concatenate((M,I),axis=1))
    return Q[:,matrix_rank(M):]
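A small check of comp (a sketch; assumes eye, concatenate, qr and matrix_rank are the numpy names imported in util.py, and that M has full column rank):

import numpy as np

M = np.random.randn(6, 2)
C = comp(M)
print(C.shape)                  # (6, 4): complement dimension is 6 - rank(M)
print(np.allclose(M.T @ C, 0))  # True: columns of C are orthogonal to range(M)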
Code Example #16
File: random_unitary.py  Project: callumwilkes/qy
 def get_unitary(self):
     ''' build the unitary '''
     real=np.random.normal(0, 1, [self.nmodes, self.nmodes])
     imag=1j*np.random.normal(0, 1, [self.nmodes, self.nmodes])
     self.unitary=real+imag
     self.unitary, r = qr(self.unitary)
     return self.unitary
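One caveat on the snippet above: the raw QR of a complex Gaussian matrix is unitary but not Haar-distributed. Rescaling each column of Q by the phase of the corresponding diagonal entry of R fixes this, the complex analogue of the sign correction in Code Example #6. A hedged sketch:

import numpy as np

def haar_unitary(n):
    z = (np.random.randn(n, n) + 1j*np.random.randn(n, n)) / np.sqrt(2)
    q, r = np.linalg.qr(z)
    d = np.diag(r)
    return q * (d / np.abs(d))  # give R a unit-phase diagonal

U = haar_unitary(4)
print(np.allclose(U @ U.conj().T, np.eye(4)))  # True: still unitary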
Code Example #17
File: meshutils.py  Project: tbetcke/PyPWDG
    def quadweights(self, eltid):
        """ return the quadrature weights on face faceid"""
        # The area of a simplex is 1/n! times the area of a parallepiped;
#        area = abs(nl.det(self.__mesh.directions[self.__mesh.etof[eltid][0]][1:]))# / math.factorial(self.__mesh.dim))
        area = abs(np.prod(nl.qr(self.__mesh.directions[self.__mesh.etof[eltid][0]][1:].T, mode='r').diagonal()))
        
        return self.__qw * area
Code Example #18
 def initBeforeMake(self, dimension, \
                    signal_dimension=0, \
                    signal_to_noise_ratio=0,\
                    signal_singular_value_decay_factor=0, \
                    signal_singular_value_decay_type='exp'):
     
     self.dimension = dimension
     self.signal_dimension = signal_dimension
     self.signal_to_noise_ratio = signal_to_noise_ratio
     self.signal_singular_value_decay_factor = signal_singular_value_decay_factor
     self.signal_singular_value_decay_type = signal_singular_value_decay_type
 
     # setting a random singular space    
     [Q,R] = qr( randn(self.dimension, self.signal_dimension) )
     self.signal_row_space = Q.transpose()
     del Q,R
     
     # setting the singular values  
     eta = self.signal_singular_value_decay_factor
     if self.signal_singular_value_decay_type == 'exp':
         self.signal_singular_values = [exp(-10*eta*i/self.signal_dimension) for i in xrange(self.signal_dimension)] 
     elif self.signal_singular_value_decay_type == 'lin':
         self.signal_singular_values = [max(1.0 - eta*float(i)/self.signal_dimension,0.0) for i in xrange(self.signal_dimension)]
     else:
         self.signal_singular_values = ones(self.signal_dimension)
     # done initializing 
     self.wasInitForMake = True
Code Example #19
File: util.py  Project: nickgravish/pymvg
def rq(A):
    # see first comment at
    # http://leohart.wordpress.com/2010/07/23/rq-decomposition-from-qr-decomposition/
    from numpy.linalg import qr
    from numpy import flipud
    Q,R = qr(flipud(A).T)
    R = flipud(R.T)
    Q = Q.T
    return R[:,::-1],Q[::-1,:]
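A sanity check of the RQ-from-QR construction (a sketch for a square input):

import numpy as np

A = np.random.randn(4, 4)
R, Q = rq(A)
print(np.allclose(A, R.dot(Q)))            # True: A = R*Q
print(np.allclose(R, np.triu(R)))          # True: R is upper triangular
print(np.allclose(Q.dot(Q.T), np.eye(4)))  # True: Q is orthogonal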
Code Example #20
File: weight_init.py  Project: ybzhou/Gemini
 def init(self, n_in, n_out):
     W = self.rng.randn(n_in, n_out)
     trans = False
     if n_in < n_out:
         W = W.T
         trans = True
     W, _ = linalg.qr(W)
     ret_W = W.T if trans else W
     return numpy.asarray(ret_W, dtype='float32')
Code Example #21
File: krank.py  Project: TPNguyen/libskylark
 def __fast_generic(self):
     A = self.A
     s = self.kwargs['s']
     
     m, n = A.shape
     SRFT = SRFT_matrix(n, s)
     Y = np.dot(A, SRFT)
     Q, R = linalg.qr(Y)
     return Q
Code Example #22
def orthAxes(e1, e2, e3):
    n = e1.shape[0]
    x = np.zeros((n, 3))
    x[:, 0] = e1
    x[:, 1] = e2
    x[:, 2] = e3
    q, r = LA.qr(x)

    return q[:, 0], q[:, 1], q[:, 2]
Code Example #23
File: krank.py  Project: TPNguyen/libskylark
    def __generic(self):
        A = self.A
        s = self.kwargs['s']

        m, n = A.shape
        S = random.randn(n, s)
        Y = np.dot(A, S)
        Q, R = linalg.qr(Y)
        return Q
Code Example #24
File: problem1.py  Project: haoranyu/CS450-HW3
def qr_iteration(A, tol):
	m, n = A.shape
	for i in range(0,n-1):
		corner = A[n-i-1][n-i-1]
		q, r = la.qr(A - corner * np.eye(n))
		A = np.dot(r,q)+corner * np.eye(n)
		if la.norm(A[n-i-2][:n-i-2]) < tol:
			break
	return np.diag(A)
Code Example #25
File: test_multi.py  Project: nkoep/pymanopt
 def test_multilog(self):
     A = np.zeros((self.k, self.m, self.m))
     l = np.zeros((self.k, self.m, self.m))
     for i in range(self.k):
         a = np.diag(rnd.rand(self.m))
         q, r = la.qr(rnd.randn(self.m, self.m))
         A[i] = q.dot(a.dot(q.T))
         l[i] = logm(A[i])
     np_testing.assert_allclose(multilog(A, pos_def=True), l)
Code Example #26
File: local_node.py  Project: nikwoj/IVA
 def local_step(self) :
     N, _, K = self.X.shape
     self.Y, YtY = compute_Y(self.X, self.W)
     w_value = 0
     for k in range(K) :
         Q, R = qr(self.W[:,:,k])
         R = diag(R)
         w_value += sum(log(abs(R)))
     return YtY, w_value
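The loop above uses the identity log|det W| = sum_i log|R_ii| for W = QR, since |det Q| = 1. A standalone check (a sketch, assuming numpy):

import numpy as np

W = np.random.randn(6, 6)
_, R = np.linalg.qr(W)
print(np.sum(np.log(np.abs(np.diag(R)))))  # via the QR diagonal
print(np.log(np.abs(np.linalg.det(W))))    # direct computation, matches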
Code Example #27
File: CDST_demo.py  Project: MrKriss/CDST-demo
  def __init__(self, version, p, numStreams = 1):

    self.F_version = version.split('.')[0]
    self.A_version = version.split('.')[1]
    if len(version.split('.')) > 2:
      self.S_version = version.split('.')[2]
    else:
      self.S_version = 'none'

    self.numStreams = numStreams
    
    # Calculate threshold. Depends on whether test is one or two tailed. 
    if '+ve' in self.A_version or '-ve' in self.A_version:      
      p['t_thresh'] = sp.stats.t.isf(1.0 * p['FP_rate'], p['SRE_sample_N'])
    elif 'both' in self.A_version:
      p['t_thresh'] = sp.stats.t.isf(0.5 * p['FP_rate'], p['SRE_sample_N'])      
    
    self.p = p
    self.p['version'] = version        

    """ Initialise all CD-ST variables """

    r = self.p['init_r']
    
    # Q_0
    if self.p['fix_init_Q'] != 0:  # fix initial Q as identity 
      q_0 = np.eye(numStreams);
      Q = q_0
    else: # generate random orthonormal matrix N x r 
      Q = np.eye(numStreams) # Max size of Q
      Q_0, R_0 = npl.qr(np.random.rand(numStreams,r))   
      Q[:,:r] = Q_0          
    # S_0
    small_value = self.p['small_value']
    S = np.eye(numStreams) * small_value # Avoids Singularity    
    # v-1
    v = np.zeros((numStreams,1)) 
    # U(t-1) for eigenvalue estimation
    U = np.eye(numStreams)

    # Define st dictionary 
    """ This stores variables from one timestep to the next """
    self.st  = {'Q' : Q,         # Orthogonal dominant subspace vectors
                'S' : S,     # Energy
                'v' : v,     # used for S update
                'U' : U,     # Used for eigen value calculations 
                'r' : r,     # Previous rank of Q and number of hidden variables h
                't' : 0,     # Timestep, used for ignoreup2  
                'sumEz' : 0.0,        # Exponential sum of zt Energy 
                'sumEh': 0.0,     # Exponential sum of ht energy  
                'anomaly': np.array([0]*self.numStreams,dtype = bool)} 
      
    # Vars for SAX usage
    if 'none' not in self.S_version:
      self.st['SAX_trigger_q'] = [] 
      self.st['SAX_snapshots'] = {}
Code Example #28
def seqAng(seqDifv, U):
    """
    seqDifv is a sequence of row vectors
    """
    q = LA.qr(U)[0]
    M, N = seqDifv.shape
    ang = np.zeros(M)
    for i in range(M):
        ang[i] = pAngle(seqDifv[i, :], q)
    return ang
Code Example #29
File: matrix.py  Project: BIRDSLab/temporal1form
def getRandomOrthogonal(n, s):
    """
    Create a random orthogonal matrix
    """
    if not n > 0:
        raise ValueError("can't create matrix of zero or negative size (n<=0)!")
    if floor(n) != n:
        raise ValueError("size of generated matrix must be an integer (n not integer)!")
    Q = linalg.qr(random.randn(n, n))[0]
    return Q
Code Example #30
File: camera.py  Project: bikramjitsingh91/cvscripts
def positive_qr(Z):
    """
    Compute QR decomposition such that R has nonnegative diagonal elements
    http://www.mathworks.com/matlabcentral/answers/6659-changes-to-qr-factorization-qr
    """
    Q, R = la.qr(Z)
    D = np.diag(np.sign(np.diag(R)))
    Q = Q.dot(D)
    R = D.dot(R)
    return Q, R
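Usage sketch for positive_qr (assumes np and la are numpy and numpy.linalg, matching the snippet, and a generic Z so that R has no exact zeros on its diagonal):

import numpy as np
import numpy.linalg as la

Z = np.random.randn(5, 5)
Q, R = positive_qr(Z)
print(np.allclose(Q.dot(R), Z))  # True: D*D = I, so the factorization still holds
print((np.diag(R) >= 0).all())   # True: R's diagonal is now nonnegative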
Code Example #31
File: subspace.py  Project: pawsen/pyvib
def subspace(G, covG, freq, n, r, U=None, Y=None, bd_method='nr', modal=False):
    """Estimate state-space model from Frequency Response Function (or Matrix)

    The linear state-space model is estimated from samples of the frequency
    response function (or frequency response matrix). The frequency-domain
    subspace method in `McKelvey1996`_ is applied with the frequency weighting
    in `Pintelon2002`_, i.e. weighting with the sampled covariance matrix.

    `p`: number of outputs, `m`: number of inputs, `F`: number of frequencies.

    Parameters
    ----------
    G : complex ndarray(p, m, F)
        Frequency Response Matrix (FRM)
    covG : ndarray(p*m, p*m, F)
        σ²_G, Covariance tensor on G (False if no weighting required)
    freq : ndarray(F)
        Vector of normalized frequencies at which the FRM is given (0 < freq < 0.5)
    n : int
        Model order
    r : int
        Number of block rows in the extended observability matrix (r > n)
    bd_method : str {'nr', 'explicit', 'opt'}, optional
        Method used for BD estimation
    modal : bool {False}, optional
        If True, return (sqrtCY, Un, sn) instead of the state-space matrices,
        so that A and C can be extracted for several model orders n

    Returns
    -------
    A : ndarray(n, n)
        state matrix
    B : ndarray(n, m)
        input matrix
    C : ndarray(p, n)
        output matrix
    D : ndarray(p, m)
        feed-through matrix
    z : ndarray(F)
        The complex frequency vector exp(2j*pi*freq)
    isstable : boolean
        Indicating whether or not the identified state-space model is stable

    Notes
    -----
    Algorithm: (see p. 119 `Paduart2008`_ for details)
    From a DFT of the state space eqs., and recursive use of the two equations
    give the relation: ``Gmat = OᵣX + SᵣU``. From this ``A`` and ``C`` are
    determined. ``B`` and ``D`` are found by minimizing the weighted error
    ``e(f) = W*(Ĝ(f) - G(f))`` where ``Ĝ(f) = C*(z(f)*I - A)^(-1)*B + D`` is
    the estimated- and ``G(f)`` is the measured frequency response matrix(FRM).
    The weight, ``W=1/σ_G``, is chosen in `Pintelon2002`_, sec. 5, to almost
    eliminate the bias resulting from observing the inputs and outputs ``U``
    and ``Y`` with errors.

    In ``Gmat``, ``Sᵣ`` is a lower triangular block toeplitz matrix and ``Oᵣ``,
    ``U`` are extended matrices and found as:
      1. Construct Extended observability matrix Oᵣ
          a. Construct Wᵣ with z
          b. Construct Hmat with H and Wᵣ
          c. Construct Umat with Wᵣ (U=eye(m))
          d. Split real and imaginary parts of Umat and Hmat
          e. Z=[Umat; Hmat]
          f. Calculate CY
          g. QR decomposition of Zᵀ (orthogonal projection)
          h. CY^(-1/2)*RT22=USV'
          i. Oᵣ=U(:,1:n)
      2. Estimate A and C from the shift property of Oᵣ
      3. Estimate B and D given A,C and H

    References
    ----------
    .. _McKelvey1996:
       McKelvey T., Akcay, H., and Ljung, L. (1996).
       Subspace-Based Multivariable System Identification From Frequency
       Response Data. IEEE Transactions on Automatic Control, 41(7):960-979

    .. _Pintelon2002:
       Pintelon, R. (2002). Frequency-domain subspace system identification
       using non-parametric noise models. Automatica, 38:1295-1311

    .. _Paduart2008:
       Paduart J. (2008). Identification of nonlinear systems using polynomial
       nonlinear state space models. PhD thesis, Vrije Universiteit Brussel.

    .. _noel2013:
       Noël, J.P., Kerschen G. (2013)
       Frequency-domain subspace identification for nonlinear mechanical
       systems. MSSP, doi:10.1016/j.ymssp.2013.06.034

    """
    # number of outputs/inputs and number of frequencies
    # When using G as input, _m reflects that G is 3d: (F,p,m), ie U: (F,m)
    if U is None and Y is None:
        F, p, m = G.shape
        is_frf = True
        _m = m
    else:
        F = len(freq)
        p = Y.shape[1]
        m = U.shape[1]
        is_frf = False
        _m = 1

    # 1.a. Construct Wr with z
    z = np.exp(2j * np.pi * freq)
    # if B,D is calculated explicit, we need an additional p and m rows in Gmat
    # and Umat. See eq (30) in noel2013.
    expl = 0
    if bd_method == 'explicit':
        expl = 1

    Wr = (z[:, None]**np.arange(r + expl)).T
    # 1.b. and 1.c. Construct Gmat and Umat
    # The shape depends on the method, ie if Y,U or G is supplied
    Gmat = np.empty(((r + expl) * p, F * _m), dtype=complex)
    Umat = np.empty(((r + expl) * m, F * _m), dtype=complex)
    if U is None and Y is None:
        for f in range(F):
            Gmat[:, f * m:(f + 1) * m] = kron(Wr[:, f, None], G[f])
            Umat[:, f * m:(f + 1) * m] = kron(Wr[:, f, None], np.eye(m))
    else:
        for f in range(F):
            Gmat[:, f] = kron(Wr[:, f], Y[f])
            Umat[:, f] = kron(Wr[:, f], U[f])

    # 1.e. and 1.f: split into real and imag part and stack into Z
    # we do it in a memory efficient way and avoids intermediate memory copies.
    # (Just so you know: It is more efficient to stack the result in a new
    # memory location, than overwriting the old). Ie.
    # Gre = np.hstack([Gmat.real, Gmat.imag]) is more efficient than
    # Gmat = np.hstack([Gmat.real, Gmat.imag])
    Z = np.empty(((r + expl) * (p + m), 2 * F * _m))
    Z[:(r + expl) * m, :F * _m] = Umat.real
    Z[:(r + expl) * m, F * _m:] = Umat.imag
    Z[(r + expl) * m:, :F * _m] = Gmat.real
    Z[(r + expl) * m:, F * _m:] = Gmat.imag

    # 1.f. Calculate CY from σ²_G
    if covG is False or covG is None:
        CY = np.eye(p * r)
        # covG = np.tile(np.eye(p*m), (F,1,1))
    else:
        CY = np.zeros((p * r, p * r))
        for f in range(F):
            # Take sum over the diagonal blocks of cov(vec(H)) (see
            # paduart2008(5-93))
            temp = np.zeros((p, p), dtype=complex)
            for i in range(m):
                temp += covG[f, i * p:(i + 1) * p, i * p:(i + 1) * p]
                CY += np.real(kron(np.outer(Wr[:r, f], Wr[:r, f].conj()),
                                   temp))

    # 1.g. QR decomposition of Z.T, Z=R.T*Q.T, to eliminate U from Z.
    R = qr(Z.T, mode='r')
    RT = R.T
    if bd_method == 'explicit':
        RT22 = RT[-(r + 1) * p:-p, -(r + 1) * p:-p]
    else:
        RT22 = RT[-r * p:, -r * p:]

    # 1.h. CY^(-1/2)*RT22=USV', Calculate CY^(-1/2) using svd decomp.
    UC, sc, _ = svd(CY, full_matrices=False)

    # it is faster to work on the diagonal scy, than the full matrix SCY
    # Note: We work with real matrices here, thus UC.conj().T -> UC.T
    sqrtCY = UC * np.sqrt(sc) @ UC.conj().T
    invsqrtCY = UC * 1 / np.sqrt(sc) @ UC.conj().T

    # Remove noise. By taking svd of CY^(-1/2)*RT22
    Un, sn, _ = svd(invsqrtCY @ RT22)  # , full_matrices=False)

    if modal:
        # in case we want to calculate A, C for different n's
        return sqrtCY, Un, sn

    if n == 0:
        # Offer possibility to choose model order
        n = int(input('Input model size'))

    # 1.i. Estimate extended observability matrix
    # NOTE: JP multiply with np.diag(np.sqrt(sn[:n])). ELEC does not
    Or = sqrtCY @ Un[:, :n] @ np.diag(np.sqrt(sn[:n]))

    # 2. Estimate A and C from shift property of Or
    A, *_ = lstsq(Or[:-p], Or[p:])
    # equal to np.linalg.pinv(Or[:-p]) @ Or[p:]
    C = Or[:p, :].copy()
    # Recompute Or from A and C. Or plays a major role in determining B
    # and D, thus J.P. Noel suggest that Or might be recalculated
    # Equal to Or[] = C @ np.linalg.matrix_power(A,j)
    # for j in range(1,r):
    #     Or[j*p:(j+1)*p,:] = Or[(j-1)*p:j*p,:] @ A

    # 3. Estimate B and D given A,C and H: (W)LS estimate
    # Compute weight, W = sqrt(σ²_G^-1)
    weight = False
    if covG is not False and covG is not None:
        weight = np.zeros_like(covG)  # .transpose((2,0,1))
        for f in range(F):
            weight[f] = matrix_square_inv(covG[f])

    if bd_method == 'explicit':
        B, D = bd_explicit(A, C, Or, n, r, m, p, RT)
    elif bd_method == 'nr':
        B, D = bd_nr(A, C, G, freq, n, r, m, p, U, Y, weight)
    else:  # opt: lm optimization
        B, D = bd_opt(A, C, G, freq, n, r, m, p, U, Y, weight)

    # Check stability of the estimated model
    isstable = is_stable(A)
    return A, B, C, D, z, isstable
Code Example #32
    def __init__(self, observations, initPoint, maxIter, threshold):
        self.solution = None
        nObs = len(observations)
        # initial parameters (position x,y)
        x0 = np.array([[initPoint.x()], [initPoint.y()]])  # brackets needed to create column and not row vector
        self.report = "Initial position: %13.3f %13.3f\n" % (x0[0], x0[1])
        dx = np.array([[2*threshold], [2*threshold]])
        it = 0
        # global observations vector
        l = np.array([[obs["observation"]] for obs in observations])  # brackets needed to create column and not row vector
        # adjustment main loop
        while max(np.abs(dx)) > threshold:
            it += 1
            if it > maxIter:
                x0 = [None, None]
                self.report += "\n!!! Maximum iterations reached (%u)" % (it-1)
                break
            # init matrices
            A = []
            B = []
            Qll = []
            w = []
            for i, obs in enumerate(observations):
                px = obs["x"]
                py = obs["y"]
                precision = obs["precision"]
                if obs["type"] == "distance":
                    r = obs["observation"]
                    # distance equation: (xc - px)^2 + (yc - py)^2 - r^2 = 0 (obs: r, param: xc,yc, fixed: px,py)
                    # jacobian for parameters
                    A.append([2*x0[0][0]-2*px, 2*x0[1][0]-2*py])
                    # jacobian for observations
                    B.append(-2*r)
                    # stochastic model
                    Qll.append(pow(precision, 2))
                    # misclosure
                    # brackets needed to create column and not row vector
                    w.append([pow(x0[0][0]-px, 2) + pow(x0[1][0]-py, 2) - pow(r, 2)])
                if obs["type"] == "orientation":
                    az = obs["observation"]
                    sinaz = sin(az*deg2rad)
                    cosaz = cos(az*deg2rad)
                    # equation: (xc-px)/sin(az) - (yc-py)/cos(az) = 0 (obs: az, param: xc,yc, fixed: px,py)
                    # jacobian for parameters
                    A.append([1/sinaz, -1/cosaz])
                    # jacobian for observations
                    B.append((x0[0][0]-px)*deg2rad*cosaz/pow(sinaz, 2) - (x0[1][0]-py)*-deg2rad*sinaz/pow(cosaz, 2))
                    # stochastic model
                    Qll.append(pow(precision, 2))
                    # misclosure
                    # brackets needed to create column and not row vector
                    w.append([(x0[0][0]-px)/sinaz - (x0[1][0]-py)/cosaz])
            # generate matrices
            A = np.array(A)
            B = np.diag(B)
            Qll = np.diag(Qll)
            w = np.array(w)
            # weight matrix
            Pm = np.dot(B, np.dot(Qll, B.T))
            P = la.inv(Pm)
            # normal matrix
            N = np.dot(A.T, np.dot(P, A))
            u = np.dot(A.T, np.dot(P, w))
            # QR decomposition
            q, r = la.qr(N)
            p = np.dot(q.T, u)
            dx = np.dot(la.inv(r), p)
            x0 -= dx
            self.report += "\nCorrection %u: %10.4f %10.4f" % (it, dx[0], dx[1])
        Qxx = la.inv(N)
        p1 = sqrt(Qxx[0][0])
        p2 = sqrt(Qxx[1][1])
        # residuals -Qll*B'*(P * (A* dx(iN)+w)) !!! ToBeChecked todo !!!
        v = np.dot(-Qll, np.dot(B.T, np.dot(P, np.dot(A, dx) + w)))
        self.solution = QgsPoint(x0[0], x0[1])

        self.report += "\n"
        self.report += "\nSolution:\t%13.3f\t%13.3f" % (x0[0], x0[1])
        self.report += "\nPrecision:\t%13.3f\t%13.3f" % (p1, p2)
        self.report += "\n\n Observation  |       x       |       y       |   Measure   | Precision | Residual"
        self.report += "  \n              |  [map units]  |  [map units]  |   [deg/m]   |  [1/1000] | [1/1000]"
        for i, obs in enumerate(observations):
            self.report += "\n%13s | %13.3f | %13.3f | %11.3f | %9.1f | %7.1f" % (obs["type"], obs["x"], obs["y"],
                                                                                  obs["observation"],
                                                                                  obs["precision"]*1000,
                                                                                  1000*v[i][0])
        sigmapos = np.dot(v.T, np.dot(P, v)) / (nObs - 2)  # vTPv / r
        if sigmapos > 1.8:
            sigmapos_comment = "precision is too optimistic"
        elif sigmapos < .5:
            sigmapos_comment = "precision is too pessimistc"
        else:
            sigmapos_comment = "precision seems realistic"
        self.report += "\n\nSigma a posteriori: %5.2f \t (%s)" % (sigmapos, sigmapos_comment)
Code Example #33
# QR decomposition solution to linear least squares
from numpy import array
from numpy.linalg import inv
from numpy.linalg import qr
from matplotlib import pyplot
# define dataset
data = array([
	[0.05, 0.12],
	[0.18, 0.22],
	[0.31, 0.35],
	[0.42, 0.38],
	[0.5, 0.49]])
# split into inputs and outputs
X, y = data[:,0], data[:,1]
X = X.reshape((len(X), 1))
# factorize
Q, R = qr(X)
b = inv(R).dot(Q.T).dot(y)
print(b)
# predict using coefficients
yhat = X.dot(b)
# plot data and predictions
pyplot.scatter(X, y)
pyplot.plot(X, yhat, color='red')
pyplot.show()
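As a cross-check on the block above (a sketch reusing its X and y), the QR route should agree with numpy's built-in least squares solver:

from numpy.linalg import lstsq
b_check, *_ = lstsq(X, y, rcond=None)
print(b_check)  # same coefficient as b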
Code Example #34
 def rand(self):
     # Generate random  point using qr of random normally distributed
     # matrix.
     Y, _ = la.qr(crandn(self.n, self.p))
     P = crandn(self.p, self.p)
     return psd_point(Y, P @ P.T.conj())
Code Example #35
File: extrescal.py  Project: xitongdashi/Ext-RESCAL
def rescal(X, D, rank, **kwargs):
    """
    RESCAL 

    Factors a three-way tensor X such that each frontal slice 
    X_k = A * R_k * A.T. The frontal slices of a tensor are 
    N x N matrices that correspond to the adjacency matrices 
    of the relational graph for a particular relation.

    For a full description of the algorithm see: 
      Maximilian Nickel, Volker Tresp, Hans-Peter-Kriegel, 
      "A Three-Way Model for Collective Learning on Multi-Relational Data",
      ICML 2011, Bellevue, WA, USA

    Parameters
    ----------
    X : list
        List of frontal slices X_k of the tensor X. The shape of each X_k is ('N', 'N')
    D : matrix
        A sparse matrix involved in the tensor factorization (aims to incorporate
        the entity-term matrix aka document-term matrix)
    rank : int 
        Rank of the factorization
    lmbda : float, optional 
        Regularization parameter for A and R_k factor matrices. 0 by default 
    init : string, optional
        Initialization method of the factor matrices. 'nvecs' (default) 
        initializes A based on the eigenvectors of X. 'random' initializes 
        the factor matrices randomly.
    proj : boolean, optional 
        Whether or not to use the QR decomposition when computing R_k.
        True by default 
    maxIter : int, optional 
        Maximum number of iterations of the ALS algorithm. 50 by default. 
    conv : float, optional 
        Stop when residual of factorization is less than conv. 1e-5 by default    

    Returns 
    -------
    A : ndarray 
        matrix of latent embeddings for entities A
    R : list
        list of 'M' arrays of shape ('rank', 'rank') corresponding to the factor matrices R_k 
    f : float 
        function value of the factorization 
    iter : int 
        number of iterations until convergence 
    exectimes : ndarray 
        execution times to compute the updates in each iteration
    V : ndarray
        matrix of latent embeddings for words V
    """

    # init options
    ainit = kwargs.pop('init', __DEF_INIT)
    proj = kwargs.pop('proj', __DEF_PROJ)
    maxIter = kwargs.pop('maxIter', __DEF_MAXITER)
    conv = kwargs.pop('conv', __DEF_CONV)
    lmbda = kwargs.pop('lmbda', __DEF_LMBDA)
    preheatnum = kwargs.pop('preheatnum', __DEF_PREHEATNUM)

    if not len(kwargs) == 0:
        raise ValueError('Unknown keywords (%s)' % (kwargs.keys()))

    sz = X[0].shape
    dtype = X[0].dtype
    n = sz[0]

    _log.debug('[Config] rank: %d | maxIter: %d | conv: %7.1e | lmbda: %7.1e' %
               (rank, maxIter, conv, lmbda))

    # precompute norms of X
    normX = [squareFrobeniusNormOfSparseBoolean(M) for M in X]
    sumNormX = sum(normX)
    normD = squareFrobeniusNormOfSparse(D)
    _log.debug('[Algorithm] The tensor norm: %.5f' % sumNormX)
    _log.debug('[Algorithm] The extended matrix norm: %.5f' % normD)
    # initialize A
    if ainit == 'random':
        _log.debug('[Algorithm] The random initialization will be performed.')
        A = array(rand(n, rank), dtype=np.float64)
    elif ainit == 'nvecs':
        _log.debug(
            '[Algorithm] The eigenvector based initialization will be performed.'
        )
        tic = time.clock()
        avgX = X[0] + X[0].T
        for i in range(1, len(X)):
            avgX = avgX + (X[i] + X[i].T)
        toc = time.clock()
        elapsed = toc - tic
        _log.debug(
            'Initializing tensor slices by summation required secs: %.5f' %
            elapsed)

        tic = time.clock()
        eigvals, A = eigsh(avgX.tocsc(), rank)
        toc = time.clock()
        elapsed = toc - tic
        _log.debug('eigenvector decomposition required secs: %.5f' % elapsed)
    else:
        raise ValueError('Unknown init option ("%s")' % ainit)

    # initialize R
    if proj:
        Q, A2 = qr(A)
        X2 = __projectSlices(X, Q)
        R = __updateR(X2, A2, lmbda)
    else:
        raise ValueError('Projection via QR decomposition is required; pass proj=True')

    _log.debug('[Algorithm] Finished initialization.')
    # compute factorization
    fit = fitchange = fitold = 0
    exectimes = []

    for iterNum in xrange(maxIter):
        tic = time.clock()

        V = updateV(A, D, lmbda)

        A = updateA(X, A, R, V, D, lmbda)
        if proj:
            Q, A2 = qr(A)
            X2 = __projectSlices(X, Q)
            R = __updateR(X2, A2, lmbda)
        else:
            raise ValueError('Projection via QR decomposition is required; pass proj=True')

        # compute fit values
        fit = 0
        tensorFit = 0
        regularizedFit = 0
        extRegularizedFit = 0
        regRFit = 0
        fitDAV = 0
        if iterNum >= preheatnum:
            if lmbda != 0:
                for i in xrange(len(R)):
                    regRFit += norm(R[i])**2
                regularizedFit = lmbda * (norm(A)**2) + lmbda * regRFit
            if lmbda != 0:
                extRegularizedFit = lmbda * (norm(V)**2)

            fitDAV = normD + matrixFitNormWithoutNormD(D, A, V)

            for i in xrange(len(R)):
                tensorFit += (normX[i] + fitNormWithoutNormX(X[i], A, R[i]))

            fit = 0.5 * tensorFit
            fit += regularizedFit
            fit /= sumNormX
            fit += (0.5 * fitDAV + extRegularizedFit) / normD

        else:
            _log.debug('[Algorithm] Preheating is going on.')

        toc = time.clock()
        exectimes.append(toc - tic)
        fitchange = abs(fitold - fit)
        _log.debug(
            '[%3d] total fit: %.10f | tensor fit: %.10f | matrix fit: %.10f | delta: %.10f | secs: %.5f'
            % (iterNum, fit, tensorFit, fitDAV, fitchange, exectimes[-1]))

        fitold = fit
        if iterNum > preheatnum and fitchange < conv:
            break
    return A, R, fit, iterNum + 1, array(exectimes), V
Code Example #36
# In[5]:

A = np.array([
    1 + 0 * points,
    points,
]).T
A

# What's the right-hand side vector?

# -------------
# Now solve the least-squares system:

# In[6]:

Q, R = npla.qr(A, "complete")

# In[7]:

print(A.shape)
print(Q.shape)
print(R.shape)

m, n = A.shape

# Determine $x$. Use `spla.solve_triangular(A, b, lower=False)`.

# In[8]:

x = spla.solve_triangular(R[:n], Q.T.dot(values)[:n], lower=False)
Code Example #37
import numpy as np
from numpy.linalg import qr, svd, norm

A = np.matrix([[-0.808, 0.914, -1.38], [-1.828, -0.405, -0.513],
               [-0.06, 0.056, 0.912]])

b = np.matrix([[1], [0], [0]])

# QR factorization
Q, R = qr(A)

print('A=', A)
print('b=', b)
print('Q=', Q)
print('R=', R)
x = np.dot(np.linalg.inv(R), np.dot(np.transpose(Q), b))

print('X=', x)
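Rather than forming inv(R) explicitly, back substitution on the triangular factor is the numerically preferable route. A sketch with scipy, reusing Q, R and b from above:

from scipy.linalg import solve_triangular

rhs = np.asarray(np.dot(np.transpose(Q), b)).ravel()
x2 = solve_triangular(np.asarray(R), rhs, lower=False)
print('X2=', x2)  # same solution as X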
Code Example #38
# Q10- qr decomposition

import numpy as np
import numpy.linalg as npl

# given matrix A
A=np.array([[5,-2],[-2,8]])
q,r=npl.qr(A)
print("QR decomposition of given matrix is \nQ=",q,"\nR=",r)

#For calculating eigenvalues using qr decomposition
for i in range(0,10):
    q,r=npl.qr(A)
    A=np.dot(r,q)  # matrix product; r*q would multiply ndarrays elementwise
print("Eigen values of A using qr decomposition are ",A[0][0],A[1][1])

#For eigenvalues using eigh function
w,v=npl.eigh(A)
print("Eigenvalues of A using numpy.linalg.eigh are ",w[0],w[1])
Code Example #39
 def update_QR(self):
     "Perform the QR decomposition on the basis matrix."
     self.Q, self.R = qr(self.Phi)
Code Example #40
    def computeRegressorLinDepsQR(self, regressor=None):
        """get base regressor and identifiable basis matrix with QR decomposition

        gets independent columns (non-unique choice) each with its dependent ones, i.e.
        those std parameter indices that form each of the base parameters (including the linear factors)
        """
        if regressor is not None:
            # if supplied, get dependencies from specific regressor
            Y = regressor
            self.Q, self.R, self.P = sla.qr(Y, pivoting=True, mode='economic')
        else:
            #using random regressor gives us structural base params, not dependent on excitation
            #QR of transposed gives us basis of column space of original matrix (Gautier, 1990)
            Y, self.Q, self.R, self.P = self.getRandomRegressor(n_samples=self.opt['randomSamples'])

        """
        # get basis directly from regressor matrix using QR
        Qt,Rt,Pt = sla.qr(Y.T, pivoting=True, mode='economic')

        #get rank
        r = np.where(np.abs(Rt.diagonal()) > self.opt['minTol'])[0].size
        self.num_base_params = r

        Qt[np.abs(Qt) < self.opt['minTol']] = 0

        #get basis projection matrix
        S = np.zeros_like(Rt)
        for i in range(Rt.shape[0]):
            if np.abs(Rt[i,i]) < self.opt['minTol']:
                continue
            if Rt[i,i] < 0:
                S[i,i] = -1
            if Rt[i,i] > 0:
                S[i,i] = 1
        self.B = Qt.dot(S)[:, :r]
        #self.B = Qt[:, 0:r]

        """
        #get rank
        r = np.where(np.abs(self.R.diagonal()) > self.opt['minTol'])[0].size
        self.num_base_params = r
        self.num_base_inertial_params = r - self.num_dofs

        #create proper permutation matrix from vector
        self.Pp = np.zeros((self.P.size, self.P.size))
        for i in self.P:
            self.Pp[i, self.P[i]] = 1
        self.Pb = self.Pp.T[:, 0:self.num_base_params]
        self.Pd = self.Pp.T[:, self.num_base_params:]

        # get the choice of indices of "independent" columns of the regressor matrix
        # (representants chosen from each separate interdependent group of columns)
        self.independent_cols = self.P[0:r]

        # get column dependency matrix (with what factor are columns of "dependent" columns grouped)
        # i (independent column) = (value at i,j) * j (dependent column index among the others)
        R1 = self.R[0:r, 0:r]
        R2 = self.R[0:r, r:]
        self.linear_deps = sla.inv(R1).dot(R2)
        self.linear_deps[np.abs(self.linear_deps) < self.opt['minTol']] = 0

        self.Kd = self.linear_deps
        self.K = self.Pb.T + self.Kd.dot(self.Pd.T)

        # collect grouped columns for each independent column
        # and build base matrix
        # (slow too, save to file)
        if self.opt['useBasisProjection']:
            self.B = np.zeros((self.num_identified_params, self.num_base_params))
            for j in range(0, self.linear_deps.shape[0]):
                indep_idx = self.independent_cols[j]
                for i in range(0, self.linear_deps.shape[1]):
                    for k in range(r, self.P.size):
                        factor = self.linear_deps[j, k-r]
                        if np.abs(factor)>self.opt['minTol']: self.B[self.P[k],j] = factor
                self.B[indep_idx,j] = 1

            if self.opt['orthogonalizeBasis']:
                #orthogonalize, so linear relationships can be inverted (if B is square, will orthonormalize)
                Q_B_qr, R_B_qr = la.qr(self.B)
                Q_B_qr[np.abs(Q_B_qr) < self.opt['minTol']] = 0
                S = np.zeros_like(R_B_qr)
                for i in range(R_B_qr.shape[0]):
                    if np.abs(R_B_qr[i,i]) < self.opt['minTol']:
                        continue
                    if R_B_qr[i,i] < 0:
                        S[i,i] = -1
                    if R_B_qr[i,i] > 0:
                        S[i,i] = 1
                self.B = Q_B_qr.dot(S)
                #self.B = Q_B_qr
                self.Binv = self.B.T
            else:
                # in case B is not an orthogonal base (B.T != B^-1), we have to use pinv instead of T
                # (using QR on B yields orthonormal base if necessary)
                # in general, pinv is always working (but is numerically a bit different)
                self.Binv = la.pinv(self.B)

        # define sympy symbols for each std column
        self.base_syms = sympy.Matrix([sympy.Symbol('beta'+str(i),real=True) for i in range(self.num_base_params)])
        self.param_syms = list()     # type: List[sympy.Symbol]
        self.mass_syms = list()      # type: List[sympy.Symbol]
        self.friction_syms = list()  # type: List[sympy.Symbol]
        #indices of params within full param vector that are going to be identified
        self.identified_params = list()  # type: List[int]
        for i in range(0, self.num_links):
            #mass
            m = symbols('m_{}'.format(i))
            self.param_syms.append(m)
            self.identified_params.append(i*10)
            self.mass_syms.append(m)

            #first moment of mass
            p = 'c_{}'.format(i)  #symbol prefix
            syms = [symbols(p+'x'), symbols(p+'y'), symbols(p+'z')]
            self.param_syms.extend(syms)
            self.identified_params.extend([i*10+1, i*10+2, i*10+3])

            #3x3 inertia tensor about link-frame (for link i)
            p = 'I_{}'.format(i)
            syms = [symbols(p+'xx'), symbols(p+'xy'), symbols(p+'xz'),
                    symbols(p+'xy'), symbols(p+'yy'), symbols(p+'yz'),
                    symbols(p+'xz'), symbols(p+'yz'), symbols(p+'zz')
                   ]
            self.param_syms.extend([syms[0], syms[1], syms[2], syms[4], syms[5], syms[8]])

            if not self.opt['identifyGravityParamsOnly']:
                self.identified_params.extend([i*10+4, i*10+5, i*10+6, i*10+7, i*10+8, i*10+9])

        if self.opt['identifyFriction']:
            mp = self.num_model_params
            for i in range(0,self.num_dofs):
                s = [symbols('Fc_{}'.format(i))]
                self.param_syms.extend(s)
                self.friction_syms.extend(s)
                self.identified_params.append(mp+i)
            if not self.opt['identifyGravityParamsOnly']:
                if self.opt['identifySymmetricVelFriction']:
                    for i in range(0,self.num_dofs):
                        s = [symbols('Fv_{}'.format(i))]
                        self.param_syms.extend(s)
                        self.friction_syms.extend(s)
                        self.identified_params.append(mp+self.num_dofs+i)
                else:
                    for i in range(0,self.num_dofs):
                        s = [symbols('Fv+_{}'.format(i))]
                        self.param_syms.extend(s)
                        self.friction_syms.extend(s)
                        self.identified_params.append(mp+self.num_dofs+i)
                    for i in range(0,self.num_dofs):
                        s = [symbols('Fv-_{}'.format(i))]
                        self.param_syms.extend(s)
                        self.friction_syms.extend(s)
                        self.identified_params.append(mp+2*self.num_dofs+i)
        self.param_syms = np.array(self.param_syms)

        ## get symbolic equations for base param dependencies
        # Each dependent parameter can be ignored (non-identifiable) or it can be
        # represented by grouping some base and/or dependent parameters.
        # TODO: put this in regressor cache file (it's slow)
        if self.opt['useBasisProjection']:
            if self.opt['orthogonalizeBasis']:
                #this is only correct if basis is orthogonal
                self.base_deps = np.dot(self.param_syms[self.identified_params], self.B)
            else:
                #otherwise, we need to get relationships from the inverse
                B_qr_inv_z = la.pinv(self.B)
                B_qr_inv_z[np.abs(B_qr_inv_z) < self.opt['minTol']] = 0
                self.base_deps = np.dot(self.param_syms[self.identified_params], B_qr_inv_z.T)
        else:
            # using projection matrix from Gautier/Sousa method for base eqns
            # (K is orthogonal)
            self.base_deps = Matrix(self.K) * Matrix(self.param_syms[self.identified_params])

        # find std parameters that have no effect on estimation (not single or contributing to base
        # equations)
        # TODO: also put this in regressor cache file
        base_deps_syms = []   # type: List[sympy.Symbol]
        for i in range(self.base_deps.shape[0]):
            for s in self.base_deps[i].free_symbols:
                if s not in base_deps_syms:
                    base_deps_syms.append(s)
        self.non_id = [p for p in range(self.num_all_params) if self.param_syms[p] not in base_deps_syms]
        self.identifiable = [p for p in range(self.num_all_params) if p not in self.non_id]
Code Example #41
def _tracemin_fiedler(L, X, normalized, tol, method):
    """Compute the Fiedler vector of L using the TraceMIN-Fiedler algorithm.
    """
    n = X.shape[0]

    if normalized:
        # Form the normalized Laplacian matrix and determine the eigenvector of
        # its nullspace.
        e = sqrt(L.diagonal())
        D = spdiags(1. / e, [0], n, n, format='csr')
        L = D * L * D
        e *= 1. / norm(e, 2)

    if not normalized:

        def project(X):
            """Make X orthogonal to the nullspace of L.
            """
            X = asarray(X)
            for j in range(X.shape[1]):
                X[:, j] -= X[:, j].sum() / n
    else:

        def project(X):
            """Make X orthogonal to the nullspace of L.
            """
            X = asarray(X)
            for j in range(X.shape[1]):
                X[:, j] -= dot(X[:, j], e) * e

    if method is None:
        method = 'pcg'
    if method == 'pcg':
        # See comments below for the semantics of P and D.
        def P(x):
            x -= asarray(x * X * X.T)[0, :]
            if not normalized:
                x -= x.sum() / n
            else:
                x = daxpy(e, x, a=-ddot(x, e))
            return x

        solver = _PCGSolver(lambda x: P(L * P(x)), lambda x: D * x)
    elif method == 'chol' or method == 'lu':
        # Convert A to CSC to suppress SparseEfficiencyWarning.
        A = csc_matrix(L, dtype=float, copy=True)
        # Force A to be nonsingular. Since A is the Laplacian matrix of a
        # connected graph, its rank deficiency is one, and thus one diagonal
        # element needs to modified. Changing to infinity forces a zero in the
        # corresponding element in the solution.
        i = (A.indptr[1:] - A.indptr[:-1]).argmax()
        A[i, i] = float('inf')
        solver = (_CholeskySolver if method == 'chol' else _LUSolver)(A)
    else:
        raise nx.NetworkXError('unknown linear system solver.')

    # Initialize.
    Lnorm = abs(L).sum(axis=1).flatten().max()
    project(X)
    W = asmatrix(ndarray(X.shape, order='F'))

    while True:
        # Orthonormalize X.
        X = qr(X)[0]
        # Compute iteration matrix H.
        W[:, :] = L * X
        H = X.T * W
        sigma, Y = eigh(H, overwrite_a=True)
        # Compute the Ritz vectors.
        X *= Y
        # Test for convergence exploiting the fact that L * X == W * Y.
        res = dasum(W * asmatrix(Y)[:, 0] - sigma[0] * X[:, 0]) / Lnorm
        if res < tol:
            break
        # Depending on the linear solver to be used, two mathematically
        # equivalent formulations are used.
        if method == 'pcg':
            # Compute X = X - (P * L * P) \ (P * L * X) where
            # P = I - [e X] * [e X]' is a projection onto the orthogonal
            # complement of [e X].
            W *= Y  # L * X == W * Y
            W -= (W.T * X * X.T).T
            project(W)
            # Compute the diagonal of P * L * P as a Jacobi preconditioner.
            D = L.diagonal().astype(float)
            D += 2. * (asarray(X) * asarray(W)).sum(axis=1)
            D += (asarray(X) * asarray(X * (W.T * X))).sum(axis=1)
            D[D < tol * Lnorm] = 1.
            D = 1. / D
            # Since TraceMIN is globally convergent, the relative residual can
            # be loose.
            X -= solver.solve(W, 0.1)
        else:
            # Compute X = L \ X / (X' * (L \ X)). L \ X can have an arbitrary
            # projection on the nullspace of L, which will be eliminated.
            W[:, :] = solver.solve(X)
            project(W)
            X = (inv(W.T * X) * W.T).T  # Preserves Fortran storage order.

    return sigma, asarray(X)
Code Example #42
File: rotations.py  Project: saptadeeppal/pymanopt
 def retri(Y):
     Q, R = la.qr(Y)
     return np.dot(Q, np.diag(np.sign(np.sign(np.diag(R)) + 0.5)))
Code Example #43
print(P)
print(L)
print(U)

B = P.dot(L).dot(U)
print(B)

#QR decomposition
#A = QR

from numpy.linalg import qr

A = array([[1,2],[3,4],[5,6]])
print(A)

Q,R = qr(A,'complete')

print(Q)
print(R)

B = Q.dot(R)
print(B)

#Cholesky decomposition
# A = L(L.T) transpose

from numpy.linalg import cholesky

A = array([[2,1,1],[1,2,1],[1,1,2]])

L = cholesky(A)
Code Example #44
File: note1.py  Project: jt1024/DataAnalysis
y = np.array([[6., 23.], [-1, 7], [8, 9]])
x
y
x.dot(y)  # equivalent to np.dot(x, y)

np.dot(x, np.ones(3))

np.random.seed(12345)

from numpy.linalg import inv, qr

X = randn(5, 5)
mat = X.T.dot(X)
inv(mat)
mat.dot(inv(mat))
q, r = qr(mat)
r

### Random number generation
samples = np.random.normal(size=(4, 4))
samples

from random import normalvariate

N = 1000000
get_ipython().magic(
    u'timeit samples = [normalvariate(0, 1) for _ in xrange(N)]')
get_ipython().magic(u'timeit np.random.normal(size=N)')

# Example: random walk
import random
Code Example #45
    def run(self,
            distance_matrix,
            num_dimensions_out=10,
            use_power_method=False,
            num_levels=1):
        """
        Performs a singular value decomposition.

        Parameters
        ----------
        distance_matrix: np.array
            Numpy matrix representing the distance matrix for which the
            eigenvectors and eigenvalues shall be computed
        num_dimensions_out: int
            Number of dimensions to keep. Must be lower than or equal to the
            rank of the given distance_matrix.
        num_levels: int
            Number of levels of the Krylov method to use (see paper).
            For most applications, num_levels=1 or num_levels=2 is sufficient.
        use_power_method: bool
            Changes the power of the spectral norm, thus minimizing
            the error. See paper p11/eq8.1 DOI = {10.1137/100804139}

        Returns
        -------
        np.array
            Array of eigenvectors, each with num_dimensions_out length.
        np.array
            Array of eigenvalues, a total number of num_dimensions_out.

        Notes
        -----
        The algorithm is based on 'An Algorithm for the Principal
        Component analysis of Large Data Sets'
        by N. Halko, P.G. Martinsson, Y. Shkolnisky, and M. Tygert.
        Original Paper: https://arxiv.org/abs/1007.5510

        Ported from reference MATLAB implementation: https://goo.gl/JkcxQ2
        """
        super(FSVD, self).run(distance_matrix, num_dimensions_out)

        m, n = distance_matrix.shape

        # Note: this transpose is removed for performance, since we
        # only expect square matrices.
        # Take (conjugate) transpose if necessary, because it makes H smaller,
        # leading to faster computations
        # if m < n:
        #     distance_matrix = distance_matrix.transpose()
        #     m, n = distance_matrix.shape
        if m != n:
            raise ValueError('FSVD.run(...) expects square distance matrix')

        k = num_dimensions_out + 2

        # Form a real nxl matrix G whose entries are independent,
        # identically distributed
        # Gaussian random variables of
        # zero mean and unit variance
        G = standard_normal(size=(n, k))

        if use_power_method:
            # use only the given exponent
            H = dot(distance_matrix, G)

            for x in xrange(2, num_levels + 2):
                # enhance decay of singular values
                # note: distance_matrix is no longer transposed, saves work
                # since we're expecting symmetric, square matrices anyway
                # (Daniel McDonald's changes)
                H = dot(distance_matrix, dot(distance_matrix, H))

        else:
            # compute the m x l matrices H^{(0)}, ..., H^{(i)}
            # Note that this is done implicitly in each iteration below.
            H = dot(distance_matrix, G)
            # Again, removed transpose: dot(distance_matrix.transpose(), H)
            # to enhance performance
            H = hstack((H, dot(distance_matrix, dot(distance_matrix, H))))
            for x in xrange(3, num_levels + 2):
                # Removed this transpose: dot(distance_matrix.transpose(), H)
                tmp = dot(distance_matrix, dot(distance_matrix, H))

                # Removed this transpose: dot(distance_matrix.transpose(), tmp)
                H = hstack((H, dot(distance_matrix, dot(distance_matrix,
                                                        tmp))))

        # Using the QR decomposition, form a real m * ((i+1)l) matrix Q
        # whose columns are orthonormal, s.t. there exists a real
        # ((i+1)l) * ((i+1)l) matrix R for which H = QR
        Q, R = qr(H)

        # Compute the n * ((i+1)l) product matrix T = A^T Q
        # Removed transpose of distance_matrix for performance
        T = dot(distance_matrix, Q)  # step 3

        # Form an SVD of T
        Vt, St, W = svd(T, full_matrices=False)
        W = W.transpose()

        # Compute the m * ((i+1)l) product matrix
        Ut = dot(Q, W)

        if m < n:
            # V_fsvd = Ut[:, :num_dimensions_out] # unused
            U_fsvd = Vt[:, :num_dimensions_out]
        else:
            # V_fsvd = Vt[:, :num_dimensions_out] # unused
            U_fsvd = Ut[:, :num_dimensions_out]

        S = St[:num_dimensions_out]**2

        # drop imaginary component, if we got one
        eigenvalues = S.real
        eigenvectors = U_fsvd.real

        return eigenvectors, eigenvalues
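
# --- Added sketch (not from the original file): a self-contained, minimal
# version of the randomized eigendecomposition implemented by the method
# above, checked against numpy.linalg.eigvalsh on an exactly low-rank
# symmetric matrix, where the sampled range is captured exactly and the
# values should match to machine precision. All names below
# (randomized_eig_sketch, rng, B) are illustrative.
import numpy as np

def randomized_eig_sketch(A, k, oversample=2):
    n = A.shape[0]
    G = np.random.standard_normal(size=(n, k + oversample))
    H = A.dot(G)                          # H^(0), as in the num_levels=1 branch
    H = np.hstack((H, A.dot(A.dot(H))))   # append H^(1) = A^2 H^(0)
    Q, _ = np.linalg.qr(H)                # orthonormal basis for the sample range
    T = A.dot(Q)                          # A^T Q, with A symmetric
    V, s, Wt = np.linalg.svd(T, full_matrices=False)
    U = Q.dot(Wt.T)
    return s[:k], U[:, :k]

rng = np.random.default_rng(0)
B = rng.standard_normal((50, 5))
A = B @ B.T                               # symmetric PSD, exactly rank 5
vals, vecs = randomized_eig_sketch(A, k=3)
ref = np.sort(np.linalg.eigvalsh(A))[::-1][:3]
print(np.allclose(vals, ref))             # True: exact capture of a low-rank range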
コード例 #46
0
    def _qr_factorization(self, y, input_idx, X_cols, output_idx, y_cols,
                          segment, operation):
        """
        Performs a QR-Factorization (Decomposition) using numpy linear
        algebra library and uses the R matrix to solve the Ordinary Least
        Square (OLS) problem.

        Arguments:
            y: the output signals;
            input_idx: the sequential number of the execution input;
            X_cols: the input data columns in case they are provided;
            output_idx: the sequential number of the execution output;
            y_cols: the output data columns in case they are provided;
            segment: the sequential number of the execution segment (interval);
            operation: which operation to perform (all, condition_number or
                chi_squared_test).
        """

        # Take Column Names
        input_idx_name, output_idx_name = self._update_index_name(
            input_idx, X_cols, output_idx, y_cols)

        # Take Segment
        segment_idx = self.initial_intervals[segment]

        # Take Regressor Matrix
        Phi = self.Phi_dict["segment" + "_" +
                            str(segment)][output_idx_name][input_idx_name]

        # Define the y shift according to the model structure
        # If a model structure is of order 3, for example, the
        # output used for fitting the model must start 3 samples
        # ahead. In that case, y_shift=3. For Laguerre models, the
        # y_shift is always 1, regardless of the model order.
        y_length = len(y[segment_idx, output_idx])
        regressor_length = Phi.shape[0]
        y_shift = y_length - regressor_length

        # Create the Augmented Regressor Matrix [Phi y]
        self.Phi_aug_dict[
            "segment" + "_" +
            str(segment)][output_idx_name][input_idx_name] = np.zeros(
                (len(segment_idx[y_shift:]), self.Nb + 1))

        self.Phi_aug_dict["segment" + "_" + str(segment)][output_idx_name][
            input_idx_name][:Phi.shape[0], :self.Nb] = Phi

        self.Phi_aug_dict[
            "segment" + "_" +
            str(segment)][output_idx_name][input_idx_name][:, -1] = np.squeeze(
                y[segment_idx, output_idx][y_shift:])

        # QR-Factorization
        Q, R = LA.qr(
            self.Phi_aug_dict["segment" + "_" +
                              str(segment)][output_idx_name][input_idx_name])
        R1 = R[:self.Nb, :self.Nb]
        R2 = R[:self.Nb, self.Nb]
        R3 = R[self.Nb, self.Nb]

        # Compute theta, the information matrix and its condition number, and the chi-squared test
        if operation in ("all", "condition_number"):
            self.I_dict[
                "segment" + "_" +
                str(segment)][output_idx_name][input_idx_name] = (
                    1 / len(np.squeeze(y[segment_idx, output_idx][y_shift:]))
                ) * np.matmul(R1.T, R1)

            self.cond_num_dict[
                "segment" + "_" +
                str(segment)][output_idx_name][input_idx_name] = LA.cond(
                    self.I_dict["segment" + "_" +
                                str(segment)][output_idx_name][input_idx_name])

        if operation in ("all", "chi_squared_test"):
            try:
                self.theta_dict[
                    "segment" + "_" +
                    str(segment)][output_idx_name][input_idx_name] = np.matmul(
                        LA.inv(R1), R2)
            except LA.LinAlgError:
                # R1 is singular, so theta cannot be computed for this segment
                pass

            self.chi_squared_dict["segment" + "_" + str(segment)][
                output_idx_name][input_idx_name] = (np.sqrt(
                    len(np.squeeze(y[segment_idx, output_idx][y_shift:]))) /
                                                    np.abs(R3)) * LA.norm(
                                                        x=R2, ord=2)
コード例 #47
0
ファイル: Frahst_v3_1.py プロジェクト: aanchan/Old-PhD-Code
# -*- coding: utf-8 -*-
コード例 #48
0
linalg.inv(m3)

# You can also compute the [pseudoinverse](https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_pseudoinverse) using `pinv`:
linalg.pinv(m3)

# ## Identity matrix
# The product of a matrix by its inverse returns the identity matrix (with small floating point errors):
m3.dot(linalg.inv(m3))

# You can create an identity matrix of size NxN by calling `eye`:
np.eye(3)

# ## QR decomposition
# The `qr` function computes the [QR decomposition](https://en.wikipedia.org/wiki/QR_decomposition) of a matrix:
q, r = linalg.qr(m3)
q

r

q.dot(r)  # q.r equals m3

# ## Determinant
# The `det` function computes the [matrix determinant](https://en.wikipedia.org/wiki/Determinant):
linalg.det(m3)  # Computes the matrix determinant

# ## Eigenvalues and eigenvectors
# The `eig` function computes the [eigenvalues and eigenvectors](https://en.wikipedia.org/wiki/Eigenvalues_and_eigenvectors) of a square matrix:
eigenvalues, eigenvectors = linalg.eig(m3)
eigenvalues  # λ
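# Each eigenpair satisfies m3 · v = λ v; a quick check (column j of
# `eigenvectors` pairs with `eigenvalues[j]`, so the difference below is ~0):
m3.dot(eigenvectors) - eigenvectors * eigenvalues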
コード例 #49
0
def multifit_covar(J=None, epsrel=0.0, weights=None):
    """This is the implementation of the multifit covariance.

    This is inspired by the GNU Scientific Library (GSL).

    This function uses the Jacobian matrix J to compute the covariance matrix of the best-fit parameters, covar.

    The parameter 'epsrel' is used to remove linear-dependent columns when J is rank deficient.

    The weighting matrix 'W' is a square symmetric matrix. For independent measurements this is a diagonal matrix, where larger values indicate greater significance.  It is formed by multiplying an identity matrix with the supplied weights vector::

        W = I. w

    The weights should normally be supplied as a vector: 1 / errors^2. 

    The covariance matrix is given by::

        covar = (J^T.W.J)^{-1} ,

    and is computed by QR decomposition of J with column-pivoting. Any columns of R which satisfy::

        |R_{kk}| <= epsrel |R_{11}| ,

    are considered linearly-dependent and are excluded from the covariance matrix (the corresponding rows and columns of the covariance matrix are set to zero).  If the minimisation uses the weighted least-squares function::

        f_i = (Y(x, t_i) - y_i) / sigma_i ,

    then the covariance matrix above gives the statistical error on the best-fit parameters resulting from the Gaussian errors 'sigma_i' on the underlying data 'y_i'.

    This can be verified from the relation 'd_f = J d_c' and the fact that the fluctuations in 'f' from the data 'y_i' are normalised by 'sigma_i' and so satisfy::

        <d_f d_f^T> = I ,

    For an unweighted least-squares function f_i = (Y(x, t_i) - y_i) the covariance matrix above should be multiplied by the variance of the residuals about the best-fit::

        sigma^2 = sum ( (y_i - Y(x, t_i))^2 / (n-p) ) ,

    to give the variance-covariance matrix sigma^2 C.  This estimates the statistical error on the best-fit parameters from the scatter of the underlying data.

    Links
    =====

    More information can be found here:

        - U{GSL - GNU Scientific Library<http://www.gnu.org/software/gsl/>}
        - U{Manual: Overview<http://www.gnu.org/software/gsl/manual/gsl-ref_37.html#SEC510>}
        - U{Manual: Computing the covariance matrix of best fit parameters<http://www.gnu.org/software/gsl/manual/gsl-ref_38.html#SEC528>}
        - U{Other reference<http://www.orbitals.com/self/least/least.htm>}

    @param J:               The Jacobian matrix.
    @type J:                numpy array
    @param epsrel:          Any columns of R which satisfy |R_{kk}| <= epsrel |R_{11}| are considered linearly-dependent and are excluded from the covariance matrix, where the corresponding rows and columns of the covariance matrix are set to zero.
    @type epsrel:           float
    @keyword weights:       The weights to scale with.  Normally supplied as 1 over the squared standard deviation of the measured intensity values per time point: weights = 1 / sd_i^2.
    @type weights:          numpy array
    @return:                The covariance matrix.
    @rtype:                 square numpy array
    """

    # Weighting matrix. This is a square symmetric matrix.
    # For independent measurements, this is a diagonal matrix. Larger values indicate greater significance.

    # Make a square diagonal matrix.
    eye_mat = eye(weights.shape[0])

    # Form weight matrix.
    W = multiply(eye_mat, weights)

    # The covariance matrix (sometimes referred to as the variance-covariance matrix), Qxx, is defined as:
    # Qxx = (J^t W J)^(-1)

    # Calculate step by step, by matrix multiplication.
    Jt = transpose(J)
    Jt_W = dot(Jt, W)
    Jt_W_J = dot(Jt_W, J)

    # Invert matrix by QR decomposition, to check columns of R which satisfy: |R_{kk}| <= epsrel |R_{11}|
    Q, R = qr(Jt_W_J)

    # Compute the threshold value, epsrel * |R_{11}|.
    abs_epsrel_R11 = absolute(multiply(epsrel, R[0, 0]))

    # Make an array of True/False statements.
    # These are considered linearly-dependent and are excluded from the covariance matrix.
    # The corresponding rows and columns of the covariance matrix are set to zero
    epsrel_check = absolute(R) <= abs_epsrel_R11

    # Form the covariance matrix.
    Qxx = dot(inv(R), transpose(Q))
    #Qxx2 = dot(inv(R), inv(Q) )
    #print(Qxx - Qxx2)

    # Test direct invert matrix of matrix.
    #Qxx_test = inv(Jt_W_J)

    # Zero out the linearly-dependent elements of the covariance matrix.
    Qxx[epsrel_check] = 0.0

    # Throw a warning if any columns are considered linearly-dependent and are excluded from the covariance matrix.
    # Only check the diagonal, since that holds the variance.
    diag_epsrel_check = diag(epsrel_check)

    # If any of the diagonals does not meet the epsrel condition.
    if any(diag_epsrel_check):
        for i in range(diag_epsrel_check.shape[0]):
            abs_Rkk = absolute(R[i, i])
            if abs_Rkk <= abs_epsrel_R11:
                warn(
                    RelaxWarning(
                        "Co-Variance element k,k=%i was found to meet |R_{kk}| <= epsrel |R_{11}|, meaning %1.1f <= %1.3f * %1.1f , and is therefore determined to be linearly-dependent and are excluded from the covariance matrix by setting the value to 0.0."
                        % (i + 1, abs_Rkk, epsrel, abs_epsrel_R11 / epsrel)))
                #print(cond(Jt_W_J) < 1./spacing(1.) )

    return Qxx
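
# --- Added illustration (plain numpy, independent of the relax environment
# above, with a made-up two-parameter exponential model): the covariance
# computed by multifit_covar is covar = (J^T.W.J)^{-1}, obtained via QR.
import numpy as np

t = np.linspace(0.1, 2.0, 20)
I0, R_rate = 10.0, 1.5                      # toy model: y = I0 * exp(-R t)
J = np.column_stack((np.exp(-R_rate * t),                 # dy/dI0
                     -I0 * t * np.exp(-R_rate * t)))      # dy/dR
sigma = 0.1 * np.ones_like(t)
W = np.diag(1.0 / sigma**2)                 # weights = 1 / errors^2

JtWJ = J.T @ W @ J
Q, Rq = np.linalg.qr(JtWJ)
covar = np.linalg.inv(Rq) @ Q.T             # (J^T.W.J)^{-1}, as in the function
print(np.allclose(covar, np.linalg.inv(JtWJ)))   # True
print(np.sqrt(np.diag(covar)))              # 1-sigma errors on (I0, R)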
コード例 #50
0
ファイル: test_multi.py プロジェクト: thatdeep/pymanopt
 def test_multilog_singlemat(self):
     a = np.diag(rnd.rand(self.m))
     q, r = la.qr(rnd.randn(self.m, self.m))
     # A is a positive definite matrix
     A = q.dot(a.dot(q.T))
     np_testing.assert_allclose(multilog(A, pos_def=True), logm(A))
コード例 #51
0
 def __init__(self):
     super().__init__()
     for sz in SIZES:
         array = nla.qr(self.rng.standard_normal((sz, sz)))[0]
         self.matrices[sz] = matrices.OrthogonalMatrix(array)
         self.np_matrices[sz] = array
コード例 #52
0
ファイル: PCoA.py プロジェクト: Xingyinliu-Lab/PM2RA
def _fsvd(centered_distance_matrix, number_of_dimensions=10):
    """
    Performs singular value decomposition, or more specifically in
    this case eigendecomposition, using a fast heuristic algorithm
    nicknamed "FSVD" (FastSVD), adapted and optimized from the algorithm
    described by Halko et al. (2011).
    Parameters
    ----------
    centered_distance_matrix : np.array
       Numpy matrix representing the distance matrix for which the
       eigenvectors and eigenvalues shall be computed
    number_of_dimensions : int
       Number of dimensions to keep. Must be lower than or equal to the
       rank of the given distance_matrix.
    Returns
    -------
    np.array
       Array of eigenvectors, each with number_of_dimensions length.
    np.array
       Array of eigenvalues, a total number of number_of_dimensions.
    Notes
    -----
    The algorithm is based on 'An Algorithm for the Principal
    Component analysis of Large Data Sets'
    by N. Halko, P.G. Martinsson, Y. Shkolnisky, and M. Tygert.
    Original Paper: https://arxiv.org/abs/1007.5510
    Ported from MATLAB implementation described here:
    https://stats.stackexchange.com/a/11934/211065
    """

    m, n = centered_distance_matrix.shape

    # Number of levels of the Krylov method to use.
    # For most applications, num_levels=1 or num_levels=2 is sufficient.
    num_levels = 1

    # Changes the power of the spectral norm, thus minimizing the error.
    use_power_method = False

    # Note: a (conjugate) transpose is removed for performance, since we
    # only expect square matrices.
    if m != n:
        raise ValueError('FSVD expects square distance matrix')

    if number_of_dimensions > m or number_of_dimensions > n:
        raise ValueError('FSVD: number_of_dimensions cannot be larger than'
                         ' the dimensionality of the given distance matrix.')

    if number_of_dimensions < 0:
        raise ValueError('Invalid operation: cannot reduce distance matrix '
                         'to negative dimensions using PCoA. Did you intend '
                         'to specify the default value "0", which sets '
                         'the number_of_dimensions equal to the '
                         'dimensionality of the given distance matrix?')

    k = number_of_dimensions + 2

    # Form a real nxl matrix G whose entries are independent, identically
    # distributed Gaussian random variables of zero mean and unit variance
    G = standard_normal(size=(n, k))

    if use_power_method:
        # use only the given exponent
        H = dot(centered_distance_matrix, G)

        for x in range(2, num_levels + 2):
            # enhance decay of singular values
            # note: distance_matrix is no longer transposed, saves work
            # since we're expecting symmetric, square matrices anyway
            # (Daniel McDonald's changes)
            H = dot(centered_distance_matrix, dot(centered_distance_matrix, H))

    else:
        # compute the m x l matrices H^{(0)}, ..., H^{(i)}
        # Note that this is done implicitly in each iteration below.
        H = dot(centered_distance_matrix, G)
        # to enhance performance
        H = hstack(
            (H, dot(centered_distance_matrix, dot(centered_distance_matrix,
                                                  H))))
        for x in range(3, num_levels + 2):
            tmp = dot(centered_distance_matrix, dot(centered_distance_matrix,
                                                    H))

            H = hstack((H,
                        dot(centered_distance_matrix,
                            dot(centered_distance_matrix, tmp))))

    # Using the pivoted QR-decomposition, form a real m * ((i+1)l) matrix Q
    # whose columns are orthonormal, s.t. there exists a real
    # ((i+1)l) * ((i+1)l) matrix R for which H = QR
    Q, R = qr(H)

    # Compute the n * ((i+1)l) product matrix T = A^T Q
    T = dot(centered_distance_matrix, Q)  # step 3

    # Form an SVD of T
    Vt, St, W = svd(T, full_matrices=False)
    W = W.transpose()

    # Compute the m * ((i+1)l) product matrix
    Ut = dot(Q, W)

    U_fsvd = Ut[:, :number_of_dimensions]

    S = St[:number_of_dimensions]

    # drop imaginary component, if we got one
    # Note:
    #   In cogent, after computing eigenvalues/vectors, the imaginary part
    #   is dropped, if any. We know for a fact that the eigenvalues are
    #   real, so that's not necessary, but eigenvectors can in principle
    #   be complex (see for example
    #   http://math.stackexchange.com/a/47807/109129 for details)
    eigenvalues = S.real
    eigenvectors = U_fsvd.real

    return eigenvalues, eigenvectors
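
# --- Added usage sketch (hypothetical, assuming the module-level imports used
# by _fsvd: standard_normal, dot, hstack, qr, svd): build a small Euclidean
# distance matrix, apply the standard PCoA (Gower) centering, and compare the
# leading eigenvalues against numpy.linalg.eigvalsh. The centered matrix has
# exact rank 3 here, so the randomized estimates should match closely.
import numpy as np

pts = np.random.default_rng(2).standard_normal((30, 3))
D = np.linalg.norm(pts[:, None, :] - pts[None, :, :], axis=-1)
n_pts = D.shape[0]
C = np.eye(n_pts) - np.ones((n_pts, n_pts)) / n_pts
G_centered = -0.5 * C @ (D ** 2) @ C        # Gower-centered matrix

eigenvalues, eigenvectors = _fsvd(G_centered, number_of_dimensions=3)
ref = np.sort(np.linalg.eigvalsh(G_centered))[::-1][:3]
print(eigenvalues)                          # ~ ref
print(ref)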
コード例 #53
0
ファイル: modulation.py プロジェクト: tommes0815/CommPy
def kbest(y, h, constellation, K):
    """ MIMO K-best Schnorr-Euchner Detection.

    Reference: Zhan Guo and P. Nilsson, 'Algorithm and implementation of the K-best sphere decoding for MIMO detection',
        IEEE Journal on Selected Areas in Communications, vol. 24, no. 3, pp. 491-503, Mar. 2006.

    Parameters
    ----------
    y : 1D ndarray
        Received complex symbols (length: num_receive_antennas)

    h : 2D ndarray
        Channel Matrix (shape: num_receive_antennas x num_transmit_antennas)

    constellation : 1D ndarray of floats
        Constellation used to modulate the symbols

    K : positive integer
        Number of candidates kept at each step

    Returns
    -------
    x : 1D ndarray of constellation points
        Detected vector (length: num_transmit_antennas)

    Raises
    ------
    ValueError
                If h has more columns than rows.
    """
    nb_tx, nb_rx = h.shape
    if nb_rx > nb_tx:
        raise ValueError('h has more columns than rows')

    # QR decomposition
    q, r = qr(h)
    yt = q.conj().T.dot(y)

    # Initialization
    m = len(constellation)
    nb_can = 1

    if isinstance(constellation[0], complex):
        const_type = complex
    else:
        const_type = float
    X = empty((nb_rx, K * m), dtype=const_type)  # Set of current candidates
    d = tile(yt[:, None], (1, K * m))  # Corresponding distance vector
    d_tot = zeros(K * m, dtype=float)  # Corresponding total distance
    hyp = empty(K * m, dtype=const_type)  # Hypothesis vector

    # Processing
    for coor in range(nb_rx - 1, -1, -1):
        nb_hyp = nb_can * m

        # Copy best candidates m times
        X[:, :nb_hyp] = tile(X[:, :nb_can], (1, m))
        d[:, :nb_hyp] = tile(d[:, :nb_can], (1, m))
        d_tot[:nb_hyp] = tile(d_tot[:nb_can], (1, m))

        # Make hypothesis
        hyp[:nb_hyp] = repeat(constellation, nb_can)
        X[coor, :nb_hyp] = hyp[:nb_hyp]
        d[coor, :nb_hyp] -= r[coor, coor] * hyp[:nb_hyp]
        d_tot[:nb_hyp] += abs(d[coor, :nb_hyp])**2

        # Select best candidates
        argsort = d_tot[:nb_hyp].argsort()
        nb_can = min(nb_hyp, K)  # Update number of candidate

        # Update accordingly
        X[:, :nb_can] = X[:, argsort[:nb_can]]
        d[:, :nb_can] = d[:, argsort[:nb_can]]
        d[:coor, :nb_can] -= r[:coor, coor, None] * hyp[argsort[:nb_can]]
        d_tot[:nb_can] = d_tot[argsort[:nb_can]]
    return X[:, 0]
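
# --- Added check (hypothetical 2x2 real BPSK system, assuming the numpy names
# used by kbest are imported in its module): with K at least as large as the
# number of candidate vectors, the K-best search is exhaustive and should
# agree with brute-force maximum-likelihood detection.
import itertools
import numpy as np

rng = np.random.default_rng(3)
constellation = np.array([-1.0, 1.0])       # BPSK
h = rng.standard_normal((2, 2))
x_true = rng.choice(constellation, 2)
y = h @ x_true + 0.01 * rng.standard_normal(2)

ml = min(itertools.product(constellation, repeat=2),
         key=lambda x: np.linalg.norm(y - h @ np.array(x)))
print(np.array(ml))                         # brute-force ML solution
print(kbest(y, h, constellation, K=4))      # should agree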
コード例 #54
0
def _tracemin_fiedler(L, X, normalized, tol, method):
    """Compute the Fiedler vector of L using the TraceMIN-Fiedler algorithm.

    The Fiedler vector of a connected undirected graph is the eigenvector
    corresponding to the second smallest eigenvalue of the Laplacian matrix
    of the graph. This function starts with the Laplacian L, not the Graph.

    Parameters
    ----------
    L : Laplacian of a possibly weighted or normalized, but undirected graph

    X : Initial guess for a solution. Usually a matrix of random numbers.
        This function allows more than one column in X to identify more than
        one eigenvector if desired.

    normalized : bool
        Whether the normalized Laplacian matrix is used.

    tol : float
        Tolerance of relative residual in eigenvalue computation.
        Warning: There is no limit on number of iterations.

    method : string
        Should be 'tracemin_pcg', 'tracemin_chol' or 'tracemin_lu'.
        Otherwise exception is raised.

    Returns
    -------
    sigma, X : Two NumPy arrays of floats.
        The lowest eigenvalues and corresponding eigenvectors of L.
        The size of input X determines the size of these outputs.
        As this is for Fiedler vectors, the zero eigenvalue (and
        constant eigenvector) are avoided.
    """
    n = X.shape[0]

    if normalized:
        # Form the normalized Laplacian matrix and determine the eigenvector of
        # its nullspace.
        e = sqrt(L.diagonal())
        D = spdiags(1. / e, [0], n, n, format='csr')
        L = D * L * D
        e *= 1. / norm(e, 2)

    if normalized:

        def project(X):
            """Make X orthogonal to the nullspace of L.
            """
            X = asarray(X)
            for j in range(X.shape[1]):
                X[:, j] -= dot(X[:, j], e) * e
    else:

        def project(X):
            """Make X orthogonal to the nullspace of L.
            """
            X = asarray(X)
            for j in range(X.shape[1]):
                X[:, j] -= X[:, j].sum() / n

    if method == 'tracemin_pcg':
        D = L.diagonal().astype(float)
        solver = _PCGSolver(lambda x: L * x, lambda x: D * x)
    elif method == 'tracemin_chol' or method == 'tracemin_lu':
        # Convert A to CSC to suppress SparseEfficiencyWarning.
        A = csc_matrix(L, dtype=float, copy=True)
        # Force A to be nonsingular. Since A is the Laplacian matrix of a
        # connected graph, its rank deficiency is one, and thus one diagonal
        # element needs to modified. Changing to infinity forces a zero in the
        # corresponding element in the solution.
        i = (A.indptr[1:] - A.indptr[:-1]).argmax()
        A[i, i] = float('inf')
        if method == 'tracemin_chol':
            solver = _CholeskySolver(A)
        else:
            solver = _LUSolver(A)
    else:
        raise nx.NetworkXError('Unknown linear system solver: ' + method)

    # Initialize.
    Lnorm = abs(L).sum(axis=1).flatten().max()
    project(X)
    W = asmatrix(ndarray(X.shape, order='F'))

    while True:
        # Orthonormalize X.
        X = qr(X)[0]
        # Compute the iteration matrix H.
        W[:, :] = L * X
        H = X.T * W
        sigma, Y = eigh(H, overwrite_a=True)
        # Compute the Ritz vectors.
        X *= Y
        # Test for convergence exploiting the fact that L * X == W * Y.
        res = dasum(W * asmatrix(Y)[:, 0] - sigma[0] * X[:, 0]) / Lnorm
        if res < tol:
            break
        # Compute X = L \ X / (X' * (L \ X)).
        # L \ X can have an arbitrary projection on the nullspace of L,
        # which will be eliminated.
        W[:, :] = solver.solve(X, tol)
        X = (inv(W.T * X) * W.T).T  # Preserves Fortran storage order.
        project(X)

    return sigma, asarray(X)
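
# --- Added illustration (dense and standalone, not using the solver machinery
# above): what the TraceMIN iteration converges to. For a path graph on five
# nodes, the Fiedler vector is the eigenvector of the second-smallest
# Laplacian eigenvalue, and it varies monotonically along the path.
import numpy as np

Adj = np.diag(np.ones(4), 1) + np.diag(np.ones(4), -1)  # path-graph adjacency
Lap = np.diag(Adj.sum(axis=1)) - Adj                    # graph Laplacian
vals, vecs = np.linalg.eigh(Lap)
print(vals[0])      # ~0: the trivial eigenvalue (constant eigenvector)
print(vals[1])      # algebraic connectivity
print(vecs[:, 1])   # Fiedler vector, monotone along the path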
コード例 #55
0
# when loading an archive, the result will be a dictionary-like object
#load text file
arr = np.loadtxt('array_ex.txt',delimiter=',')

# Linear Algebra
x = np.array([[1.,2.,3.],[4.,5.,6.]])
y = np.array([[6.,23.],[-1.,7.],[8.,9.]])
x.dot(y)
#equivalently
np.dot(x,y)

from numpy.linalg import inv, qr
from numpy.random import rand
X = rand(5, 5)
mat = X.T.dot(X)
inv(mat) # inverse matrix
q,r = qr(mat) # QR factorization / orthogonalization

# random number generation
samples = np.random.normal(size=(4,4))

# Example Random Walk
nwalks = 5000
nsteps = 1000
draws = np.random.randint(0,2,size=(nwalks,nsteps))
steps = np.where(draws>0,1,-1)
walks = steps.cumsum(1)
# get max and min of all walks
walk_max, walk_min = walks.max(), walks.min()
# compute minimum crossing time to 30 or -30
hits30 = (np.abs(walks)>=30).any(1)  # walks that ever hit +30 or -30
num_hits30 = hits30.sum()
crossing_times = (np.abs(walks[hits30])>=30).argmax(1).mean()
コード例 #56
0
ファイル: isvd.py プロジェクト: BerenMillidge/iterative_svd
# create ground truth SVD matrix
X = np.abs(np.random.normal(0.0, 5, size=(5, 5)))
print(X)
true_U, true_S, true_V = LA.svd(X)
print("True U: ", true_U)
print("True S : ", true_S)
print("True V : ", true_V)
# initialize our Us, Vs, Ys
U = np.abs(np.random.normal(0.0, 0.001, size=(5, 5)))
V = np.abs(np.random.normal(0.0, 0.001, size=(5, 5)))
Y = np.abs(np.random.normal(0.0, 0.001, size=(5, 5)))
num_iters = 0
lr = 0.1
N_iters = 1000
# orthogonalize U and V
U, _ = LA.qr(U)
V, _ = LA.qr(V)
#print(U.T @ U) # check it is orthogonal
# compute S using Eq 8
S = U.T @ Y @ V
penalization_param = 0.001
# begin loop
for i in range(N_iters):
    U += lr * (((Y @ V) + (U @ V.T @ Y.T @ U)) @ S)  # Eq 6
    V += lr * (((Y.T @ U) + (V @ U.T @ Y @ V)) @ S)  # Eq 7
    #U = U * np.sign(S)
    S = U.T @ Y @ V
    S = np.abs(S)  # make positive
    Y += lr * ((U @ S @ V.T) - Y + (penalization_param * (X - Y)))  # Eq 10
    #U, _ = LA.qr(U)
    #V,_ = LA.qr(V)
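
# --- Added convergence check (reuses the variables defined above): if the
# iteration has converged, Y should approximate X and U S V^T should
# approximate Y.
print("||X - Y|| =", LA.norm(X - Y))
print("||Y - U S V^T|| =", LA.norm(Y - U @ S @ V.T))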
コード例 #57
0
def FRHH(streams, rr, alpha, sci=0):
    """ Fast row-Householder Subspace Traking Algorithm, Non adaptive version 
    
    """
    #===============================================================================
    #     #Initialise variables and data structures
    #===============================================================================
    # check input is type float32

    streams = float32(streams)
    alpha = float32(alpha)

    N = streams.shape[1]  # No. of streams

    # Data Stores
    E_t = [float32(0)]  # time series of total energy
    E_dash_t = [float32(0)]  # time series of reconstructed energy
    z_dash = npm.zeros(N, dtype=float32)  # time series of reconstructed data
    RSRE = mat([float32(0)
                ])  # time series of Root squared Reconstruction Error
    hid_var = npm.zeros((streams.shape[0], N),
                        dtype=float32)  # Array of hidden Variables

    seed(111)

    # Initial Q(0) - either random or I

    # Random
    qq, RR = qr(rand(N, rr))  # generate random orthonormal matrix N x r
    Q_t = [mat(float32(qq))]  # Initialise Q_t - N x r

    # Identity
    # q_I = npm.eye(N, rr)
    # Q_t = [q_I]

    S_t = [npm.ones(
        (rr, rr), dtype=float32) * float32(0.00001)]  # Initialise S_t - r x r

    No_inp_count = 0  # count of number of times there was no input i.e. z_t = [0,...,0]
    No_inp_marker = zeros((1, streams.shape[0] + 1))

    v_vec_min_1 = npm.zeros((rr, 1), dtype=float32)

    iter_streams = iter(streams)

    for t in range(1, streams.shape[0] + 1):

        z_vec = mat(next(iter_streams))

        z_vec = z_vec.T  # Now a column Vector

        hh = Q_t[t - 1].T * z_vec  # 13a

        Z = z_vec.T * z_vec - hh.T * hh  # 13b

        # Z = float(Z) # check that Z is really scalar

        if Z > 0.00000000001:

            # Refined version, sci accounts better for tracked eigen values
            if sci != 0:
                u_vec = S_t[t - 1] * v_vec_min_1
                extra_term = 2 * alpha * sci * u_vec * v_vec_min_1.T
                extra_term = float32(extra_term)
            else:
                extra_term = float32(0)

            X = alpha * S_t[t - 1] + hh * hh.T - extra_term

            # QR method - hopefully more stable
            aa = X.T
            b = sqrt(Z[0, 0]) * hh

            # b_vec = solve(aa,b)
            b_vec = QRsolve(aa, b)

            b_vec = float32(b_vec)

            beta = float32(4) * (b_vec.T * b_vec + 1)

            phi_sq_t = float32(0.5) + (
                float32(1.0) / sqrt(beta)
            )  # AGGGGGGGGGGGGGGGGGHHHHHHHHHHHHHHHHHH!

            phi_t = sqrt(phi_sq_t)

            gamma = (float32(1) - float32(2) * phi_sq_t) / (float32(2) * phi_t)

            delta = phi_t / sqrt(Z)

            v_vec_t = multiply(gamma, b_vec)

            S_t.append(X - multiply(float32(1) / delta, v_vec_t * hh.T))

            w_vec = multiply(delta, hh) - v_vec_t

            e_vec = multiply(delta, z_vec) - (Q_t[t - 1] * w_vec)

            Q_t.append(Q_t[t - 1] - float32(2) * (e_vec * v_vec_t.T))

            v_vec_min_1 = v_vec_t  # update for next time step

            # Record hidden variables
            hid_var[t - 1, :hh.shape[0]] = hh.T

            # Record reconstructed z
            new_z_dash = Q_t[t - 1] * hh
            z_dash = npm.vstack((z_dash, new_z_dash.T))

            # Record RSRE
            new_RSRE = RSRE[0, -1] + (((norm(new_z_dash - z_vec))**2) /
                                      (norm(z_vec)**2))
            RSRE = npm.vstack((RSRE, mat(new_RSRE)))

        else:

            # Record hidden variables
            hid_var[t - 1, :hh.shape[0]] = hh.T

            # Record reconstructed z
            new_z_dash = Q_t[t - 1] * hh
            z_dash = npm.vstack((z_dash, new_z_dash.T))

            # Record RSRE
            new_RSRE = RSRE[0, -1] + (((norm(new_z_dash - z_vec))**2) /
                                      (norm(z_vec)**2))
            RSRE = npm.vstack((RSRE, mat(new_RSRE)))

            # Repeat last entries
            Q_t.append(Q_t[-1])
            S_t.append(S_t[-1])

            # increment count
            No_inp_count += 1
            No_inp_marker[0, t - 1] = 1  # mark zero-input time step

    # convert to tuples to save memory
    Q_t = tuple(Q_t)
    S_t = tuple(S_t)
    rr = array(rr)
    E_t = array(E_t)
    E_dash_t = array(E_dash_t)

    return Q_t, S_t, rr, E_t, E_dash_t, hid_var, z_dash, RSRE, No_inp_count, No_inp_marker
コード例 #58
0
 def rand(self):
     # Generate random  point using qr of random normally distributed
     # matrix.
     O, _ = la.qr(randn(self.n, self.d))
     return O
コード例 #59
0
# # Conjugate Gradient Method

# In[1]:

import numpy as np
import numpy.linalg as la
import scipy.optimize as sopt
import matplotlib.pyplot as pt

# Let's make up a random linear system with an SPD $A$:

# In[2]:

np.random.seed(25)
n = 2
Q = la.qr(np.random.randn(n, n))[0]
A = Q @ (np.diag(np.random.rand(n)) @ Q.T)

b = np.random.randn(n)

# Here's the objective function for CG:

# In[3]:


def phi(xvec):
    x, y = xvec
    return 0.5 * (A[0, 0] * x * x + 2 * A[1, 0] * x * y +
                  A[1, 1] * y * y) - x * b[0] - y * b[1]

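# Since $A$ is SPD, the minimizer of $\phi$ solves the linear system $Ax = b$
# (the gradient is $Ax - b$), so we can compute a reference solution directly
# and sanity-check the objective:

# In[4]:

x_star = la.solve(A, b)
print(phi(x_star), phi(x_star + 0.1))  # phi is smallest at x_star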
コード例 #60
0
def chatterjeeMachlerMod(A, y, **kwargs):
    # using the weights as in chatterjeeMachler means that the minimum residual value ends up in median(resids)
    # instead, use M-estimate weights with a modified residual which includes a measure of leverage
    # for this, use residuals / (1 - p)^2
    # note: this can in principle divide by zero when the leverage p equals 1
    from resistics.common.math import eps
    from resistics.regression.moments import getLocation, getScale
    from resistics.regression.weights import getWeights
    from resistics.regression.robust import defaultOptions, applyWeights, olsModel
    import numpy.linalg as linalg

    # now calculate p and n
    n = A.shape[0]
    p = A.shape[1]
    pnRatio = 1.0 * p / n

    # calculate the projection matrix
    q, r = linalg.qr(A)
    Pdiag = np.empty(shape=(n), dtype="float")
    for i in range(0, n):
        Pdiag[i] = np.absolute(np.sum(q[i, :] * np.conjugate(q[i, :]))).real
    del q, r
    Pdiag = Pdiag / (np.max(Pdiag) + 0.0000000001)
    locP = getLocation(Pdiag, "median")
    scaleP = getScale(Pdiag, "mad")
    # bound = locP + 6*scaleP
    bound = locP + 6 * scaleP
    indices = np.where(Pdiag > bound)
    Pdiag[indices] = 0.99999
    leverageMeas = np.power(1.0 - Pdiag, 2)

    # weights for the first iteration
    # this is purely based on the leverage
    tmp = np.ones(shape=(n), dtype="float") * pnRatio
    tmp = np.maximum(Pdiag, tmp)
    weights = np.reciprocal(tmp)

    # get options
    options = parseKeywords(defaultOptions(), kwargs, printkw=False)
    # generalPrint("S-Estimate", "Using weight function = {}".format(weightFnc))
    if options["intercept"] == True:
        # add column of ones for constant term
        A = np.hstack((np.ones(shape=(A.shape[0], 1), dtype="complex"), A))

    # iteratively weighted least squares
    iteration = 0
    while iteration < options["maxiter"]:
        # do the weighted least-squares
        Anew, ynew = applyWeights(A, y, weights)
        paramsNew, squareResidNew, rankNew, sNew = linalg.lstsq(Anew,
                                                                ynew,
                                                                rcond=None)
        residsNew = y - np.dot(A, paramsNew)
        # check residsNew to make sure not all zeros (i.e. will happen in undetermined or equally determined system)
        if np.sum(np.absolute(residsNew)) < eps():
            # then return everything here
            return paramsNew, residsNew, weights
        residsNew = residsNew / leverageMeas
        scale = getScale(residsNew, "mad0")

        # standardise and calculate weights
        residsNew = residsNew / scale
        weightsNew = getWeights(residsNew, "huber")
        # increment iteration
        iteration = iteration + 1
        weights = weightsNew
        params = paramsNew

        if iteration > 1:
            # check to see whether the change is smaller than the tolerance
            changeResids = linalg.norm(residsNew -
                                       resids) / linalg.norm(residsNew)
            if changeResids < eps():
                # update resids
                resids = residsNew
                break
        # update resids
        resids = residsNew

    # now do the same again, but with a different function
    # do the least squares solution
    params, resids, squareResid, rank, s = olsModel(A, y)
    resids = resids / leverageMeas
    resids = resids / scale
    weights = getWeights(resids, "trimmedMean")
    # iteratively weighted least squares
    iteration = 0
    while iteration < options["maxiter"]:
        # do the weighted least-squares
        Anew, ynew = applyWeights(A, y, weights)
        paramsNew, squareResidNew, rankNew, sNew = linalg.lstsq(Anew,
                                                                ynew,
                                                                rcond=None)
        residsNew = y - np.dot(A, paramsNew)
        # check residsNew to make sure not all zeros (i.e. will happen in undetermined or equally determined system)
        if np.sum(np.absolute(residsNew)) < eps():
            # then return everything here
            return paramsNew, residsNew, weights

        residsNew = residsNew / leverageMeas
        scale = getScale(residsNew, "mad0")

        # standardise and calculate weights
        residsNew = residsNew / scale
        weightsNew = getWeights(residsNew, options["weights"])
        # increment iteration
        iteration = iteration + 1
        weights = weightsNew
        params = paramsNew

        # check to see whether the change is smaller than the tolerance
        changeResids = linalg.norm(residsNew - resids) / linalg.norm(residsNew)
        if changeResids < eps():
            # update resids
            resids = residsNew
            break
        # update resids
        resids = residsNew

    # at the end, return the components
    return params, resids, weights
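
# --- Added generic sketch (plain numpy, no resistics helpers): the
# iteratively reweighted least squares (IRLS) pattern used above, with Huber
# weights and a MAD scale estimate. All names here are illustrative.
import numpy as np

def irls_huber(A, y, k=1.345, maxiter=50, tol=1e-8):
    weights = np.ones(A.shape[0])
    params = np.zeros(A.shape[1])
    for _ in range(maxiter):
        w = np.sqrt(weights)
        paramsNew, *_ = np.linalg.lstsq(A * w[:, None], y * w, rcond=None)
        resids = y - A @ paramsNew
        scale = np.median(np.abs(resids)) / 0.6745 + 1e-12   # MAD scale
        r = np.abs(resids) / scale
        weights = np.where(r <= k, 1.0, k / r)               # Huber weights
        if np.linalg.norm(paramsNew - params) < tol * max(np.linalg.norm(params), 1.0):
            params = paramsNew
            break
        params = paramsNew
    return params, resids, weights

rng = np.random.default_rng(4)
A = rng.standard_normal((200, 2))
y = A @ np.array([2.0, -1.0]) + 0.1 * rng.standard_normal(200)
y[:5] += 20.0                               # a few gross outliers
print(irls_huber(A, y)[0])                  # ~ [2, -1] despite the outliers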