Example #1
 def fit(self, data):   # perform the SVD decomposition
     if isinstance(data, np.ndarray):  # np.matrix is a subclass of np.ndarray
         data = np.mat(data)
     else:
         raise ValueError("data type must be np.ndarray or np.matrix")
     U, D, V = svd(data)
     return U, D, V
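A quick way to sanity-check this kind of wrapper is the reconstruction identity; a minimal standalone sketch (plain NumPy, independent of the class above):

import numpy as np

data = np.random.rand(5, 3)
U, D, V = np.linalg.svd(data, full_matrices=False)
# numpy returns V already transposed (V^H), so U @ diag(D) @ V rebuilds data
assert np.allclose(U @ np.diag(D) @ V, data)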
Example #2
def tt_svd_old(tt, A, eps=1e-9):
    d = len(A.shape)
    tt.n = A.shape
    frob_norm = frobenius_norm(A)
    delta = frob_norm * eps / math.sqrt(d - 1)
    N = A.size
    ns = np.array(A.shape)
    C = A
    tt.cores = []
    ranks = np.zeros(d + 1, dtype=int)
    ranks[0] = 1
    for k in range(d - 1):
        C = reshape(C, (ranks[k] * ns[k], N // (ranks[k] * ns[k])))
        U, s, V = svd(C, full_matrices=False)
        ranks[k + 1] = rank_chop(s, delta)
        r_new = ranks[k + 1]
        if r_new == 0:
            # Tensor is exactly zero: a rank-0 truncation annihilates it,
            # so fall back to an all-zero TT representation and stop.
            tt = tt_zeros(tt.n)
            return
        U = U[:, :ranks[k + 1]]
        tt.cores.append(reshape(U, (ranks[k], ns[k], r_new)))
        V = V[:r_new, :]
        s = s[:r_new]
        V = dot(np.diag(s), V)
        C = V
        N = N * r_new // (ns[k] * ranks[k])
        r = r_new
    tt.cores.append(C.reshape(C.shape + (1,)))
    tt.d = d
    tt.r = ranks
    tt.r[-1] = 1
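For reference, here is a minimal self-contained sketch of the same TT-SVD idea in modern NumPy; tt_svd and tt_full are illustrative names, not the API of the library above. The rank chop keeps the smallest rank whose discarded singular-value tail stays below delta:

import numpy as np

def tt_svd(A, eps=1e-9):
    d = A.ndim
    delta = np.linalg.norm(A) * eps / np.sqrt(max(d - 1, 1))
    C, r, cores = A, 1, []
    for k in range(d - 1):
        C = C.reshape(r * A.shape[k], -1)
        U, s, Vt = np.linalg.svd(C, full_matrices=False)
        tail = np.sqrt(np.cumsum(s[::-1] ** 2))[::-1]  # tail[i] = ||s[i:]||
        r_new = max(1, int(np.sum(tail > delta)))      # rank chop
        cores.append(U[:, :r_new].reshape(r, A.shape[k], r_new))
        C = s[:r_new, None] * Vt[:r_new]               # carry diag(s) @ Vt forward
        r = r_new
    cores.append(C.reshape(r, A.shape[-1], 1))
    return cores

def tt_full(cores):
    # contract the cores back into the full tensor
    T = cores[0]
    for G in cores[1:]:
        T = np.tensordot(T, G, axes=1)
    return T.reshape(T.shape[1:-1])

A = np.random.rand(4, 5, 6)
assert np.allclose(tt_full(tt_svd(A)), A)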
Example #3
def get_reverse_transforms(mat1, mat2):
    # get the reverse procrustes transformation to align m2 with m1
    
    m1 = array(mat1)
    m2 = array(mat2)
    
    # centralize matrices
    mean_m1 = [mean(column) for column in m1.T]
    m1 = m1-mean_m1
    mean_m2 = [mean(column) for column in m2.T]
    m2 = m2-mean_m2
    
    t = array(mean_m2)-array(mean_m1)
    
    # scaling
    scale_m1 = sqrt(sum(square(m1))/(m1.shape[1]))
    scale_m2 = sqrt(sum(square(m2))/(m2.shape[1]))
    m2 = (m2/scale_m2)*scale_m1
    
    sc = scale_m2/scale_m1
    
    # rotation
    u, s, v = svd(dot(m1.T, m2))
    r = inv(dot(v.T, u.T))
    
    return sc, r, mean_m1, mean_m2
Example #4
  def learn(self, data, numsteps, visualize=False):
    if type(data) is not type([]): # learn on a single sequence
      numdims, numpoints = data.shape
      assert numdims == self.numdims
      logXi = zeros((numpoints-1,self.numstates,self.numstates),
          dtype=float)
      lastlogprob = -inf
      for iteration in range(numsteps):
        print "EM iteration: %d" % iteration

        # E-step
        logAlpha, logBeta = self.alphabeta(data)
        # compute xi and gamma
        for t in range(numpoints-1):
          for i in range(self.numstates):
            for j in range(self.numstates):
              logXi[t,i,j] = logAlpha[i,t] +\
                  self.logTransitionProbs[i,j] +\
                  self._loggaussian(data[:,t+1],j) +\
                  logBeta[j,t+1]
          logXi[t,:,:] -= logsumexp(logXi[t,:,:].flatten())
        logGamma = vstack((logsumexp(logXi, 2),
                           logsumexp(logXi[-1,:,:], 1)))
        logprob = logsumexp(logAlpha[:,-1])

        print "logprob = %f" % logprob
        if abs(logprob - lastlogprob) <= 10**-6:
          print "converged"
          break
        lastlogprob = logprob

        # M-step
        self.logInitProbs[:] = logGamma[0,:]
        self.logTransitionProbs[:] = logsumexp(logXi, 0) - \
            logsumexp(logGamma[:-1,:],0)[:,newaxis]
        G = exp(logGamma - logsumexp(logGamma, 0)[newaxis,:])
        for k in range(self.numstates):
          self.means[:,k] = sum(G[:,k][newaxis,:]*data,1)
          data_m = data - self.means[:,k][:,newaxis]
          self.covs[:,:,k] = dot((data_m*G[:,k][newaxis,:]), data_m.T)

        # threshold eigenvalues
        for k in range(self.numstates):
          U, D, Vt = svd(self.covs[:,:,k])
          D[D<0.01] = 0.01
          self.covs[:,:,k] = dot(U, dot(diag(D), Vt))

        # visualize:
        if visualize and self.numdims == 2:
          cla()
          scatter(data[0,:], data[1,:])
          for k in range(self.numstates):
            plotGaussian(self.means[:,k], self.covs[:,:,k])
Example #5
 def __init__(self, distribution, num_eigen=2, \
              mean_est=array([-2.0, -2.0]), cov_est=0.05 * eye(2), \
              sample_discard=500, sample_lag=10, accstar=0.234):
     AdaptiveMetropolis.__init__(self, distribution=distribution, \
                                  mean_est=mean_est, cov_est=cov_est, \
                                  sample_discard=sample_discard, sample_lag=sample_lag, accstar=accstar)
     assert (num_eigen <= distribution.dimension)
     self.num_eigen = num_eigen
     self.dwscale = self.globalscale * ones([self.num_eigen])
     u, s, _ = svd(self.cov_est)
     self.eigvalues = s[0:self.num_eigen]
     self.eigvectors = u[:, 0:self.num_eigen]
Example #6
 def __init__(self, distribution, kernel, Z, nu2=0.1, gamma=0.1, num_eigen=10):
     Kameleon.__init__(self, distribution, kernel, Z, nu2, gamma)
     self.num_eigen = num_eigen
     if Z is None:
         self.Kc = None
         self.eigvalues = None
         self.eigvectors = None
     else:
         K = self.kernel.kernel(Z)
         H = Kernel.centring_matrix(len(self.Z))
         self.Kc = H.dot(K.dot(H))
         u, s, _ = svd(self.Kc)
         self.eigvalues = s[0 : self.num_eigen]
         self.eigvectors = u[:, 0 : self.num_eigen]
Example #7
def triangulate(f1, f2, m1, m2):
    u1 = f1.x
    v1 = f1.y
    u2 = f2.x
    v2 = f2.y

    Q = np.array([
        u1 * m1[2] - m1[0], v1 * m1[2] - m1[1], u2 * m2[2] - m2[0],
        v2 * m2[2] - m2[1]
    ])

    U, E, V = svd(Q)
    # svd returns V^H; the homogeneous solution is the right singular vector
    # for the smallest singular value (the last row), dehomogenized.
    return V[-1, :] / V[-1, -1]
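The snippet assumes f1 and f2 carry pixel coordinates and that m1, m2 are 3x4 projection matrices. A self-contained check of the same DLT construction, with toy cameras (the matrices here are made up for illustration):

import numpy as np

X = np.array([1.0, 2.0, 3.0, 1.0])                  # known homogeneous 3D point
m1 = np.hstack([np.eye(3), np.zeros((3, 1))])       # camera 1: [I | 0]
m2 = np.hstack([np.eye(3), [[0.5], [0.0], [0.0]]])  # camera 2: shifted in x
u1, v1 = (m1 @ X)[:2] / (m1 @ X)[2]                 # project into each image
u2, v2 = (m2 @ X)[:2] / (m2 @ X)[2]

Q = np.array([u1 * m1[2] - m1[0], v1 * m1[2] - m1[1],
              u2 * m2[2] - m2[0], v2 * m2[2] - m2[1]])
_, _, V = np.linalg.svd(Q)
X_hat = V[-1] / V[-1, -1]                           # null vector of Q, dehomogenized
assert np.allclose(X_hat, X)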
Example #8
def sqrt_ginv(a, rcond=1e-15):
    a, wrap = _makearray(a)
    rcond = asarray(rcond)
    if _isEmpty2d(a):
        m, n = a.shape[-2:]
        res = empty(a.shape[:-2] + (n, m), dtype=a.dtype)
        return wrap(res)
    a = a.conjugate()
    u, s, vt = svd(a, full_matrices=False)
    # discard small singular values
    cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True)
    large = s > cutoff
    s = divide(1, s**0.5, where=large, out=s)
    s[~large] = 0
    res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u)))
    return wrap(res)
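sqrt_ginv inverts the square roots of the singular values, i.e. it returns a matrix square root of the pseudo-inverse. For a symmetric positive-definite input (where U and V coincide), squaring the result recovers pinv; a standalone check under that assumption:

import numpy as np

B = np.random.rand(4, 4)
A = B @ B.T + np.eye(4)                       # symmetric positive definite
u, s, vt = np.linalg.svd(A)
M = vt.T @ np.diag(1.0 / np.sqrt(s)) @ u.T    # what sqrt_ginv computes for real A
assert np.allclose(M @ M, np.linalg.pinv(A))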
Example #9
def cpt_Moore_Penrose(L):
    U, S, V = linalg.svd(L)  # note: the returned V is already V^H (for real input, simply the transpose)
    num = len(S)  # number of singular values returned; at most min_num
    num_U = len(U)
    num_V = len(V)
    min_num = min(num_U, num_V)

    S_matrix = np.zeros((num_U, num_V))  # build the rectangular S matrix
    S_plus = np.zeros((num_U, num_V))
    for i in range(num):
        S_matrix[i][i] = S[i]
        if S[i] > 1e-15:  # guard against (numerically) zero singular values
            S_plus[i][i] = 1.0 / S[i]

    L_plus = np.dot(V.T, S_plus.T).dot(U.T)  # pinv(L) = V . S^+ . U^T (Wikipedia formula)
    print('cpt_Moore_Penrose: L_plus computed')
    return L_plus
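Assuming the usual imports (import numpy as np; from numpy import linalg), the result can be checked against NumPy's built-in pseudo-inverse:

import numpy as np

L = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
assert np.allclose(cpt_Moore_Penrose(L), np.linalg.pinv(L))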
Example #10
def svdFrog(ampFrog):
	'''
	Extracts the pulse and gate as functions of time from the FROG
	with the singular value decomposition method
	'''

	E =	fftpack.fftshift( fftpack.fft( fftpack.ifftshift(ampFrog, (0,)), axis=0 ) )
	nbPoints = shape(E)[0]

	for i in arange(1,nbPoints):
		E[i,:] = roll(E[i,:], i)

	[U, S, V] = la.svd(E)

	pulseField = U[:,0]
	gateFunction = conj(V[0,:])

	return [pulseField, gateFunction]
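The extraction works because the leading singular pair of the time-shifted field is its best rank-1 approximation (pulse as the column factor, gate as the row factor). A standalone illustration of that Eckart-Young property on random complex data, not a real FROG trace:

import numpy as np

E = np.random.rand(8, 8) + 1j * np.random.rand(8, 8)
U, S, Vh = np.linalg.svd(E)
rank1 = S[0] * np.outer(U[:, 0], Vh[0])   # "pulse" column times "gate" row
# the rank-1 error equals the norm of the discarded singular values
assert np.isclose(np.linalg.norm(E - rank1), np.sqrt(np.sum(S[1:] ** 2)))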
Example #11
    def testDecomposition(self):
        Wba = [[-1. / 3, 0], [0, -1. / 3]]
        Wbx = [[0, 0, 2. / 3, 2. / 3], [2. / 3, 2. / 3, 0, 0]]
        Wxx = [[0, 0, 2. / 3, -1. / 3], [0, 0, -1. / 3, 2. / 3],
               [2. / 3, -1. / 3, 0, 0], [-1. / 3, 2. / 3, 0, 0]]
        Wxa = [[2. / 3, 0], [2. / 3, 0], [0, 2. / 3], [0, 2. / 3]]
        I = identity(4).tolist()

        S = (matrix(I) - matrix(Wxx)).tolist()

        U, s, VH = svd(S)

        U = U.tolist()
        s = s.tolist()
        VH = VH.tolist()

        res = (matrix(U) * matrix(diag(s)) * matrix(VH)).tolist()
        difference = linalg.norm(matrix(res) - matrix(S))
        self.assertTrue(difference < 1e-10, 'svd expansion incorrect')
Example #12
def procrustes(m1, m2):

    # centralize matrices
    mean_m1 = [mean(column) for column in m1.T]
    m1 = m1-mean_m1
    mean_m2 = [mean(column) for column in m2.T]
    m2 = m2-mean_m2
 
    # scaling
    scale_m1 = sqrt(sum(square(m1))/(m1.shape[1]))
    scale_m2 = sqrt(sum(square(m2))/(m2.shape[1]))
    m2 = (m2/scale_m2)*scale_m1
        
    # rotation
    u, s, v = svd(dot(m1.T, m2))
    r = dot(v.T, u.T)
    m2 = dot(m2, r)
    
    # set m2 center to m1 center
    m2 = m2+mean_m1
    
    return m2
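These snippets assume a from numpy import * context. Under that assumption, procrustes can be exercised by aligning a rotated, scaled and shifted copy of a point set back onto the original:

import numpy as np

theta = 0.7
R = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]])
m1 = np.random.rand(10, 2)
m2 = 2.5 * m1 @ R + np.array([3.0, -1.0])   # similarity transform of m1
assert np.allclose(procrustes(m1, m2), m1)  # m2 is mapped back onto m1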
Example #13
 def optimize_vector(self, i_vec):
     """
     Optimizes torsion and bond angles by rotation around
     one atom i.
     1. moves coordinate origin to that atom.
     2. generate a rotation matrix from a singular value decompositon
     3. rotate all atoms after i.
     """
     center = self.moving[i_vec]
     moving_coords = self.get_moving_coords(center)
     fixed_coords = self.get_fixed_coords(center)
     # Do singular value decomposition
     a = dot(fixed_coords, transpose(moving_coords))
     u, d, vt = linalg.svd(a)
     # Check reflection (S is assumed to be the diag(1, 1, -1) correction
     # matrix defined elsewhere in the module)
     if (linalg.det(u) * linalg.det(vt)) < 0:
         u = dot(u, S)
     # Calculate rotation
     rot_matrix = dot(u, vt)
     # Apply rotation
     if self.accept_rotation():
         self.apply_rotation(rot_matrix, i_vec, center)
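The S used above is not shown in the excerpt; it is presumably the standard diag(1, 1, -1) reflection correction of the Kabsch algorithm. A self-contained sketch of that core step under this assumption, recovering a known rotation from centered 3 x N coordinates:

import numpy as np

S = np.diag([1.0, 1.0, -1.0])   # assumed reflection-correction matrix

def kabsch_rotation(fixed_coords, moving_coords):
    a = fixed_coords @ moving_coords.T
    u, d, vt = np.linalg.svd(a)
    if np.linalg.det(u) * np.linalg.det(vt) < 0:
        u = u @ S               # avoid returning an improper rotation
    return u @ vt

theta = 0.3
R = np.array([[1.0, 0.0, 0.0],
              [0.0, np.cos(theta), -np.sin(theta)],
              [0.0, np.sin(theta),  np.cos(theta)]])
M = np.random.rand(3, 10) - 0.5
M -= M.mean(axis=1, keepdims=True)          # center the point cloud
assert np.allclose(kabsch_rotation(R @ M, M), R)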
Example #14
def cseparate(x,
              M=None,
              N=4096,
              H=1024,
              W=4096,
              max_iter=200,
              pre_emphasis=True,
              magnitude_only=False,
              svd_only=False,
              transpose_spectrum=False):
    """
	complex-valued frequency domain separation by independent components
	using relative phase representation
	
	inputs:
	  x - the audio signal to separate (1 row)
	  M - the number of sources to extract
	options:
	  N - fft length in samples [4096]
	  H - hop size in samples   [1024]
	  W - window length in samples (fft padded with N-W zeros) [4096]
	  max_iter - maximum JADE ICA iterations [200]
	  pre_emphasis - apply an exponential spectral pre-emphasis filter [True]
	  magnitude_only - whether to use magnitude-only spectrum (real-valued factorization)
	  svd_only - whether to use SVD instead of JADE
	  transpose_spectrum - whether to transpose the spectrum prior to factorization
	output:
	  xhat - the separated signals (M rows)
	  xhat_all - the M separated signals mixed (1 row)
	
	Copyright (C) 2014 Michael A. Casey, Bregman Media Labs, 
	Dartmouth College All Rights Reserved
	"""
    def pre_func(x, a, b, c):
        return a * np.exp(-b * x) + c

    M = 20 if M is None else M

    phs_rec = lambda rp, dp: (np.angle(rp) + np.tile(
        np.atleast_2d(dp).T, rp.shape[1])).cumsum(1)

    F = LinearFrequencySpectrum(x, nfft=N, wfft=W, nhop=H)
    U = F._phase_map()
    XX = np.absolute(F.STFT)
    if pre_emphasis:
        xx = np.arange(F.X.shape[0])
        yy = XX.mean(1)
        popt, pcov = curve_fit(pre_func, xx, yy)
        XX = (XX.T * (1 / pre_func(xx, *popt))).T
#		w = np.r_[np.ones(64), .05*xx[64:]]
#		XX = (XX.T * w).T
    if magnitude_only:
        X = XX
    else:
        X = XX * np.exp(1j * np.array(F.dPhi))  # Relative phase STFT

    if transpose_spectrum:
        X = X.T

    if svd_only:
        u, s, v = svd(X.T)
        A = np.dot(u[:, :M], np.diag(s)[:M, :M])
        S = v[:M, :]  # v = V.H in np.linalg.svd
        AS = np.dot(A,
                    S).T  # Non Hermitian transpose avoids complex conjugation
    else:
        A, S = cjade(X.T, M, max_iter)  # complex-domain JADE by J. F. Cardoso
        AS = np.array(
            A * S).T  # Non Hermitian transpose avoids complex conjugation

    if transpose_spectrum:
        AS = AS.T
    X_hat = np.absolute(AS)

    if pre_emphasis:
        #X_hat = (XX.T / (w)).T
        X_hat = (X_hat.T * pre_func(xx, *popt)).T  # undo the pre-emphasis on the separated magnitudes
    Phi_hat = phs_rec(AS, F.dphi)
    x_hat_all = F.inverse(X_hat=X_hat, Phi_hat=Phi_hat, usewin=True)

    x_hat = []
    for k in np.arange(M):
        if svd_only:
            AS = np.dot(A[:, k][:, np.newaxis], S[k, :][np.newaxis, :]).T
        else:
            AS = np.array(A[:, k] * S[k, :]).T
        if transpose_spectrum:
            AS = AS.T
        X_hat = np.absolute(AS)
        if pre_emphasis:
            #X_hat = (XX.T / (w)).T
            X_hat = (X_hat.T * pre_func(xx, *popt)).T  # undo the pre-emphasis per source
        Phi_hat = phs_rec(AS, F.dphi)
        x_hat.append(F.inverse(X_hat=X_hat, Phi_hat=Phi_hat, usewin=True))

    return x_hat, x_hat_all
Example #15
    def Dagger(self, A, Left=None, Right=None, Mul=False):
        """
        Special computation of \f$\mathbf{A}^\dagger\f$ where
        \f$\mathbf{L}\cdot\mathbf{A}^\dagger\cdot\mathbf{R}\f$ needs to be computed\n
        @param A matrix \f$\mathbf{A}\f$ to be inverted
        @param Left optional matrix that appears to the left of \f$\mathbf{A}^\dagger\f$
        @param Right optional matrix that appears to the right of \f$\mathbf{A}^\dagger\f$
        @param Mul (optional) whether to provide the result \f$\mathbf{L}\cdot\mathbf{A}^\dagger\cdot\mathbf{R}\f$.
        Otherwise, by default, \f$\mathbf{A}^\dagger\f$ is returned.
        @return matrix \f$\mathbf{A}^\dagger\f$
        @throw LinAlgError if matrix cannot be inverted
        @remark All matrices supplied can be either list of list or numpy matrix, but
        the return type is always a numpy matrix
        @details if trySVD is False and alwaysUseSVD is False, then the Left and Right
        arguments are ignored and an attempt is made at calculating the Moore-Penrose
        pseudo-inverse of \f$\mathbf{A}\f$.  if the condition number of the resulting inverse is less
        than the conditionNumberLimit, then this method fails.\n
        If alwaysUseSVD is True or there is a failure and trySVD is True, then the svd
        is used.  The svd is not _better_ than the pseudo-inverse, per se, but it is able
        to make use of the left and right matrices.\n
        Many of the problems are of the form \f$\mathbf{L}\cdot\mathbf{A}^{-1}\cdot\mathbf{R}\f$.
        In many cases, the matrices \f$\mathbf{L}\f$
        and/or \f$\mathbf{R}\f$ are such that not all of the elements of the inverse of
        \f$\mathbf{A}\f$ are used.
        Think of it as we only want to find certain elements of the inverse.  Situations like
        this arise, for example, when we have two wires in parallel connected to two circuit
        nodes.  We are not able to calculate the current through each of the wires, but we
        are able to calculate the current into and out of the parallel combination.  Another
        example is a circuit with no ground reference provided and we are calculating the
        differential voltage across an element in the circuit.  In cases like this, it is
        not possible to calculate the values of each circuit node, yet the answer exists and
        can be found.\n
        Using the svd, \f$\mathbf{A}\f$ is decomposed into \f$\mathbf{U}\cdot diag\left(\sigma\right)\cdot\mathbf{V}^H\f$
        where if \f$\mathbf{U}\f$ is \f$R\times C\f$, \f$diag\left(\sigma\right)\f$
        is \f$C\times C\f$ and \f$\mathbf{V}\f$ is \f$C\times C\f$.  The inverse of \f$\mathbf{A}\f$
        can be written as \f$\mathbf{V}\cdot diag\left(\sigma\right)^{-1}\cdot\mathbf{U}^H\f$.\n
        Here, we multiply the matrix \f$\mathbf{L}\cdot\mathbf{V}\f$ and the matrix \f$\mathbf{U}^H\cdot\mathbf{R}\f$.
        Then, if a column \f$rc\f$ of \f$\mathbf{L}\cdot\mathbf{V}\f$ is
        all zeros or a row \f$rc\f$ of \f$\mathbf{U}^H\cdot\mathbf{R}\f$ is all zeros,
        we know that the singular value \f$\sigma\left[rc\right]\f$ is
        not used and is irrelevant - we set it to one so that it can't harm us and return
        the inverse.
        @see trySVD
        @see alwaysUseSVD
        @see conditionNumberLimit
        @see singularValueLimit
        @throw LinAlgError if anything fails.
        """
        from numpy import linalg, array, diag, ndarray
        from numpy.linalg.linalg import LinAlgError, svd
        if A is None: return None
        if isinstance(A, list): A = array(A)
        if not self.alwaysUseSVD:
            try:
                # without this check, there is a gray zone where the matrix is really uninvertible
                # yet, produces total garbage without raising the exception.
                if 1.0 / linalg.cond(A) < self.conditionNumberLimit:
                    raise LinAlgError

                Adagger = linalg.inv(A)

                if Mul:
                    if Left is None: Left = array(1.)
                    elif isinstance(Left, list):
                        Left = array(Left)
                        if Left.shape == (1, 1):
                            Left = array(Left[0, 0])
                    if Right is None: Right = array(1.)
                    elif isinstance(Right, list):
                        Right = array(Right)
                        if Right.shape == (1, 1):
                            Right = array(Right[0, 0])
                    return Left.dot(Adagger).dot(Right)
                else:
                    return Adagger
            except:
                # the regular matrix inverse failed
                pass  # will get another try at it

        if self.trySVD:
            try:
                U, sigma, VH = svd(A, full_matrices=False)
                sigma = sigma.tolist()
                if Left is None: Left = array(1.)
                elif isinstance(Left, (list, ndarray)):
                    Left = array(Left)
                    if Left.shape == (1, 1):
                        Left = array(Left[0, 0])
                if Right is None: Right = array(1.)
                elif isinstance(Right, (list, ndarray)):
                    Right = array(Right)
                    if Right.shape == (1, 1):
                        Right = array(Right[0, 0])
                V = VH.conj().T
                lv = Left.dot(V)
                UH = U.conj().T
                uhr = UH.dot(Right)
                # assume that the singular value is unused according to left matrix
                sl = [False] * len(sigma)
                # if there is any element in column c that is nonzero
                # then the singular value is used
                for r in range(len(lv)):
                    for c in range(len(lv[0])):
                        if abs(lv[r][c]) > self.singularValueLimit:
                            sl[c] = True
                # assume that the singular value is unused according to the right matrix
                sr = [False] * len(sigma)
                # if there is any element in column c that is nonzero
                # then the singular value is used
                for r in range(len(uhr)):
                    for c in range(len(uhr[0])):
                        if abs(uhr[r][c]) > self.singularValueLimit:
                            sr[r] = True
                sUsed = [l and r for l, r in zip(sl, sr)]
                for u, s in zip(sUsed, sigma):
                    if u and (s < self.singularValueLimit):
                        raise LinAlgError
                sigmaInv = [1. / self.singularValueLimit
                            if (not sUsed[i] and sigma[i] < self.singularValueLimit)
                            else 1. / sigma[i]
                            for i in range(len(sigma))]
                if Mul:
                    return lv.dot(diag(sigmaInv)).dot(uhr)
                else:
                    return V.dot(diag(sigmaInv)).dot(UH)
            except:
                raise LinAlgError
        else:
            raise LinAlgError
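A toy numeric sketch of the masking trick the docstring describes, outside the class (the two-parallel-wires situation: A is singular, yet L.A^dagger.R is well defined because the unused singular direction never contributes; here the unused value is simply zeroed, since its column of L.V vanishes anyway):

import numpy as np

A = np.array([[1.0, 1.0],
              [1.0, 1.0]])       # rank 1, not invertible
L = np.array([[1.0, 1.0]])
R = np.array([[1.0], [1.0]])

U, sigma, VH = np.linalg.svd(A)
lv = L @ VH.conj().T             # L . V
uhr = U.conj().T @ R             # U^H . R
used = (np.abs(lv[0]) > 1e-12) & (np.abs(uhr[:, 0]) > 1e-12)
sigma_inv = np.where(used, 1.0 / np.where(used, sigma, 1.0), 0.0)
print(lv @ np.diag(sigma_inv) @ uhr)   # [[1.]] == L . pinv(A) . R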
Example #16
def pinv(M):
    # Caveat: U . diag(1/D) . Vt equals the true pseudo-inverse only when M is
    # symmetric (so U and V coincide); the general formula is V . diag(1/D) . U^T.
    # Also assumes a square M, since svd defaults to full_matrices=True.
    U, D, Vt = svd(M)
    wellConditioned = D > 0.000000001
    return dot(U[:, wellConditioned],
               dot(diag(D[wellConditioned]**-1.0), Vt[wellConditioned, :]))
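Given the caveat in the comment, a quick check on a singular symmetric matrix (where this construction does agree with the true pseudo-inverse):

import numpy as np

M = np.array([[2.0, 0.0, 0.0],
              [0.0, 1.0, 0.0],
              [0.0, 0.0, 0.0]])  # symmetric, rank 2
assert np.allclose(pinv(M), np.linalg.pinv(M))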
Example #17
def absdet(M):
    # The product of all singular values is |det(M)|; dropping ill-conditioned
    # ones gives the pseudo-determinant of a singular M.
    U, D, Vt = svd(M)
    wellConditioned = D > 0.000000001
    return prod(D[wellConditioned])
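For a nonsingular matrix every singular value survives the threshold and their product is exactly |det(M)|, so absdet can be checked directly:

import numpy as np

M = np.random.rand(4, 4)
assert np.isclose(absdet(M), abs(np.linalg.det(M)))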
Example #18
 def svd_creator(self):
     from numpy.linalg.linalg import svd
     self.U, self.S, self.Vt = svd(self.matrix)
Example #19
def zeroMean(X):
    X_mean = np.mean(X, axis=0)
    X_std = X - X_mean
    return X_std, X_mean


'''initial_data'''
digit = 0
num = 5000

X = load_data(digit, num)
X_std, X_mean = zeroMean(X)
'''plot mean picture'''
fig_name = '0_mean'
display(X_mean, fig_name)
'''SVD decomposition'''
U, S, V = linalg.svd(X_std)
#eigvals=np.diag(S)
print(S.shape)
'''plot eigvals-dimensions'''
x = np.arange(100)
y = S[x]
plt.figure(figsize=(8, 5))
plt.plot(x, y, 'or')
plt.xlabel('dimension')
plt.ylabel('eigenvalues')
plt.savefig('eigenvalues_dimensions.png')
plt.show()
'''plot the first 20 eigenvectors'''
for i in range(20):

    fig_name = 'eigenvector_' + str(i + 1) + '.png'
Example #20
 def calc(self):
     self.U, self.S, self.Vt = svd(self.A)
Example #21
    def learn(self, data, numsteps, visualize=False):
        if type(data) is not type([]):  #learn on a single sequence
            numdims, numpoints = data.shape
            assert numdims == self.numdims
            logXi = zeros((numpoints - 1, self.numstates, self.numstates),
                          dtype=float)

            lastlogprob = -inf
            for iteration in range(numsteps):
                print "EM iteration: %d" % iteration

                #E-step:
                logAlpha, logBeta = self.alphabeta(data)
                #compute xi and gamma:
                for t in range(numpoints - 1):
                    for i in range(self.numstates):
                        for j in range(self.numstates):
                            logXi[t, i, j] = logAlpha[i, t]+\
                                           self.logTransitionProbs[i, j]+\
                                           self._loggaussian(data[:, t+1], j)+\
                                           logBeta[j, t+1]
                    logXi[t, :, :] -= logsumexp(logXi[t, :, :].flatten())

                logGamma = vstack(
                    (logsumexp(logXi, 2), logsumexp(logXi[-1, :, :], 1)))

                logprob = logsumexp(logAlpha[:, -1])

                print "logprob = %f" % logprob

                if abs(logprob - lastlogprob) <= 10**-6:
                    print "converged"
                    break

                lastlogprob = logprob

                #M-step:
                self.logInitProbs[:] = logGamma[0, :]
                self.logTransitionProbs[:] = logsumexp(logXi, 0) - \
                                      logsumexp(logGamma[:-1,:], 0)[:, newaxis]
                G = exp(logGamma - logsumexp(logGamma, 0)[newaxis, :])
                for k in range(self.numstates):
                    self.means[:, k] = sum(G[:, k][newaxis, :] * data, 1)
                    data_m = data - self.means[:, k][:, newaxis]
                    self.covs[:, :, k] = dot((data_m * G[:, k][newaxis, :]),
                                             data_m.T)

                #threshold eigenvalues:
                for k in range(self.numstates):
                    U, D, Vt = svd(self.covs[:, :, k])
                    D[D < 0.01] = 0.01
                    self.covs[:, :, k] = dot(U, dot(diag(D), Vt))

                #visualize:
                if visualize and self.numdims == 2:
                    cla()
                    scatter(data[0, :], data[1, :])
                    for k in range(self.numstates):
                        plotGaussian(self.means[:, k], self.covs[:, :, k])

        else:  #got a list -- learn on multiple sequences
            numseqs = len(data)
            lastaverageLogprob = -inf
            logAlphas = [None] * numseqs
            logBetas = [None] * numseqs
            logXis = [None] * numseqs
            logGammas = [None] * numseqs
            logprobs = [None] * numseqs
            dataarray = concatenate(data)
            for iteration in range(numsteps):
                print "EM iteration: %d" % iteration
                #E-step:
                for seqindex, d in enumerate(data):
                    numdims, numpoints = d.shape
                    logAlphas[seqindex], logBetas[seqindex] = self.alphabeta(d)
                    #compute xi and gamma:
                    assert numdims == self.numdims
                    logXis[seqindex] = zeros(
                        (numpoints - 1, self.numstates, self.numstates),
                        dtype=float)
                    for t in range(numpoints - 1):
                        for i in range(self.numstates):
                            for j in range(self.numstates):
                                logXis[seqindex][t, i, j] = \
                                              logAlphas[seqindex][i, t]+\
                                              self.logTransitionProbs[i, j]+\
                                              self._loggaussian(d[:, t+1], j)+\
                                              logBetas[seqindex][j, t+1]
                        logXis[seqindex][t, :, :] -= logsumexp(
                            logXis[seqindex][t, :, :].flatten())

                    logGammas[seqindex] = vstack(
                        (logsumexp(logXis[seqindex],
                                   2), logsumexp(logXis[seqindex][-1, :, :],
                                                 1)))
                    logprobs[seqindex] = logsumexp(logAlphas[seqindex][:, -1])

                averageLogprob = mean(logprobs)
                print "logprob = %f" % averageLogprob
                if abs(averageLogprob - lastaverageLogprob) <= 10**-6:
                    print "converged"
                    break
                lastaverageLogprob = averageLogprob

                #M-step:
                logInitProbs = []
                logTransitionProbs = []
                oldmeans = self.means.copy()
                self.logInitProbs = \
                            logsumexp(array([l[0,:] for l in logGammas]),0)\
                                                          -log(double(numseqs))
                logXisArray = concatenate(logXis, 0)
                logGammasArray_ = concatenate([l[:-1, :] for l in logGammas], 0)
                self.logTransitionProbs = logsumexp(logXisArray, 0) \
                                    - logsumexp(logGammasArray_, 0)[:, newaxis]
                dataarray = concatenate(data, 1)
                logGammasArray = concatenate(logGammas, 0)
                G = exp(logGammasArray -
                        logsumexp(logGammasArray, 0)[newaxis, :])
                for k in range(self.numstates):
                    self.means[:, k] = sum(
                        exp(logGammasArray[:, k] -
                            logsumexp(logGammasArray[:, k], 0))[:, newaxis] *
                        dataarray.T, 0)
                    data_m = dataarray - oldmeans[:, k][:, newaxis]
                    self.covs[:, :, k] = dot((data_m * G[:, k][newaxis, :]),
                                             data_m.T)

                #threshold eigenvalues:
                for k in range(self.numstates):
                    U, D, Vt = svd(self.covs[:, :, k])
                    D[D < 0.01] = 0.01
                    self.covs[:, :, k] = dot(U, dot(diag(D), Vt))

                #visualize:
                if visualize and self.numdims == 2:
                    cla()
                    scatter(*dataarray)
                    for k in range(self.numstates):
                        plotGaussian(self.means[:, k], self.covs[:, :, k])
Example #22
 def eigen_adapt(self):
     u, s, _ = svd(self.cov_est)
     self.eigvalues = s[0:self.num_eigen]
     # print "estimated eigenvalues: ", self.eigvalues
     self.eigvectors = u[:, 0:self.num_eigen]