Code Example #1
File: stopping.py Project: thelahunginjeet/kbutil
 def __init__(self,data):
     self.data = data
     self.N,self.p = self.data.shape
     self.covMatrix = covmatrix(self.data)
     self.corMatrix = corrmatrix(self.data)
     _,self.covlambda,_ = svd(self.covMatrix, full_matrices=False)
     _,self.corlambda,_ = svd(self.corMatrix, full_matrices=False)
Code Example #2
File: spike_extraction.py Project: melizalab/mspikes
def get_eigenvectors(spikes, nfeats, nspikes):
    """Calculate eigenvectors of spike waveforms

    spikes: resampled and aligned spike waveforms, dimensions (nspikes, nsamples)
    nfeats: the number of the most significant eigenvectors to return
    nspikes: the number of spikes to use

    Returns eigenvectors, dimension (nsamples, nfeats). Does not need to be
    transposed to calculate projections.

    The call to svd may "fail to converge", which just means dgesdd (a faster
    algorithm) didn't work. In this case, the algorithm tries to decompose the
    transpose. (see
    http://r.789695.n4.nabble.com/Observations-on-SVD-linpack-errors-and-a-workaround-td837282.html)

    """
    from numpy.linalg import svd, LinAlgError
    # center data
    data = spikes[:nspikes] - spikes[:nspikes].mean(0)
    try:
        u, s, v = svd(data, full_matrices=0)
        return v[:nfeats].T.copy()
    except LinAlgError:
        u, s, v = svd(data.T, full_matrices=0)
        return u[:, :nfeats].copy()
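
A minimal usage sketch of the function above (synthetic data; assumes numpy is imported as np and get_eigenvectors is in scope):

import numpy as np
spikes = np.random.randn(500, 40)              # 500 fake aligned waveforms, 40 samples each
eigvecs = get_eigenvectors(spikes, nfeats=3, nspikes=200)
features = np.dot(spikes, eigvecs)             # (500, 3) projections; no transpose needed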
Code Example #3
    def _create_SDP(self):
        """ Creates the SDP knockoff of X"""
 
        # Check for rank deficiency (will add later).
 
        # SVD and come up with perpendicular matrix
        U, d, V = nplin.svd(self.X,full_matrices=True) 
        d[d<0] = 0
        U_perp = U[:,self.p:(2*self.p)]
        if self.randomize:
            U_perp = np.dot(U_perp,splin.orth(npran.randn(self.p,self.p)))
 
        # Compute the Gram matrix and its (pseudo)inverse.
        G     = np.dot(V.T * d**2 ,V)
        G_inv = np.dot(V.T * d**-2,V)
 
        # Optimize the parameter s of Equation 1.3 using SDP.
        self.s = solve_sdp(G)
        self.s[self.s <= self.zerotol] = 0
 
        # Construct the knockoff according to Equation 1.4:
        C_U,C_d,C_V = nplin.svd(2*np.diag(self.s) - (self.s * G_inv.T).T * self.s)
        C_d[C_d < 0] = 0
        X_ko = self.X - np.dot(self.X,G_inv*self.s) + np.dot(U_perp*np.sqrt(C_d),C_V)
        self.X_lrg = np.concatenate((self.X,X_ko), axis=1)
Code Example #4
def test_betti0(space, mesh):
    """
    Verify that the 0-form Hodge Laplacian with strong Dirichlet
    boundary conditions has kernel of dimension equal to the 2nd Betti
    number of the annulus mesh, i.e. 0.
    """
    V0tag, V1tag, V2tag = space

    if(len(V0tag) == 2):
        V0 = FunctionSpace(mesh, V0tag[0], V0tag[1])
    else:
        V0a = FiniteElement(V0tag[0], "triangle", V0tag[1])
        V0b = FiniteElement(V0tag[2], "triangle", V0tag[3])
        V0 = FunctionSpace(mesh, V0a + V0b)
    # V0 Hodge Laplacian
    u = TrialFunction(V0)
    v = TestFunction(V0)

    L = assemble(inner(nabla_grad(u), nabla_grad(v))*dx)

    bc0 = DirichletBC(V0, Constant(0.0), 9)
    L0 = assemble(inner(nabla_grad(u), nabla_grad(v))*dx, bcs=[bc0])

    u, s, v = linalg.svd(L.M.values)
    nharmonic = sum(s < 1.0e-5)
    assert(nharmonic == 1)

    u, s, v = linalg.svd(L0.M.values)
    nharmonic = sum(s < 1.0e-5)
    assert(nharmonic == 0)
Code Example #5
File: roscca3.py Project: cvpapero/rqt_cca
    def stdNorm(self, U1, U2):
        """Whiten U1 and U2: subtract the row means and rescale so each has identity covariance."""
        print "U1"
        print U1
        print "U2"
        print U2

        mat1 = np.matrix(U1).T
        print mat1
        print mat1.mean(axis=1)
        mat1 = mat1 - mat1.mean(axis=1)
        print mat1
        mat1cov = np.cov(mat1)
        print mat1cov
        p1,l1,p1t = NLA.svd(mat1cov)
        print p1
        print l1
        print p1t
        l1sq = SLA.sqrtm(SLA.inv(np.diag(l1))) 
        snU1 =  np.dot(np.dot(l1sq, p1.T), mat1)

        mat2 = np.matrix(U2).T
        mat2 = mat2 - mat2.mean(axis=1)
        mat2cov = np.cov(mat2)
        p2,l2,p2t = NLA.svd(mat2cov)
        l2sq = SLA.sqrtm(SLA.inv(np.diag(l2))) 
        snU2 =  np.dot(np.dot(l2sq, p2.T), mat2)

        print "cov:"
        print np.cov(snU1)
        print np.cov(snU2)

        return snU1, snU2
Code Example #6
def admira(r, b, m, n, iter, A, A_star):
	if 2*r > min(m,n):
		r_prime = min(m,n)
	else:
		r_prime = 2*r

	# initialization
	X_hat = np.random.randn(m,n) # step 1
	Psi_hatU = np.matrix([])
	Psi_hatV = np.matrix([])
	for i in range(iter):
		Y = A_star(b - A(X_hat))
		(U, s, Vt) = svd(Y)
		Psi_primeU = U[:, 0:r_prime]
		Psi_primeV = Vt.T[:, 0:r_prime]
		if i > 0:
			Psi_tildeU = np.bmat([Psi_primeU, Psi_hatU])
			Psi_tildeV = np.bmat([Psi_primeV, Psi_hatV])
		else:
			Psi_tildeU = Psi_primeU
			Psi_tildeV = Psi_primeV
		AP = lambda b: APsiUV(b, A, Psi_tildeU, Psi_tildeV)
		APt = lambda s: APsitUV(s, A_star, Psi_tildeU, Psi_tildeV)
		ALS = lambda b: APt(AP(b))
		(s, res, cg_iters) = cgsolve(ALS, APt(b), 1e-6, 100, False)
		X_tilde = Psi_tildeU*np.matrix(np.diag(np.array(s).reshape(-1)))*Psi_tildeV.T
		(U, s, Vt) = svd(X_tilde)
		Psi_hatU = U[:, 0:r]
		Psi_hatV = Vt.T[:, 0:r]
		X_hat = Psi_hatU*np.diag(s[0:r])*Psi_hatV.T

	return X_hat
Code Example #7
def test_betti0_periodic(horiz_complex, vert_complex):
    """
    Verify that the 0-form Hodge Laplacian has kernel of dimension
    equal to the 0th Betti number of the periodic extruded interval,
    i.e. 1.  Also verify that the 0-form Hodge Laplacian with
    Dirichlet boundary conditions has kernel of dimension equal to the
    2nd Betti number of the extruded mesh, i.e. 0.
    """
    U0, U1 = horiz_complex
    V0, V1 = vert_complex

    m = PeriodicUnitIntervalMesh(5)
    mesh = ExtrudedMesh(m, layers=4, layer_height=0.25)
    U0 = FiniteElement(U0[0], "interval", U0[1])
    V0 = FiniteElement(V0[0], "interval", V0[1])

    W0_elt = TensorProductElement(U0, V0)
    W0 = FunctionSpace(mesh, W0_elt)

    u = TrialFunction(W0)
    v = TestFunction(W0)

    L = assemble(inner(grad(u), grad(v))*dx)
    uvecs, s, vvecs = linalg.svd(L.M.values)
    nharmonic = sum(s < 1.0e-5)
    assert(nharmonic == 1)

    bcs = [DirichletBC(W0, 0., x) for x in ["top", "bottom"]]
    L = assemble(inner(grad(u), grad(v))*dx, bcs=bcs)
    uvecs, s, vvecs = linalg.svd(L.M.values)
    nharmonic = sum(s < 1.0e-5)
    assert(nharmonic == 0)
Code Example #8
File: conceptor.py Project: trondarild/ikaros
def AND(C, B):
	
	dim, col = C.shape
	tolerance = 1e-14

	UC, SC, UtC = svd(C)
	UB, SB, UtB = svd(B)

	diag_SC = diag(SC)
	diag_SB = diag(SB)

	# count how many elements on the diagonal
	# are bigger than the tolerance (as an int, for slicing below)
	numRankC = int((diag_SC > tolerance).sum())
	numRankB = int((diag_SB > tolerance).sum())

	UC0 = matrix(UC[:, numRankC:])
	UB0 = matrix(UB[:, numRankB:])
	W, Sigma, Wt = svd(UC0 * UC0.transpose() + UB0 * UB0.transpose())
	numRankSigma = int((diag(Sigma) > tolerance).sum())
	Wgk = matrix(W[:, numRankSigma:])
	I = matrix(identity(dim))
	CandB = \
	  Wgk * inv(Wgk.transpose() *  \
	  ( pinv(C, tolerance) + pinv(B, tolerance) - \
	    I) * Wgk) *Wgk.transpose()
	return CandB
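
A quick sanity check, as a minimal sketch assuming the flat imports the snippet implies (AND itself must be in scope):

from numpy import diag, matrix, identity
from numpy.linalg import svd, inv, pinv

C = 0.5 * identity(3)
B = 0.8 * identity(3)
print(AND(C, B))   # for full-rank C and B this equals inv(pinv(C) + pinv(B) - I), i.e. (1/2.25)*I here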
Code Example #9
File: dict_metrics.py Project: sylvchev/mdla
def principal_angles(A, B):
    '''Compute the principal angles between subspaces A and B.

    The algorithm for computing the principal angles is described in :
    A. V. Knyazev and M. E. Argentati,
    Principal Angles between Subspaces in an A-Based Scalar Product: 
    Algorithms and Perturbation Estimates. SIAM Journal on Scientific Computing, 
    23 (2002), no. 6, 2009-2041.
    http://epubs.siam.org/sam-bin/dbq/article/37733
    '''    
    # eps = np.finfo(np.float64).eps**.981
    # for i in range(A.shape[1]):
    #     normi = la.norm(A[:,i],np.inf)
    #     if normi > eps: A[:,i] = A[:,i]/normi
    # for i in range(B.shape[1]):
    #     normi = la.norm(B[:,i],np.inf)
    #     if normi > eps: B[:,i] = B[:,i]/normi
    QA = sl.orth(A)
    QB = sl.orth(B)
    _, s, Zs = svd(QA.T.dot(QB), full_matrices=False)
    s = np.minimum(s, np.ones_like(s))
    theta = np.maximum(np.arccos(s), np.zeros_like(s))
    V = QB.dot(Zs)
    idxSmall = s > np.sqrt(2.)/2.
    if np.any(idxSmall):
        RB = V[:,idxSmall]
        _, x, _ = svd(RB-QA.dot(QA.T.dot(RB)),full_matrices=False)
        thetaSmall = np.flipud(np.maximum(np.arcsin(np.minimum(x, np.ones_like(x))), np.zeros_like(x)))
        theta[idxSmall] = thetaSmall
    return theta
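
A minimal usage sketch (assumes numpy as np, scipy.linalg as sl, and numpy.linalg's svd, matching the imports the snippet implies):

A = np.random.randn(20, 3)
B = np.random.randn(20, 3)
theta = principal_angles(A, B)   # three angles in [0, pi/2], ascending
print(principal_angles(A, A))    # ~[0, 0, 0] for identical subspaces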
Code Example #10
def frequent_directions(A, ell):
  """A matrix "A" should be 256x7291
  """

  m = 256
  n = 7291

  if A.shape[0] != m or A.shape[1] != n: raise ValueError('Error: incorrect matrix size')

  start = time.clock()

  B = np.hstack((A[:, :(ell-1)], np.zeros((m, 1))))

  for i in range(ell-1, n):

    # new matrix is just a single vector (i-th column of A)
    B[:, ell-1] = A[:, i]
    U, s, V = ln.svd(B, full_matrices=False)

    delta = s[-1] ** 2 # squared smallest singular value

    B = np.dot(U, np.diag(np.sqrt(abs(s ** 2 - delta))))

  U, s, V = ln.svd(B, full_matrices=False)

  elapsed_time = time.clock() - start
  print 'time:', elapsed_time

  return U, s, V
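
The function hard-codes a 256 x 7291 input, so a minimal sketch looks like this (random data; assumes numpy as np, numpy.linalg as ln, and the time module, as in the snippet, under Python 2):

A = np.random.rand(256, 7291)
U, s, V = frequent_directions(A, ell=16)   # the sketch B keeps ell columns; U, s, V are its SVD factors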
Code Example #11
File: pca.py Project: PMBio/limix
def PCA(Y, components):
	"""
	run PCA, retrieving the first (components) principle components
	return [s0, eig, w0]
	s0: factors
	w0: weights
	"""

	N,D = Y.shape
	sv = linalg.svd(Y, full_matrices=0);
	[s0, w0] = [sv[0][:, 0:components], np.dot(np.diag(sv[1]), sv[2]).T[:, 0:components]]
	v = s0.std(axis=0)
	s0 /= v;
	w0 *= v;
	return [s0, w0]

	# NOTE: the code below is unreachable; the function always returns above
	if N>D:
		sv = linalg.svd(Y, full_matrices=0);
		[s0, w0] = [sv[0][:, 0:components], np.dot(np.diag(sv[1]), sv[2]).T[:, 0:components]]
		v = s0.std(axis=0)
		s0 /= v;
		w0 *= v;
		return [s0, w0]
	else:
		K=np.cov(Y)
		sv = linalg.eigh(K)
		std_var = np.sqrt(sv[0])
		pc = sv[1]*std_var[np.newaxis,0]
		#
		#ipdb.set_trace()
		return [pc,std_var]
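
A minimal usage sketch of the function above (assumes numpy as np and a linalg module with svd in scope, as the snippet implies):

Y = np.random.randn(100, 5)
s0, w0 = PCA(Y, components=2)
Yhat = np.dot(s0, w0.T)   # rank-2 reconstruction of Y (s0 is rescaled to unit variance)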
Code Example #12
	def planar_ransac(self, pc):
		#fit to model Ax  +By + Cz + D = 0 (a plane)
		sample_iter = 20
		pc = pc[np.nonzero(np.nansum(pc, axis=1)>0)[0], :]
		pc = pc[::sample_iter,:]
		pc_len = pc.shape[0]
		n=int(0.1*pc_len)		#size of random sample
#		print "Points: ", n
		k=15; 		#number of iterations
		err_thresh=0.01				#deviation - meters
		min_points=.5*pc_len	 	#minimum number of points within deviation
		
		iter_ = 0
		best_model = None
		best_consensus_set = None
		best_error = np.inf
		best_offset = None
		
		while best_model is None:
			while iter_ < k:

				maybe_inliers = pc[np.random.randint(0, pc_len, (n))] #get n random points from pc
				offset = np.mean(maybe_inliers, axis=0)
				maybe_inliers -= offset
				# Find model
				_,_,Vs = svd(maybe_inliers)
				V = Vs.T.conj() 
				Normal = V[:,2]
				maybe_model = Normal
				consensus_set = maybe_inliers

				err = np.abs(np.sum((pc-offset) * maybe_model, axis=1))
				consensus_set = pc[np.nonzero(err < err_thresh)[0]]
#				print consensus_set.shape, pc_len
			
				if consensus_set.shape[0] > min_points:
					_,_,Vs = svd(maybe_inliers)
					V = Vs.T.conj() 
					Normal = V[:,2]
					better_model = Normal
				
					offset = np.mean(consensus_set, axis=0)
					new_error = np.sum(np.sum((consensus_set-offset) * maybe_model, axis=1))
#					print new_error
				
					if abs(new_error) < best_error:
	#					print "iter: ", iter_
	#					print "e: ", best_error					
						best_model = better_model
						best_consensus_set = consensus_set
						best_error = abs(new_error)
						best_offset = offset
					
				iter_ += 1
			#if there is no model, decrease the number of points required for 'best' model
			min_points = min_points*3/4 
			iter_ = 0
			
		return best_model, best_offset
Code Example #13
File: test_linalg.py Project: Prastaruszek/numpy
 def check(dtype):
     x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
     u, s, vh = linalg.svd(x)
     assert_equal(u.dtype, dtype)
     assert_equal(s.dtype, get_real_dtype(dtype))
     assert_equal(vh.dtype, dtype)
     s = linalg.svd(x, compute_uv=False)
     assert_equal(s.dtype, get_real_dtype(dtype))
Code Example #14
File: conceptor.py Project: trondarild/ikaros
def conceptor_similarity (a, b): 
      U_a, S_a, V_a = svd(a);
      U_b, S_b, V_b = svd(b);
      S_a = diag(S_a)
      S_b = diag(S_b)
      # similarity with previous conceptor
      return pow(norm((sqrt(S_a) * U_a.transpose() * \
              U_b * sqrt(S_b))),2) / \
              (norm(a) * norm(b));
Code Example #15
File: myinput.py Project: warelle/rdft
def nk_singular(a,k, opt):
  (size,_) = a.shape
  nseq = rdft.all_leading_sequence(size)[-1]
  seq  = rdft.all_leading_sequence(k)[-1]
  if opt == 0:
    _, sings, _ = linalg.svd(a[np.ix_(nseq,seq)])
  else:
    _, sings, _ = linalg.svd(a[np.ix_(seq,nseq)])
  return sings
Code Example #16
def test_betti1(space):
    """
    Verify that the 1-form Hodge Laplacian with strong Dirichlet
    boundary conditions has kernel of dimension equal to the 1st Betti
    number of the annulus mesh, i.e. 1.
    """
    mesh = Mesh(join(cwd, "annulus.msh"))
    V0tag, V1tag, V2tag = space

    if(len(V0tag) == 2):
        V0 = FunctionSpace(mesh, V0tag[0], V0tag[1])
    else:
        V0a = FiniteElement(V0tag[0], "triangle", V0tag[1])
        V0b = FiniteElement(V0tag[2], "triangle", V0tag[3])
        V0 = FunctionSpace(mesh, V0a + V0b)

    V1 = FunctionSpace(mesh, V1tag[0], V1tag[1])

    W = V0*V1
    sigma, u = TrialFunctions(W)
    tau, v = TestFunctions(W)
    L = assemble((sigma*tau - inner(rot(tau), u) + inner(rot(sigma), v) +
                  div(u)*div(v))*dx)

    bc0 = DirichletBC(W.sub(0), 0., 9)
    bc1 = DirichletBC(W.sub(1), Expression(("0.0", "0.0")), 9)
    L0 = assemble((sigma*tau - inner(rot(tau), u) + inner(rot(sigma), v) +
                   div(u)*div(v))*dx, bcs=[bc0, bc1])

    dV0 = V0.dof_count
    dV1 = V1.dof_count

    A = numpy.zeros((dV0+dV1, dV0+dV1))
    A[:dV0, :dV0] = L.M[0, 0].values
    A[:dV0, dV0:dV0+dV1] = L.M[0, 1].values
    A[dV0:dV0+dV1, :dV0] = L.M[1, 0].values
    A[dV0:dV0+dV1, dV0:dV0+dV1] = L.M[1, 1].values

    u, s, v = linalg.svd(A)

    nharmonic = sum(s < 1.0e-5)
    assert(nharmonic == 1)

    dV0 = V0.dof_count
    dV1 = V1.dof_count

    A0 = numpy.zeros((dV0+dV1, dV0+dV1))
    A0[:dV0, :dV0] = L0.M[0, 0].values
    A0[:dV0, dV0:dV0+dV1] = L0.M[0, 1].values
    A0[dV0:dV0+dV1, :dV0] = L0.M[1, 0].values
    A0[dV0:dV0+dV1, dV0:dV0+dV1] = L0.M[1, 1].values

    u, s, v = linalg.svd(A0)

    nharmonic = sum(s < 1.0e-5)
    assert(nharmonic == 1)
Code Example #17
File: myinput.py Project: warelle/rdft
def get_singular(a,fra):
  (size,_) = a.shape
  a_fra_subsings = []
  _, a_sings, _ = linalg.svd(a)
  i = 0
  for seq in rdft.all_leading_sequence(size):
    _, fra_subsing, _ = linalg.svd(fra[np.ix_(seq,seq)])
    a_fra_subsings.append( (a_sings[i], fra_subsing[-1]) )
    i = i + 1
  return a_fra_subsings
Code Example #18
def incremental_svd(A, qr_flg=False):
  """A matrix "A" should be 256x7291
  """

  m = 256
  n = 7291

  n0 = 256

  if A.shape[0] != m or A.shape[1] != n: raise ValueError('Error: incorrect matrix size')

  start = time.clock()

  A0 = A[:, :n0]
  U, s, V = ln.svd(A0, full_matrices=False)

  # NOTE: s is a vector; np.diag(s) will produce a diagonal matrix
  for i in range(n0, n):

    # new matrix is just a single vector (i-th column of A)
    A1 = np.matrix(A[:, i]).T

    if qr_flg:
      J, K = ln.qr(A1 - np.dot(np.dot(U, U.T), A1))
      U_, s_, V_ = ln.svd(
          np.vstack((
            np.hstack((np.diag(s), np.dot(U.T, A1))),
            np.hstack((np.zeros((K.shape[0], s.shape[0])), K))
          )),
          full_matrices=False)

      # update the result of SVD
      U = np.dot(np.hstack((U, J)), U_)

    else:
      U_, s_, V_ = ln.svd(np.hstack((np.diag(s), np.dot(U.T, A1))), full_matrices=False)
      U = np.dot(U, U_)

    s = s_

    # NOTE: V from svd on NumPy is already transposed
    V = np.dot(V_,
          np.vstack((
            np.hstack((V, np.zeros((V.shape[0], i+1-V.shape[1])))),
            np.hstack((np.zeros((V_.shape[1]-V.shape[0], V.shape[1])), np.eye(V_.shape[1]-V.shape[0], i+1-V.shape[1])))
          ))
        )

    # for next computation, update A0
    A0 = np.hstack((A0, A1))

  elapsed_time = time.clock() - start
  print 'time:', elapsed_time

  return U, s, V
Code Example #19
File: DQMC.py Project: waltergu/HamiltonianPy
 def G(self):
     if self.pos in (0,len(self)):
         B=self[self.pos] if self.pos==0 else self[self.pos-1]
         U,S,V=svd(B.V.dot(B.U).transpose().conjugate()+np.diag(B.S))
         U,V=B.U.dot(U).transpose().conjugate(),V.dot(B.V).transpose().conjugate()
         return np.einsum('ij,j,jk->ik',V,1.0/S,U)
     else:
         BR,BL=self[self.pos-1],self[self.pos]
         U,S,V=svd(BL.V.dot(BR.U).transpose().conjugate()+np.einsum('i,ij,jk,k->ik',BR.S,BR.V,BL.U,BL.S))
         U,V=BR.U.dot(U).transpose().conjugate(),V.dot(BL.V).transpose().conjugate()
         return np.einsum('ij,j,jk->ik',V,1.0/S,U)
Code Example #20
File: PCA.py Project: RONGLX/CSmathhomework2
def PCA(X):
    # transform X to centered data
    # because the feature dimension is larger than the number of samples, we compute the SVD of X's transpose
    V,D,UT = svd(X.T)
    U1,D1,VT1 = svd(X)
    U = array(matrix(UT).T)
    results = []
    results.append(U[:,0])
    results.append(U[:,1])
    results = dot(results,X) 
    return results
Code Example #21
    def __compute_svd(self, H, G):
        """Compute change-point score using SVD.

        """
        U, _, _ = ln.svd(H, full_matrices=False)
        Q, _, _ = ln.svd(G, full_matrices=False)

        # find the largest singular value for `r` principal component
        s = ln.svd(np.dot(U[:, :self.r].T, Q[:, :self.r]),
                   full_matrices=False, compute_uv=False)

        return 1 - s[0]
Code Example #22
File: dlm.py Project: bcrestel/timeseries
def KalmanFilter_SVD(dataset, m0, svd_C0, Ft, Gt, svd_invV, svd_invW):
    """ Compute Kalman filter on dataset 
    starting with initial distribution N(m0, C0).
    Inputs:
        dataset = np.array containing data -- (time steps) x (observations)
        m0 = mean of initial state
        svd_C0 = [U,S] for covariance matrix of initial state
        Ft, Gt = list of matrices defining the DLM
        svd_invV = SVD factors of the inverse of V, i.e., [U, S] such that
            V^{-1} = U.S.U^T
        svd_invW = same as svd_invV, but for W
        (V, W = covariance matrices for observation and model)
    Outputs:
        m_all = means of state estimate (theta_t | y_{1:t})
        C_all = covariance matrices of state estimate
        a_all = means for state predictive (theta_t | y_{1:t-1})
        R_all = covariance matrices for state predictive """
    timesteps = len(dataset)
    nbobs = dataset.size/timesteps
    param = m0.size
    m = m0.reshape((param, 1))
    C = [svd_C0[0], np.sqrt(svd_C0[1])]
    Gam = svd_invW[0].dot(np.diag(1/np.sqrt(svd_invW[1])))
    invV_ch = svd_invV[0].dot(np.diag(np.sqrt(svd_invV[1])))
    V = svd_invV[0].dot(np.diag(1/svd_invV[1])).dot(svd_invV[0].T)
    m_all = np.zeros((timesteps, param))
    a_all = np.zeros((timesteps, param))
    C_all, R_all = [], []
    ii = 0
    for YT, F, G in zip(dataset, Ft, Gt):
        Y = YT.reshape((nbobs,1))
        # State predictive: a, R
        a = G.dot(m)
        a_all[ii,:] = a.T
        tmp = np.diag(C[1]).dot((C[0].T).dot(G.T))
        Z, D, UT = svd(np.concatenate((tmp, Gam.T), axis=0))
        U = UT.T
        R_all.append([U, D])
        # Intermediate step: e, Q
        e = Y - F.dot(a)
        FRsq = F.dot(U.dot(np.diag(D)))
        Q = V + FRsq.dot(FRsq.T)
        Qinve = np.linalg.solve(Q, e)
        # State estimate: m, C
        m = a + U.dot(np.diag(D**2).dot(UT)).dot(F.T).dot(Qinve)
        m_all[ii,:] = m.T
        tmp = (invV_ch.T).dot(F.dot(U))
        Delta, tDinv, MT = svd(np.concatenate((tmp, np.diag(1/D)), axis=0))
        M = MT.T
        C = [U.dot(M), 1/tDinv]
        C_all.append(C)
        ii += 1
    return m_all, C_all, a_all, R_all
Code Example #23
def incrementalSVD(mat_a1, mat_a2, k, only_uk=False):
  """Apply SVD for a matrix with new columns

  :param mat_a1: original matrix (m x n1)
  :param mat_a2: new columns (m x n2)
  :param k: rank-k for the approximated result
  :returns: rank-k approximated U, S, V^T as a result of svd([mat_a1, mat_a2])
  """

  if mat_a1.shape[0] != mat_a2.shape[0]:
    raise ValueError('Error: the number of rows both in mat_a1 and mat_a2 should be the same')

  # get the number of rows and columns
  m = mat_a1.shape[0]
  n1 = mat_a1.shape[1]
  n2 = mat_a2.shape[1]

  if k < 1:
    raise ValueError('Error: rank k must be greater than or equal to 1')
  if k > min(m, n1 + n2):
    raise ValueError('Error: rank k must be less than or equal to min(m, n1 + n2)')

  # apply SVD for the original matrix
  mat_u1, vec_s1, mat_v1t = ln.svd(mat_a1, full_matrices=False)
  mat_s1 = np.diag(vec_s1)

  # define mat_f as [S, U^T A_2], and decompose it by SVD
  mat_f = np.hstack((mat_s1, np.dot(mat_u1.T, mat_a2)))
  mat_uf, vec_sf, mat_vft = ln.svd(mat_f, full_matrices=False)

  # keep rank-k approximation
  mat_uf = mat_uf[:, :k]
  if only_uk: return  np.dot(mat_u1, mat_uf)
  vec_sf = vec_sf[:k]
  mat_vft = mat_vft[:k, :]

  # create a temporary matrix to compute V_k
  V = mat_v1t.T
  Z1 = np.zeros((n1, n2))
  Z2 = np.zeros((n2, V.shape[1]))
  I = np.eye(n2)
  mat_tmp = np.vstack((
      np.hstack((V, Z1)),
      np.hstack((Z2, I))
    ))
  mat_vk = np.dot(mat_tmp, mat_vft.T)

  # compute U_k and S_k
  mat_uk = np.dot(mat_u1, mat_uf)
  mat_sk = np.diag(vec_sf)

  return mat_uk, mat_sk, mat_vk.T
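
A minimal check, assuming numpy as np and numpy.linalg as ln as in the snippet. With m <= n1 the matrix U_1 is square, so the update is exact and the top-k singular values must match a direct SVD:

mat_a1 = np.random.randn(30, 40)
mat_a2 = np.random.randn(30, 10)
mat_uk, mat_sk, mat_vkt = incrementalSVD(mat_a1, mat_a2, k=5)
_, s_direct, _ = ln.svd(np.hstack((mat_a1, mat_a2)), full_matrices=False)
print(np.allclose(np.diag(mat_sk), s_direct[:5]))   # True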
Code Example #24
def constrained_variance_maximization(X, q, u, r):
	# X is n x d
	# q is n x 1
	# u is d x 1
	# r is scalar
	# Returns (w, val) where w maximizes sum_{i=1}^n q[i] * dot(w, x[i])^2 
	# subject to ||w|| = 1 and ||w - u|| <= r,
	# and where val is the value of that maximum.
	
	n, d = X.shape
	q = q.reshape((n, 1))
	u = u.reshape((d, 1))
	
	Xq = sqrt(q) * X
	XqT_Xq = dot(Xq.T, Xq)
	
	# First check if the first principal component satisfies the constraints
	left, diagonal, right = svd(XqT_Xq)
	w1 = right[0].reshape((d, 1))
	val1 = diagonal[0]
	
	if lin.norm(u - w1, 2) <= r or lin.norm(u + w1, 2) <= r:
		return w1.reshape((d,)), val1
	
	# Now project the data
	Xq_proj = Xq - dot(Xq, u) * tile(u.T, (n, 1))
	
	# Find the first principal component of the projected data
	left, diagonal, right = svd(dot(Xq_proj.T, Xq_proj))
	v = right[0].reshape((d, 1))
	
	# This should be close to zero
#	assert abs(dot(u.T, v)) <= 0.01
	
	# Construct the vector and the value in the original space
	c1 = (1.0 + dot(u.T, u) - r**2) / 2.0
	c2 = sqrt(1.0 - c1**2)
	w = c1 * u + c2 * v
	val = dot(dot(w.T, XqT_Xq), w)[0, 0]
	
	# Check the result
#	print
#	print dot(dot(u.T, XqT_Xq), u)[0, 0]
#	print val
#	print val1
#	print lin.norm(w, 2)
#	print lin.norm(u - w, 2), r
#	assert dot(dot(u.T, XqT_Xq), u) <= val <= val1
#	assert 0.99 <= lin.norm(w, 2) <= 1.01
#	assert lin.norm(u - w, 2) <= r + 0.01
	
	return w.reshape((d,)), val
Code Example #25
File: gp2kronSumSvd.py Project: PMBio/mtSet
    def _update_cache(self):
        """
        Update cache
        """
        cov_params_have_changed = self.Cr.params_have_changed or self.Cn.params_have_changed

        if self.Xr_has_changed:
            start = TIME.time()
            """ Row SVD Bg + Noise """
            Urstar,S,V = NLA.svd(self.Xr)
            self.cache['Srstar'] = SP.concatenate([S**2,SP.zeros(self.N-S.shape[0])])
            self.cache['Lr']     = Urstar.T
            self.mean.setRowRotation(Lr=self.cache['Lr'])

            smartSum(self.time,'cache_XXchanged',TIME.time()-start)
            smartSum(self.count,'cache_XXchanged',1)
        
        if cov_params_have_changed:
            start = TIME.time()
            """ Col SVD Noise """
            S2,U2 = LA.eigh(self.Cn.K()+self.offset*SP.eye(self.P))
            self.cache['Sc2'] = S2
            US2   = SP.dot(U2,SP.diag(SP.sqrt(S2)))
            USi2  = SP.dot(U2,SP.diag(SP.sqrt(1./S2)))
            """ Col SVD region """
            A     = SP.reshape(self.Cr.getParams(),(self.P,self.rank),order='F')
            Astar = SP.dot(USi2.T,A)
            Ucstar,S,V = NLA.svd(Astar)
            self.cache['Scstar'] = SP.concatenate([S**2,SP.zeros(self.P-S.shape[0])])
            self.cache['Lc']     = SP.dot(Ucstar.T,USi2.T)

            """ pheno """
            self.mean.setColRotation(self.cache['Lc'])


        if cov_params_have_changed or self.Xr_has_changed:
            """ S """
            self.cache['s'] = SP.kron(self.cache['Scstar'],self.cache['Srstar'])+1
            self.cache['d'] = 1./self.cache['s']
            self.cache['D'] = SP.reshape(self.cache['d'],(self.N,self.P), order='F')

            """ pheno """
            self.cache['LY']  = self.mean.evaluate()
            self.cache['DLY'] = self.cache['D']*self.cache['LY']

            smartSum(self.time,'cache_colSVDpRot',TIME.time()-start)
            smartSum(self.count,'cache_colSVDpRot',1)

        self.Y_has_changed = False
        self.Xr_has_changed = False
        self.Cr.params_have_changed = False
        self.Cn.params_have_changed = False
Code Example #26
def test_betti2(space):
    """
    Verify that the 2-form Hodge Laplacian with strong Dirichlet
    boundary conditions has kernel of dimension equal to the 2nd Betti
    number of the annulus mesh, i.e. 1.
    """
    mesh = Mesh(join(cwd, "annulus.msh"))
    V0tag, V1tag, V2tag = space

    V1 = FunctionSpace(mesh, V1tag[0], V1tag[1])

    V2 = FunctionSpace(mesh, V2tag[0], V2tag[1])

    W = V1*V2

    sigma, u = TrialFunctions(W)
    tau, v = TestFunctions(W)

    L = assemble((inner(sigma, tau) - div(tau)*u + div(sigma)*v)*dx)

    bc1 = DirichletBC(W.sub(0), Expression(("0.0", "0.0")), 9)
    L0 = assemble((inner(sigma, tau) - div(tau)*u + div(sigma)*v)*dx, bcs=[bc1])

    dV1 = V1.dof_count
    dV2 = V2.dof_count

    A = numpy.zeros((dV1+dV2, dV1+dV2))
    A[:dV1, :dV1] = L.M[0, 0].values
    A[:dV1, dV1:dV1+dV2] = L.M[0, 1].values
    A[dV1:dV1+dV2, :dV1] = L.M[1, 0].values
    A[dV1:dV1+dV2, dV1:dV1+dV2] = L.M[1, 1].values

    u, s, v = linalg.svd(A)

    nharmonic = sum(s < 1.0e-5)
    print nharmonic, V1tag[0]
    assert(nharmonic == 0)

    A0 = numpy.zeros((dV1+dV2, dV1+dV2))
    A0[:dV1, :dV1] = L0.M[0, 0].values
    A0[:dV1, dV1:dV1+dV2] = L0.M[0, 1].values
    A0[dV1:dV1+dV2, :dV1] = L0.M[1, 0].values
    A0[dV1:dV1+dV2, dV1:dV1+dV2] = L0.M[1, 1].values

    u, s, v = linalg.svd(A0)

    nharmonic = sum(s < 1.0e-5)
    assert(nharmonic == 1)
Code Example #27
File: mmr_drawgraph.py Project: ipa-nhg/kukadu
def singular(Ytra,Ytes,sdata):

  m=Ytra.shape[0]
  mt=Ytes.shape[0]
  
  xmean=np.mean(Ytra,axis=0)
  ytra0=Ytra-np.outer(np.ones(m),xmean)
  xmean=np.mean(Ytes,axis=0)
  ytes0=Ytes-np.outer(np.ones(mt),xmean)

  xnorm=np.sqrt(np.sum(ytra0**2,axis=0))
  Ctra=np.dot(ytra0.T,ytra0)
  xnorm=xnorm+(xnorm==0)
  Ctra=Ctra/np.outer(xnorm,xnorm)
  xnorm=np.sqrt(np.sum(ytes0**2,axis=0))
  Ctes=np.dot(ytes0.T,ytes0)
  xnorm=xnorm+(xnorm==0)
  Ctes=Ctes/np.outer(xnorm,xnorm)

  stra=np_lin.svd(Ctra)[1]
  stes=np_lin.svd(Ctes)[1]

  xlinewidth=3
  
  fig1=lab.figure(figsize=(12,6))
  ax1=lab.subplot2grid((10,2),(1,0), rowspan=9)
  ## ax=fig1.add_subplot(1,2,1)
  ax1.plot(stra,linewidth=xlinewidth, \
          linestyle='-',color='b')
  ax1.set_xlabel('Label indexes', fontsize=14)
  ax1.set_ylabel('Eigen values',fontsize=14)
  ax1.set_title('Training:',fontsize=16)
  ax1.grid(True)

  ## ax=fig1.add_subplot(1,2,2)
  ax2=lab.subplot2grid((10,2),(1,1), rowspan=9)
  ax2.plot(stes,linewidth=xlinewidth, \
          linestyle='-',color='b')
  ax2.set_xlabel('Label indexes', fontsize=14)
  ax2.set_ylabel('Eigen values',fontsize=14)
  ax2.set_title('Test:',fontsize=16)
  ax2.grid(True)

  fig1.suptitle('Eigenvalues of correlation matrices: '+sdata,fontsize=18  )
    
  lab.show()

  return
Code Example #28
File: mesmimo.py Project: proteus-cpi/pylayers
    def transfer(self):
        """ calculate transfer matrix.
            it involves H and Hd against svd() which acts only over H.

        Returns
        -------

        HdH : Hermitian transfer matrix  (nf x nt x nt )
        U   : Unitary tensor  (nf x nt x nt )
        S   : Singular values (nf x nt)
        V   : = Ud (in that case because HdH Hermitian)  (nf x nt x nt)

        HdH = U L U^{\dagger}

        """

        # H  : nr x nt x nf
        H   = self.Hcal.y
        # Hd : nt x nr x nf
        Hd  = np.conj(self.Hcal.y.swapaxes(0,1))
        #HdH : nt x nt x nf
        HdH = np.einsum('ijk,jlk->ilk',Hd,H)
        # HdH : nf x nt x nt
        HdH  = HdH.swapaxes(0,2)
        #U   : nf x nt x nt
        #S   : nf x nt
        #V   : nf x nt x nt
        U,S,V  = la.svd(HdH)

        return (HdH,U,S,V)
Code Example #29
File: mesmimo.py Project: proteus-cpi/pylayers
    def svd(self):
        """ singular value decomposition of matrix H

        Parameters
        ----------

        The native H matrix is currently (nr x nt x nf ). Applying a
        broadcasted svd requires reshaping it to (nf x nr x nt ).
        In the future, it would be a good thing to define the MIMO matrix as a

        nf x na x nb structure from the beginning

        or

        ns x nf x na x nb

        Returns
        -------

        U  : nf x nr x nr
        D  : nf x min(nr,nt)
        Vh : nf x nt x nt

        """
        # H  : nr x nt x nf
        H  = self.Hcal.y
        # H1  : nf x nt x nr
        H1 = H.swapaxes(0,2)
        # H2  : nf x nr x nt
        H2 = H1.swapaxes(1,2)
        U,D,Vh = la.svd(H2)
        return(U,D,Vh)
Code Example #30
File: geometry.py Project: hjkgrp/molSimplify
def kabsch(mol0, mol1):
    # translate to align centroids with origin
    mol0, d0 = setPdistance(mol0, mol0.centersym(), [0, 0, 0], 0)
    mol1, d1 = setPdistance(mol1, mol1.centersym(), [0, 0, 0], 0)
    # get coordinates and matrices P,Q
    P, Q = [], []
    for atom0, atom1 in zip(mol0.getAtoms(), mol1.getAtoms()):
        P.append(atom0.coords())
        Q.append(atom1.coords())
    # Computation of the covariance matrix
    C = dot(transpose(P), Q)
    # Computation of the optimal rotation matrix
    # This can be done using singular value decomposition (SVD)
    # Getting the sign of the det(V)*(W) to decide
    # whether we need to correct our rotation matrix to ensure a
    # right-handed coordinate system.
    # And finally calculating the optimal rotation matrix U
    # see http://en.wikipedia.org/wiki/Kabsch_algorithm
    V, S, W = svd(C)
    d = (det(V) * det(W)) < 0.0
    # Create Rotation matrix U
    if d:
        S[-1] = -S[-1]
        V[:, -1] = -V[:, -1]
    U = dot(V, W)
    # Rotate P
    P = dot(P, U)
    # write back coordinates
    for i, atom in enumerate(mol0.getAtoms()):
        atom.setcoords(P[i])
    return mol0, U.tolist(), d0, d1
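
The core SVD step of Kabsch can be exercised on plain (n x 3) coordinate arrays; a minimal sketch assuming numpy as np (the mol0/mol1 handling above is molSimplify-specific):

P = np.random.randn(10, 3)
P -= P.mean(0)
R0, _ = np.linalg.qr(np.random.randn(3, 3))
if np.linalg.det(R0) < 0:
    R0[:, -1] = -R0[:, -1]           # force a proper rotation
Q = P.dot(R0)
V, S, W = np.linalg.svd(P.T.dot(Q))  # C = P'Q as in the function above
if np.linalg.det(V) * np.linalg.det(W) < 0:
    V[:, -1] = -V[:, -1]
U = V.dot(W)
print(np.allclose(P.dot(U), Q))      # the recovered rotation aligns P onto Q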
Code Example #31
File: pca.py Project: Ask149/linalg
def pca_svd(A, numcomponents):
    A = A - np.mean(A, axis=0)
    [U, D, V] = alg.svd(A)
    V = V.T
    V = V[:, :numcomponents]
    return np.dot(A, V)
Code Example #32
    def decompose(self,
                  T,
                  orthogonal=True,
                  max_iter=200,
                  W_A=np.eye(2),
                  W_B=np.eye(2),
                  whitened=False):
        """
        uses method analogous to Kolda's to compute decomposition of T into two 
        symmetric tensors A and B.

        Parameters
        ----------
        T : 4d array
            Tensor to decompose
        orthogonal : bool, optional
            if True, T should be an odeco tensor train. The default is True.
            Whitening applied if false.
        max_iter : int, optional
            Number of iterations whitening algorithm executes to find psd 
            matrices before declaring failure. The default is 200.

        Raises
        ------
        ValueError
            If T is not a 4-way tensor of equal dimensions

        Returns
        -------
        lammy : 1d array
            weight vector of 3-way symmetric left core of T
        U : 2d array
            columns make up symmetric vectors in decomposition of left core of T.
        mu : 1d array
            weight vector of 3-way symmetric right core of T
        V : 2d array
            columns make up symmetric vectors in decomposition of right core of T.
        """
        if (whitened == True):
            warnings.warn(
                """W_A, W_B, whitened parameters should only be used in 
                          internal recursive call""",
                stacklevel=2)

        # check T is a 4-way partially symmetric tensor
        if len(T.shape) != 4:
            raise ValueError("T must be a 4-way tensor")

        if (T.shape[0] != T.shape[1]) or (T.shape[2] != T.shape[3]):
            raise ValueError("T must be a partially symmetric tensor")

        if (whitened == False) and (T.shape[0] != T.shape[2]):
            raise ValueError("T must be a partially symmetric tensor")

        # take weighted sums of slices of A's dimensions and B's dimensions in T
        S_A, S_B = self.sum_of_slices(T)

        if orthogonal == True:
            # compute eigendecomposition of weighted sums of slices
            vals_A, U = lg.eigh(S_A)
            absvals_A = np.abs(vals_A)
            idx = np.argsort(
                absvals_A)  # find index of sorted absolute eigvals
            # no. nonzero eigvals = rank_A
            rank_A = np.shape(S_A)[0] - np.searchsorted(absvals_A[idx], 10e-10)
            U = (U[:,
                   idx])[:,
                         -rank_A:]  # take rank_A highest corresp. eigenvectors

            vals_B, V = lg.eigh(S_B)
            absvals_B = np.abs(vals_B)
            idx = np.argsort(absvals_B)
            rank_B = np.shape(S_B)[0] - np.searchsorted(absvals_B[idx], 10e-10)
            V = (V[:,
                   idx])[:,
                         -rank_B:]  # take rank_B highest corresp. eigenvectors

            # dewhiten if necessary
            if whitened == True:  # only true for internal recursive call
                U_og = lg.pinv(W_A) @ U
                V_og = lg.pinv(W_B) @ V

            else:
                U_og = U
                V_og = V

            # obtain matrix of products of "eigenvalues" by contracting with eigenvectors
            eig_products = np.empty(shape=(rank_A, rank_B))

            for u in range(rank_A):
                for v in range(rank_B):
                    C = np.tensordot(T, U[:, u], axes=(0, 0))
                    C = np.tensordot(C, U[:, u], axes=(0, 0))
                    C = np.tensordot(C, V[:, v], axes=(0, 0))
                    C = np.tensordot(C, V[:, v], axes=(0, 0))
                    eig_products[u, v] = C / (U_og[:, u] @ V_og[:, v])

            # obtain "eigenvalues" by performing SVD on this rank-1 products matrix
            SVD = lg.svd(eig_products)

            # obtained up to scaling; absorb singular values into lambda WLOG
            lammy = SVD[0][:, 0] * SVD[1][0]
            mu = SVD[2][0, :]

            return lammy, U_og, mu, V_og

        else:  # apply whitening
            # Take sums of slices until psd matrices of ranks A and B are found
            for k in range(max_iter):
                if (np.all(lg.eigvalsh(S_A) > -10e-10)
                        and np.all(lg.eigvalsh(S_B) > -10e-10)):
                    break

                elif max_iter - k <= 1:
                    print(""""Max iterations reached for psd sum of slices 
                          associated with A\n""")
                    return

                S_A, S_B = self.sum_of_slices(T)

            # take "skinny" eigendecompositions of these psd matrices
            D_A, U_A = lg.eigh(S_A)
            rank_A = np.shape(S_A)[0] - np.searchsorted(D_A, 10e-10)
            U_A = np.flip(U_A[:, -rank_A:],
                          axis=1)  # rank_A highest corresp. eigenvectors
            D_A = np.diag(np.flip(
                D_A[-rank_A:]))  # diagonal of rank_A highest eigenvalues

            D_B, U_B = lg.eigh(S_B)
            rank_B = np.shape(S_B)[0] - np.searchsorted(D_B, 10e-10)
            U_B = np.flip(U_B[:, -rank_B:],
                          axis=1)  # rank_B highest corresp. eigenvectors
            D_B = np.diag(np.flip(
                D_B[-rank_B:]))  # diagonal of rank_B highest eigenvalues

            # produce whitening matrices
            W_A = lg.inv(D_A**0.5) @ U_A.T
            W_B = lg.inv(D_B**0.5) @ U_B.T

            # take the tensor-matrix product of W_A along A's modes and W_B along B's modes
            T_bar = multi_mode_dot(T, (W_A, W_A, W_B, W_B), modes=(0, 1, 2, 3))

            # apply orthogonal decomposition to whitened tensor T_bar
            return self.decompose(T_bar, W_A=W_A, W_B=W_B, whitened=True)
Code Example #33
plt.rcParams['font.family'] = 'Malgun Gothic'
plt.rcParams['axes.unicode_minus'] = False

#
# Computing the singular value decomposition
#
input = RandomState(0).randint(0, 9, 4 * 3).reshape((4, 3))
print(input)
# [[5 0 3]
#  [3 7 3]
#  [5 2 4]
#  [7 6 8]]

# compact SVD
# Note that, of the returned values, v is actually the transpose of V.
u, d, v = svd(input, full_matrices=False)

# Reconstruct the original matrix
u @ np.diag(d) @ v

# Dimensionality reduction and the Frobenius norm of the approximation error
reduced = input.shape[1] - 1
approxi_input = u[:, :reduced] @ np.diag(
    d)[:reduced, :reduced] @ v[:reduced, :]
diff = input - approxi_input
# diff = input - u @ np.diag(d) @ v.T
frobenius_norm = np.sqrt(np.trace(diff.T @ diff))
print(frobenius_norm)
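
# As a check: by the Eckart-Young theorem, the Frobenius norm of the rank-2
# truncation error should equal the smallest (dropped) singular value.
print(np.isclose(frobenius_norm, d[-1]))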

# For a sparse matrix
input = RandomState(0).randint(0, 2, 3 * 5).reshape((3, 5))
Code Example #34
File: grassmann.py Project: konscs/pymanopt_tt
 def dist(self, X, Y):
     u, s, v = svd(multiprod(multitransp(X), Y))
     s[s > 1] = 1
     s = np.arccos(s)
     return np.linalg.norm(s)
Code Example #35
 def calc(self):
     # U holds the word dimensions, Vt the documents, and S how many concepts are included
     self.U, self.S, self.Vt = svd(self.A)
Code Example #36
File: category.py Project: uberkinder/Robusta
    def fit(self, X, y=None):
        """Fit data

        Parameters
        ----------
        X : DataFrame, shape [n_samples, n_features]
            The data to determine frequencies.

        Returns
        -------
        self

        """

        # Check data
        assert not X.isna().any().any(), 'Missing values are not allowed'

        columns = X.columns
        self.embeddings_ = {col: pd.DataFrame(index=X[col].unique()) for col in columns}

        self.n_components_ = pd.DataFrame(index=columns, columns=columns)
        self.sigmas_ = {}

        for a, b in combinations(columns, 2):

            # Count Matrix
            x = X.groupby([a, b]).size().unstack().fillna(0)

            # SVD
            u, s, v = svd(x, full_matrices=False)
            v = v.T

            # n_components
            if isinstance(self.n_components, int):
                n_components_ = min(self.n_components, len(s))

            elif isinstance(self.n_components, float):
                ratio = s.cumsum()/s.sum()
                n_components_ = (ratio > self.n_components).argmax() + 1

            else:
                raise ValueError('Unknown n_components type:', self.n_components)

            self.n_components_.loc[a, b] = n_components_
            self.n_components_.loc[b, a] = n_components_

            # Truncate
            u_cols, v_cols = [], []

            for i in range(n_components_):
                u_cols.append('({},{})_svd{}'.format(a, b, i+1))
                v_cols.append('({},{})_svd{}'.format(b, a, i+1))

            u = pd.DataFrame(u[:, :n_components_], columns=u_cols, index=x.index)
            v = pd.DataFrame(v[:, :n_components_], columns=v_cols, index=x.columns)

            # Append to Embeddings
            self.embeddings_[a] = self.embeddings_[a].join(u)
            self.embeddings_[b] = self.embeddings_[b].join(v)

        return self
Code Example #37
    def constraint_for_two_ph_at_momentum_p(self, p):

        # the x vector is [g1, g2_pair, g2_two_ph_block]

        L = self.L
        num_of_const = L * L * 4

        const_mat = np.zeros((num_of_const, L + L * L * L + 4 * L * L),
                             dtype=np.float32)
        const_vec = np.zeros(num_of_const, dtype=np.float32)

        const_index = 0

        for alpha in range(0, L):
            #diagonal part, up left block
            const_mat[const_index, alpha] += 1.0

            const_mat[const_index, L + L * L * L + alpha] -= 1.0
            const_mat[const_index, L + ((2 * alpha - p) % L) * L * L +
                      alpha] += self.commut_sign

            const_index += 1

            # diagonal part, down right block
            if (p == 0):
                const_mat[const_index, alpha] += 2.0 * self.commut_sign
            const_mat[const_index, (alpha + p) % L] += 1.0

            const_mat[const_index, L + L * L * L + (alpha + L)] += -1.0

            const_mat[const_index, L + ((2 * alpha + p) % L) * L * L +
                      (alpha + p) % L] += self.commut_sign
            if (p == 0):
                const_vec[const_index] += -1.0
            const_index += 1

        for alpha in range(0, self.L - 1):
            for gamma in range(alpha + 1, self.L):

                #-----------left up block real part------------------------

                [sign_tmp, index_tmp] = get_vec_index(alpha, gamma, "real", L)

                const_mat[const_index, index_tmp + L + L * L * (
                    (alpha + gamma - p) % L)] += self.commut_sign * sign_tmp

                [sign_tmp, index_tmp] = get_vec_index(alpha, gamma, "real",
                                                      2 * L)

                const_mat[const_index, index_tmp + L + L * L * L] += -sign_tmp

                const_index += 1

                # -----------left up block imag part------------------------

                [sign_tmp, index_tmp] = get_vec_index(alpha, gamma, "imag", L)

                const_mat[const_index, index_tmp + L + L * L * (
                    (alpha + gamma - p) % L)] += self.commut_sign * sign_tmp

                [sign_tmp, index_tmp] = get_vec_index(alpha, gamma, "imag",
                                                      2 * L)

                const_mat[const_index, index_tmp + L + L * L * L] += -sign_tmp

                const_index += 1

                #-----------right bottom block real part-----------------

                if (p == 0):
                    const_vec[const_index] += -1.0

                if (p == 0):
                    const_mat[const_index, alpha] += self.commut_sign
                    const_mat[const_index, gamma] += self.commut_sign

                [sign_tmp, index_tmp] = get_vec_index(
                    (alpha + p) % L, (gamma + p) % L, "real", L)

                const_mat[const_index, index_tmp + L + L * L * (
                    (alpha + gamma + p) % L)] += self.commut_sign * sign_tmp

                [sign_tmp, index_tmp] = get_vec_index(alpha + L, gamma + L,
                                                      "real", 2 * L)

                const_mat[const_index, index_tmp + L + L * L * L] += -sign_tmp

                const_index += 1

                # -----------right bottom block imag part-----------------

                [sign_tmp, index_tmp] = get_vec_index(
                    (alpha + p) % L, (gamma + p) % L, "imag", L)

                const_mat[const_index, index_tmp + L + L * L * (
                    (alpha + gamma + p) % L)] += self.commut_sign * sign_tmp

                [sign_tmp, index_tmp] = get_vec_index(alpha + L, gamma + L,
                                                      "imag", 2 * L)

                const_mat[const_index, index_tmp + L + L * L * L] += -sign_tmp

                const_index += 1

        for alpha in range(0, self.L):
            for gamma in range(0, self.L):

                # -----------left bottom block real part-----------------

                if (p == 0):
                    const_mat[const_index, alpha] += 1.0

                if ((alpha - gamma - p) % L == 0):
                    const_mat[const_index, alpha] += self.commut_sign

                [sign_tmp, index_tmp] = get_vec_index(alpha, (gamma + p) % L,
                                                      "real", L)

                const_mat[const_index, index_tmp + L + L * L *
                          ((alpha + gamma) % L)] += sign_tmp

                [sign_tmp, index_tmp] = get_vec_index(alpha, gamma + L, "real",
                                                      2 * L)

                const_mat[const_index, index_tmp + L + L * L * L] += -sign_tmp

                const_index += 1

                # -----------left bottom block imag part-----------------

                [sign_tmp, index_tmp] = get_vec_index(alpha, (gamma + p) % L,
                                                      "imag", L)

                const_mat[const_index, index_tmp + L + L * L *
                          ((alpha + gamma) % L)] += sign_tmp

                [sign_tmp, index_tmp] = get_vec_index(alpha, gamma + L, "imag",
                                                      2 * L)

                const_mat[const_index, index_tmp + L + L * L * L] += -sign_tmp

                const_index += 1

        u, s, vh = svd(const_mat)

        new_const_vec = np.dot(np.transpose((u)), const_vec)

        num_of_const_trunc = 0
        for i in range(0, len(s)):
            if (abs(s[i]) > self.truncation):
                num_of_const_trunc += 1

        #print("num of const two ph at p:",p,num_of_const_trunc)

        const_mat_truncation = np.zeros(
            (num_of_const_trunc, L + L * L * L + 4 * L * L), dtype=np.float32)
        const_vec_truncation = np.zeros(num_of_const_trunc, dtype=np.float32)

        const_index = 0

        for i in range(0, len(s)):
            if (abs(s[i]) <= self.truncation):
                continue
            else:
                const_mat_truncation[const_index, :] += s[i] * vh[i, :]
                const_vec_truncation[const_index] += new_const_vec[i]
                const_index += 1

        return [const_mat_truncation, const_vec_truncation, num_of_const_trunc]
Code Example #38
conj = np.conj(x)

l = len(conj)

p = 2
flipped = [0 for h in range(0, l)]

flipped = conj[::-1]

acf = signal.convolve(x, flipped, 'full')

a1 = np.asarray(toeplitz(c=np.asarray(acf), r=np.asarray(
    acf)))  #autocorrelation matrix that will be decomposed into eigenvectors

eigenVectors, eigenValues, _ = LA.svd(a1)

idx = eigenValues.argsort()[::-1]

eigenValues = eigenValues[
    idx]  # sorting the eigenvectors and eigenvalues from greatest to least eigenvalue
eigenVectors = eigenVectors[:, idx]

signal_eigen = np.array(eigenVectors[0:p])
noise_eigen = np.array(eigenVectors[p:len(eigenVectors)])  # noise subspace
noise_eigenVal = eigenValues[p:len(eigenValues)]
Code Example #39
File: test_linalg.py Project: Kurios/Project32
 def do(self, a, b):
     c = asarray(a)  # a might be a matrix
     s = linalg.svd(c, compute_uv=False)
     old_assert_almost_equal(s[0] / s[-1], linalg.cond(a, 2), decimal=5)
Code Example #40
File: test_linalg.py Project: Kurios/Project32
 def do(self, a, b):
     u, s, vt = linalg.svd(a, 0)
     assert_almost_equal(a, dot(multiply(u, s), vt))
     assert imply(isinstance(a, matrix), isinstance(u, matrix))
     assert imply(isinstance(a, matrix), isinstance(vt, matrix))
Code Example #41
def pca(X):
    m = len(X)

    u, s, v = svd(X.T * X / m)

    return u, s
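
A minimal usage sketch (assumes numpy as np with svd in scope; note the function does not center X, so it diagonalizes the uncentered second-moment matrix):

X = np.matrix(np.random.randn(200, 3))   # np.matrix so that X.T * X is a matrix product
u, s = pca(X)                            # u: principal directions, s: eigenvalues of X.T*X/m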
Code Example #42
def _multivariate_ols_fit(endog, exog, method='svd', tolerance=1e-8):
    """
    Solve multivariate linear model y = x * params
    where y is dependent variables, x is independent variables

    Parameters
    ----------
    endog : array_like
        each column is a dependent variable
    exog : array_like
        each column is an independent variable
    method : str
        'svd' - Singular value decomposition
        'pinv' - Moore-Penrose pseudoinverse
    tolerance : float, a small positive number
        Tolerance for eigenvalues. Values smaller than the tolerance are
        considered zero.
    Returns
    -------
    a tuple of matrices or values necessary for hypotheses testing

    .. [*] https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_introreg_sect012.htm
    Notes
    -----
    Status: experimental and incomplete

    """
    y = endog
    x = exog
    nobs, k_endog = y.shape
    nobs1, k_exog = x.shape
    if nobs != nobs1:
        raise ValueError('x(n=%d) and y(n=%d) should have the same number of '
                         'rows!' % (nobs1, nobs))

    # Calculate the matrices necessary for hypotheses testing
    df_resid = nobs - k_exog
    if method == 'pinv':
        # Regression coefficients matrix
        pinv_x = pinv(x)
        params = pinv_x.dot(y)

        # inverse of x'x
        inv_cov = pinv_x.dot(pinv_x.T)
        if matrix_rank(inv_cov, tol=tolerance) < k_exog:
            raise ValueError('Covariance of x singular!')

        # Sums of squares and cross-products of residuals
        # Y'Y - (X * params)'B * params
        t = x.dot(params)
        sscpr = np.subtract(y.T.dot(y), t.T.dot(t))
        return (params, df_resid, inv_cov, sscpr)
    elif method == 'svd':
        u, s, v = svd(x, 0)
        if (s > tolerance).sum() < len(s):
            raise ValueError('Covariance of x singular!')
        invs = 1. / s

        params = v.T.dot(np.diag(invs)).dot(u.T).dot(y)
        inv_cov = v.T.dot(np.diag(np.power(invs, 2))).dot(v)
        t = np.diag(s).dot(v).dot(params)
        sscpr = np.subtract(y.T.dot(y), t.T.dot(t))
        return (params, df_resid, inv_cov, sscpr)
    else:
        raise ValueError('%s is not a supported method!' % method)
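
A minimal usage sketch with random data (assumes numpy as np plus the pinv, matrix_rank, and svd names the snippet relies on, as imported in statsmodels):

x = np.hstack((np.ones((50, 1)), np.random.randn(50, 2)))
y = np.random.randn(50, 3)
params, df_resid, inv_cov, sscpr = _multivariate_ols_fit(y, x, method='svd')
print(params.shape, df_resid)   # (3, 3) coefficient matrix, 47 residual degrees of freedom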
Code Example #43
                           0.58368177,
                           -0.3286576,
                           -0.23854156,
                       ], [0.18741533, 0.03066228, -0.94294771],
                       [0.65685862, -0.09220681, -0.60347573],
                       [0.63137604, -0.22978685, -0.27479238],
                       [0.59683195, -0.15111101, -0.40536606],
                       [0.68646128, 0.0046802, -0.68407367],
                       [0.62311759, 0.0101013, -0.75863324]])

    # compute mvee
    A, centroid = mvee(points)
    print(A)

    # point it and some other stuff
    U, D, V = la.svd(A)

    rx, ry, rz = [1 / np.sqrt(d) for d in D]
    u, v = np.mgrid[0:2 * np.pi:20j, -np.pi / 2:np.pi / 2:10j]

    x = rx * np.cos(u) * np.cos(v)
    y = ry * np.sin(u) * np.cos(v)
    z = rz * np.sin(v)

    for idx in range(x.shape[0]):
        for idy in range(y.shape[1]):
            x[idx, idy], y[idx, idy], z[idx, idy] = np.dot(
                np.transpose(V),
                np.array([x[idx, idy], y[idx, idy], z[idx, idy]])) + centroid

    fig = plt.figure()
Code Example #44
File: misctools.py Project: uibcdf/ProDy
def sqrtm(matrix):
    """Returns the square root of a matrix."""
    (U,S,VT)=linalg.svd(matrix)
    D = diag(sqrt(S))
    return dot(dot(U,D),VT)
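
A quick check, assuming numpy as np alongside the flat diag/sqrt/dot and linalg imports the snippet implies; this SVD construction is a true square root only for symmetric positive semidefinite input:

M = np.array([[2.0, 1.0], [1.0, 2.0]])   # symmetric positive definite
S = sqrtm(M)
print(np.allclose(np.dot(S, S), M))      # True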
Code Example #45
        for k in range(1, numlevels + 1):
            Hgauge = ncon([vC[k - 1], vC[k - 1]], [[1, 2, -1], [2, 1, -2]])
            Vgauge = ncon([wC[k - 1], wC[k - 1]], [[1, 2, -1], [2, 1, -2]])
            FreeEnergy[k-1] = -1.0*(sum((4**np.int64(np.array(range(k,-1,-1))))*np.log(ATNRnorm[:(k+1)]))+ \
            np.log(ncon([ATNR[k],Hgauge,Vgauge],[[1,3,2,4],[1,2],[3,4]])))

        ##### Change gauge on disentanglers 'u'
        a = int(np.sqrt(chiM))
        gaugeX = np.eye(chiM).reshape(a, a, a,
                                      a).transpose(1, 0, 2,
                                                   3).reshape(chiM, chiM)
        upC = [0 for x in range(numlevels)]
        upC[0] = ncon([uC[0], gaugeX], [[-1, 1, -3, -4], [1, -2]])
        for k in range(1, numlevels):
            uF, sF, vhF = LA.svd(
                ncon([wC[k - 1], wC[k - 1]], [[1, 2, -1], [2, 1, -2]]))
            gaugeX = np.dot(uF, vhF)
            upC[k] = ncon([uC[k], gaugeX], [[-1, 1, -3, -4], [1, -2]])

        for k in range(numlevels):
            sXcg[k+1] = ncon([sXcg[k],upC[k],upC[k],wC[k],wC[k],wC[k],wC[k]],\
            [[3,4,1,2],[1,2,6,9],[3,4,7,10],[5,6,-1],[9,8,-2],[5,7,-3],[10,8,-4]])
            #sXcg[k+1] /= ATNRnorm[k+1]

        dtemp = LA.eigvalsh(sXcg[numlevels].reshape(
            (sXcg[numlevels].shape[0])**2, (sXcg[numlevels].shape[2])**2))
        ExpectX = max(abs(dtemp))
        print("Magnetization, ", ExpectX)

        print("Temperature is", Temp, "and free energy using TNR is",
              FreeEnergy[numlevels - 1] / (beta * (4**numlevels)))
Code Example #46
def compression(image, k):
    compress_size, image = svd(image, k)
    plt.title('k=%s' % k)
    plt.imshow(image, cmap='gray')
    print('Compressed size after keeping %d singular values: %d' % (k, compress_size))
Code Example #47
File: Matrices.py Project: lgh0504/BARRA_risk
def Matrices():
    st = pd.HDFStore('style_Factors.h5')
    style = st.select('style_factor')
    industry = st.select('industry_factor')
    special = st.select('special_factor')
    last_expo = st.select('last_exposure')
    st.close()

    ###Covariance of factor returns
    factors = pd.merge(style,
                       industry,
                       left_index=True,
                       right_index=True,
                       how='outer')
    factors = factors.fillna(0)
    factor_names = factors.columns.values.tolist()

    Fn = pd.DataFrame(index=factor_names, columns=factor_names)
    ###standardize, winsorize
    for d in range(len(factor_names)):
        Fn.iloc[d, d] = 1
        factors[factor_names[d]] = (
            factors[factor_names[d]] -
            factors[factor_names[d]].mean()) / factors[factor_names[d]].std()
        s_plus = max(0, min(1, 0.5 / (max(factors[factor_names[d]]) - 3)))
        s_minus = max(0, min(1, 0.5 / (-3 - min(factors[factor_names[d]]))))
        for i in range(len(factors)):
            if factors[factor_names[d]].iloc[i] > 3:
                factors[factor_names[d]].iloc[i] = 3 * (
                    1 - s_plus) + factors[factor_names[d]].iloc[i] * s_plus
            elif factors[factor_names[d]].iloc[i] < -3:
                factors[factor_names[d]].iloc[i] = -3 * (
                    1 - s_minus) + factors[factor_names[d]].iloc[i] * s_minus
        factors = factors.fillna(0)

    ##factors = factors.iloc[0:200]  ##testing first 200
    ##compute the covariance between each pair of factors
    for i in range(len(factor_names)):
        for j in range(i + 1, len(factor_names)):
            k1_values = factors[factor_names[i]].values
            k2_values = factors[factor_names[j]].values

            F_k1_k2 = 0
            ##we are setting N = 10
            for phi in range(-10, 11):
                k1_k2_corr = cal_covar(
                    k1_values, k2_values, 480, 0, phi) / (math.sqrt(
                        cal_covar(k1_values, k1_values, 480, 0, 0) *
                        cal_covar(k2_values, k2_values, 480, phi, phi)))
                covariance = k1_k2_corr * math.sqrt(
                    cal_covar(k1_values, k1_values, 90, 0, 0)) * math.sqrt(
                        cal_covar(k2_values, k2_values, 90, phi, phi))
                F_k1_k2 += (11 - abs(phi)) * covariance
            Fn.loc[factor_names[i], factor_names[j]] = F_k1_k2
            Fn.loc[factor_names[j], factor_names[i]] = F_k1_k2

    ##H = 21, N = 10
    Fh = Fn * 21.0 / 11.0
    Fh = Fh.fillna(0)

    Fh_mat = Fh.as_matrix(columns=None)
    U, D, V = la.svd(Fh_mat)

    for n in range(len(D)):
        if D[n] <= 0:
            D[n] = 0.001
    D_mat = np.eye(len(D))
    for d in range(len(D)):
        D_mat[d][d] = D[d]

    F = np.matmul(np.matmul(U, D_mat), V)

    ##Idiosyncratic risk
    index = special.index.values
    special['sec_ID'] = [x[1] for x in index]
    special['date'] = [x[0] for x in index]
    special = special[special.sec_ID.isin(last_expo.sec_ID)]
    special = special.sort_values('sec_ID')
    sec_IDs = special['sec_ID'].unique()
    special = special.fillna(0)

    ##Idiosyncratic risk calculation
    ##gamma: unfinished here because the test data is too short to verify. Some stocks have too little history, so this method may not estimate their idiosyncratic risk accurately
    ##this block computes gamma and corrects stocks with gamma<1. It could not be tested due to insufficient data; debug it before use
    '''
    special['gamma'] = pd.Series()
    for s in sec_IDs:
        ##compute the robust standard deviation
        each_special = special[special.sec_ID==s]
        robust_sd = (np.percentile(each_special['special_inc'], 75) - np.percentile(each_special['special_inc'], 25)) / 1.35
        each_special['special_inc'][each_special.special_inc > 10*robust_sd] = 10 * robust_sd
        each_special['special_inc'][each_special.special_inc < -10 * robust_sd] = -10 * robust_sd
    
        normal_sd = each_special.special_inc.std()
        Z = abs((normal_sd - robust_sd) / robust_sd)
        ##compute the blending coefficient
        gamma = min(1, max(0, (len(each_special)-60)/120)) * min(1, max(0, math.exp(1-Z)))
        special['gamma'][special.sec_ID==s] = gamma
    special = special.fillna(0)
    
    ###get the parameters from gamma=1 stocks
    last_day = last_expo.iloc[0,6]     ##last day's regression values, used to compute idiosyncratic values for gamma<1 stocks
    last_day_specials = special[special.date==last_day]
    reference_gamma = last_day_specials[last_day_specials.gamma==1]  ##gamma==1: the reference set
    reset_gamma = last_day_specials[last_day_specials.gamma<1]  ##gamma<1: recompute their idiosyncratic values
    
    temp_mktVal = last_expo.mktVal
    temp_mktVal = [math.sqrt(x) for x in temp_mktVal]     ##weight = square root of market cap
    cutoff = np.percentile(temp_mktVal, 95)     ##cap extreme values at the 95th percentile
    temp_mktVal = [min(x, cutoff) for x in temp_mktVal]
    w = np.array(temp_mktVal)    ##w = weights
    
    temp_y = last_day_specials[last_day_specials.sec_ID.isin(reference_gamma.sec_ID)]
    y = np.array(temp_y.loc[:,'special_inc'])
    
    temp_x = last_expo[last_expo.sec_ID.isin(reference_gamma.sec_ID)]
    ind = temp_x.loc[:, 'sector_code'].drop_duplicates().values
    x_ind = pd.DataFrame(columns=ind, index=temp_x.index)
    for i in ind:
        temp = temp_x[temp_x.sector_code == i]
        x_ind.loc[x_ind.index.isin(temp.index), i] = 1
    x_ind = x_ind.fillna(0)
    x_style = temp_x.iloc[:,0:3]  ##for testing
    ##x_style = temp_x.loc[:,['volatility','liquidity','momentum']]
    x_all = pd.merge(x_style, x_ind, left_index=True, right_index=True)    ###merge the style factors with the industry factors
    X = x_all.values.astype(float)
    
    wls_model = sm.WLS(y, X, weights=w)
    result = wls_model.fit()
    ##style factors first, then industry factors
    factor_return = result.params    ##regression coefficients, used to compute idiosyncratic values for gamma<1 stocks
    
    ####compute the idiosyncratic values of the gamma<1 stocks
    new_temp_x = last_expo[last_expo.sec_ID.isin(reset_gamma.sec_ID)]
    new_ind = new_temp_x.loc[:, 'sector_code'].drop_duplicates().values
    new_x_ind = pd.DataFrame(columns=ind, index=new_temp_x.index)
    for i in new_ind:
        new_temp = new_temp_x[new_temp_x.sector_code == i]
        new_x_ind.loc[new_x_ind.index.isin(new_temp.index), i] = 1
    new_x_ind = new_x_ind.fillna(0)
    new_x_style = new_temp_x.iloc[:,0:3]  ##for testing
    ##x_style = temp_x.loc[:,['volatility','liquidity','momentum']]
    new_x_all = pd.merge(new_x_style, new_x_ind, left_index=True, right_index=True)    ###merge the style factors with the industry factors
    new_X = new_x_all.values.astype(float)
    new_special = np.matmul(new_X, factor_return)    ##predicted idiosyncratic value = exposures times factor returns
    reset_gamma['special_inc'] = new_special
    
    special = pd.concat([reset_gamma, reference_gamma])    ##combine the corrected idiosyncratic values into one table
    '''

    ###covariance of the idiosyncratic returns
    delta = pd.DataFrame(index=sec_IDs, columns=sec_IDs)
    for d in range(len(sec_IDs)):
        delta.iloc[d, d] = 1

    for i in range(len(sec_IDs)):
        for j in range(i + 1, len(sec_IDs)):
            s1_val = special['special_inc'][special.sec_ID ==
                                            sec_IDs[i]].values
            s2_val = special['special_inc'][special.sec_ID ==
                                            sec_IDs[j]].values

            if len(s1_val) >= 10 and len(s2_val) >= 10:
                D_k1_k2 = 0
                for phi in range(-10, 11):
                    s1_s2_corr = cal_covar(
                        s1_val, s2_val, 480, 0, phi) / (math.sqrt(
                            cal_covar(s1_val, s1_val, 480, 0, 0) *
                            cal_covar(s2_val, s2_val, 480, phi, phi)))
                    covariance = s1_s2_corr * math.sqrt(
                        cal_covar(s1_val, s1_val, 90, 0, 0)) * math.sqrt(
                            cal_covar(s2_val, s2_val, 90, phi, phi))
                    D_k1_k2 += (11 - abs(phi)) * covariance
                delta.loc[sec_IDs[i], sec_IDs[j]] = D_k1_k2
                delta.loc[sec_IDs[j], sec_IDs[i]] = D_k1_k2
    delta = delta.fillna(0)

    ##transform the dataframe into the matrix of all factor exposures
    temp = last_expo[last_expo.sec_ID.isin(sec_IDs)]
    temp = temp.set_index('sec_ID')
    temp['sec_ID'] = temp.index.values
    exposure = pd.DataFrame(index=temp.sec_ID, columns=factor_names)
    for s in style.columns.values:
        exposure[s] = temp[s]
    exist_industries = np.unique(temp['sector_code'].values)
    for ind in exist_industries:
        sec_list = temp['sec_ID'][temp.sector_code == ind]
        exposure.loc[exposure.index.isin(sec_list.values), ind] = 1

    exposure = exposure.fillna(0)
    X = exposure.values.astype(float)
    Delta = delta.values.astype(float)
    risk = np.matmul(np.matmul(X, F), np.transpose(X)) + Delta  ##covariance of stock excess returns

    risk_df = pd.DataFrame(risk,
                           index=delta.index,
                           columns=delta.columns.values)

    st1 = pd.HDFStore('Risk.h5')
    st1.put('risk', risk_df)
    st1.close()
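    ## Usage sketch (added note): the stored covariance can be read back with
    ## risk_df = pd.read_hdf('Risk.h5', 'risk')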
コード例 #48
0
def test():
    from numpy import linalg, diag
    '''
    Test Data:
    K = 5
    votes = [(3,2,1,4), (4,1,2,3), (4, 2, 3, 1), (1, 2, 3, 4)]
    qfx2_utilities = [[(nx, nx, nx**3, k) for k, nx in enumerate(vote)] for vote in votes]
    M, altx2_nx= _utilities2_pairwise_breaking(qfx2_utilities)

    from numpy.linalg import svd, inv
    from numpy import eye, diag, zeros
    #Because s is sorted, and M is rank deficient, the value s[-1] should be 0
    np.set_printoptions(precision=2, suppress=True, linewidth=80)
    #The svd is: 
    #u * s * v = M
    u.dot(diag(s)).dot(v) = M

    #u is unitary: 
    inv(u).dot(u) == eye(len(s))
    
    diag(s).dot(v) == inv(u).dot(M)

    u.dot(diag(s)) == M.dot(inv(v))
    And because s[-1] is 0
    u.dot(diag(s))[:,-1:] == zeros((len(s),1))

    Because we want to find Mx = 0

    So flip the left and right sides
    M.dot(inv(v)[:,-1:]) == u.dot(diag(s))[:,-1:] 

    And you find
    M = M
    x = inv(v)[:,-1:]
    0 = u.dot(diag(s))[:,-1:] 
    
    So we have the solution to our problem as x = inv(v)[:,-1:]

    Furthermore it is true that 
    inv(v)[:,-1:].T == v[-1:,:]
    because v is unitary and the last vector in v corresponds to a singular
    vector because M is rank m-1
    
    ALSO: v.dot(inv(v)) = eye(len(s)) so
    v[-1].dot(inv(v)[:,-1:]) == 1
    
    this means that v[-1] is non-zero, and v[-1].T == inv(v[:,-1:])

    So all of this can be done as...
     '''
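    # M is not constructed in this snippet (the notes above build it with
    # _utilities2_pairwise_breaking); substitute any rank-deficient matrix so
    # the demonstration below runs (an assumption for illustration only):
    import numpy as np
    M = np.random.randn(4, 3).dot(np.random.randn(3, 4))  # rank 3 = m - 1
    np.set_printoptions(precision=2, suppress=True, linewidth=80)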

    # We could also say
    def eq(M1, M2):
        print(str(M1)+'\n = \n'+str(M2))
    # Compute SVD
    (u, s_, v) = linalg.svd(M)
    s = diag(s_)
    #---
    print('-------')
    print('M =\n%s' % (M,))
    print('-------')
    print('u =\n%s' % (u,))
    print('-------')
    print('s =\n%s' % (s,))
    print('-------')
    print('v =\n%s' % (v,))
    print('-------')
    print('u s v = M')
    eq(u.dot(s).dot(v), M)
    # We want to find Mx = 0
    print('-------')
    print('The last value of s is zero because M is rank m-1 and s is sorted')
    print('s =\n%s' % (s,))
    print('-------')
    print('Therefore the last column of u.dot(s) is zeros')
    print('v is unitary so v.T = inv(v)')
    print('u s = M v.T')
    eq(u.dot(s), M.dot(v.T))
    print('-------')
    print('We want to find Mx = 0, and the last column of LHS corresponds to this')
    print('u s = M v.T')
    eq(u.dot(s), M.dot(v.T))

    # The rightmost column of u.dot(s) is (numerically) the zero vector

    # Ok, so v[-1] can be negative, but that's ok:
    # v is unitary, so we can just negate it,
    # or take the absolute value, or l2-normalize it.
    # x = v[-1] = inv(v)[:,-1]
    # so
    # x.dot(x) == 1
    # hmmmm
    # I need to find a way to prove that the
    # components of x are all negative or all
    # positive
    # Verify s is 0
    x = v[-1]
コード例 #49
0
ファイル: MOR.py プロジェクト: acubibeovip/Image-Denoising
    def POD(self,
            sqrtR=None,
            cShots=None,
            energy=None,
            K=None,
            pltEig=True,
            filepath=None):
        """
        Function to generate optimal POD basis for a given set of snapshots of
        the solution.
        
        Inputs:
            sqrtR: square root of the FE mass matrix for numerical integration
            cShots: snapshots of the AD-PDE for various realization of MOR arguments
            K: FE coefficient matrix
            energy: energy level kept for selection of basis number
            pltEig: plot the eigenvalues of the covariance matrix
            filepath: path to store and load POD basis functions from
        """
        # Error handling:
        if uf.isnone(cShots) and uf.isnone(filepath):
            raise ValueError(
                'snapshots or a file path must be given to get the POD basis functions from!'
            )
        if not uf.isnone(cShots) and uf.isnone(sqrtR):
            raise ValueError('\'sqrtR\' must be provided for MOR!')

        # Construct the POD basis functions:
        if not uf.isnone(cShots):
            kapa = np.shape(cShots)[0]  # total number of snapshots
            Cmat = np.matmul(sqrtR, cShots)  # sqrt of covariance matrix
            Cmat = Cmat / np.sqrt(kapa)
            _, S, Vh = la.svd(
                Cmat,
                full_matrices=False)  # compute only kapa left singular vectors
            D = S**2  # eigenvalues of covariance matrix
            PhiTot = np.matmul(cShots, Vh.T)
        else:
            D, PhiTot = np.load(filepath, allow_pickle=True)  # Load data (the tuple was saved as an object array)

        # Save the POD basis function array:
        if not uf.isnone(filepath):
            np.save(filepath, (D, PhiTot))

        # Construct the reduced coefficient matrix:
        if not uf.isnone(K):
            KpTot = np.matmul(np.matmul(PhiTot.T, K.todense()), PhiTot)
        else:
            KpTot = None

        # Find the number of basis functions:
        if not uf.isnone(energy):
            sumD = np.cumsum(D)
            ind = sumD / sumD[-1] < energy
            basisNum = sum(ind) + 1

            Phi = PhiTot[:, :basisNum]
            Kp = KpTot[:basisNum, :basisNum] if not uf.isnone(K) else None
        else:
            basisNum = None
            Phi = None
            Kp = None

        # Plot the eigenvalues:
        if pltEig:
            plt.semilogy(D)
            plt.grid(True)
            if basisNum is not None:
                plt.axvline(basisNum, color='r', linestyle='--')
            plt.xlabel('basis function index')
            plt.ylabel('basis function energy')
            plt.title('eigenvalue spectrum')
            plt.show()

        return Phi, Kp, basisNum, PhiTot, KpTot
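        ## Usage sketch (names are assumptions, not from this file):
        ## Phi, Kp, basisNum, PhiTot, KpTot = model.POD(sqrtR=sqrtR, cShots=cShots, K=K, energy=0.999)
        ## keeps just enough modes to capture 99.9% of the snapshot energy.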
コード例 #50
0
def svd_sv(s1, factor=3):
    s_num = str_2_num(s1)
    U, s, Vh = LA.svd(s_num, full_matrices=False)
    vc = U.dot(s**factor)  # sum of the left singular vectors weighted by s**factor
    return vc
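## str_2_num is not defined in this snippet. A hypothetical stand-in (an
## assumption, not the original helper) that maps a string to a 2-D array of
## character codes so svd_sv has a matrix to decompose:
import numpy as np
def str_2_num(s, width=8):
    codes = np.frombuffer(s.encode('utf-8'), dtype=np.uint8).astype(float)
    pad = (-len(codes)) % width  # zero-pad so the codes fill whole rows
    return np.pad(codes, (0, pad)).reshape(-1, width)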
コード例 #51
0
images = np.array(images)
img_vec = images.reshape(images.shape[0], -1)
mean_face = np.mean(images, axis=0)
zerom_img = img_vec - mean_face.reshape(-1)
sd = np.std(zerom_img)
zerom_img /= sd
#display_images(images, 5, 5)
#display_images(mean_face, 1,1)

cov_mat = np.cov(zerom_img)  # (n, n) covariance via the "small matrix" trick
egval, egvec = np.linalg.eig(cov_mat)
egfaces = np.transpose(np.dot(np.transpose(zerom_img), egvec))
nm = np.linalg.norm(egfaces, axis=1)
nm = nm.reshape(-1, 1)
egfaces /= nm
# Equivalent SVD route: the columns of U are the eigenfaces, already sorted
# by singular value, so transposing lines the rows up with descending variance
u_svd, s, v = svd(np.transpose(zerom_img), full_matrices=False)
egfaces = u_svd.T
eg_index = np.argsort(s)[::-1]
#display_images(egfaces.reshape(images.shape),5,5)

# task 2
components = [2, 5, 15]
eg_faceset = []
prj_face = []
for comp in components:
    top_egfaces = []
    for k in range(comp):
        top_egfaces.append(egfaces[eg_index[k]])

    eg_faceset.append(top_egfaces)
    top_egfaces = np.array(top_egfaces)
    projected_faces = np.dot(zerom_img, np.transpose(top_egfaces))
    prj_face.append(projected_faces)
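    ## Added illustration (an assumption about intent): map the projections
    ## back to pixel space to inspect how much detail `comp` components keep
    recon = np.dot(projected_faces, top_egfaces) * sd + mean_face.reshape(-1)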
コード例 #52
0
    def update_singletensor(self, c_i, c_j, c_k):
        # c_i is the layer index; c_j, c_k locate the tensor within the layer
        path_len = 5 - c_i
        path = [[c_i, c_j, c_k]]
        tem_c_j = c_j
        tem_c_k = c_k
        for i in range(1, path_len):
            # // is integer division and returns an int
            tem_c_j = tem_c_j // 2
            tem_c_k = tem_c_k // 2
            path.append([c_i + i, tem_c_j, tem_c_k])
        # update self.contracted, i.e. the contracted tensors of each layer
        for i in range(1, 5):
            if i == c_i:
                # sweep the layer's rows and columns and update each tensor
                for j, k in product(range(self.layer_units[i]), range(self.layer_units[i])):
                    if (self.flag_contract[i, j, k] == 0) and ((j != c_j) or (k != c_k)):
                        self.contracted[i][j][k] = self.contract_unit(self.tn_layers[i][j][k], self.contracted[i - 1][2 * j][2 * k], self.contracted[(
                            i - 1)][2 * j][2 * k + 1], self.contracted[i - 1][2 * j + 1][2 * k], self.contracted[i - 1][2 * j + 1][2 * k + 1], self.n_train)
                        self.flag_contract[i, j, k] = 1
                        if i < 4:
                            self.flag_contract[i + 1, j // 2, k // 2] = 0
                self.contracted[c_i][c_j][c_k] = self.contract_local(self.contracted[c_i - 1][2 * c_j][2 * c_k], self.contracted[(
                    c_i - 1)][2 * c_j][2 * c_k + 1], self.contracted[c_i - 1][2 * c_j + 1][2 * c_k], self.contracted[c_i - 1][2 * c_j + 1][2 * c_k + 1], self.n_train)
                self.flag_contract[c_i, c_j, c_k] = 0
                if i < 4:
                    self.flag_contract[c_i + 1, c_j // 2, c_k // 2] = 0
            else:
                for j, k in product(range(self.layer_units[i]), range(self.layer_units[i])):
                    if self.flag_contract[i, j, k] == 0:
                        if ([i, j, k] in path) and ((i - 1) == c_i):
                            if (c_j % 2 == 0) and (c_k % 2 == 0):
                                [lab1, lab2, lab3] = ["2", "3", "4"]
                                tensor1 = self.contracted[c_i][c_j][c_k + 1]
                                tensor2 = self.contracted[c_i][c_j + 1][c_k]
                                tensor3 = self.contracted[c_i][c_j + 1][c_k + 1]

                            if (c_j % 2 == 0) and (c_k % 2 == 1):
                                [lab1, lab2, lab3] = ["1", "3", "4"]
                                tensor1 = self.contracted[c_i][c_j][c_k - 1]
                                tensor2 = self.contracted[c_i][c_j + 1][c_k - 1]
                                tensor3 = self.contracted[c_i][c_j + 1][c_k]

                            if (c_j % 2 == 1) and (c_k % 2 == 0):
                                [lab1, lab2, lab3] = ["1", "2", "4"]
                                tensor1 = self.contracted[c_i][c_j - 1][c_k]
                                tensor2 = self.contracted[c_i][c_j - 1][c_k + 1]
                                tensor3 = self.contracted[c_i][c_j][c_k + 1]

                            if (c_j % 2 == 1) and (c_k % 2 == 1):
                                [lab1, lab2, lab3] = ["1", "2", "3"]
                                tensor1 = self.contracted[c_i][c_j - 1][c_k - 1]
                                tensor2 = self.contracted[c_i][c_j - 1][c_k]
                                tensor3 = self.contracted[c_i][c_j][c_k - 1]

                            self.contracted[i][j][k] = self.contract_special(
                                self.tn_layers[i][j][k], tensor1, lab1, tensor2, lab2, tensor3, lab3, self.n_train)
                            self.flag_contract[i, j, k] = 0
                            if i < 4:
                                self.flag_contract[i + 1, j // 2, k // 2] = 0

                        else:
                            # print(i,j,k)
                            self.contracted[i][j][k] = self.contract_unit(self.tn_layers[i][j][k], self.contracted[i - 1][2 * j][2 * k], self.contracted[
                                i - 1][2 * j][2 * k + 1], self.contracted[i - 1][2 * j + 1][2 * k], self.contracted[i - 1][2 * j + 1][2 * k + 1], self.n_train)
                            if ([i, j, k] in path):
                                self.flag_contract[i, j, k] = 0
                            else:
                                self.flag_contract[i, j, k] = 1
                            if i < 4:
                                self.flag_contract[i + 1, j // 2, k // 2] = 0
        # compute the environment tensor E
        if c_i != 4:

            bond = self.contracted[c_i][c_j][c_k].shape[0]             
                
            tempD = tn.zeros_tensor([self.bond_inner, self.n_train], labels=['m', 'g'])
            for m, g in product(range(self.bond_inner), range(self.n_train)):
                sum1 = 0
                for f in range(self.bond_label):
                    sum1 = sum1 + self.contracted[4][0][0].data[f, m, g] * self.labels.data[g, f]
                tempD.data[m, g] = sum1

            tensor_environment = tn.contract(self.contracted[c_i][c_j][c_k], tempD, ["down"], ["g"])

        else:
            tensor_environment = tn.contract(
                self.contracted[4][0][0], self.labels, "down", "up")
        # update tn_layers according to the layer index
        if c_i == 1:
            matrix = np.reshape(tensor_environment.data, (self.bond_data *
                                                          self.bond_data * self.bond_data * self.bond_data, self.bond_inner))
            u, sigma, vt = la.svd(matrix, 0)
            # update the T tensor: np.dot(u, vt) is the isometry closest to
            # the environment tensor, which maximizes the overlap tr(E^T T)
            self.tn_layers[c_i][c_j][c_k].data = np.reshape(
                np.dot(u, vt), (self.bond_data, self.bond_data, self.bond_data, self.bond_data, self.bond_inner))
        else:
            if c_i == 4:
                matrix = np.reshape(tensor_environment.data, (self.bond_inner *
                                                              self.bond_inner * self.bond_inner * self.bond_inner, self.bond_label))
                u, sigma, vt = la.svd(matrix, 0)
                self.tn_layers[c_i][c_j][c_k].data = np.reshape(
                    np.dot(u, vt), (self.bond_inner, self.bond_inner, self.bond_inner, self.bond_inner, self.bond_label))
            else:
                matrix = np.reshape(tensor_environment.data, (self.bond_inner *
                                                              self.bond_inner * self.bond_inner * self.bond_inner, self.bond_inner))
                u, sigma, vt = la.svd(matrix, 0)
                self.tn_layers[c_i][c_j][c_k].data = np.reshape(
                    np.dot(u, vt), (self.bond_inner, self.bond_inner, self.bond_inner, self.bond_inner, self.bond_inner))

        # compute the training accuracy-------------------------------------------
        j = c_j
        k = c_k
        # update the tensors on the path from the current layer up to the top
        for i in range(c_i, 5):
            self.contracted[i][j][k] = self.contract_unit(self.tn_layers[i][j][k],
                                                          self.contracted[i -
                                                                          1][2 * j][2 * k],
                                                          self.contracted[i -
                                                                          1][2 * j][2 * k + 1],
                                                          self.contracted[i -
                                                                          1][2 * j + 1][2 * k],
                                                          self.contracted[i - 1][2 * j + 1][2 * k + 1], self.n_train)
            j = j // 2
            k = k // 2

        temp = tn.contract(self.contracted[4][0][0], self.labels, "up", "down")
        # take the trace of the matrix
        temp.trace("up", "down")
        acc = temp.data / self.n_train
        return acc
コード例 #53
0
import numpy as np
import numpy.linalg as linalg

A = np.array([[4, 0, -7], [-3, 1, 5]])
U, s, V = linalg.svd(A)

print(A, "\n")
# Left singular matrix
# The columns of U are orthonormal
print("U Matrix:")
print(U, "\n")
# s is returned as a 1-D vector of singular values,
# sorted in descending order (largest first)
print("s Vector: ")
print(s, "\n")
print("V matrix:")
# numpy returns V transposed: the rows are the right singular vectors
print(V, "\n")

# ~0: two different columns of U are orthogonal
print(np.dot(U[:, 0], U[:, 1]))
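# Added check (not part of the original demo): rebuild A from the factors.
# A is 2x3, so s must be embedded in a 2x3 Sigma before multiplying.
Sigma = np.zeros(A.shape)
Sigma[:len(s), :len(s)] = np.diag(s)
print("Reconstruction U Sigma V:")
print(np.dot(U, np.dot(Sigma, V)))  # matches A up to floating-point error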
コード例 #54
0
 def preprocess_feature(self):
     if self.features.shape[1] > 200:
         U, S, VT = la.svd(self.features)
         Ud = U[:, 0:200]
         Sd = S[0:200]
         self.features = np.array(Ud) * Sd.reshape(200)
コード例 #55
0
 def GetRepUseSVD(self, probTranMat, alpha):
     U, S, VT = la.svd(probTranMat)
     Ud = U[:, 0:self.dim]
     Sd = S[0:self.dim]
     return np.array(Ud) * np.power(Sd, alpha).reshape((self.dim))
コード例 #56
0
ファイル: irlb.py プロジェクト: airysen/irlbpy
def lanczos(A, nval, tol=0.0001, maxit=50, center=None, scale=None, L=None):
    """Estimate a few of the largest singular values and corresponding singular
    vectors of matrix using the implicitly restarted Lanczos bidiagonalization
    method of Baglama and Reichel, see:

    Augmented Implicitly Restarted Lanczos Bidiagonalization Methods,
    J. Baglama and L. Reichel, SIAM J. Sci. Comput. 2005

    Keyword arguments:
    tol   -- An estimation tolerance. Smaller means more accurate estimates.
    maxit -- Maximum number of Lanczos iterations allowed.

    Given an input matrix A of dimension j * k and a desired number of
    singular values nval, the function returns a LanczosResult with fields:

    U     -- A j * nval matrix of estimated left singular vectors.
    s     -- A vector of length nval of estimated singular values.
    V     -- A k * nval matrix of estimated right singular vectors.
    steps -- The number of Lanczos iterations run.
    nmult -- The number of matrix-vector products used.

    The algorithm estimates the truncated singular value decomposition:
    A.dot(V) ~= U * s.
    """
    mmult = None
    m = None
    n = None
    if A.ndim == 2:
        mmult = multA
        m = A.shape[0]
        n = A.shape[1]
        if (min(m, n) < 2):
            raise MatrixShapeException(
                "The input matrix must be at least 2x2.")

    elif A.ndim == 1:
        mmult = multS
        A = np.pad(A, (0, A.shape[0] % 2), mode='edge')
        N = A.shape[0]
        if L is None:
            L = N // 2
        K = N - L + 1
        m = L
        n = K
        A = prepare_s(A, L)
    elif A.ndim > 2:
        raise MatrixShapeException("The input matrix must be 2D array")
    nu = nval

    m_b = min((nu + 20, 3 * nu, n))  # Working dimension size
    mprod = 0
    it = 0
    j = 0
    k = nu
    smax = 1
    # sparse = sparse.issparse(A)

    V = np.zeros((n, m_b))
    W = np.zeros((m, m_b))
    F = np.zeros((n, 1))
    B = np.zeros((m_b, m_b))

    V[:, 0] = np.random.randn(n)  # Initial vector
    V[:, 0] = V[:, 0] / np.linalg.norm(V[:, 0])  # normalize the start vector

    while it < maxit:
        if (it > 0):
            j = k

        VJ = V[:, j]

        # apply scaling
        if scale is not None:
            VJ = VJ / scale

        W[:, j] = mmult(A, VJ, L=L)
        mprod = mprod + 1

        # apply centering
        # R code: W[, j_w] <- W[, j_w] - ds * drop(cross(dv, VJ)) * du
        if center is not None:
            W[:, j] = W[:, j] - np.dot(center, VJ)

        if (it > 0):
            # NB W[:,0:j] selects columns 0,1,...,j-1
            W[:, j] = orthog(W[:, j], W[:, 0:j])
        s = np.linalg.norm(W[:, j])
        sinv = invcheck(s)
        W[:, j] = sinv * W[:, j]

        # Lanczos process
        while (j < m_b):
            F = mmult(A, W[:, j], TP=True, L=L)
            mprod = mprod + 1

            # apply scaling
            if scale is not None:
                F = F / scale

            F = F - s * V[:, j]
            F = orthog(F, V[:, 0:j + 1])
            fn = np.linalg.norm(F)
            fninv = invcheck(fn)
            F = fninv * F
            if (j < m_b - 1):
                V[:, j + 1] = F
                B[j, j] = s
                B[j, j + 1] = fn
                VJp1 = V[:, j + 1]

                # apply scaling
                if scale is not None:
                    VJp1 = VJp1 / scale

                W[:, j + 1] = mmult(A, VJp1, L=L)
                mprod = mprod + 1

                # apply centering
                # R code: W[, jp1_w] <- W[, jp1_w] - ds * drop(cross(dv, VJP1))
                # * du
                if center is not None:
                    W[:, j + 1] = W[:, j + 1] - np.dot(center, VJp1)

                # One step of classical Gram-Schmidt...
                W[:, j + 1] = W[:, j + 1] - fn * W[:, j]
                # ...with full reorthogonalization
                W[:, j + 1] = orthog(W[:, j + 1], W[:, 0:(j + 1)])
                s = np.linalg.norm(W[:, j + 1])
                sinv = invcheck(s)
                W[:, j + 1] = sinv * W[:, j + 1]
            else:
                B[j, j] = s
            j = j + 1
        # End of Lanczos process
        S = nla.svd(B)
        R = fn * S[0][m_b - 1, :]  # Residuals
        if it == 0:
            smax = S[1][0]  # Largest Ritz value
        else:
            smax = max((S[1][0], smax))

        conv = sum(np.abs(R[0:nu]) < tol * smax)
        if (conv < nu):  # Not converged yet
            k = max(conv + nu, k)
            k = min(k, m_b - 3)
        else:
            break
        # Update the Ritz vectors
        V[:, 0:k] = V[:, 0:m_b].dot(S[2].transpose()[:, 0:k])
        V[:, k] = F
        B = np.zeros((m_b, m_b))
        # Restart: Ritz values on the diagonal, residuals in column k
        B[:k, :k] = np.diag(S[1][:k])
        B[0:k, k] = R[0:k]
        # Update the left approximate singular vectors
        W[:, 0:k] = W[:, 0:m_b].dot(S[0][:, 0:k])
        it = it + 1

    U = W[:, 0:m_b].dot(S[0][:, 0:nu])
    V = V[:, 0:m_b].dot(S[2].transpose()[:, 0:nu])
    # return((U, S[1][0:nu], V, it, mprod))

    return LanczosResult(**{
        'U': U,
        's': S[1][0:nu],
        'V': V,
        'steps': it,
        'nmult': mprod
    })
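# Usage sketch (an assumption: this module's helpers such as multA, orthog and
# invcheck are defined alongside lanczos, and LanczosResult exposes its fields
# as attributes):
# import numpy as np
# A = np.random.randn(200, 80)
# res = lanczos(A, nval=5)
# top5 = np.linalg.svd(A, full_matrices=False)[1][:5]
# print(np.allclose(res.s, top5, atol=1e-4))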
コード例 #57
0
ファイル: util_wct.py プロジェクト: czczup/URST
    def whiten_and_color_np(self, cF, sF):
        # print("*" * 30 + " whiten_and_color begin")

        # ---------------------------------
        # svd for content feature
        cF = cF.data.cpu().numpy()
        cFSize = cF.shape
        c_mean = np.repeat(np.mean(cF, 1), cFSize[1], axis=0).reshape(cFSize)
        cF = cF - c_mean
        contentConv = np.divide(np.matmul(cF, np.transpose(cF)),
                                cFSize[1] - 1) + np.eye(cFSize[0])  # sample covariance plus identity for stability
        # print("-" * 5 + " contentConv np")
        # print(np.abs(contentConv).sum()) # checked, same

        c_u, c_e, c_v = linalg.svd(contentConv)
        c_v = np.transpose(c_v)
        # print("-" * 5 + " content svd np")
        # print(c_u.shape, c_e.shape, c_v.shape)
        # print(np.abs(c_u).sum())
        # print(np.abs(c_e).sum())
        # print(np.abs(c_v).sum())

        k_c = cFSize[0]
        for i in range(cFSize[0]):
            if c_e[i] < EigenValueThre:
                k_c = i
                break
        # print("k_c = %s\n" % k_c)

        # ---------------------------------
        # svd for style feature
        sF = sF.data.cpu().numpy()
        sFSize = sF.shape
        s_mean = np.mean(sF, 1)
        sF = sF - np.repeat(s_mean, sFSize[1], axis=0).reshape(sFSize)
        styleConv = np.divide(np.matmul(sF, np.transpose(sF)), sFSize[1] - 1)
        # print("-" * 5 + " styleConv np")
        # print(np.abs(styleConv).sum()) # checked, same

        s_u, s_e, s_v = linalg.svd(styleConv)
        s_v = np.transpose(s_v)
        # print("-" * 5 + " style svd np")
        # print(s_u.shape, s_e.shape, s_v.shape)
        # print(np.abs(s_u).sum())
        # print(np.abs(s_e).sum())
        # print(np.abs(s_v).sum())

        k_s = sFSize[0]
        for i in range(sFSize[0]):
            if s_e[i] < EigenValueThre:
                k_s = i
                break
        # print("k_s = %s\n" % k_s)

        c_d = pow(c_e[0:k_c], -0.5)
        step1 = np.matmul(c_v[:, 0:k_c], np.diag(c_d))
        step2 = np.matmul(step1, (np.transpose(c_v[:, 0:k_c])))
        whiten_cF = np.matmul(step2, cF)
        # print("*" * 30 + " whiten_cF np")
        # print(np.abs(whiten_cF).sum()) # checked, same

        s_d = pow(s_e[0:k_s], 0.5)
        targetFeature = np.matmul(
            np.matmul(np.matmul(s_v[:, 0:k_s], np.diag(s_d)),
                      np.transpose(s_v[:, 0:k_s])), whiten_cF)
        targetFeature = targetFeature + np.repeat(s_mean, cFSize[1],
                                                  axis=0).reshape(cFSize)
        # print("-" * 5 + " targetFeature np")
        # print(np.abs(targetFeature).sum()) # checked, different

        # print("*" * 30 + " whiten_and_color done\n")
        return torch.from_numpy(targetFeature)
コード例 #58
0
ファイル: ls-cca.py プロジェクト: sonaldangi12/DataScience
print("rxx xy")
print(r11)
print(r12)

# rmat = np.array([[m1.transpose()*m1, m1.transpose()*m2], [m2.transpose()*m1, m2.transpose()*m2]])

# dmat = np.array([[m1.transpose()*m1, np.zeros((rows, n_components), dtype=int)], [np.zeros((rows, n_components), dtype=int), m2.transpose()*m2]])

# tmat = smath.sqrt(r11*(-1)) * r12
# tmat = tmat * smath.sqrt(r22*(-1))

# Note (fix): ** on numpy arrays is elementwise, so r11**(-0.5) is not the
# matrix inverse square root; use scipy's fractional matrix power instead
from scipy.linalg import fractional_matrix_power
r11_inv_sqrt = fractional_matrix_power(r11, -0.5)
r22_inv_sqrt = fractional_matrix_power(r22, -0.5)
tmat = np.dot(r11_inv_sqrt, r12)
tmat = np.dot(tmat, r22_inv_sqrt)

u, s, vh = la.svd(tmat)

# canonical matrices

wx = np.dot(r11_inv_sqrt, u)
wy = np.dot(r22_inv_sqrt, vh.transpose())

print("wx, wy")
print(wx)
print(wy)

# canonical variables

zx = np.dot(wx.transpose(), m1)
zy = np.dot(wy.transpose(), m2)
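# Added note: the singular values of tmat are the canonical correlations
# between the two variable sets
print("canonical correlations:", s)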
コード例 #59
0
muCpp = distribution1D.vectord_cxx(len(mu))
for i in range(len(mu)):
    muCpp[i] = mu[i]
covCpp = distribution1D.vectord_cxx(len(cov))
for i in range(len(cov)):
    covCpp[i] = cov[i]

# call the functions from the crow to compute the svd
covType = "abs"
rank = 4
mvnDistribution = distribution1D.BasicMultivariateNormal(
    covCpp, muCpp, str(covType), rank)

# using numpy to compute the svd
COVn = np.asarray(cov).reshape(-1, int(np.sqrt(len(cov))))  # cov is a flat row-major square matrix
Un, Sn, Vn = LA.svd(COVn, full_matrices=False)
uNp = Un[:, :rank]
sNp = Sn[:rank]
coordinateInOriginalSpace = np.dot(
    uNp, np.dot(np.diag(np.sqrt(sNp)), coordinateInTransformedSpace))

#compute the gold solution:
mu = np.asarray(mu)
coordinateInOriginalSpace += mu

#call crow to compute the coordinate
coordinate = mvnDistribution.coordinateInTransformedSpace(rank)
Xcoordinate = mvnDistribution.coordinateInverseTransformed(coordinate)

coordinate = [coordinate[i] for i in range(4)]
coordinate = np.asarray(coordinate)
コード例 #60
0
def Rotation_translation(K, E, point_X1, point_X2):
    C = np.zeros((3, 1, 4))
    R = np.zeros((3, 3, 4))
    U, S, Vt = LA.svd(E)
    W = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
    #print(U,'U')
    t = U[:, 2].reshape(3, 1)  # translation direction (up to sign)
    C[:, :, 0] = t
    C[:, :, 1] = -t
    C[:, :, 2] = t
    C[:, :, 3] = -t
    R[:, :, 0] = R[:, :, 1] = np.dot(np.dot(U, W), Vt)
    R[:, :, 2] = R[:, :, 3] = np.dot(np.dot(U, W.transpose()), Vt)
    for i in range(4):
        if LA.det(R[:, :, i]) < 0:  # enforce a proper rotation (det = +1)
            R[:, :, i] = -R[:, :, i]
            C[:, :, i] = -C[:, :, i]
            print(LA.det(R[:, :, i]))
    best_C = np.mat(np.zeros((3, 1)))
    best_R = np.mat(np.eye(3))
    best = 0
    point_X1 = np.hstack((point_X1, np.ones((len(point_X1), 1))))
    point_X2 = np.hstack((point_X2, np.ones((len(point_X2), 1))))
    print(point_X1)
    X1_3D = np.ones((4, len(point_X1), 4))
    X2_3D = np.ones((4, len(point_X2), 4))
    for i in range(4):
        num_in_front = 0  # cheirality count: triangulated points in front of the camera
        C_current = np.mat(C[:, :, i])
        R_current = np.mat(R[:, :, i])
        R_mat = np.mat(R[2, :, i])  # third row of R, the camera's viewing direction
        P = np.dot(K, np.eye(3, 4))
        P_2 = np.hstack((R_current, -R_current * C_current))  # P2 = K [R | -RC] for camera center C
        P_2 = np.dot(K, P_2)
        #base=np.mat([0,0,0,1])
        #P_2=np.vstack((P_2,base))
        X1 = point_X1
        X2 = point_X2
        for k in range(len(point_X1)):
            #A=[X1[0,k]*P[2,:]-P[0,:]]
            #two=X1[1,k]*P[2,:]-P[1,:]
            #three=X2[0,k]*P_2[2,:]-P_2[0,:]
            #four=X2[1,k]*P_2[2,:]-P_2[1,:]
            #A=np.vstack((A,two))
            #A=np.vstack((A,three))
            #A=np.vstack((A,four))
            A1 = np.dot(vector_to_skew(X1[k, :]), P)
            A2 = np.dot(vector_to_skew(X2[k, :]), P_2)  # second camera's projection
            A = np.vstack((A1, A2))
            u, s, vt = LA.svd(A)
            v = vt.transpose()
            X = np.asarray(v)[:, 3]  # null vector: homogeneous triangulated point
            X = X / X[3]
            for h in range(4):
                X1_3D[h, k, i] = X[h]

            point_1 = np.mat(X1_3D[0:3, k, i])
            point_1 = point_1.reshape(3, 1)
            if (np.dot(R_mat, (point_1 - C_current))) > 0:
                num_in_front += 1
        if best < num_in_front:
            best = num_in_front
            best_C = C_current
            best_R = R_current
        print(num_in_front, 'points in front')
    #print(best_C,'best C')
    #print(best_R,'best R')
    #print(LA.det(best_R))
    return best_C, best_R
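## vector_to_skew is not defined in this snippet; a standard construction is
## assumed here (not taken from the original file): the skew-symmetric matrix
## [v]_x satisfying np.dot([v]_x, w) == np.cross(v, w):
def vector_to_skew(v):
    return np.array([[0, -v[2], v[1]],
                     [v[2], 0, -v[0]],
                     [-v[1], v[0], 0]], dtype=float)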