Example #1
    def worker():
        x = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50))
        w, v = eigs(x, k=3, v0=v0)
        results.append(w)

        w, v = eigsh(x, k=3, v0=v0)
        results.append(w)
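This snippet comes from a threading test, so `v0` and `results` live in the enclosing scope. A minimal self-contained sketch of that harness (the surrounding names and thread count are assumptions, not shown in the original):

import threading
import numpy as np
from scipy.sparse import diags
from scipy.sparse.linalg import eigs, eigsh

v0 = np.ones(50)      # shared ARPACK starting vector (assumed)
results = []          # shared result list, appended to by all threads (assumed)

def worker():
    x = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50))
    w, v = eigs(x, k=3, v0=v0)
    results.append(w)
    w, v = eigsh(x, k=3, v0=v0)
    results.append(w)

threads = [threading.Thread(target=worker) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()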
Example #2
        def _sparse_left_svd():
            # ARPACK requires k < N, so we cannot ask for all rank(A) eigenvectors
            AA = self.data.transpose()*self.data

            if self.data.shape[1] > 1:
                # compute only k eigenpairs if a reduced rank was requested
                if self._k > 0 and self._k < self.data.shape[1]-1:
                    k = self._k
                else:
                    k = self.data.shape[1]-1
                try:
                    values, v_vectors = linalg.eigen_symmetric(AA,k=k)
                except AttributeError:
                    values, v_vectors = linalg.eigsh(AA,k=k)
            else:
                values, v_vectors = eigh(AA.todense())
            # get rid of too low eigenvalues
            v_vectors = v_vectors[:, values > self._EPS]
            values = values[values > self._EPS]

            # sort eigenvectors according to largest value
            idx = np.argsort(values)
            values = values[idx[::-1]]

            # argsort sorts in ascending order -> access is backwards
            self.V = scipy.sparse.csc_matrix(v_vectors[:,idx[::-1]])

            # compute S
            self.S = scipy.sparse.csc_matrix(np.diag(np.sqrt(values)))

            # and the inverse of it
            S_inv = scipy.sparse.csc_matrix(np.diag(1.0/np.sqrt(values)))

            self.U = self.data * self.V * S_inv
            self.V = self.V.transpose()
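The snippet above computes an SVD by eigendecomposing the (smaller) Gram matrix A.T * A. A standalone sketch of the same trick, with illustrative names rather than the class API:

import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import eigsh

A = sp.random(60, 20, density=0.3, format='csc', random_state=0)

# eigendecompose the small Gram matrix A.T @ A
vals, V = eigsh((A.T @ A).toarray(), k=5)
order = np.argsort(vals)[::-1]          # eigsh returns ascending order
vals, V = vals[order], V[:, order]

S = np.sqrt(vals)                       # singular values
U = A @ (V / S)                         # left singular vectors U = A V S^-1
print(np.allclose(U.T @ U, np.eye(5), atol=1e-8))  # columns are orthonormal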
Example #3
def ED(n):
	#n: number of sites
	J = 1 #coupling for sum_{<ij>}s^z_i s^z_j
	h = 1 #coupling for sum_{i}s^x_i
	row = np.array(range(2**n))

	l = [bin(x)[2:].rjust(n, '0') for x in range(2**n)]
	b = np.array([np.array(list(map(int, i))) for i in l])
	d = np.array([np.array(list(map(int, i))) for i in l])
	onlyone1 = []
	for i in range(n):
		onlyone1.append(b[2**i])
	onlyone1 = np.asarray(onlyone1)
	###########################################################
	'''Sort Tags'''
	T = []
	for i in range(2**n):
		 T.append(calculateTag(b[i]))

	Tsorted = np.asarray(qsort(T))

	###########################################################
	data = [-J*(n-1.)]
	rowcol = [0]
	abc = np.zeros((n-1), dtype=np.double)

	off_row = []
	off_col = []
	off_data = []


	for i in range(2**n):
		'''Diagonal'''
		for j in reversed(range(n-1)):
			if b[i,j]==b[i,j+1]:
				abc[j] = binary_search(Tsorted,calculateTag(b[i]))
			else:
				abc[j] = -binary_search(Tsorted,calculateTag(b[i]))
		if np.sum(abc)!=0:
			rowcol.append(i)
			data.append(-J * (np.sum(abc))/(abs(abc[0])))
		'''Off Diagonal'''		
		for j in range(n):
			off_col.append(binary_search(Tsorted,calculateTag(np.bitwise_xor(d[i],onlyone1[j]))))
			off_row.append(i)
			off_data.append(-h)

	Diagonal = sparse.csr_matrix((data,(rowcol,rowcol)), dtype=np.double).toarray()
	Off_Diagonal = sparse.csr_matrix((off_data, (off_row,off_col)), dtype=np.double).toarray()
	##########################################################
	'''Diagonalize Full Hamiltonian'''

	Ham = Diagonal + Off_Diagonal
	print(Ham)
	vals, vecs = arp.eigsh(Ham, k=1, which='SA')
	print(vals)
Example #4
def arpack_eigsh(A, **kwargs):
    """
    Scipy 0.9 renamed eigen_symmetric to eigsh in
    scipy.sparse.linalg.eigen.arpack
    """
    from scipy.sparse.linalg.eigen import arpack
    if hasattr(arpack, 'eigsh'):
        return arpack.eigsh(A, **kwargs)
    else:
        return arpack.eigen_symmetric(A, **kwargs)
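A hypothetical call site for the wrapper; the keyword arguments are forwarded unchanged to whichever ARPACK entry point the installed scipy provides. Note the `scipy.sparse.linalg.eigen` import path inside the wrapper only exists in those older scipy releases.

from scipy.sparse import diags

A = diags([1, -2, 1], [-1, 0, 1], shape=(100, 100))
w, v = arpack_eigsh(A, k=6, which='SA')   # six smallest eigenvalues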
Example #5
def compute_rank_approx(sz, routes):
    A, b, N, block_sizes, x_true = util.load_data(str(sz)+"/experiment2_waypoints_matrices_routes_"+str(routes)+".mat")
    def matvec_XH_X(x):                                                                 
        return A.dot(A.T.dot(x))
    XH_X = LinearOperator(matvec=matvec_XH_X, dtype=A.dtype, shape=(A.shape[0], A.shape[0]))
    eigvals, eigvec = eigsh(XH_X, k=500, tol=10**-5)
    eigvals = eigvals[::-1]
    for i, val in enumerate(eigvals):
        if val < 10**-6:
            return (N.shape[1], i)
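The LinearOperator pattern above is worth isolating: eigsh never needs A.dot(A.T) materialized, only its action on a vector. A self-contained sketch under that assumption:

import numpy as np
from scipy.sparse.linalg import LinearOperator, eigsh

rng = np.random.default_rng(0)
A = rng.random((200, 1000))

def matvec(x):
    return A.dot(A.T.dot(x))              # implicit A @ A.T, never formed

op = LinearOperator(matvec=matvec, dtype=A.dtype, shape=(200, 200))
w, v = eigsh(op, k=5)                     # five largest eigenvalues of A @ A.T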
Example #6
def arpack_eigsh(A, **kwargs):
    """Compat function for sparse symmetric eigen vectors decomposition

    Scipy 0.9 renamed eigen_symmetric to eigsh in
    scipy.sparse.linalg.eigen.arpack
    """
    from scipy.sparse.linalg.eigen import arpack
    if hasattr(arpack, 'eigsh'):
        return arpack.eigsh(A, **kwargs)
    else:
        return arpack.eigen_symmetric(A, **kwargs)
Example #7
def test_symmetric_no_convergence():
    np.random.seed(1234)
    m = generate_matrix(30, hermitian=True, pos_definite=True)
    tol, rtol, atol = _get_test_tolerance('d')
    try:
        w, v = eigsh(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol)
        raise AssertionError("Spurious no-error exit")
    except ArpackNoConvergence as err:
        k = len(err.eigenvalues)
        if k <= 0:
            raise AssertionError("Spurious no-eigenvalues-found case")
        w, v = err.eigenvalues, err.eigenvectors
        assert_allclose(dot(m, v), w * v, rtol=rtol, atol=atol)
Example #8
 def eval_evec(self,d,typ,k,which,v0=None):
     a=d['mat'].astype(typ)
     if v0 is None:
         v0 = d['v0']
     exact_eval=self.get_exact_eval(d,typ,k,which)
     eval,evec=eigsh(a,k,which=which,v0=v0)
     # check eigenvalues
     assert_array_almost_equal(eval,exact_eval,decimal=_ndigits[typ])
     # check eigenvectors A*evec=eval*evec
     for i in range(k):
         assert_array_almost_equal(dot(a,evec[:,i]),
                                   eval[i]*evec[:,i],
                                   decimal=_ndigits[typ])
Example #9
def test_eigsh_for_k_greater():
    # Test eigsh() for k beyond limits.
    A_sparse = diags([1, -2, 1], [-1, 0, 1], shape=(4, 4))  # sparse
    A = generate_matrix(4, sparse=False)
    M_dense = generate_matrix_symmetric(4, pos_definite=True)
    M_sparse = generate_matrix_symmetric(4, pos_definite=True, sparse=True)
    M_linop = aslinearoperator(M_dense)
    eig_tuple1 = eigh(A, b=M_dense)
    eig_tuple2 = eigh(A, b=M_sparse)

    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning)

        assert_equal(eigsh(A, M=M_dense, k=4), eig_tuple1)
        assert_equal(eigsh(A, M=M_dense, k=5), eig_tuple1)
        assert_equal(eigsh(A, M=M_sparse, k=5), eig_tuple2)

        # M as LinearOperator
        assert_raises(TypeError, eigsh, A, M=M_linop, k=4)

        # Test 'A' for different types
        assert_raises(TypeError, eigsh, aslinearoperator(A), k=4)
        assert_raises(TypeError, eigsh, A_sparse, M=M_dense, k=4)
Example #10
 def pca(self, data):

     # get dimensions
     num_data, dim = data.shape

     if dim > num_data:
         K = np.dot(data, data.T)
         eigen_values, eigen_vectors = eigsh(K, k=np.linalg.matrix_rank(K) - 1, which='LA')
         U = np.dot(data.T, eigen_vectors / np.sqrt(eigen_values))
         eigen_values, eigen_vectors = eigen_values[::-1] / (len(data) - 1), U[:, ::-1]
     else:
         U, eigen_values, eigen_vectors = np.linalg.svd(data, full_matrices=False)
         eigen_vectors = eigen_vectors.T

     return eigen_vectors, eigen_values.cumsum(axis=0) / eigen_values.sum()
Example #11
 def test_no_convergence(self):
     np.random.seed(1234)
     m = np.random.rand(30, 30)
     m = m + m.T
     try:
         w, v = eigsh(m, 4, which='LM', v0=m[:,0], maxiter=5)
         raise AssertionError("Spurious no-error exit")
     except ArpackNoConvergence as err:
         k = len(err.eigenvalues)
         if k <= 0:
             raise AssertionError("Spurious no-eigenvalues-found case")
         w, v = err.eigenvalues, err.eigenvectors
         for ww, vv in zip(w, v.T):
             assert_array_almost_equal(dot(m, vv), ww*vv,
                                       decimal=_ndigits['d'])
Example #12
def ncut(W, nbEigenValues):
	# parameters
	offset = .5
	maxiterations = 100
	eigsErrorTolerance = 1e-6
	truncMin = 1e-6
	eps = 2.2204e-16

	m = shape(W)[1]

	# make sure that W is symmetric; this check is computationally
	# expensive, so enable it only while debugging
	#if (W - W.transpose()).sum() != 0:
	#	print("W should be symmetric!")
	#	exit(0)

	# degrees and regularization
	# S. Yu, "Understanding Popout through Repulsion", CVPR 2001
	# Allows negative values and improves the invertibility of d for
	# small numbers; this is likely what improves the stability of
	# the eigendecomposition.
	d = abs(W).sum(0)
	dr = 0.5 * (d - W.sum(0))
	d = d + offset * 2
	dr = dr + offset

	# calculation of the normalized Laplacian
	W = W + spdiags(dr, [0], m, m, "csc")
	Dinvsqrt = spdiags((1.0 / sqrt(d + eps)), [0], m, m, "csc")
	P = Dinvsqrt * (W * Dinvsqrt)

	# perform the eigendecomposition
	eigen_val, eigen_vec = eigsh(P, nbEigenValues, maxiter=maxiterations, tol=eigsErrorTolerance, which='LA')

	# sort the eigenvalues so that the first is the largest
	i = argsort(-eigen_val)
	eigen_val = eigen_val[i]
	eigen_vec = eigen_vec[:, i]

	# normalize the returned eigenvectors
	eigen_vec = Dinvsqrt * matrix(eigen_vec)
	norm_ones = norm(ones((m, 1)))
	for i in range(0, shape(eigen_vec)[1]):
		eigen_vec[:, i] = (eigen_vec[:, i] / norm(eigen_vec[:, i])) * norm_ones
		if eigen_vec[0, i] != 0:
			eigen_vec[:, i] = -1 * eigen_vec[:, i] * sign(eigen_vec[0, i])

	return (eigen_val, eigen_vec)
Example #13
 def update_bond(self, i):
     j = i + 1
     # get effective Hamiltonian
     Heff = HEffective(self.LPs[i], self.RPs[j], self.H_mpo[i], self.H_mpo[j])
     # Diagonalize Heff, find ground state `theta`
     theta0 = np.reshape(self.psi.get_theta2(i), [Heff.shape[0]])  # initial guess
     e, v = arp.eigsh(Heff, k=1, which='SA', return_eigenvectors=True, v0=theta0)
     theta = np.reshape(v[:, 0], Heff.theta_shape)
     # split and truncate
     Ai, Sj, Bj = split_truncate_theta(theta, self.chi_max, self.eps)
     # put back into MPS
     Gi = np.tensordot(np.diag(self.psi.Ss[i]**(-1)), Ai, axes=[1, 0])  # vL [vL*], [vL] i vC
     self.psi.Bs[i] = np.tensordot(Gi, np.diag(Sj), axes=[2, 0])  # vL i [vC], [vC*] vC
     self.psi.Ss[j] = Sj  # vC
     self.psi.Bs[j] = Bj  # vC j vR
     self.update_LP(i)
     self.update_RP(j)
Example #14
def kpca(data, k):
    """
        Performs the eigen decomposition of the kernel matrix.
        
        arguments:
        * data: 2D numpy array representing the symmetric kernel matrix.
        * k: number of principal components to keep.
        
        return:
        * w: the eigen values of the kernel matrix sorted from
              highest to lowest.
        * u: the corresponding eigen vectors. u[:,i] is the vector
             corresponding to w[i]
             
        Notes: If you want to perform the full decomposition, consider 
               using 'full_kpca' instead.
    """
    w, u = eigsh(data, k=k, which='LA')
    return w[::-1], u[:, ::-1]
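A hypothetical usage sketch (assumes kpca is in scope): build a symmetric RBF kernel matrix and keep the top two components.

import numpy as np

X = np.random.rand(100, 3)
sq_dists = ((X[:, None, :] - X[None, :, :]) ** 2).sum(-1)
K = np.exp(-sq_dists)                 # symmetric positive kernel matrix
w, u = kpca(K, k=2)                   # requires k < K.shape[0]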
Example #15
     def _sparse_left_svd():        
         # ARPACK requires k < N, so we cannot ask for all rank(A) eigenvectors
         AA = self.data.transpose()*self.data
 
         if self.data.shape[1] > 1:                
             # do not compute full rank if desired
             if self._k > 0 and self._k < AA.shape[1]-1:
                 k = self._k
             else:
                 k = self.data.shape[1]-1
             
             # prefer eigsh (scipy >= 0.9); older scipy versions only
             # provide eigen_symmetric
             if hasattr(linalg, 'eigsh'):
                 values, v_vectors = linalg.eigsh(AA, k=k)
             else:
                 values, v_vectors = linalg.eigen_symmetric(AA, k=k)
                                 
         else:                
             values, v_vectors = eigh(AA.todense())    
        
         
         # get rid of negative/too low eigenvalues   
         s = np.where(values > self._EPS)[0]
         v_vectors = v_vectors[:, s] 
         values = values[s]
         
         # sort eigenvectors according to largest value
         idx = np.argsort(values)[::-1]                  
         values = values[idx]
         
         # argsort sorts in ascending order -> access is backwards            
         self.V = scipy.sparse.csc_matrix(v_vectors[:,idx])      
         
         # compute S
         tmp_val = np.sqrt(values)            
         l = len(idx)      
         self.S = scipy.sparse.spdiags(tmp_val, 0, l, l,format='csc') 
         
         # and the inverse of it                                         
         S_inv = scipy.sparse.spdiags(1.0/tmp_val, 0, l, l,format='csc')
         
         self.U = self.data * self.V * S_inv        
         self.V = self.V.transpose()           
Example #16
def pca(data, k):
    """
        Performs the eigen decomposition of the covariance matrix.
        
        arguments:
        * data: 2D numpy array where each row is a sample and
                each column a feature.
        * k: number of principal components to keep.
        
        return:
        * w: the eigen values of the covariance matrix sorted from
              highest to lowest.
        * u: the corresponding eigen vectors. u[:,i] is the vector
             corresponding to w[i]
             
        Notes: If the number of samples is much smaller than the number
               of features, you should consider the use of 'svd_pca'.
    """
    cov = np.cov(data.T)
    w, u = eigsh(cov, k=k, which='LA')
    return w[::-1], u[:, ::-1]
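A hypothetical usage sketch (assumes pca is in scope); note that eigsh requires k to be strictly smaller than the number of features, i.e. the size of the covariance matrix.

import numpy as np

data = np.random.rand(200, 10)        # 200 samples, 10 features
w, u = pca(data, k=3)
print(w)                              # three largest covariance eigenvalues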
Example #17
def chebyshev_polynomials(adj, k):
    """Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation)."""
    print("Calculating Chebyshev polynomials up to order {}...".format(k))

    adj_normalized = normalize_adj(adj)
    laplacian = sp.eye(adj.shape[0]) - adj_normalized
    largest_eigval, _ = eigsh(laplacian, 1, which='LM')
    scaled_laplacian = (2. / largest_eigval[0]) * laplacian - sp.eye(adj.shape[0])

    t_k = list()
    t_k.append(sp.eye(adj.shape[0]))
    t_k.append(scaled_laplacian)

    def chebyshev_recurrence(t_k_minus_one, t_k_minus_two, scaled_lap):
        s_lap = sp.csr_matrix(scaled_lap, copy=True)
        return 2 * s_lap.dot(t_k_minus_one) - t_k_minus_two

    for i in range(2, k+1):
        t_k.append(chebyshev_recurrence(t_k[-1], t_k[-2], scaled_laplacian))

    return sparse_to_tuple(t_k)
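normalize_adj and sparse_to_tuple are project helpers, so here is a trimmed, self-contained sketch of the same pipeline on a small path graph (the helpers are inlined or skipped): scale the normalized Laplacian into [-1, 1], then run the recurrence T_k = 2 L~ T_{k-1} - T_{k-2}.

import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import eigsh

adj = sp.diags([1., 1.], [-1, 1], shape=(10, 10), format='csr')  # path graph
deg = np.asarray(adj.sum(1)).flatten()
d_inv_sqrt = sp.diags(1.0 / np.sqrt(deg))
laplacian = sp.eye(10) - d_inv_sqrt @ adj @ d_inv_sqrt

lam_max = eigsh(laplacian, 1, which='LM', return_eigenvectors=False)[0]
scaled = (2. / lam_max) * laplacian - sp.eye(10)

t_k = [sp.eye(10), scaled]
for _ in range(2, 4):                       # polynomials up to order 3
    t_k.append(2 * scaled @ t_k[-1] - t_k[-2])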
Example #18
        def _sparse_right_svd():
            # ARPACK requires k < N, so we cannot ask for all rank(A) eigenvectors
            AA = self.data * self.data.transpose()

            if self.data.shape[0] > 1:
                # only compute a few eigenvectors ...
                if self._k > 0 and self._k < self.data.shape[0] - 1:
                    k = self._k
                else:
                    k = self.data.shape[0] - 1
                # prefer eigsh (scipy >= 0.9); older scipy versions only
                # provide eigen_symmetric
                if hasattr(linalg, "eigsh"):
                    values, u_vectors = linalg.eigsh(AA, k=k)
                else:
                    values, u_vectors = linalg.eigen_symmetric(AA, k=k)
            else:
                values, u_vectors = eigh(AA.todense())

            # get rid of negative/too low eigenvalues
            s = np.where(values > self._EPS)[0]
            u_vectors = u_vectors[:, s]
            values = values[s]

            # sort eigenvectors according to largest value
            # argsort sorts in ascending order -> access is backwards
            idx = np.argsort(values)[::-1]
            values = values[idx]

            self.U = scipy.sparse.csc_matrix(u_vectors[:, idx])

            # compute S
            tmp_val = np.sqrt(values)
            l = len(idx)
            self.S = scipy.sparse.spdiags(tmp_val, 0, l, l, format="csc")

            # and the inverse of it
            S_inv = scipy.sparse.spdiags(1.0 / tmp_val, 0, l, l, format="csc")

            # compute V from it
            self.V = self.U.transpose() * self.data
            self.V = S_inv * self.V
Example #19
def extern_pca(data, k):
    """
        Performs the eigen decomposition of the covariance matrix based
        on the eigen decomposition of the exterior product matrix.
        
        
        arguments:
        * data: 2D numpy array where each row is a sample and
                each column a feature.
        * k: number of principal components to keep.
        
        return:
        * w: the eigen values of the covariance matrix sorted from
              highest to lowest.
        * u: the corresponding eigen vectors. u[:,i] is the vector
             corresponding to w[i]

        Notes: This function computes PCA based on the exterior product
               matrix (K = X*X.T) instead of the covariance matrix
               (C = X.T*X/(n-1)) and uses relations based on the
               singular value decomposition to compute the final
               eigen vectors. While this can be much faster when the
               number of samples is much smaller than the number of
               features, it can lead to a loss of precision.

               The (centered) data matrix X can be decomposed as:
                    X.T = U * S * v.T
               One computes the eigen decomposition of:
                    X * X.T = v * S^2 * v.T
               and the eigen vectors of the covariance matrix are
               computed as:
                    U = X.T * v * S^(-1)
    """
    data_m = data - data.mean(0)
    K = np.dot(data_m, data_m.T)
    w, v = eigsh(K, k=k, which='LA')
    U = np.dot(data_m.T, v / np.sqrt(w))  # use the centered data, consistent with K
    return w[::-1] / (len(data) - 1), U[:, ::-1]
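A hypothetical sanity check (assumes extern_pca is in scope): the eigenvalues recovered through the exterior product matrix should match a direct eigendecomposition of the covariance matrix.

import numpy as np

data = np.random.rand(50, 500)        # few samples, many features
w_ext, u_ext = extern_pca(data, k=3)
w_cov = np.linalg.eigvalsh(np.cov(data.T))[::-1][:3]
print(np.allclose(w_ext, w_cov))      # expected: True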
Example #20
 def _sparse_right_svd():
     # ARPACK requires k < N, so we cannot ask for all rank(A) eigenvectors
     AA = self.data*self.data.transpose()
     
     if self.data.shape[0] > 1:                    
         # only compute a few eigenvectors ...
         if self._k > 0 and self._k < self.data.shape[0]-1:
             k = self._k
         else:
             k = self.data.shape[0]-1
         values, u_vectors = linalg.eigsh(AA,k=k)
     else:                
         values, u_vectors = eigh(AA.todense())
     
     # get rid of negative/too low eigenvalues   
     s = np.where(values > self._EPS)[0]
     u_vectors = u_vectors[:, s] 
     values = values[s]
     
     # sort eigenvectors according to largest value
     # argsort sorts in ascending order -> access is backwards
     idx = np.argsort(values)[::-1]
     values = values[idx]                        
     
     self.U = scipy.sparse.csc_matrix(u_vectors[:,idx])
             
     # compute S
     tmp_val = np.sqrt(values)            
     l = len(idx)
     self.S = scipy.sparse.spdiags(tmp_val, 0, l, l,format='csc') 
     
     # and the inverse of it            
     S_inv = scipy.sparse.spdiags(1.0/tmp_val, 0, l, l,format='csc')
     
     # compute V from it
     self.V = self.U.transpose() * self.data
     self.V = S_inv * self.V
Example #21
def chebyshev_polynomials(adj, k):
    """Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation)."""
    print("Calculating Chebyshev polynomials up to order {}...".format(k))

    adj_normalized = normalize_adj(adj)
    laplacian = sp.eye(adj.shape[0]) - adj_normalized
    largest_eigval, _ = eigsh(laplacian, 1, which='LM')
    #https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.eigsh.html
    # sp.eye(adj.shape[0]) is the identity matrix
    scaled_laplacian = (2. / largest_eigval[0]) * laplacian - sp.eye(
        adj.shape[0])

    t_k = list()
    t_k.append(sp.eye(adj.shape[0]))
    t_k.append(scaled_laplacian)

    def chebyshev_recurrence(t_k_minus_one, t_k_minus_two, scaled_lap):
        s_lap = sp.csr_matrix(scaled_lap, copy=True)
        return 2 * s_lap.dot(t_k_minus_one) - t_k_minus_two

    for i in range(2, k + 1):
        t_k.append(chebyshev_recurrence(t_k[-1], t_k[-2], scaled_laplacian))

    return sparse_to_tuple(t_k)
Example #22
def chebyshev_polynomials(adj, k):
    """Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation).
    Detail: scaled laplacian = 2 / \lambda_1 * (I - D^(-1/2)AD^(-1/2)) - I"""
    print("Calculating Chebyshev polynomials up to order {}...".format(k))

    adj_normalized = normalize_adj(adj)
    laplacian = sp.eye(adj.shape[0]) - adj_normalized
    largest_eigval, _ = eigsh(laplacian, 1, which='LM')
    scaled_laplacian = (2. / largest_eigval[0]) * laplacian - sp.eye(
        adj.shape[0])

    t_k = list()
    t_k.append(sp.eye(adj.shape[0]))
    t_k.append(scaled_laplacian)

    def chebyshev_recurrence(t_k_minus_one, t_k_minus_two, scaled_lap):
        """Detail: Chebyshev recurrence: T_n = 2 * x * T_{n-1} - T_{n-2}"""
        s_lap = sp.csr_matrix(scaled_lap, copy=True)
        return 2 * s_lap.dot(t_k_minus_one) - t_k_minus_two

    for i in range(2, k + 1):
        t_k.append(chebyshev_recurrence(t_k[-1], t_k[-2], scaled_laplacian))

    return sparse_to_tuple(t_k)
Example #23
def chebyshev_polynomials(adj, k,st=False):
    """Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation)."""
    #print("Calculating Chebyshev polynomials up to order {}...".format(k))

    adj_normalized = normalize_adj(adj)
    laplacian = sp.eye(adj.shape[0]) - adj_normalized
    largest_eigval, _ = eigsh(laplacian, 1, which='LM')
    scaled_laplacian = (2. / largest_eigval[0]) * laplacian - sp.eye(adj.shape[0])

    t_k = list()
    t_k.append(sp.eye(adj.shape[0]))
    t_k.append(scaled_laplacian)

    def chebyshev_recurrence(t_k_minus_one, t_k_minus_two, scaled_lap):
        s_lap = sp.csr_matrix(scaled_lap, copy=True)
        return 2 * s_lap.dot(t_k_minus_one) - t_k_minus_two

    for i in range(2, k+1):
        t_k.append(chebyshev_recurrence(t_k[-1], t_k[-2], scaled_laplacian))

    if not st:
        return sparse_to_tuple(t_k)
    else:
        return t_k
Example #24
def get_E_Ising_exact(g, h, J, L):
    H = get_H_Ising(g, h, J, L)
    e = arp.eigsh(H,k=1,which='SA',return_eigenvectors=False)
    return(e)
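get_H_Ising is defined elsewhere in the project; below is a minimal hypothetical stand-in for a transverse-field Ising chain (J sigma^z sigma^z coupling, transverse field g, longitudinal field h, sign conventions assumed) so the snippet can be run end to end.

import numpy as np
import scipy.sparse as sparse

def get_H_Ising(g, h, J, L):
    sx = sparse.csr_matrix([[0., 1.], [1., 0.]])
    sz = sparse.csr_matrix([[1., 0.], [0., -1.]])
    id2 = sparse.identity(2, format='csr')

    def site_op(op, i):
        # kron-embed a single-site operator at position i
        out = sparse.identity(1, format='csr')
        for j in range(L):
            out = sparse.kron(out, op if j == i else id2, format='csr')
        return out

    H = sparse.csr_matrix((2 ** L, 2 ** L))
    for i in range(L - 1):
        H = H - J * (site_op(sz, i) @ site_op(sz, i + 1))
    for i in range(L):
        H = H - g * site_op(sx, i) - h * site_op(sz, i)
    return H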
Example #25
def generate_graph_features(glycan, libr=None):
    """compute graph features of glycan\n
    | Arguments:
    | :-
    | glycan (string): glycan in IUPAC-condensed format
    | libr (list): library of monosaccharides; if you have one use it, otherwise a comprehensive lib will be used\n
    | Returns:
    | :-
    | Returns a pandas dataframe with different graph features as columns and glycan as row
    """
    if libr is None:
        libr = lib
    g = glycan_to_nxGraph(glycan, libr=libr)
    #nbr of different node features:
    nbr_node_types = len(set(nx.get_node_attributes(g, "labels")))
    #adjacency matrix:
    A = nx.to_numpy_matrix(g)
    N = A.shape[0]
    diameter = nx.algorithms.distance_measures.diameter(g)
    deg = np.array([np.sum(A[i, :]) for i in range(N)])
    dens = np.sum(deg) / 2
    avgDeg = np.mean(deg)
    varDeg = np.var(deg)
    maxDeg = np.max(deg)
    nbrDeg4 = np.sum(deg > 3)
    branching = np.sum(deg > 2)
    nbrLeaves = np.sum(deg == 1)
    deg_to_leaves = np.array([np.sum(A[:, deg == 1]) for i in range(N)])
    max_deg_leaves = np.max(deg_to_leaves)
    mean_deg_leaves = np.mean(deg_to_leaves)
    deg_assort = nx.degree_assortativity_coefficient(g)
    betweeness_centr = np.array(
        pd.DataFrame(nx.betweenness_centrality(g), index=[0]).iloc[0, :])
    betweeness = np.mean(betweeness_centr)
    betwVar = np.var(betweeness_centr)
    betwMax = np.max(betweeness_centr)
    betwMin = np.min(betweeness_centr)
    eigen = np.array(
        pd.DataFrame(nx.katz_centrality_numpy(g), index=[0]).iloc[0, :])
    eigenMax = np.max(eigen)
    eigenMin = np.min(eigen)
    eigenAvg = np.mean(eigen)
    eigenVar = np.var(eigen)
    close = np.array(
        pd.DataFrame(nx.closeness_centrality(g), index=[0]).iloc[0, :])
    closeMax = np.max(close)
    closeMin = np.min(close)
    closeAvg = np.mean(close)
    closeVar = np.var(close)
    flow = np.array(
        pd.DataFrame(nx.current_flow_betweenness_centrality(g),
                     index=[0]).iloc[0, :])
    flowMax = np.max(flow)
    flowMin = np.min(flow)
    flowAvg = np.mean(flow)
    flowVar = np.var(flow)
    flow_edge = np.array(
        pd.DataFrame(nx.edge_current_flow_betweenness_centrality(g),
                     index=[0]).iloc[0, :])
    flow_edgeMax = np.max(flow_edge)
    flow_edgeMin = np.min(flow_edge)
    flow_edgeAvg = np.mean(flow_edge)
    flow_edgeVar = np.var(flow_edge)
    load = np.array(pd.DataFrame(nx.load_centrality(g), index=[0]).iloc[0, :])
    loadMax = np.max(load)
    loadMin = np.min(load)
    loadAvg = np.mean(load)
    loadVar = np.var(load)
    harm = np.array(
        pd.DataFrame(nx.harmonic_centrality(g), index=[0]).iloc[0, :])
    harmMax = np.max(harm)
    harmMin = np.min(harm)
    harmAvg = np.mean(harm)
    harmVar = np.var(harm)
    secorder = np.array(
        pd.DataFrame(nx.second_order_centrality(g), index=[0]).iloc[0, :])
    secorderMax = np.max(secorder)
    secorderMin = np.min(secorder)
    secorderAvg = np.mean(secorder)
    secorderVar = np.var(secorder)
    x = np.array([len(nx.k_corona(g, k).nodes()) for k in range(N)])
    size_corona = x[x > 0][-1]
    k_corona = np.where(x == x[x > 0][-1])[0][-1]
    x = np.array([len(nx.k_core(g, k).nodes()) for k in range(N)])
    size_core = x[x > 0][-1]
    k_core = np.where(x == x[x > 0][-1])[0][-1]
    M = ((A + np.diag(np.ones(N))).T / (deg + 1)).T
    eigval, vec = eigsh(M, 2, which='LM')
    egap = 1 - eigval[0]
    distr = np.abs(vec[:, -1])
    distr = distr / sum(distr)
    entropyStation = np.sum(distr * np.log(distr))
    features = np.array([
        diameter, branching, nbrLeaves, avgDeg, varDeg, maxDeg, nbrDeg4,
        max_deg_leaves, mean_deg_leaves, deg_assort, betweeness, betwVar,
        betwMax, eigenMax, eigenMin, eigenAvg, eigenVar, closeMax, closeMin,
        closeAvg, closeVar, flowMax, flowAvg, flowVar, flow_edgeMax,
        flow_edgeMin, flow_edgeAvg, flow_edgeVar, loadMax, loadAvg, loadVar,
        harmMax, harmMin, harmAvg, harmVar, secorderMax, secorderMin,
        secorderAvg, secorderVar, size_corona, size_core, nbr_node_types, egap,
        entropyStation, N, dens
    ])
    col_names = [
        'diameter', 'branching', 'nbrLeaves', 'avgDeg', 'varDeg', 'maxDeg',
        'nbrDeg4', 'max_deg_leaves', 'mean_deg_leaves', 'deg_assort',
        'betweeness', 'betwVar', 'betwMax', 'eigenMax', 'eigenMin', 'eigenAvg',
        'eigenVar', 'closeMax', 'closeMin', 'closeAvg', 'closeVar', 'flowMax',
        'flowAvg', 'flowVar', 'flow_edgeMax', 'flow_edgeMin', 'flow_edgeAvg',
        'flow_edgeVar', 'loadMax', 'loadAvg', 'loadVar', 'harmMax', 'harmMin',
        'harmAvg', 'harmVar', 'secorderMax', 'secorderMin', 'secorderAvg',
        'secorderVar', 'size_corona', 'size_core', 'nbr_node_types', 'egap',
        'entropyStation', 'N', 'dens'
    ]
    feat_dic = {col_names[k]: features[k] for k in range(len(features))}
    return pd.DataFrame(feat_dic, index=[glycan])
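A hypothetical call with an N-glycan core in IUPAC-condensed notation (assumes glycowork's bundled monosaccharide library is available and generate_graph_features is in scope):

df = generate_graph_features('Man(a1-3)[Man(a1-6)]Man(b1-4)GlcNAc(b1-4)GlcNAc')
print(df.T)                           # one row per graph feature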
Example #26
def get_E_XXZ_exact(g,J,L):
    H = get_H_XXZ(g, J, L)
    e = arp.eigsh(H,k=1,which='SA',return_eigenvectors=False)
    return(e)
Example #27
def rmsd(n: int, coord1: np.ndarray,
         coord2: np.ndarray) -> Tuple[float, np.ndarray]:
    """Calculate the least square rmsd in Angstrom for two
    coordinate sets coord1(n,3) and coord2(n,3) using a method based on
    quaternions."""

    from kallisto.units import Bohr

    # copy original coordinates
    x = np.zeros(shape=(n, 3), dtype=np.float64)
    y = np.zeros(shape=(n, 3), dtype=np.float64)

    x[0:n, :] = coord1[0:n, :]
    y[0:n, :] = coord2[0:n, :]

    # calculate the barycenters, centroidal coordinates, and the norms
    x_norm = 0.0
    y_norm = 0.0
    x_center = np.zeros(shape=(3, ), dtype=np.float64)
    y_center = np.zeros(shape=(3, ), dtype=np.float64)
    xi = np.zeros(shape=(n, ), dtype=np.float64)
    yi = np.zeros(shape=(n, ), dtype=np.float64)

    for i in range(3):
        for j in range(n):
            xi[j] = x[j, i] * Bohr
            yi[j] = y[j, i] * Bohr
        x_center[i] = np.sum(xi) / float(n)
        y_center[i] = np.sum(yi) / float(n)
        xi[:] = xi[:] - x_center[i]
        yi[:] = yi[:] - y_center[i]
        x[:, i] = xi[:]
        y[:, i] = yi[:]
        x_norm += np.dot(xi, xi)
        y_norm += np.dot(yi, yi)

    # calculate the R matrix
    rmat = np.zeros(shape=(3, 3), dtype=np.float64)
    for i in range(3):
        for j in range(3):
            rmat[i, j] = np.dot(x[:, i], y[:, j])

    # calculate the S matrix (quaternions)
    # use fac = -1 instead of u.T below
    fac = -1
    smat = np.zeros(shape=(4, 4), dtype=np.float64)
    smat[0, 0] = rmat[0, 0] + rmat[1, 1] + rmat[2, 2]
    smat[1, 0] = fac * (rmat[1, 2] - rmat[2, 1])
    smat[2, 0] = fac * (rmat[2, 0] - rmat[0, 2])
    smat[3, 0] = fac * (rmat[0, 1] - rmat[1, 0])

    smat[0, 1] = smat[1, 0]
    smat[1, 1] = rmat[0, 0] - rmat[1, 1] - rmat[2, 2]
    smat[2, 1] = rmat[0, 1] + rmat[1, 0]
    smat[3, 1] = rmat[0, 2] + rmat[2, 0]

    smat[0, 2] = smat[2, 0]
    smat[1, 2] = smat[2, 1]
    smat[2, 2] = -rmat[0, 0] + rmat[1, 1] - rmat[2, 2]
    smat[3, 2] = rmat[1, 2] + rmat[2, 1]

    smat[0, 3] = smat[3, 0]
    smat[1, 3] = smat[3, 1]
    smat[2, 3] = smat[3, 2]
    smat[3, 3] = -rmat[0, 0] - rmat[1, 1] + rmat[2, 2]

    # calculate largest eigenvalue and eigenvector
    eigenval, eigenvec = eigsh(smat, 1, which="LA")
    eigenvec = eigenvec.squeeze()

    # convert quaternion eigenvec to rotation matrix U
    u = rotationMatrix(eigenvec)

    # root mean squared deviation
    error = np.sqrt(
        np.maximum(
            0.0,
            ((x_norm + y_norm) - 2 * eigenval) / float(n),
        ))

    return error, u
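A hypothetical self-check (assumes kallisto supplies Bohr and rotationMatrix, as the function imports them): identical coordinate sets should give a near-zero RMSD and a rotation close to the identity, up to the quaternion sign ambiguity.

import numpy as np

coords = np.random.rand(5, 3)
error, u = rmsd(5, coords, coords.copy())
print(round(error, 8))                # ~0.0
print(np.allclose(u, np.eye(3), atol=1e-6))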
Example #28
            saver.restore(sess, args.resume)
            print("Model restored.")

        # writer
        writer = tf.summary.FileWriter(make_dir(args.logdir), sess.graph)

        # training and eval
        for epoch in range(args.maxiter):
            data_tuple = (train_set.X, train_set.Y)
            noise = np.zeros(model.n_weights)
            if epoch % args.frequence == 0:
                # eval matrix
                covariance, _ = eval_Cov(sess, one_loader(), model)
                l2norm_cov = np.linalg.norm(covariance, ord='fro')
                trnorm_cov = np.sqrt(covariance.trace())
                ecov, vcov = eigsh(covariance, args.lead, which='LM')
                summary = sess.run(matrix_summary_merged,
                                   feed_dict={
                                       _l2norm_cov: l2norm_cov,
                                       _trnorm_cov: trnorm_cov,
                                       _ecov: ecov
                                   })
                writer.add_summary(summary, epoch)
                noise = np.matmul(vcov,
                                  np.random.normal(0, np.sqrt(np.abs(ecov))))

            # training step
            loss, acc = train(sess, [data_tuple], model, args.lr,
                              args.keep_prob, noise)
            summary = sess.run(train_summary_merged,
                               feed_dict={
Example #29
def lmax(L, normalized=True):
    """Upper-bound on the spectrum."""
    if normalized:
        return 2
    else:
        return eigsh(L, k=1, which='LM', return_eigenvectors=False)[0]
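Hypothetical usage (assumes lmax is in scope): with normalized=False the bound is actually computed via ARPACK; here on a toy tridiagonal Laplacian-like matrix.

import scipy.sparse as sp

L = sp.diags([2., -1., -1.], [0, -1, 1], shape=(50, 50), format='csc')
print(lmax(L, normalized=False))      # largest-magnitude eigenvalue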
Example #30
        writer = tf.summary.FileWriter(make_dir(args.logdir), sess.graph)

        # training and eval
        for epoch in range(args.maxiter):
            data_tuple = (train_set.X, train_set.Y)
            # update matrix every ** iters
            if epoch % args.frequence == 0:
                # eval matrix
                hessian = eval_Hess(sess, [data_tuple], model)
                covariance, grad_mean = eval_Cov(sess, one_loader(), model)
                l2norm_hess = np.linalg.norm(hessian, ord='fro')
                l2norm_cov = np.linalg.norm(covariance, ord='fro')
                trnorm_hess = np.sqrt(hessian.trace())
                trnorm_cov = np.sqrt(covariance.trace())
                hessian /= l2norm_hess
                covariance /= l2norm_cov
                ehess, vhess = eigsh(hessian, args.lead, which='LM')
                ecov, vcov = eigsh(covariance, args.lead, which='LM')
                summary = sess.run(matrix_summary_merged, feed_dict={
                    _grad: grad_mean,
                    _l2norm_hess: l2norm_hess,
                    _l2norm_cov: l2norm_cov,
                    _trnorm_hess: trnorm_hess,
                    _trnorm_cov: trnorm_cov,
                    _ehess: ehess,
                    _ecov: ecov,
                    _vhess: vhess,
                    _vcov: vcov})
                writer.add_summary(summary, epoch)
                ehessBar = ehess / np.sum(ehess) * np.square(trnorm_cov)
            noise = np.matmul(vhess, np.random.normal(0, np.sqrt(np.abs(ehessBar))))
Example #31
 def eigs_np(indices, values, dense_shape):
     A_sp = sp.coo_matrix((values, indices.T), shape=dense_shape)
     return eigsh(A_sp, n, which="LM")[0]
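Note that `n` (the number of eigenvalues to request) and the `sp` alias for scipy.sparse are captured from the enclosing scope. A hypothetical call site (assumes eigs_np is in scope at top level):

import numpy as np

n = 3                                  # captured by eigs_np
indices = np.array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]])
values = np.array([5., 4., 3., 2., 1.])
print(eigs_np(indices, values, dense_shape=(5, 5)))  # -> [3. 4. 5.]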
Example #32
def maxeig_normalize(mx):
    """Normalize sparse matrix"""
    maxeig = eigsh(mx, 1, which='LA')[0][0]
    mx /= maxeig
    return mx
Example #33
    def buildbasis(self):
        self.A = np.load(
            '/home/hyf/MobileTrafficPrediction/src/models/kegra/wight_matrix_lb_weekly_4070_2.npy'
        )
        #self.A = np.load('/home/hyf/MobileTrafficPrediction/src/models/kegra/wight_matrix_lb_weekly_4070.npy')
        if self.filter == 'localpool':
            print('using local...')
            #self.A = normalize_adj(self.A)
            adj = sp.coo_matrix(self.A)
            print('building diags..')
            d = sp.diags(np.power(np.array(adj.sum(1)), -0.5).flatten(), 0)
            print('normalizing..')
            a_norm = adj.dot(d).transpose().dot(d)
            print('csr to array..')
            self.A = a_norm.toarray()
            #discard far neighbor(those with larger weight)
            #self.A[np.where(self.A < 0.9)] = 0
            #self.A = np.load('wight_matrix_lb_weekly_4070.npy')
            self.A = tf.convert_to_tensor(self.A, dtype='float32')
            self.basis.append(self.A)
        elif self.filter == 'chebyshev':
            print('using chebyshev...')
            #L = normalized_laplacian(self.A)
            adj = sp.coo_matrix(self.A)
            d = sp.diags(np.power(np.array(adj.sum(1)), -0.5).flatten(), 0)
            a_norm = adj.dot(d).transpose().dot(d)
            laplacian = sp.eye(adj.shape[0]) - a_norm
            #L_scaled = rescale_laplacian(L)
            try:
                print(
                    'calculating largest eigenvalue of normalized graph laplacian...'
                )
                largest_eigval = eigsh(laplacian,
                                       1,
                                       which='LM',
                                       return_eigenvectors=False)[0]
            except ArpackNoConvergence:
                print(
                    'Eigenvalue calculation did not converge! Using largest_eigval = 2 instead.'
                )
                largest_eigval = 2
            X = (2. / largest_eigval) * laplacian - sp.eye(
                laplacian.shape[0])  #L_SCALED
            #T_k = chebyshev_polynomial(L_scaled, MAX_DEGREE)
            print('calculating chebyshev polynomials up to order {}...'.format(
                MAX_DEGREE))
            T_k = list()
            T_k.append(sp.eye(X.shape[0]).tocsr())
            T_k.append(X)

            def chebyshev_recurrence(T_k_1, T_k_2, X):
                X_ = sp.csr_matrix(X, copy=True)
                return 2 * X_.dot(T_k_1) - T_k_2

            for i in range(2, MAX_DEGREE + 1):
                T_k.append(chebyshev_recurrence(T_k[-1], T_k[-2], X))

            self.support = MAX_DEGREE + 1
            self.basis = [
                tf.convert_to_tensor(i.toarray(), dtype='float32') for i in T_k
            ]
Example #34
def bregman_func(A_in,
                 En_in,
                 method=5,
                 mu=0.001,
                 lbd=3.,
                 maxIter=100,
                 tol=1E-4):
    # Open the A matrix file
    if isinstance(A_in, str):
        A = scipy.io.mmread(A_in).astype(np.double)
    else:
        A = A_in
    Nstruc, Ncorr = A.shape
    if isinstance(En_in, str):
        En = np.loadtxt(En_in).astype(np.double)
    else:
        En = En_in
    # print(A, En, En.shape)
    assert Nstruc == En.shape[0]
    ecis = np.zeros(Ncorr)

    if method <= 2:
        # FPC requires that max eigenvector of A.A^T <=1
        rescale_factor = eigsh(A.dot(A.T),
                               3,
                               which='LM',
                               return_eigenvectors=False)[-1]
        rescale_factor = np.sqrt(rescale_factor)
        A = A / rescale_factor
        En = En / rescale_factor
    tau = min(1.999, -1.665 * float(Nstruc) / float(Ncorr) + 2.665)
    # print("FPC step size tau =", tau)

    # print("method=", method)
    # print("mu=", mu)
    # print("lambda=", lbd)

    # Values in Fortran are passed by reference, so we need to convert
    # the types and then pass by reference
    if (method in (1, 3, 5)) and scipy.sparse.issparse(A):
        A = A.todense()

    # Now scale the mu parameter by Nstruc for convenience
    mu2 = mu * Nstruc

    if method == 1:
        ecis = bregman.bregmanfpc(maxIter, tol, tau, mu, A, En)
    elif method == 2:
        ecis = bregman.sparsebregmanfpc(maxIter, tol, tau, mu, A, En)
    elif method == 3:
        ecis = bregman.splitbregman(maxIter, tol, tau, mu, A, En)
    elif method == 4:
        ecis = bregman.sparsesplitbregman(maxIter, tol, tau, 1 / mu, A, En)
    elif method == 5:
        # right preconditioning
        ecis = bregman.bregmanrprecond(maxIter, A, En, 1 / mu, lbd, tol)
    elif method == 6:
        # sparse right preconditioning
        print(" WARNING: DO NOT USE; NOT IMPLEMENTED YET")
        ecis = bregman.sparsebregmanrprecond(A, En, 1 / mu, lbd)
    else:
        raise ValueError("Unknown bregman method %d", method)

    return ecis
Example #35
def ED_2D(n,m,h):
	J = 1 
	
	row = np.array(range(2**(n*m)))

	l = [bin(x)[2:].rjust(n*m, '0') for x in range(2**(n*m))]
	b = np.array([np.array(list(map(int, i))) for i in l])
	d = np.array([np.array(list(map(int, i))) for i in l])
	onlyone1 = []
	for i in range((n*m)):
		onlyone1.append(b[2**i])
	onlyone1 = np.asarray(onlyone1)

	###########################################################
	'''Sort Tags'''
	T = []
	for i in range(2**(n*m)):
		 T.append(calculateTag(b[i]))

	Tsorted = np.asarray(qsort(T))

	#for i in range(len(T)):
	#	print T[i], Tsorted[i]

	Tindex = sorted(range(len(T)), key=lambda k: T[k])
	#print Tindex
	#print Tindex[4]
	#print sorted(range(len(Tsorted)), key=lambda k: Tsorted[k])

	###########################################################
	data = []
	data1 = []
	data2 = []
	data3 = []
	rowcol = []
	abc1 = np.zeros(((n*m)-1), dtype=np.double)
	abc2 = np.zeros(((n*m)-1), dtype=np.double)
	v = []
	f = []
	off_row = []
	off_col = []
	off_data = []
	
	for i in range(2**(n*m)):
		'''Diagonal'''

		'''horizontal bonds'''
		v1 = 0
		v2 = 0
		for j in range(n):
			for k in range(m-1):
				if b[i,k+j*m] == b[i,k+1+j*m]:
					v1 +=1
				else:
					v1 -=1
		data1.append(-J*v1)

		'''vertical bonds'''
		for j in range((n-1)*m):
			if b[i,j] == b[i,j+m]:
				v2 += 1
			else:
				v2 -= 1
		data2.append(-J*v2)
		
		'''Off Diagonal'''		
		for j in range(n*m):
				off_col.append(Tindex[binary_search(Tsorted,calculateTag(np.bitwise_xor(d[i],onlyone1[j])))])
				
				#print d[i], onlyone1[j], np.bitwise_xor(d[i],onlyone1[j]), 'index:', Tindex[binary_search(Tsorted,calculateTag(np.bitwise_xor(d[i],onlyone1[j])))]
				
				off_row.append(i)
				off_data.append(-h)
	
	for i in range(len(data1)):
		data3.append(data1[i] + data2[i])
	for i in range(len(data3)):
		if data3[i] != 0:
			data.append(data3[i])
			rowcol.append(i)
	
	Diagonal = sparse.csr_matrix((data,(rowcol,rowcol)), dtype=np.double).toarray()
	Off_Diagonal = sparse.csr_matrix((off_data, (off_row,off_col)), dtype=np.double).toarray()
	
	##########################################################
	'''Diagonalize Full Hamiltonian'''
	#print Off_Diagonal
	Ham = Diagonal + Off_Diagonal
	
	#print Ham
	
	vals, vecs = arp.eigsh(Ham, k=1, which='SA')
	return vals[0],vecs
Example #36
    D[i][i] = g.vs.degree()[i]

D_I = np.zeros((g.vcount(), g.vcount()))
for i in range(0, g.vcount()):
    if g.vs.degree()[i] == 1:
        D_I[i][i] = 0
    else:
        D_I[i][i] = 1.0 / (g.vs.degree()[i] - 1)
D_ = np.zeros((g.vcount(), g.vcount()))
for i in range(0, g.vcount()):
    if g.vs.degree()[i] == 0:
        D_[i][i] = 0
    else:
        D_[i][i] = 1.0 / (g.vs.degree()[i])
I = np.identity(g.vcount())
Z = np.zeros((g.vcount(), g.vcount()))

M = np.bmat([[0.5 * np.dot(np.dot(A, D_I), np.subtract(I, D_)), 0.5 * I],
             [0.5 * I, 0.5 * np.dot(np.dot(A, D_I), np.subtract(I, D_))]])

#eigva, eigvec = tf.self_adjoint_eig(M)
#eigva, eigvec = np.linalg.eig(M)
#eigva, eigvec = eigh(M, eigvals=(size(M,1)-100,size(M,1)-1))
with tf.Session() as sess:
    #The eigsh function with 'LA' returns the largest eigenvalues, but in ascending order!
    eigva_result, eigvec_result = eigsh(M, 100, which='LA')

    #eigva_result, eigvec_result = sess.run([eigva,eigvec])
    print(eigva_result)
    np.savetxt(outfile, eigvec_result[:g.vcount(), 0:100][:, ::-1])