Example #1
def EigenValueGap(t):
    """Returns the eigenvalue gap in the second-order transition matrix of a 
    temporal network, as well as in the corresponding null model. Returns 
    the tuple (lambda(T2), lambda(T2_null))
    
    @param t: The temporalnetwork instance to work on
    """
    
    #NOTE to myself: most of the time goes for construction of the 2nd order
    #NOTE            null graph, then for the 2nd order null transition matrix
    
    g2 = t.igraphSecondOrder().components(mode="STRONG").giant()
    g2n = t.igraphSecondOrderNull().components(mode="STRONG").giant()
    
    Log.add('Calculating eigenvalue gap ... ', Severity.INFO)

    # Build transition matrices
    T2 = Utilities.RWTransitionMatrix(g2)
    T2n = Utilities.RWTransitionMatrix(g2n)
    
    # Compute eigenvector sequences
    # NOTE: ncv=13 sets additional auxiliary eigenvectors that are computed
    # NOTE: in order to be more confident to find the one with the largest
    # NOTE: magnitude, see
    # NOTE: https://github.com/scipy/scipy/issues/4987
    w2 = sla.eigs(T2, which="LM", k=2, ncv=13, return_eigenvectors=False)
    evals2_sorted = np.sort(-np.absolute(w2))

    w2n = sla.eigs(T2n, which="LM", k=2, ncv=13, return_eigenvectors=False)
    evals2n_sorted = np.sort(-np.absolute(w2n))

    Log.add('finished.', Severity.INFO)
    
    return (np.abs(evals2_sorted[1]), np.abs(evals2n_sorted[1]))
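A minimal standalone sketch of the same pattern (the imports and the random test matrix here are illustrative, not part of the project above): build a row-stochastic matrix, ask ARPACK for the two largest-magnitude eigenvalues with the enlarged ncv=13 basis, and keep |lambda_2|.

import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as sla

A = sp.random(50, 50, density=0.2, format='csr') + sp.identity(50)
T = sp.diags(1.0 / np.asarray(A.sum(axis=1)).ravel()) @ A  # row-stochastic

w = sla.eigs(T, which="LM", k=2, ncv=13, return_eigenvectors=False)
evals_sorted = np.sort(-np.absolute(w))  # negate to sort by descending magnitude
print(np.abs(evals_sorted[1]))           # |lambda_2|, which defines the gap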
Example #2
def speigen_range(matrix, retry=True, coerce=True):
    """
    Construct the eigenrange of a potentially sparse matrix.
    """
    if spar.issparse(matrix):
        try:
            emax = spla.eigs(matrix, k=1, which='LR')[0]
        except (spla.ArpackNoConvergence, spla.ArpackError) as e:
            rowsums = np.unique(np.asarray(matrix.sum(axis=1)).flatten())
            if np.allclose(rowsums, np.ones_like(rowsums)):
                emax = np.array([1])
            else:
                Warn('Maximal eigenvalue computation failed to converge'
                     ' and matrix is not row-standardized.')
                raise e
        emin = spla.eigs(matrix, k=1, which='SR')[0]
        if coerce:
            emax = emax.real.astype(float)
            emin = emin.real.astype(float)
    else:
        try:
            eigs = nla.eigvals(matrix)
            emin, emax = eigs.min().astype(float), eigs.max().astype(float)
        except Exception as e:
            Warn('Dense eigenvector computation failed!')
            if retry:
                Warn('Retrying with sparse matrix...')
                spmatrix = spar.csc_matrix(matrix)
                return speigen_range(spmatrix)
            else:
                Warn('Bailing...')
                raise e
    return emin, emax
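A short usage sketch (illustrative data; assumes the imports the function itself needs: numpy as np, scipy.sparse as spar, scipy.sparse.linalg as spla and numpy.linalg as nla). Row-standardizing the test matrix keeps ARPACK convergence likely, which is exactly the case the except branch is written for.

W = spar.random(200, 200, density=0.05, format='csr')
rowsums = np.asarray(W.sum(axis=1)).ravel()
rowsums[rowsums == 0] = 1.0
W = spar.diags(1.0 / rowsums) @ W  # row-standardize, so emax should be ~1

emin, emax = speigen_range(W)
print(emin, emax)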
Example #3
def SlowDownFactor(t):    
    """Returns a factor S that indicates how much slower (S>1) or faster (S<1)
    a diffusion process in the temporal network evolves on a second-order model 
    compared to a first-order model. This value captures the effect of order
    correlations on a diffusion process in the temporal network.
    
    @param t: The temporalnetwork instance to work on
    """
    
    #NOTE to myself: most of the time goes for construction of the 2nd order
    #NOTE            null graph, then for the 2nd order null transition matrix
    
    g2 = t.igraphSecondOrder().components(mode="STRONG").giant()
    g2n = t.igraphSecondOrderNull().components(mode="STRONG").giant()
    
    Log.add('Calculating slow down factor ... ', Severity.INFO)

    # Build transition matrices
    T2 = Utilities.RWTransitionMatrix(g2)
    T2n = Utilities.RWTransitionMatrix(g2n)
    
    # Compute eigenvector sequences
    # NOTE: ncv=13 sets additional auxiliary eigenvectors that are computed
    # NOTE: in order to be more confident to find the one with the largest
    # NOTE: magnitude, see
    # NOTE: https://github.com/scipy/scipy/issues/4987
    w2 = sla.eigs(T2, which="LM", k=2, ncv=13, return_eigenvectors=False)
    evals2_sorted = np.sort(-np.absolute(w2))

    w2n = sla.eigs(T2n, which="LM", k=2, ncv=13, return_eigenvectors=False)
    evals2n_sorted = np.sort(-np.absolute(w2n))

    Log.add('finished.', Severity.INFO)
    
    return np.log(np.abs(evals2n_sorted[1]))/np.log(np.abs(evals2_sorted[1]))
Example #4
 def test_initialize_reservoir(self):
     w1 = EchoStateNetwork.initialize_reservoir(10, 0.1, self.rs1, 1.0, 1.0)
     self.assertAlmostEqual(1.0, max(abs(eigs(w1)[0])))
     w2 = EchoStateNetwork.initialize_reservoir(10, 0.1, self.rs1, 1.0, 5.0)
     self.assertAlmostEqual(5.0, max(abs(eigs(w2)[0])))
     w3 = EchoStateNetwork.initialize_reservoir(10, 0.1, self.rs1, 1.0, 0.1)
     self.assertAlmostEqual(0.1, max(abs(eigs(w3)[0])))
Example #5
 def get_eigenvalues_v_cycle(self, nu0=0, nu1=1, all_eigvals=False, k_max=5, k_min=5):
     T, P_inv = self.get_v_cycle_it_matrix(nu0, nu1)
     if all_eigvals:
         return sp.linalg.eigvals(T.todense())
     else:
         eigval_list = sprsla.eigs(T, k=k_max, which='LM', return_eigenvectors=False).tolist()
         eigval_list.extend(sprsla.eigs(T, k=k_min, which='SM', return_eigenvectors=False))
         return eigval_list
Example #6
def hits(A, num=1):
    # https://cs7083.wordpress.com/2013/01/31/demystifying-the-pagerank-and-hits-algorithms/
    m, n = A.shape
    Hu = np.dot(A.T, A)
    Au = np.dot(A, A.T)
    w, a = eigs(Au, k=1, which='LM')
    w, h = eigs(Hu, k=1, which='LM')
    i, j = k_max_indices(a, num), k_max_indices(h, num)
    return i, j
Example #7
def dynmodes(Nsq, depth, nmodes):
    """Calculate the 1st nmodes ocean dynamic vertical modes
    given a profile of Brunt-Vaisala (buoyancy) frequencies squared.

    Based on
    http://woodshole.er.usgs.gov/operations/sea-mat/klinck-html/dynmodes.html
    by John Klinck, 1999.

    :arg Nsq: Brunt-Vaisala (buoyancy) frequencies squared in [1/s^2]
    :type Nsq: :class:`numpy.ndarray`

    :arg depth: Depths in [m]
    :type depth: :class:`numpy.ndarray`

    :arg nmodes: Number of modes to calculate
    :type nmodes: int

    :returns: :obj:`(wmodes, pmodes, rmodes), ce, (dz_w, dz_p)` (vertical velocity modes,
              horizontal velocity modes, vertical density modes), modal speeds,
              (box size of vertical velocity grid, box size of pressure grid)
    :rtype: tuple of :class:`numpy.ndarray`
    """
    if np.all(depth >= 0.):
        z = -depth
    else:
        z = depth

    nmodes = min((nmodes, len(z)-2))
    # 2nd derivative matrix plus boundary conditions
    d2dz2_w, dz_w = build_d2dz2_matrix_w(z)
    # N-squared diagonal matrix
    Nsq_mat = np.diag(Nsq)
    # Solve generalized eigenvalue problem for eigenvalues and vertical
    # velocity modes
    eigenvalues_w, wmodes = la.eigs(d2dz2_w, k=nmodes, M=Nsq_mat, which='SM')
    eigenvalues_w, wmodes = clean_up_modes(eigenvalues_w, wmodes, nmodes)
    # Horizontal velocity modes
    d2dz2_p, dz_p = build_d2dz2_matrix_p(z, Nsq)
    eigenvalues_p, pmodes = la.eigs(d2dz2_p, k=nmodes, which='SM')
    eigenvalues_p, pmodes = clean_up_modes(eigenvalues_p, pmodes, nmodes)
    nmodes = min(pmodes.shape[1], wmodes.shape[1])
    eigenvalues_p, eigenvalues_w, pmodes, wmodes = (
        eigenvalues_p[:nmodes], eigenvalues_w[:nmodes], pmodes[:, :nmodes], wmodes[:, :nmodes])
    
    # Vertical density modes
    rmodes = wmodes * Nsq[:, np.newaxis]
    # Modal speeds
    ce = 1 / np.sqrt(eigenvalues_p)
    print "Mode speeds do correspond: %s" % np.allclose(ce * np.sqrt(eigenvalues_w), 1.)
    # unify sign, that pressure modes are alwas positive at the surface
    modes = unify_sign(wmodes, pmodes, rmodes)
    return modes, ce, (dz_w, dz_p)
Example #8
    def fit(self, t1, t2):
        assert self.Ais is not None, "!!!! First, distribute rows of A using disRand or disBAM"
        d = self.d # number of distributed matrices
        m = np.shape(self.A)[1] # dimension of row space
        Bis = [None] * d # outputs of local PCA
        Atis = [None] * d # rank t1 approximation of each Ai

        # local PCA
        for i in range(d):
            # U, S, Vh = svd(Ais[i])
            # Bis[i] = np.diag(S[:t1]).dot(Vh[:t1,:])
            # Atis[i] = U[:,:t1].dot(Bis[i])
            ni = self.Ais[i].shape[0]
            if t1 < ni: # Target rank t1 is less than Number of Rows
                U, S, Vt = svds(self.Ais[i], k=t1)
                Bis[i] = np.diag(S).dot(Vt)
                Atis[i] = U.dot(Bis[i])
            else: # Number of Rows is less than t1
                U, S, Vt = svd(self.Ais[i])
                Bis[i] = np.diag(S[:ni]).dot(Vt[:ni,:])
                Atis[i] = self.Ais[i]
    
        # global PCA
        K = np.zeros((m,m))
        for i in range(d):
            K += Bis[i].T.dot(Bis[i])
        
        # L,Q = eig(K)
        # C = Q[:,:t2]
        # C = C.real
        L,Q = eigs(K, k=t2)
        self.C = Q.real
        self.Bis = Bis
        self.Atis = Atis
Example #9
def rv_pca(data, n_datasets):
    """
    Get weights for tables by calculating their similarity.

    :return:
    """

    print("Rv-PCA")
    C = np.zeros([n_datasets, n_datasets])

    print("Rv-PCA: Hilbert-Schmidt inner products... ", end='')
    for i in range(n_datasets):
        for j in range(n_datasets):
            C[i, j] = np.sum(data[i].affinity_ * data[j].affinity_)

    print('Done!')

    print("Rv-PCA: Decomposing the inner product matrix... ", end ='')
    e, u = eigs(C, k=8)
    print("Done!")
    weights = (np.real(u[:,0]) / np.sum(np.real(u[:,0]))).flatten()

    print("Rv-PCA: Done!")

    return weights, np.real(e), np.real(u)
Example #10
def calc_pca(feature):
    # Filter out super high numbers due to some instability in the network
    feature[feature>5] = 5
    feature[feature<-5] = -5
    #### Missing an image guided filter with the image as input
    ##
    ##########
    # change to double precision
    feature = np.float64(feature)
    # retrieve size of feature array
    shape = feature.shape
    [h, w, d] = feature.shape
    # resize to a two-dimensional array
    feature = np.reshape(feature, (h*w,d))
    # calculate average of each column
    featmean = np.average(feature,0)
    onearray = np.ones((h*w,1))
    featmeanarray = np.multiply(np.ones((h*w,1)),featmean)
    feature = np.subtract(feature,featmeanarray)
    feature_transpose = np.transpose(feature)
    cover = np.dot(feature_transpose, feature)
    # get the three largest-magnitude eigenvectors of the covariance matrix
    val,vecs = eigs(cover, k=3, which='LM')
    pcafeature = np.dot(feature, vecs)
    pcafeature = np.reshape(pcafeature,(h,w,3))
    pcafeature = np.float64(pcafeature)
    return pcafeature
Example #11
def GetEigs(T, k, P, take_diagonal=0):
	""" return k largest magnitude eigenvalues for the matrix T.
	:param T: Matrix to find eigen values/vectors of
	:param k: number of eigen values/vectors to return
	:param P: in the case of symmetric normalizations, 
	this is the NxN diagonal matrix which relates the nonsymmetric 
	version to the symmetric form via conjugation
	:param take_diagonal: if 1, returns the eigenvalues as a vector rather than as a diagonal matrix.
	"""
	D, V = eigs(T, k, tol=1e-4, maxiter=1000)
	D = np.real(D)
	V = np.real(V)
	inds = np.argsort(D)[::-1]
	D = D[inds]
	V = V[:, inds]
	if P is not None:
	    V = P.dot(V)

	# Normalize
	for i in range(V.shape[1]):
	    V[:, i] = V[:, i] / norm(V[:, i])
	V = np.round(V, 10)

	if take_diagonal == 0:
		D = np.diag(D)
		
	return V, D
Example #12
def principal_eigenvector(a):
    """Get eigenvector of square matrix `a`.

    Parameters
    ----------
    a : numpy.ndarray, shape = [n, n]
        Given matrix.

    Returns
    -------
    numpy.ndarray, shape = [n, ]
        Eigenvector of matrix `a`.

    """
    # Note that we prefer to use `eigs` even for dense matrix
    # because we need only one eigenvector. See #441, #438 for discussion.

    # But it doesn't work for dim A < 3, so we just handle this special case
    if len(a) < 3:
        vals, vecs = eig(a)
        ind = numpy.abs(vals).argmax()
        return vecs[:, ind]
    else:
        vals, vecs = eigs(a, k=1)
        return vecs[:, 0]
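Usage sketch (assumes `import numpy` plus the `eig`/`eigs` imports the snippet's names suggest, e.g. from numpy.linalg and scipy.sparse.linalg):

a = numpy.array([[2.0, 1.0, 0.0],
                 [1.0, 3.0, 1.0],
                 [0.0, 1.0, 2.0]])
v = principal_eigenvector(a)   # 3x3, so the ARPACK branch is taken
print(numpy.abs(v))            # dominant eigenvector, up to sign/phase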
Example #13
def est_CompGraph_norm(K, tol=1e-3, try_fast_norm=True):
    """Estimates operator norm for L = ||K||.

    Parameters
    ----------
    tol : float
        Accuracy of estimate if not trying for upper bound.
    try_fast_norm : bool
        Whether to try for a fast upper bound.

    Returns
    -------
    float
        Estimate of ||K||.
    """
    if try_fast_norm:
        output_mags = [NotImplemented]
        K.norm_bound(output_mags)
        if NotImplemented not in output_mags:
            return output_mags[0]

    input_data = np.zeros(K.input_size)
    output_data = np.zeros(K.output_size)

    def KtK(x):
        K.forward(x, output_data)
        K.adjoint(output_data, input_data)
        return input_data

    # Define linear operator
    A = LinearOperator((K.input_size, K.input_size),
                       KtK, KtK)

    Knorm = np.sqrt(eigs(A, k=1, M=None, sigma=None, which='LM', tol=tol)[0].real)
    return float(Knorm)
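The same LinearOperator-plus-eigs pattern on a concrete matrix (a sketch, not the project's API): estimate ||A||_2 as the square root of the largest eigenvalue of A^T A, evaluated matrix-free.

import numpy as np
from scipy.sparse.linalg import LinearOperator, eigs

A = np.random.randn(80, 60)
op = LinearOperator((60, 60), matvec=lambda x: A.T @ (A @ x))
norm_est = np.sqrt(eigs(op, k=1, which='LM', tol=1e-3)[0].real[0])
print(norm_est, np.linalg.norm(A, 2))  # should agree to roughly the requested tol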
Example #14
 def go(self,K=100, Y=0.6, DI=500, FREQ=18):
     print(self._sourceDomain)
     print(self._targetDomain)
     domainIndependentFeatures, domainDependentFeatures = self._getFeatures(DI,FREQ)
     numDomainIndep = len(domainIndependentFeatures)
     numDomainDep = len(domainDependentFeatures)
     print("number of independent " + str(numDomainIndep) + " number of dependent " + str(numDomainDep))
     print("creating cooccurrenceMatrix...")
     a = self._createCooccurrenceMatrix(domainIndependentFeatures, domainDependentFeatures)
     print("creating SquareAffinityMatrix...")
     a = self._createSquareAffinityMatrix(a)
     print("creating DiagonalMatrix...")
     b = self._createDiagonalMatrix(a)
     print("multiplying...")
     c = b.dot(a)
     del a
     c = c.dot(b)
     del b
     print("calculating eigenvalues and eigenvectors")
     eigenValues,eigenVectors = eigs(c, k=K, which="LR")
     del c
     print("building document vectors...")
     documentVectorsTraining,classifications = self._createDocumentVectors(domainDependentFeatures, domainIndependentFeatures,self._sourceDomain)
     documentVectorsTesting,classificationsTest = self._createDocumentVectors(domainDependentFeatures, domainIndependentFeatures,self._targetDomain)
     print("training and testing...")
     self._lsvc = LinearSVC(C=10000)
     U  = [eigenVectors[:,x].reshape(np.size(eigenVectors,0),1) for x in eigenValues.argsort()]
     U = np.concatenate(U,axis=1)[:numDomainDep]
     clustering = [vector[1].dot(U).dot(Y).astype(np.float64) for vector in documentVectorsTraining]
     trainingVectors = [np.concatenate((documentVectorsTraining[x][0],documentVectorsTraining[x][1],clustering[x])) for x in range(np.size(documentVectorsTraining,axis=0))]
     self._trainClassifier(trainingVectors,classifications)
     clustering = [vector[1].dot(U).dot(Y).astype(np.float64) for vector in documentVectorsTesting]
     testVectors = [np.concatenate((documentVectorsTesting[x][0],documentVectorsTesting[x][1],clustering[x])) for x in range(np.size(documentVectorsTesting,axis=0))]
     # measure accuracy against the test-set labels (fixes the original
     # 'classificatons' typo, which silently reused the training labels here)
     print("accuracy: %f with K=%i AND DI=%i AND Y=%f" % (self._testClassifier(testVectors,classificationsTest),K,DI,Y))
Example #15
    def predict(self):

        m, n = self.A.shape # m observations

        convgraph = np.zeros(self.maxiter // 25)
        prevdist = 0.
        converged = False

        eps = 1e-6

        dd = np.array(self.A.sum(1))[:,0]
        D = diags(dd,0, format="csr")

        m, n = self.A.shape


        # random initialization, will initialize with K-means if told to
        H = csr_matrix(np.random.rand(m, self.k))

        EPS = csr_matrix(np.ones(H.shape) * eps)

        if self._embedding:
            # Apply eigenspace embedding K-means for initialization (Ng Weiss Jordan)

            Dz = diags(1 / (np.sqrt(dd) + eps), 0, format="csr")
            DAD = Dz.dot(self.A).dot(Dz)

            V = eigs(DAD, self.k)[1].real
            km_data = V / (np.linalg.norm(V, 2, axis=1).T * np.ones((self.k,1))).T

            km_predict = KMeans(n_clusters=self.k).fit_predict(km_data)

            indices = km_predict
            indptr = range(len(indices)+1)
            data = np.ones(len(indices))
            H = csr_matrix((data, indices, indptr))

        # Run separately for sparse and dense versions

        for i in range(self.maxiter):

            AH = self.A.dot(H)
            alpha = H.T.dot(AH)

            M1 = AH + EPS
            M2 = D.dot(H).dot(alpha) + EPS

            np.reciprocal(M2.data, out=M2.data)
            d1 = M1.multiply(M2).sqrt()

            H = H.multiply(d1)

            if i % 25 == 0:
                dist = sptrace(alpha)
                convgraph[i // 25] = dist

                diff = dist / prevdist - 1
                prevdist = dist

        return NMFResult((H.toarray(),), convgraph, prevdist)
Example #16
def computeAbsoluteLimitingLinearCoefficient(n,multiplyO,multiplyN,multiplyL,multiplyR): # {{{
    if True: # n <= 3:
        matrix = []
        for i in range(n):
            matrix.append(multiplyO(array([0]*i+[1]+[0]*(n-1-i))))
        matrix = array(matrix)
        evals = eigvals(matrix)
        lam = evals[argmax(abs(evals))]
        tmatrix = matrix-lam*identity(n)
        ovecs = svd(dot(tmatrix,tmatrix))[-1][-2:]
        assert ovecs.shape == (2,n)
    else:
        ovals, ovecs = eigs(LinearOperator((n,n),matvec=multiplyO),k=2,which='LM',ncv=9)
        ovecs = ovecs.transpose()

    Omatrix = zeros((2,2),dtype=complex128)
    for i in range(2):
        for j in range(2):
            Omatrix[i,j] = dot(ovecs[i].conj(),multiplyO(ovecs[j]))
    numerator = sqrt(trace(dot(Omatrix.transpose().conj(),Omatrix))-2)

    lnvecs = multiplyL(ovecs)
    rnvecs = multiplyR(ovecs)
    Nmatrix = zeros((2,2),dtype=complex128)
    for i in range(2):
        for j in range(2):
            Nmatrix[i,j] = dot(lnvecs[i].conj(),multiplyN(rnvecs[j]))
    denominator = sqrt(trace(dot(Nmatrix.transpose().conj(),Nmatrix)))
    return numerator/denominator
Example #17
 def fit(self, X, Y):
     self.N = min(self.N, X.shape[1]-2)
     y_max = int(np.max(Y) + 1)
     self.W = np.zeros((X.shape[1], self.N*y_max*(y_max-1)), dtype=X.dtype)
     off = 0
     for i in range(y_max):
         Xi = X[Y == i]
         covi = np.dot(Xi.T, Xi)
         covi /= np.float32(Xi.shape[0])
         for j in range(y_max):
             if j == i:
                 continue
             if self.verbose:
                 print("Finding eigenvectors for pair ({}/{})".format(i,j))
             Xj = X[Y == j]
             covj = np.dot(Xj.T, Xj) / np.float32(Xj.shape[0])
             E = np.linalg.pinv(np.linalg.cholesky(covj + np.eye(covj.shape[0]) * self.precond).T)
             C = np.dot(np.dot(E.T, covi), E)
             C2 = 0.5 * (C + C.T)
             S,U = eigs(C2, self.N)
             gev = np.dot(E, U[:, :self.N])
             self.W[:, off:off+self.N] = np.real(gev)
             off += self.N
     if self.verbose:
         print("DONE")
     return self
Example #18
def inspect_coarse_graining(num_intervals, num_eigenvectors, g):
    """ Prints some quantitative comparisons between the 
        coarse grained and non - coarse grained graphs.
    """
    
    A = nx.adjacency_matrix(g)
    W_tilde = coarse_grain_W(num_intervals, num_eigenvectors,g)
    A = A / np.sum(A, 0)
    A = np.nan_to_num(A)
    print('Dimension [After, Before]: ' + str([W_tilde.shape[0], A.shape[0]]))
    l = eig(A, right = False)
    l_tilde = eigs(W_tilde, k = W_tilde.shape[0] - 2, which = 'LR', return_eigenvectors=False)
    l_tilde.sort()
    l_tilde = l_tilde[::-1]
    l.sort()
    l = l[::-1]
    print('Eigenvalues Before: ' + str(l[:num_eigenvectors+1]))
    print('Eigenvalues After: ' + str(l_tilde[:num_eigenvectors+1]))
    print('% Difference in eigenvalues: ' + str(100*abs(l_tilde[:num_eigenvectors+1] - l[:num_eigenvectors+1]) / abs(l[:num_eigenvectors+1])))
    plt.figure()
    plt.xlabel('Alpha')

    plt.ylabel('Eigenvalue')
    plt.title('For ' + str(num_intervals) + ' intervals')
    plt.plot(l_tilde[:num_eigenvectors+8], 'ko')
    plt.plot(l[:num_eigenvectors+8], 'ro')
    plt.legend()
Example #19
def spectral_radius(net, typed=True, weighted=True):
    '''
    Spectral radius of the graph, defined as the eigenvalue of greatest module.
    
    Parameters
    ----------
    net : :class:`~nngt.Graph` or subclass
        Network to analyze.
    typed : bool, optional (default: True)
        Whether the excitatory/inhibitory type of the connections should be
        considered.
    weighted : bool, optional (default: True)
        Whether the weights should be taken into account.
    
    Returns
    -------
    the spectral radius as a float.
    '''
    weights = None
    if typed and "type" in net.graph.eproperties.keys():
        weights = net.eproperties["type"].copy()
    if weighted and "weight" in net.graph.eproperties.keys():
        if weights is not None:
            weights = sp.multiply(weights,
                                  net.graph.eproperties["weight"])
        else:
            weights = net.graph.eproperties["weight"].copy()
    mat_adj = adjacency(net.graph,weights)
    eigenval = [0]
    try:
        eigenval = spl.eigs(mat_adj,return_eigenvectors=False)
    except spl.eigen.arpack.ArpackNoConvergence as err:
        eigenval = err.eigenvalues
    if len(eigenval):
        return np.amax(np.absolute(eigenval))
    else:
        raise spl.eigen.arpack.ArpackNoConvergence()
Example #20
def lsv_operator(A, N):
    """Computes largest singular value of AN
    
    Computation is done without computing AN or (AN)^T(AN)
    by using functions that act as these linear operators on a vector
    """

    # Build linear operator for AN
    def matmuldyad(v):
        return A.dot(N.dot(v))

    def rmatmuldyad(v):
        return N.T.dot(A.T.dot(v))
    normalized_lin_op = scipy.sparse.linalg.LinearOperator((A.shape[0], N.shape[1]), matmuldyad, rmatmuldyad)

    # Given v, computes (N^TA^TAN)v
    def matvec_XH_X(v):
        return normalized_lin_op.rmatvec(normalized_lin_op.matvec(v))

    which='LM'
    v0=None
    maxiter=None
    return_singular_vectors=False

    # Builds linear operator object
    XH_X = scipy.sparse.linalg.LinearOperator(matvec=matvec_XH_X, dtype=A.dtype, shape=(N.shape[1], N.shape[1]))
    # Computes eigenvalues of (N^TA^TAN), the largest of which is the LSV of AN
    eigvals = sla.eigs(XH_X, k=1, tol=0, maxiter=None, ncv=10, which=which, v0=v0, return_eigenvectors=False)
    lsv = np.sqrt(eigvals)
    # Take largest one
    return lsv[0].real
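A quick consistency check for lsv_operator against a dense SVD (usage sketch; assumes `import numpy as np`, `import scipy.sparse.linalg` and `import scipy.sparse.linalg as sla`, matching the names the function uses):

A = np.random.randn(30, 20)
N = np.random.randn(20, 10)
print(lsv_operator(A, N))                         # matrix-free estimate
print(np.linalg.svd(A @ N, compute_uv=False)[0])  # dense reference value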
Example #21
def spectral_partition(W,q,method = 'complete', metric = 'cosine'):

    n,m = W.shape
    K = Kmatrix(W)

    if n == m:
        try:
            e,v = linalg.eigen(K, q)
        except TypeError:
            e,v = linalg.eigs(K, q)

    else:
        try:
            u,e,v = linalg.svds(K, q)
        except AttributeError:
            u,e,v = linalg.svd(K, q)
           
        v = np.concatenate((u, v.T), 0)
                
    max_index = e.argmax()
    v = np.delete(v,max_index,1)
    Obs = np.real(v)
    D = distance.pdist(Obs,metric = metric)
    D = np.multiply(D >= 0, D)
    Z = linkage(D, method = method, metric = metric)
    cluster = fcluster(Z, q, criterion = 'maxclust')
            
    cluster += - 1
    cluster = {'spectral' : cluster}

    return cluster
Example #22
def problem2(l):
    '''
    print the solution to the second problem in Beam Buckling
    Inputs:
        l -- length of beam in feet
    '''
    # initialize constants, unit conversions
    r = 1.0
    E1 = r*12**2*10**7
    E2 = 4.35*r*12**2*10**6
    E3 = 5*r*12**2*10**5
    L = 20.0
    I = np.pi*r**4/4
    n = 100
    h = L/n
    
    # build the sparse matrix B
    b_diag = np.ones(n)
    b_diag[0:n//3] = E1*I/h**2
    b_diag[n//3:2*n//3] = E2*I/h**2
    b_diag[2*n//3:] = E3*I/h**2
    B = spar.spdiags(b_diag, np.array([0]), n, n, format='csc')

    # build the sparse matrix A
    diag = -2*np.ones(n)
    odiag = np.ones(n)
    A = spar.spdiags(np.vstack((-odiag, -odiag, -diag)),
                     np.array([-1,1,0]), n, n, format='csc')
    
    # calculate and print the smallest eigenvalue                 
    evals = sparla.eigs(B.dot(A), which='SM')
    print(evals[0].min())
Example #23
def _compute_embedding(W, k, symmetric=True):
    """Calculates a partial ('k'-dimensional) eigendecomposition of W by first transforming into a self-adjoint matrix and then using the Lanczos algorithm.

    Args:
        W (array): symmetric, shape (npts, npts) array in which W[i,j] is the DMAPS kernel evaluation for points i and j
        k (int): the number of eigenvectors and eigenvalues to compute
        symmetric (bool): indicates whether the Markov matrix is symmetric or not. During standard usage with the default kernel, this will be true, allowing for accelerated numerics. **However, if using custom_kernel(), this property may not hold.**

    Returns:
        eigvals (array): shape (k) vector with the first 'k' eigenvalues of the DMAPS embedding, sorted from largest to smallest
        eigvects (array): shape ("number of data points", k) array with the k-dimensional DMAPS-embedding eigenvectors. eigvects[:,i] corresponds to the eigenvector of the :math:`i^{th}`-largest eigenvalue, eigval[i].
    """
    m = W.shape[0]
    # diagonal matrix D, inverse, sqrt
    D_half_inv = np.identity(m)/np.sqrt(np.sum(W,1))
    # transform into self-adjoint matrix and find partial eigendecomp of this transformed matrix
    eigvals, eigvects = None, None
    if symmetric:
        eigvals, eigvects = spla.eigsh(np.dot(np.dot(D_half_inv, W), D_half_inv), k=k) # eigsh (eigs hermitian)
    else:
        eigvals, eigvects = spla.eigs(np.dot(np.dot(D_half_inv, W), D_half_inv), k=k) # eigs (plain eigs)
    # transform eigenvectors to match W
    eigvects = np.dot(D_half_inv, eigvects)
    # sort eigvals and corresponding eigvects from largest to smallest magnitude  (reverse order)
    sorted_indices = np.argsort(np.abs(eigvals))[::-1]

    eigvals = eigvals[sorted_indices]
    # also scale eigenvectors to norm one
    eigvects = eigvects[:, sorted_indices]/np.linalg.norm(eigvects[:, sorted_indices], axis=0)
    return eigvals, eigvects
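Usage sketch on a synthetic Gaussian kernel (illustrative data; assumes `import numpy as np` and `import scipy.sparse.linalg as spla` as in the snippet):

pts = np.random.rand(100, 2)
d2 = np.sum((pts[:, None, :] - pts[None, :, :])**2, axis=-1)
W = np.exp(-d2 / 0.1)                 # symmetric DMAPS-style kernel matrix
eigvals, eigvects = _compute_embedding(W, k=5)
print(eigvals.shape, eigvects.shape)  # (5,) (100, 5)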
Example #24
def find_phase_bounds(lead, p, B, k=0, num_bands=20):
    """Find the phase boundaries.

    Solve an eigenproblem that finds values of chemical potential at which the
    gap closes at momentum k=0. We are looking for all real solutions of the
    form H\psi=0 so we solve sigma_0 * tau_z H * psi = mu * psi.

    Parameters:
    -----------
    lead : kwant.builder.InfiniteSystem object
        The finalized infinite system.
    p : types.SimpleNamespace object
        A simple container that is used to store Hamiltonian parameters.
    B : tuple of floats
        A tuple that contains magnetic field strength in x, y and z-directions.
    k : float
        Momentum value, by default set to 0.

    Returns:
    --------
    chemical_potential : numpy array
        Twenty values of chemical potential at which a bandgap closes at k=0.
    """
    p.B_x, p.B_y, p.B_z = B
    h, t = lead.cell_hamiltonian(args=[p]), lead.inter_cell_hopping(args=[p])
    tk = lambda k: t * np.exp(1j * k)
    h_k = lambda k: h + tk(k) + tk(k).T.conj()
    sigma_z = np.array([[1, 0], [0, -1]])
    chemical_potentials = np.kron(np.eye(len(h) // 2), sigma_z) @ h_k(k)
    return sla.eigs(chemical_potentials, k=num_bands, sigma=0)[0]
Example #25
    def get_spec(self, k0, get_wf=False, num_eigs=200):

        m0 = self.m0#.tocsr()
        mI = self.mI
        mIT = self.mI.conjugate().transpose() #mlil[self.Ny:2*self.Ny,:self.Ny].tocsr()

        #print 'mI', mI
        #print 'mIT', mIT.transpose()

        dz = self.coords[self.Ny][0] - self.coords[0][0]
        #print dz
        bloch_phase = cmath.exp(1j * k0 * dz)
        A = m0+ bloch_phase * mI + mIT / bloch_phase
        n_eigs=num_eigs
        #w, v = np.linalg.eig(A)
        if m0.shape[0] <= n_eigs:
            n_eigs = m0.shape[0]-2

        w,v = linalg.eigs(A, k=n_eigs, sigma=0)
        #w,v = np.linalg.eig(A.todense())

        if get_wf:
            wE, wV = self.__sort_spec(w, v, sortv=True)
            return wE, wV
        else:
            wE = self.__sort_spec(w)[0]
            return wE
Example #26
def linearOrdering(matW):
    '''
    Uses a spectral clustering algorithm to return a linear ordering.
    :param matW: similarity matrix: a square numpy array of pairwise similarities
    :return: tuple (ordering, matW_ordered): the ordering has one index per row of
        the input, and matW_ordered is the similarity matrix reordered accordingly.
    '''

    sh = np.shape(matW)

    assert sh[0] == sh[1]
    assert (matW == matW.T).all()

    invRootMatD = np.diagflat(1/(np.sqrt(matW.sum(axis=1))))  # invRootMatD = D^-1/2

    matA = np.dot(invRootMatD, np.dot(matW, invRootMatD))  # matA = D^-1/2*W*D^-1/2

    w, v = eigs(matA, k=2, which='LM')

    z1 = v[:, 1]  # second largest eigenvector

    q1 = np.dot(invRootMatD, z1)

    ordering = np.argsort(q1)  # indices that sort q1

    matW_rowOrdered = matW[ordering, :]

    matW_ordered = matW_rowOrdered[:, ordering]

    return ordering, matW_ordered
Example #27
    def test_minres_deflation(self):
        # Create sparse symmetric problem.
        num_unknowns = 100
        A = self._create_sparse_herm_indef_matrix(num_unknowns)
        rhs = np.ones( (num_unknowns,1) )
        x0 = np.zeros( (num_unknowns,1) )

        # get projection
        from scipy.sparse.linalg import eigs
        num_vecs = 6
        D, W = eigs(A, k=num_vecs, v0=np.ones((num_unknowns,1)))

        # Uncomment next lines to perturb W
        #W = W + 1.e-10*np.random.rand(W.shape[0], W.shape[1])
        #from scipy.linalg import qr
        #W, R = qr(W, mode='economic')

        AW = nm._apply(A, W)
        P, x0new = nm.get_projection( W, AW, rhs, x0 )

        # Solve using MINRES.
        tol = 1.0e-9
        out = nm.minres( A, rhs, x0new, Mr=P, tol=tol, maxiter=num_unknowns-num_vecs, full_reortho=True, return_basis=True )

        # TODO: move to new unit test
        o = nm.get_p_ritz( W, AW, out['Vfull'], out['Hfull'] )
        ritz_vals, ritz_vecs = nm.get_p_ritz( W, AW, out['Vfull'], out['Hfull'] )

        # Make sure the method converged.
        self.assertEqual(out['info'], 0)
        # Check the residual.
        res = rhs - A * out['xk']
        self.assertAlmostEqual( np.linalg.norm(res)/np.linalg.norm(rhs), 0.0, delta=tol )
Example #28
    def hotCmntsForTest(self, postId, nCmnts = 5):
        self.buildgraph(postId)
        
        testsizes = [shape(self.prg)[0], 800, 600, 400, 200]
        
        for size in testsizes:
            
            self.prg = self.prg[0:size,0:size]
            lil = lil_matrix(self.prg)
            
            start = clock()
            #eig  = eigs(self.prg, k=1, return_eigenvectors =False)
            eig = eigs(lil, return_eigenvectors =False, maxiter=10, tol=1E-5)
            eig = eig[0].real
            eig = 1/eig
            eigTime = clock() - start            
            print('test_size:', size, 'eigTime:', eigTime)

            one = ones(size)
            m = eye(size) - eig*lil  
            
            start = clock()
            cmnts_ranking = lu_solve((m, one), one)
            solveTime = clock() - start
            
            print('test_size:', size, 'solveTime:', solveTime)
Example #29
 def calc_fisher_weight_vector(self, X, y):
     between_class_variance = self.calc_between_class_variance(X, y)
     within_class_variance = self.calc_within_class_variance(X, y)
     tmp_matrix = mat(np.linalg.inv(within_class_variance)) * mat(between_class_variance)
     w, v = eigs(tmp_matrix, k=self.y_vals.size - 1)
     # print(w.real)
     return v.real
Example #30
def AlgebraicConn(temporalnet, model="SECOND"):
    """Returns the algebraic connectivity of the second-order (model=SECOND) or the
    second-order null (model=NULL) model for a temporal network.
    
     @param temporalnet: The temporalnetwork to work on
     @param model: either C{"SECOND"} or C{"NULL"}, where C{"SECOND"} is
      the default value.
    """
    
    if (model is "SECOND" or "NULL") == False:
        raise ValueError("model must be one of \"SECOND\" or \"NULL\"")
    
    Log.add('Calculating algebraic connectivity ... ', Severity.INFO)

    L = Laplacian(temporalnet, model)
    # NOTE: ncv=13 sets additional auxiliary eigenvectors that are computed
    # NOTE: in order to be more confident to find the one with the largest
    # NOTE: magnitude, see
    # NOTE: https://github.com/scipy/scipy/issues/4987
    w = sla.eigs( L, which="SM", k=2, ncv=13, return_eigenvectors=False )
    evals_sorted = np.sort(np.absolute(w))

    Log.add('finished.', Severity.INFO)

    return np.abs(evals_sorted[1])
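A standalone sketch of the same eigenvalue call on an explicit Laplacian (Laplacian(), Log and Severity above belong to the surrounding project; the random graph here is illustrative, and which="SM" can be slow to converge on larger problems):

import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as sla
from scipy.sparse.csgraph import laplacian

A = sp.random(100, 100, density=0.05, format='csr')
A = A + A.T                        # symmetrize: an undirected weighted graph
L = laplacian(A)
w = sla.eigs(L, which="SM", k=2, ncv=13, return_eigenvectors=False)
print(np.sort(np.absolute(w))[1])  # second-smallest magnitude: algebraic connectivity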
Example #31
 def spectral_clustering(self, cluster_num):
     W = self.graph_to_matrix()
     tmp = numpy.divide(1, numpy.sqrt(W.sum(1)))
     #tmp = W.sum(1)
     for n in range(len(tmp)):
         if tmp[n] == numpy.inf:
             tmp[n] = 0
     D = lil_matrix((self.nodes_size, self.nodes_size))
     D.setdiag(tmp)
     L = D * W * D
     lam, u = eigs(L, k=cluster_num, which='LR')
     '''
     for i in xrange(u.shape[0]):
         tmp = numpy.linalg.norm(u[i, :])
         u[i, :] = [j / tmp for j in u[i, :]]
     '''
     #convert u type to float32
     centroid, variance = kmeans(u.astype(numpy.float32), cluster_num, iter=100)
     label, distance = vq(u, centroid)
     self.label = dict()
     for node in self.network.nodes():
         self.label[node] = label[self.nodes_to_matrix_index[node]]
     return self.label
Example #32
    def _initialize_internal_weights(self, n_internal_units, connectivity, spectral_radius):
        # The eigs function might not converge. Attempt until it does.
        convergence = False
        while (not convergence):
            # Generate sparse, uniformly distributed weights.
            internal_weights = sparse.rand(n_internal_units, n_internal_units, density=connectivity).todense()

            # Ensure that the nonzero values are uniformly distributed in [-0.5, 0.5]
            internal_weights[np.where(internal_weights > 0)] -= 0.5

            try:
                # Get the largest eigenvalue
                w,_ = slinalg.eigs(internal_weights, k=1, which='LM')

                convergence = True

            except:
                continue

        # Adjust the spectral radius.
        internal_weights /= np.abs(w)/spectral_radius

        return internal_weights
Example #33
def functionQ1d(numev=3, numW=100):
    #import numpy as np
    import matplotlib.pyplot as plt
    from scipy.sparse.linalg import eigs
    N = 199
    alpha = 1.618034
    psi = 0.0
    W = np.linspace(1.2, 2.5, numW)
    IPR_allW = []
    plt.figure(figsize=(10, 10))
    for w in W:
        hamiltonian_matrix = harperHamiltonian(N, W=w, psi=psi, alpha=alpha)
        energy, wavefunc = eigs(hamiltonian_matrix, k=numev, sigma=-1.0)
        #wave_f.append(wavefunc)
        #label_fig = "Ground State, W = " + str(w)
        IPR_W = IPR(wavefunc)
        IPR_allW.append(IPR_W)
        #plt.plot(np.arange(0,N), abs(wavefunc[:,0])**2, label = label_fig)
    plt.plot(W, IPR_allW)
    plt.xlabel('W')
    plt.ylabel('IPR')
    plt.title('IPR vs W')
    return [W, IPR_allW]
Example #34
def scaled_Laplacian(W):
    '''
    compute \tilde{L}

    Parameters
    ----------
    W: np.ndarray, shape is (N, N), N is the num of vertices

    Returns
    ----------
    scaled_Laplacian: np.ndarray, shape (N, N)

    '''

    assert W.shape[0] == W.shape[1]

    D = np.diag(np.sum(W, axis=1))

    L = D - W

    lambda_max = eigs(L, k=1, which='LR')[0].real

    return (2 * L) / lambda_max - np.identity(W.shape[0])
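Usage sketch (assumes `import numpy as np` and `from scipy.sparse.linalg import eigs`, which the snippet itself requires):

n = 5
W = np.zeros((n, n))
for i in range(n):  # 5-node ring as a toy adjacency matrix
    W[i, (i + 1) % n] = W[(i + 1) % n, i] = 1.0

L_tilde = scaled_Laplacian(W)
print(np.linalg.eigvals(L_tilde).real)  # rescaled spectrum lies within [-1, 1]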
Example #35
    def initialize_reservoir(n_units, density, randomstate, scale, spec_rad):
        """
        Initialize a sparse random reservoir as a square matrix representing connections among
        the n_units neurons with connections having specified density in range [-scale, scale].

        The weights are generated until they achieve a spectral radius of at least 0.01;
        due to the iterative nature of scipy.sparse.linalg.eigs, values under this threshold
        are unstable and do not produce consistent results over time.

        Keyword arguments:
        n_units     -- number of reservoir nodes
        density     -- density of connections (default 0.1)
        randomstate -- RandomState object for random number generation (default RandomState(1))
        scale       -- absolute value of minimum/maximum weight value (default 1.0)
        spec_rad    -- desired spectral radius to scale to (default 1.0)
        """
        while True:
            weights = EchoStateNetwork.initialize_weights(n_units, n_units, density, randomstate, scale)
            if max(abs(eigs(weights)[0])) >= 0.01:
                break

        weights = EchoStateNetwork.scale_spectral_radius(weights, spec_rad)
        return weights
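EchoStateNetwork.scale_spectral_radius is referenced but not shown; a minimal sketch, assuming it simply rescales the weights so that the largest eigenvalue magnitude equals spec_rad (a hypothetical stand-in, not the project's verified code):

import numpy as np
from scipy.sparse.linalg import eigs

def scale_spectral_radius(weights, spec_rad):
    # Assumed behavior: rescale so that max |eigenvalue| == spec_rad.
    # (eigs' default k=6 needs at least 8 reservoir units.)
    current = max(abs(eigs(weights)[0]))
    return weights * (spec_rad / current)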
Example #36
def connected_components(network, lanczos_vecs=None, maxiter=None):
    """
    Calculates connected components based on the spectrum of the Laplacian matrix
    """
    L = network.laplacian_matrix(weighted=True)
    n = network.ncount() - 2
    if lanczos_vecs is None:
        lanczos_vecs = min(n, max(2 * n + 1, 20))
    if maxiter is None:
        maxiter = n * 10
    vals, vecs = _sla.eigs(L, k=n, which="SM", ncv=lanczos_vecs, maxiter=maxiter, return_eigenvectors=True)

    components = defaultdict(set)
    c = 0

    # use eigenvectors of zero eigenvalues to map nodes to components
    for i in range(n):
        if _np.isclose(vals[i], 0, atol=1.e-12):
            min_v = _np.min(vecs[:, i])
            for j in _np.where(_np.isclose(vecs[:, i], min_v))[0]:
                components[c].add(j)
            c += 1
    return components
Example #37
def solve_matrix(matrix):
    #n = matrix.shape[0];
    if (False):
        e_vals, e_vecs = sparce_linalg.eigs(matrix)

    if (True):
        matrix = matrix.todense()
        e_vals, e_vecs = linalg.eig(matrix)

        ## sort eigvals in increasing and sort vecs by same index
        idx = e_vals.argsort()
        e_vals = e_vals[idx]
        e_vecs = e_vecs[:, idx]

        print(e_vecs.shape)
        vecs = []
        for i in range(e_vecs.shape[0]):
            vecs.append(e_vecs[:, i])
        e_vecs = vecs

    #print(e_vals);
    #print(e_vecs);
    return [e_vals, e_vecs]
Example #38
def DirectMode(ffdisc, shift, nev):

    OP = ffdisc.L - shift * ffdisc.B
    OP = OP.tocsc()
    print('Build LU decomposition of (L-sB)')

    LU = splin.splu(OP, permc_spec='COLAMD')  # was permc_spec=3 in old SciPy
    print('done.')

    def op(x):
        r = ffdisc.B * x
        z = LU.solve(r)
        return z

    print('define SOP')
    SOP = splin.LinearOperator((ffdisc.ndof, ffdisc.ndof),
                               matvec=op,
                               dtype='complex')
    print('done.')

    # Compute modes
    print('Calling eigs')
    try:
        w, v = splin.eigs(SOP,
                          k=nev,
                          M=None,
                          sigma=None,
                          which='LM',
                          v0=None,
                          ncv=60,
                          maxiter=100,
                          tol=tol_ev)
        print('done.')
    except ArpackNoConvergence as err:
        w = err.eigenvalues
        v = err.eigenvectors
        print('not fully converged')
Example #39
def JDA(Xs , Xt , Ys , Yt0 , k=100 , labda = 0.1 , ker = 'primal' , gamma = 1.0 , data = 'default'):
    print('begin JDA')
    X = np.hstack((Xs , Xt))
    X = np.dot(X , np.diag(1/np.sqrt(np.sum(X**2 , axis=0))))  # column-normalize X
    (m,n) = X.shape
    ns = Xs.shape[1]
    nt = Xt.shape[1]
    C = len(np.unique(Ys))
    # Construct MMD matrix
    e1 = 1/ns*np.ones((ns,1))
    e2 = 1/nt*np.ones((nt,1))
    e = np.vstack((e1,e2))
    M = np.dot(e,e.T)*C
    
    if any(Yt0) and len(Yt0)==nt:
        for c in np.unique(Ys):
            e1 = np.zeros((ns,1))
            e1[Ys == c] = 1/len(Ys[Ys == c])
            e2 = np.zeros((nt,1))
            e2[Yt0 ==c] = -1/len(Yt0[Yt0 ==c])
            e = np.vstack((e1 ,e2))
            e[np.isinf(e)] = 0
            M = M+np.dot(e,e.T)
            
    M = M/norm(M ,ord = 'fro' )
    
    # Construct centering matrix
    H = np.eye(n) - 1/(n)*np.ones((n,n))
    
    #% Joint Distribution Adaptation: JDA
    if ker == 'primal':
        _, A = eigs(np.dot(np.dot(X,M),X.T)+labda*np.eye(m), k=k, M=np.dot(np.dot(X,H),X.T), which='SM')
        Z = np.dot(A.T,X)
    else:
        pass
    
    print('JDA TERMINATED')
Example #40
def pca_fun(X_data):
    '''
    PCA via the covariance matrix: projects the mean-corrected data onto the
    leading eigenvectors (those with eigenvalue >= 1.0).
    '''
    global x_mean
    global e_vec
    global U
    
    # Extract dimensions of X_data
    row , col = X_data.shape
    
    # Obtain the mean of each feature for all images.
    x_mean = np.mean(X_data , axis = 0)
    
    # Clone the mean vector into a matrix.
    X_mean = np.array([x_mean,]*row)
    
    # Obtain the mean-subtracted matrix.
    X_corrected = np.subtract(X_data , X_mean)
    
    # Obtain the covariance matrix.
    X_cov = np.cov(X_data.transpose())
    
    # Determine eigenvalues and eigentvectors of X_cov and sort based on largest magnitude.
    e_val , e_vec = eigs(X_cov , k=100 , which = 'LM')

    # Return only the real components.
    e_val = np.real(e_val)
    e_vec = np.real(e_vec)

    # Return eigenvalues greater than or equal to 1.0.
    e_val = e_val[e_val >= 1.0]
    e_vec = e_vec[: , range(len(e_val))]
    
    U = np.matmul(X_corrected , e_vec)
    
    return U
Example #41
def diffusion_mapping(X,
                      n_components=2,
                      n_neighbors=5,
                      alpha=1.0,
                      t=1,
                      gamma=0.5,
                      metric='minkowski',
                      p=2,
                      metric_params=None,
                      n_jobs=1):
    knn = kneighbors_graph(X,
                           n_neighbors,
                           mode='distance',
                           metric=metric,
                           metric_params=metric_params,
                           p=p,
                           n_jobs=n_jobs)

    K = sparse.csr_matrix(
        (np.exp(-gamma * knn.data**2), knn.indices, knn.indptr))

    mask = (K != 0).multiply(K.T != 0)
    L = K + K.T - K.multiply(mask)

    D = sparse.diags(np.asarray(L.sum(axis=0)).reshape(-1))

    L_a = D.power(-alpha) @ L @ D.power(-alpha)

    D_a = sparse.diags(np.asarray(L_a.sum(axis=1)).reshape(-1))

    m = D_a.power(-1) @ L_a

    w, v = eigs(m, n_components + 1)

    # eigs returns complex numbers, but this Markov matrix is conjugate to a
    # symmetric one, so its eigenvalues are real and lie in [0, 1].
    return (m.dot(v[:, 1:]) * (w[1:]**t)).real
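Usage sketch (illustrative data; assumes the imports the snippet relies on: numpy as np, scipy.sparse as sparse, sklearn.neighbors.kneighbors_graph and scipy.sparse.linalg.eigs):

X = np.random.rand(200, 3)
Y = diffusion_mapping(X, n_components=2, n_neighbors=10)
print(Y.shape)  # (200, 2)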
Example #42
def main():
    A, b, x = blur(128, 5, 1)
    # plt.figure(figsize=(6, 6))
    # plt.imshow(x.reshape(256, 256), cmap='gray')
    # plt.show()
    # plt.figure(figsize=(6, 6))
    # plt.imshow(b.reshape(256, 256), cmap='gray')
    # plt.show()
    f = lambda x: np.linalg.norm(A @ x - b)**2
    g = lambda x: 2 * A.T @ (A @ x - b)
    x0 = np.zeros([128 * 128, 1])
    AtA = (A.T.dot(A)).toarray()
    vals, vecs = eigs(AtA, k=1)

    max_eign = np.real(vals[0])

    # for j in [1, 10, 100, 1000]:
    #     x_output1, f_s_output1, g_s_output1,ts_1 = gradient_method(f, g, exact_quad(A), x0, j)
    #     x_output2, f_s_output2, g_s_output2,ts_2 = gradient_method(f, g, const_step(1 / (2 * max_eign)), x0, j)
    #     x_fista, f_fista, g_fista, ts_fista = fista(f, g, 2 * max_eign, x0,10**-5, j)
    #     plt.figure(figsize=(6, 6))
    #     plt.imshow(x_fista.reshape(128, 128), cmap='gray')
    #     plt.show()

    # x_output1, f_s_output1, g_s_output1,ts_1 = gradient_method(f, g, exact_quad(A), x0, j)
    x_output2, f_s_output2, g_s_output2, ts_2 = gradient_method(
        f, g, const_step(1 / (2 * max_eign)), x0, 1000)
    x_fista, f_fista, g_fista, ts_fista = fista(f, g, 2 * max_eign, x0, 10**-5,
                                                1000)
    # plt.title('Gradient descend - gf value per second')
    p1 = plt.semilogy(range(len(ts_2)), ts_2)
    p2 = plt.semilogy(range(len(ts_fista)), ts_fista)
    p3 = plt.semilogy(range(len(f_s_output2)), f_s_output2)
    p4 = plt.semilogy(range(len(f_fista)), f_fista)
    plt.legend((p1[0], p2[0], p3[0], p4[0]),
               ('time fista', 'time const 2', 'fs method2', 'fs fista'))
    plt.show()
Example #43
def approx_spectral_radius(M, pyamg=False, symmetric=False, tol=1e-03, sparse=True):
    """pyamg=False ... DEPRECATED for EC, without EC scipy.eigs is better

    Wrapper around existing methods to calculate spectral radius.
    1. Original method: function 'pyamg.util.linalg.approximate_spectral_radius'.
    Behaved strange at times, and packages needed time to import and returned errors.
    But kept as default since the other method from scipy sometimes gives wrong results!
    2. 'scipy.sparse.linalg.eigs' which seemed to work faster and apparently more reliably than the old method.
    However, it sometimes does not return the correct value!
    This happens when echo=True and the biggest value is negative. Then returns the next smaller positive.
    For example: returns 0.908 for [ 0.9089904+0.j -1.0001067+0.j], or 0.933 for [ 0.93376532+0.j -1.03019369+0.j]

    http://scicomp.stackexchange.com/questions/7369/what-is-the-fastest-way-to-compute-all-eigenvalues-of-a-very-big-and-sparse-adja
    http://www.netlib.org/utk/people/JackDongarra/etemplates/node138.html

    Both methods require matrix to have float entries (asfptype)
    Testing: scipy is faster up to at least graphs with 600k edges
    10k nodes, 100k edges: pyamg 0.4 sec, scipy: 0.04
    60k nodes, 600k edges: pyamg 2 sec, scipy: 1 sec

    Allows both sparse matrices and numpy arrays: For both, transforms int into float structure.
    However, numpy astype makes a copy (optional attribute copy=False does not work for scipy.csr_matrix)

    'eigsh' is not consistently faster than 'eigs' for symmetric M

    k=2 does not work anymore for 'scipy.sparse.linalg.eigs'. We now have 'np.linalg.eigvalsh' as alternative method
    """
    pyamg=False          # TODO: pyamg is kept as better method
    if pyamg:
        # return approximate_spectral_radius(M.astype('float'), tol=tol, maxiter=20, restart=10)        # TODO: kept for historical reasons
        return 0
    else:
        k, _ = M.shape
        if k>2 and sparse:
            return np.absolute(eigs(M.astype('float'), k=1, return_eigenvectors=False, which='LM', tol=tol)[0])   # which='LM': largest magnitude; eigs / eigsh
        else:
            return np.max(np.absolute(np.linalg.eigvalsh(M)))
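Usage sketch covering both branches (illustrative matrices; assumes numpy as np, scipy.sparse as sp and the eigs import used above; ARPACK convergence permitting):

M = sp.random(500, 500, density=0.01, format='csr')
print(approx_spectral_radius(M))                        # k>2 and sparse: ARPACK branch
print(approx_spectral_radius(np.eye(2), sparse=False))  # tiny/dense: eigvalsh fallback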
Example #44
def spectral_clustering(G, k):
    L = nx.normalized_laplacian_matrix(G).astype(float) # Normalized Laplacian

    # Calculate k smallest in magnitude eigenvalues and corresponding eigenvectors of L

    ##################
    # your code here #
    ##################
    
    # hint: use eigs function of scipy
    eigval, eigvec = eigs(L, k=k, which='SR')

    eigval = eigval.real # Keep the real part
    eigvec = eigvec.real # Keep the real part
    # sort is implemented by default in increasing order
    idx = eigval.argsort() # Get indices of sorted eigenvalues
    eigvec = eigvec[:,idx] # Sort eigenvectors according to eigenvalues
    
    # Perform k-means clustering (store in variable "membership" the clusters to which points belong)
    
    ##################
    # your code here #
    ##################
    
    # hint: use KMeans class of scikit-learn
    km = cluster.KMeans(n_clusters=k, init='random').fit(eigvec)


    membership = list(km.labels_)
    # will contain node IDs as keys and membership as values
    clustering = {}
    nodes = G.nodes()
    for i in range(len(nodes)):
        clustering[nodes[i]] = membership[i]
    
    return clustering
Example #45
def directed_laplacian_matrix(G, nodelist=None, weight='weight',alpha=0.95):
    import numpy as np
    M = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight,
                                  dtype=float)
    n, m = M.shape
    if not (0 < alpha < 1):
            raise nx.NetworkXError('alpha must be between 0 and 1')
    # this is using a dense representation
    M = M.todense()
    # add constant to dangling nodes' row
    dangling = np.where(M.sum(axis=1) == 0)
    for d in dangling[0]:
        M[d] = 1.0 / n
    # normalize
    M = M / M.sum(axis=1)

    P = alpha * M + (1 - alpha) / n
    evals, evecs = linalg.eigs(P.T, k=1,tol=1E-2)
    v = evecs.flatten().real
    p = v / v.sum()
    sqrtp = np.sqrt(p)
    I = np.identity(len(G))
    Q = spdiags(sqrtp, [0], n, n) * (I-P) * spdiags(1.0 / sqrtp, [0], n, n)
    return Q
Example #46
def spectral_clustering(G, k):
    """
    :param G: networkx graph
    :param k: number of clusters
    :return: clustering labels, dictionary of nodeID: clustering_id
    """
    A = nx.adjacency_matrix(G)
    d = np.array([G.degree(node) for node in G.nodes()])
    D_inv = sparse.diags(1 / d)
    n = len(d)  # number of nodes
    L_rw = sparse.eye(n) - D_inv @ A

    eig_values, eig_vectors = eigs(L_rw, k, which='SR')
    eig_vectors = eig_vectors.real

    kmeans = KMeans(n_clusters=k)
    kmeans.fit(eig_vectors)

    clustering_labels = {
        node: kmeans.labels_[i]
        for i, node in enumerate(G.nodes())
    }

    return clustering_labels
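Usage sketch on a standard test graph (assumes the snippet's imports: networkx as nx, numpy as np, scipy.sparse as sparse, scipy.sparse.linalg.eigs and sklearn.cluster.KMeans):

G = nx.karate_club_graph()
labels = spectral_clustering(G, 2)
print(set(labels.values()))  # two cluster ids, e.g. {0, 1}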
Example #47
def spectral_radius(graph, typed=True, weighted=True):
    '''
    Spectral radius of the graph, defined as the eigenvalue of greatest module.

    Parameters
    ----------
    graph : :class:`~nngt.Graph` or subclass
        Network to analyze.
    typed : bool, optional (default: True)
        Whether the excitatory/inhibitory type of the connections should be
        considered.
    weighted : bool, optional (default: True)
        Whether the weights should be taken into account.

    Returns
    -------
    the spectral radius as a float.
    '''
    weights = None
    if typed and "type" in graph.eproperties.keys():
        weights = graph.eproperties["type"].copy()
    if weighted and "weight" in graph.eproperties.keys():
        if weights is not None:
            weights = np.multiply(weights, graph.eproperties["weight"])
        else:
            weights = graph.eproperties["weight"].copy()
    mat_adj = nngt.analyze_graph["adjacency"](graph, weights)
    eigenval = [0]
    try:
        eigenval = spl.eigs(mat_adj, return_eigenvectors=False)
    except spl.eigen.arpack.ArpackNoConvergence as err:
        eigenval = err.eigenvalues
    if len(eigenval):
        return np.amax(np.absolute(eigenval))
    else:
        raise spl.eigen.arpack.ArpackNoConvergence()
Example #48
def eigs(T, k=10, eps=1e-3, perc=None):
    try:
        eigvals, eigvecs = linalg.eigs(
            T.T, k=k, which='LR')  # find k eigs with largest real part

        p = np.argsort(eigvals)[::-1]  # sort in descending order of eigenvalues
        eigvals = eigvals.real[p]
        eigvecs = eigvecs.real[:, p]

        idx = (eigvals >= 1 - eps)  # select eigenvectors with eigenvalue of 1
        eigvals = eigvals[idx]
        eigvecs = np.absolute(eigvecs[:, idx])

        if perc is not None:
            lbs, ubs = np.percentile(eigvecs, perc, axis=0)
            eigvecs[eigvecs < lbs] = 0
            eigvecs = np.clip(eigvecs, 0, ubs)
            eigvecs /= eigvecs.max(0)

    except:
        eigvals, eigvecs = np.empty(0), np.zeros(shape=(T.shape[0], 0))

    return eigvals, eigvecs
Example #49
def Eingenval_of_Transfer_Matrix(Tn, lam, e_num=5):
    ## Output the e_num leading eigenvalues of the transfer matrix
    period = len(Tn)
    chi_r = Tn[0].shape[2]
    if chi_r > 2:

        def BAv(v):
            v_new = TEBD.mult_right(v.reshape(chi_r, chi_r), lam[1], Tn[0])
            for j in range(1, period):
                v_new = TEBD.mult_right(v_new, lam[(period - j + 1) % period],
                                        Tn[(period - j) % period])
            return v_new.reshape(chi_r**2)

        T_mat = spr_linalg.LinearOperator((chi_r**2, chi_r**2), matvec=BAv)

        return spr_linalg.eigs(T_mat,
                               k=min(e_num, chi_r - 1),
                               return_eigenvectors=False)
    else:
        T_half = np.tensordot(Tn[0], np.diag(lam[1]), (2, 0))
        T_mat = np.tensordot(T_half, T_half.conj(),
                             (0, 0)).transpose(0, 2, 1, 3)
        for j in range(1, period):
            T_half = np.tensordot(Tn[j], np.diag(lam[(j + 1) % period]),
                                  (2, 0))
            T_mat = np.tensordot(np.tensordot(T_half, T_half.conj(), (0, 0)),
                                 T_mat, ([1, 3], [0, 1]))

        eig = linalg.eigvals(np.reshape(T_mat, (chi_r**2, chi_r**2)))
        if chi_r == 1:
            return eig
        else:
            if np.abs(eig[0]) > np.abs(eig[1]):
                return np.array([eig[1], eig[0]])
            else:
                return np.array([eig[0], eig[1]])
Example #50
    def learn_embedding(self, graph=None, edge_f=None,
                        is_weighted=False, no_python=False):
        if not graph and not edge_f:
            raise Exception('graph/edge_f needed')
        if not graph:
            graph = graph_util.loadGraphFromEdgeListTxt(edge_f)
        graph = graph.to_undirected()
        t1 = time()
        L_sym = nx.normalized_laplacian_matrix(graph)
        
        try:
            w, v = lg.eigs(L_sym, k=self._d + 1, which='SM')
            t2 = time()
            self._X = v[:, 1:]

            p_d_p_t = np.dot(v, np.dot(np.diag(w), v.T))
            eig_err = np.linalg.norm(p_d_p_t - L_sym)
            print('Laplacian matrix recon. error (low rank): %f' % eig_err)
            return self._X, (t2 - t1)
        except:
            print('SVD did not converge. Assigning random embedding')
            self._X = np.random.randn(L_sym.shape[0], self._d)
            t2 = time()
            return self._X, (t2 - t1)
Example #51
def sparse_eigensolve(func, arr_shape, guess, params,
                      *args,
                      hermitian=True, which="LM",
                      **kwargs):
    """
    Returns the dominant eigenvector of the linear map f(x) implicitly
    represented by func(x, *args, **kwargs).

    Internally within func, x has the shape arr_shape. It, along with the
    guess, will be reshaped appropriately.

    The eigenvector is computed by Arnoldi iteration to a tolerance tol.
    If tol is None, machine precision of guess's data type is used.

    The param 'sigma' is passed to eigs. Eigenvalues are found 'around it'.
    This can be used to find the most negative eigenvalue instead of the
    one with the largest magnitude, for example.
    """
    op = sparse_solver_op(func, arr_shape, *args, dtype=guess.dtype,
                          **kwargs)
    tol = params["Heff_tol"]
    ncv = params["Heff_ncv"]
    neigs = params["Heff_neigs"]
    # print("guess: ", guess)
    # print(guess.flatten())
    if hermitian:
        w, v = eigsh(op, k=neigs, which=which, tol=tol,
                     v0=guess.flatten(), ncv=ncv)
    else:
        w, v = eigs(op, k=neigs, which=which, tol=tol,
                    v0=guess.flatten(), ncv=ncv)

    w, v = utils.sortby(w.real, v, mode="SR")
    eV = v[:, 0]
    eV = eV.reshape(arr_shape)
    return eV
Example #52
    def solve_ground_state(self):
        """Solve ground state by sparse eigensolver.

        Compute attributes:
        total_energy, kinetic_energy, potential_energy, density, wave_function.

        Returns:
          self
        """
        if (self.boundary_condition == 'open'
                or self.boundary_condition == 'periodic'):
            eigenvalues, eigenvectors = linalg.eigsh(
                self._h, k=self.num_electrons,
                which='SA', tol=self._tol)
        else:
            eigenvalues, eigenvectors = linalg.eigs(
                self._h, k=self.num_electrons,
                which='SR', tol=self._tol)
            idx = eigenvalues.argsort()
            eigenvalues = eigenvalues[idx]
            eigenvectors = eigenvectors[:, idx]

        return self._update_ground_state(
            eigenvalues, eigenvectors)
Example #53
    def learn_embedding(self,
                        graph=None,
                        edge_f=None,
                        is_weighted=False,
                        no_python=False):
        if not graph and not edge_f:
            raise Exception('graph/edge_f needed')
        if not graph:
            graph = graph_util.loadGraphFromEdgeListTxt(edge_f)
        graph = graph.to_undirected()
        t1 = time()
        L_sym = nx.normalized_laplacian_matrix(graph)

        w, v = lg.eigs(L_sym, k=self._d + 1, which='SM')
        idx = np.argsort(w)  # eigs gives no ordering guarantee, so sort
        w = w[idx]
        v = v[:, idx]
        t2 = time()
        self._X = v[:, 1:]  # drop the trivial eigenvector of the Laplacian

        p_d_p_t = np.dot(v, np.dot(np.diag(w), v.T))
        eig_err = np.linalg.norm(p_d_p_t - L_sym)
        print('Laplacian matrix recon. error (low rank): %f' % eig_err)
        return self._X.real, (t2 - t1)
Example no. 54
def ldos0d_wf(h, e=0.0, delta=0.01, num_wf=10, robust=False):
    """Calculates the local density of states of a hamiltonian and
     writes it in file, using arpack"""
    if h.dimensionality == 0:  # only for 0d
        intra = csc_matrix(h.intra)  # matrix
    else:
        raise NotImplementedError('only implemented for 0d')
    if robust:  # go to the imaginary axis for stability
        eig, eigvec = slg.eigs(intra,
                               k=int(num_wf),
                               which="LM",
                               sigma=e + 1j * 10 * delta)
        eig = eig.real  # real part only
    else:  # Hermitian Hamiltonian
        eig, eigvec = slg.eigsh(intra, k=int(num_wf), which="LM", sigma=e)
    d = np.zeros(intra.shape[0])  # initialize accumulator
    for (v, ie) in zip(eigvec.transpose(), eig):  # loop over wavefunctions
        v2 = (np.conjugate(v) * v).real  # square of wavefunction
        fac = delta / ((e - ie)**2 + delta**2)  # factor to create a delta
        d += fac * v2  # add contribution
    d /= num_wf  # normalize
    d = spatial_dos(h, d)  # resum if necessary
    g = h.geometry  # store geometry
    write_ldos(g.x, g.y, d)  # write in file
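The factor fac above is a Lorentzian of half-width delta standing in for the Dirac delta in rho(E) = sum_i |psi_i|^2 * delta(E - eps_i); spatial_dos and write_ldos are helpers from the same package and are not reproduced here. A tiny standalone check of the broadening (my illustration; note the function above also omits the usual 1/pi normalization and divides by num_wf instead):

import numpy as np

def lorentzian(e, eig, delta):
    # Peaks at e = eig with height 1/delta and half-width delta,
    # approaching a Dirac delta (up to a factor of pi) as delta -> 0.
    return delta / ((e - eig) ** 2 + delta ** 2)

energies = np.linspace(-0.05, 0.05, 5)
print(lorentzian(energies, eig=0.0, delta=0.01))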
Example no. 55
def spectral_clustering(G, k, seed=1234):
    L = nx.normalized_laplacian_matrix(G).astype(float) # Normalized Laplacian

    # Calculate k smallest in magnitude eigenvalues and corresponding eigenvectors of L
    # hint: use eigs function of scipy

    ##################
    # your code here #
    eigval, eigvec = eigs(L, k=k, which='SM')
    ##################

    eigval = eigval.real # Keep the real part
    eigvec = eigvec.real # Keep the real part
    
    idx = eigval.argsort() # Get indices of sorted eigenvalues
    eigvec = eigvec[:,idx] # Sort eigenvectors according to eigenvalues
    
    # Perform k-means clustering (store in variable "membership" the clusters to which points belong)
    # hint: use KMeans class of scikit-learn
    

    ##################
    # your code here #
    kmeans = KMeans(n_clusters=k, random_state = seed)
    kmeans.fit(eigvec[:,:k])
    ##################

    # Create a dictionary "clustering" where keys are nodes and values the clusters to which the nodes belong
    
    ##################
    # your code here #
    clusters = kmeans.predict(eigvec[:,:k])
    clustering = {n: c for n, c in zip(G.nodes(), clusters)}
    ##################

    return clustering
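A hypothetical driver for the function above, assuming its imports (eigs from scipy.sparse.linalg, KMeans from sklearn.cluster) are in scope:

import networkx as nx

# Split the karate club graph into two communities.
G = nx.karate_club_graph()
clustering = spectral_clustering(G, k=2, seed=1234)
print(sorted(set(clustering.values())))  # the cluster labels found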
Example no. 56
def scipy_sparse_eigs(A, B, N, target, **kw):
    """
    Perform targeted eigenmode search using the scipy/ARPACK sparse solver
    for the reformulated generalized eigenvalue problem

        A.x = λ B.x  ==>  (A - σB)\B.x = (1/(λ-σ)) x

    for eigenvalues λ near the target σ.

    Parameters
    ----------
    A, B : scipy sparse matrices
        Sparse matrices for generalized eigenvalue problem
    N : int
        Number of eigenmodes to return
    target : complex
        Target σ for eigenvalue search

    Other keyword options passed to scipy.sparse.linalg.eigs.

    """
    # Build sparse linear operator representing (A - σB)\B = C\B = D
    C = A - target * B
    if STORE_LU:
        C_LU = spla.splu(C.tocsc(), permc_spec=PERMC_SPEC)
        def matvec(x):
            return C_LU.solve(B.dot(x))
    else:
        def matvec(x):
            return spla.spsolve(C, B.dot(x), use_umfpack=USE_UMFPACK, permc_spec=PERMC_SPEC)
    D = spla.LinearOperator(dtype=A.dtype, shape=A.shape, matvec=matvec)
    # Solve using scipy sparse algorithm; the shift is already folded into
    # D, so sigma must stay None here
    evals, evecs = spla.eigs(D, k=N, which='LM', sigma=None, **kw)
    # Rectify eigenvalues: map 1/(lambda - sigma) back to lambda
    evals = 1 / evals + target
    return evals, evecs
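For comparison (my illustration, not from the source): scipy's eigs exposes the same generalized shift-invert directly through its M and sigma arguments, so the function above exists mainly to control, and optionally cache, the LU factorization:

import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spla

# Generalized problem A.x = lam*B.x with known spectrum {1, 2, 3, 4};
# ask for the two eigenvalues nearest the target 2.1.
A = sp.diags([1.0, 2.0, 3.0, 4.0]).tocsc()
B = sp.identity(4, format='csc')
evals, evecs = spla.eigs(A, k=2, M=B, sigma=2.1, which='LM')
print(np.sort(evals.real))  # approximately [2., 3.]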
Example no. 57
    def transfer_eig(self, A_array, B_array):
        """Calculates the largest eigenvalue of the mixed transfer matrix of
        the cell A_array with the cell B_array.
        """
        if A_array is None or B_array is None:
            return 0

        # A_array and B_array have tensors of different shape
        if any([A.shape != B.shape for A, B in zip(A_array, B_array)]):
            return 0

        def transfer(A, B, x):
            x = x.reshape(A.shape[0], B.shape[0])
            x = tensordot(x, A, [[0], [0]])
            return tensordot(x, B.conj(), [[0, 1], [0, 1]]).ravel()

        def fullTF(x):
            for A, B in zip(A_array, B_array):
                x = transfer(A, B, x)
            return x

        LO = LinearOperator((A_array[0].shape[0]**2, ) * 2, matvec=fullTF)
        w, v = eigs(LO, k=1, which='LM')
        return w[0]
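The LinearOperator is what keeps this tractable: one matvec costs two tensordots, and the D^2 x D^2 transfer matrix is never formed. A standalone sketch for a single random tensor (my illustration, assuming the usual (left, physical, right) index order):

import numpy as np
from numpy import tensordot
from scipy.sparse.linalg import LinearOperator, eigs

D, d = 8, 2  # bond and physical dimensions
A = np.random.randn(D, d, D) + 1j * np.random.randn(D, d, D)

def matvec(x):
    # One application of the transfer matrix: contract x through the
    # ket copy of A, then through the bra copy, without building the
    # D^2 x D^2 matrix.
    x = x.reshape(D, D)
    x = tensordot(x, A, [[0], [0]])
    return tensordot(x, A.conj(), [[0, 1], [0, 1]]).ravel()

LO = LinearOperator((D * D, D * D), matvec=matvec, dtype=np.complex128)
w, v = eigs(LO, k=1, which='LM')
print(abs(w[0]))  # spectral radius of the transfer matrix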
Example no. 58
    def reorder_with_fiedler(self):
        """
        OLD DO NOT USE

        Reorder the vertices in the graph based on the Fiedler vector

        @return: nothing
        """

        L = np.array(self.graph.laplacian(weights=self.graph.es['weight']),
                     dtype=float)

        # calculate eigenvalues and eigenvectors from the laplacian
        # TODO: look into making this more efficient, not all eigenvalues have
        # to be calculated

        # calculate the k eigenvalues/vectors with the lowest magnitude
        eigvals, eigvec = eigs(L,
                               k=np.linalg.matrix_rank(L),
                               sigma=0.001,
                               which="LM")

        eigorder = np.argsort(eigvals)
        eigvals = eigvals[eigorder]
        eigvec = eigvec[:, eigorder]

        for i, val in enumerate(eigvals):
            if not np.isclose(val, 0):
                fiedler = eigvec[:, i]
                break

        # find the reordering based on the Fiedler vector
        vertices = np.argsort(fiedler).tolist()
        order = [vertices.index(i) for i in range(self.graph.vcount())]
        # sort matrix on both the rows and columns
        self.graph = self.graph.permute_vertices(order)
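As a sanity check on the Fiedler-vector idea (my illustration, not the class above): on a path graph the Fiedler vector is monotone, so sorting by it recovers the path order, and shift-invert needs only two eigenpairs rather than rank(L) of them:

import numpy as np
import networkx as nx
from scipy.sparse.linalg import eigsh

G = nx.path_graph(10)
L = nx.laplacian_matrix(G).astype(float).tocsc()

# Two eigenpairs nearest sigma = -0.01; the negative shift keeps
# (L - sigma*I) positive definite. The second-smallest pair is the
# Fiedler vector.
vals, vecs = eigsh(L, k=2, sigma=-0.01, which='LM')
fiedler = vecs[:, np.argsort(vals)[1]]
print(np.argsort(fiedler))  # 0..9 or 9..0: the path order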
Example no. 59
def matrix_singular_values(matrix, n_sing, mode='LM'):
    filter_shape = matrix.shape
    matrix_reshape = np.reshape(matrix, [-1, filter_shape[-1]])

    # Positive semi-definite Gram matrix: A.T*A or A*A.T, whichever is smaller
    dim1, dim2 = matrix_reshape.shape
    if dim1 > dim2:
        aa_t = np.matmul(matrix_reshape.T, matrix_reshape)
    else:
        aa_t = np.matmul(matrix_reshape, matrix_reshape.T)

    # NOTE: scipy raises a RuntimeWarning suggesting dense eig instead
    # when the matrix is too small: eigs is an iterative approximation,
    # while eig computes all eigenvalues and eigenvectors of the matrix.

    eigenvalues = None  # so a failed or skipped solve is detectable below
    try:
        eigenvalues, eigenvectors = sp_linalg.eigs(A=aa_t,
                                                   k=n_sing,
                                                   which=mode)
    except (RuntimeWarning, RuntimeError):
        pass

    if eigenvalues is None:
        return None

    if n_sing > 1:
        eigenvalues = np.sort(eigenvalues)
        if 'LM' in mode:
            eigenvalues = eigenvalues[::-1]

    sing_matrix = np.sqrt(eigenvalues)

    return np.real(sing_matrix)
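An alternative worth noting (my suggestion, not from the source): scipy.sparse.linalg.svds returns the leading singular values directly, skipping both the Gram matrix and the square root:

import numpy as np
from scipy.sparse.linalg import svds

matrix = np.random.randn(64, 16)
# Three largest singular values, computed without forming A*A.T.
u, s, vt = svds(matrix, k=3)
print(np.sort(s)[::-1])  # sorted descending, as with mode='LM' above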
Example no. 60
def recursive_eig(matrix, k, n_k_needed, k_buffer=1, sigma=1e-10, which='LM'):
    """
    Recursive function to iteratively get eigs until have enough to get fiedler + n_k_needed @ minimum.
    If one final
    :param matrix:
    :param k:
    :param n_k_needed:
    :param k_buffer:
    :param sigma:
    :param which:
    :return:
    """
    MIN_EIG_VAL = 1e-10

    print('Starting!')
    eig_vals, eig_vecs = eigs(matrix, k=k, sigma=sigma, which=which, ncv=4 * k)

    # eigs returns complex eigenvalues; compare real parts only
    n_good_eigen_vals = sum(eig_vals.real > MIN_EIG_VAL)

    if n_good_eigen_vals < n_k_needed:
        print(
            'Not enough eigenvalues found, trying again with more eigenvalues!'
        )
        k += k_buffer + n_k_needed
        eig_vals, eig_vecs = recursive_eig(matrix, k, n_k_needed, k_buffer,
                                           sigma, which)

    eig_keep = np.where(eig_vals.real > MIN_EIG_VAL)[0]

    eig_vals = eig_vals[eig_keep]
    eig_vecs = eig_vecs[:, eig_keep]

    eig_vals = np.real(eig_vals)
    eig_vecs = np.real(eig_vecs)

    return eig_vals, eig_vecs
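A hypothetical driver (assuming the function's own imports, numpy as np and eigs from scipy.sparse.linalg): on a connected graph the Laplacian has exactly one zero eigenvalue, which the MIN_EIG_VAL filter drops:

import networkx as nx

G = nx.path_graph(50)
L = nx.laplacian_matrix(G).astype(float).tocsc()
eig_vals, eig_vecs = recursive_eig(L, k=5, n_k_needed=3)
print(eig_vals)  # the smallest nonzero eigenvalues found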