Example #1
def medEigenSys(n,h,a,numE):
	#in specifying sigma, we need to alter the mode
	hamilPos,hamilNeg=isingHParitySplit(n,h,a)
	eigenPos=lin.eigsh(hamilPos,k=numE//2,which="LM",maxiter=1000,sigma=0,mode='normal')
	eigenNeg=lin.eigsh(hamilNeg,k=numE//2,which="LM",maxiter=1000,sigma=0,mode='normal')
	eigenNet=lin.eigsh(isingHInter(n,h,a),k=numE,which="LM",maxiter=1000,sigma=0,mode='normal')
	return (eigenPos[0],eigenNeg[0],eigenPos[0]-eigenNeg[0],[parityToFull(v,0) for v in np.transpose(eigenPos[1])],[parityToFull(v,1) for v in np.transpose(eigenNeg[1])],eigenNet[0],eigenNet[1])
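
A minimal, self-contained sketch of the shift-invert call used above (illustrative matrix only, assuming nothing from the project beyond numpy/scipy): with sigma=0 and which="LM", eigsh returns the eigenvalues of the matrix closest to zero.

import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as lin

# Adjacency matrix of a 100-site chain; eigenvalues lie in (-2, 2).
H = sp.diags([np.ones(99), np.ones(99)], offsets=[-1, 1], format='csc')
vals, vecs = lin.eigsh(H, k=4, sigma=0, which="LM")  # 4 eigenvalues nearest 0
print(np.sort(vals))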
Example #2
File: testfermi.py  Project: amilacsw/dmrg
    def test_nonint(self):
        #get the exact solution.
        h_exact=self.model_exact.hgen.H()
        E_excit=eigvalsh(h_exact)
        Emin_exact=sum(E_excit[E_excit<0])

        #the solution in occupation representation.
        h_occ=self.model_occ.hgen.H()
        Emin=eigsh(h_occ,which='SA',k=1)[0]
        print('The Ground State Energy for hexagon(t = %s, t2 = %s) is %s, tolerance %s.' % (self.t, self.t2, Emin, Emin - Emin_exact))
        assert_almost_equal(Emin_exact,Emin)

        #the solution through updates
        H_serial=op2collection(op=self.model_occ.hgen.get_opH())
        H=get_H(H=H_serial,hgen=self.expander)
        H2,bm2=get_H_bm(H=H_serial,hgen=self.expander2,bstr='QM')
        Emin=eigsh(H,k=1,which='SA')[0]
        Emin2=eigsh(H2,k=1,which='SA')[0]
        print('The Ground State Energy is %s, tolerance %s.' % (Emin, Emin - Emin2))
        assert_almost_equal(Emin_exact,Emin)
        assert_almost_equal(Emin_exact,Emin2)

        #the solution through dmrg.
        bmgen=get_bmgen(self.expander3.spaceconfig,'QM')
        dmrgegn=DMRGEngine(hchain=H_serial,hgen=self.expander3,tol=0,bmg=bmgen,symmetric=True)
        EG2=dmrgegn.run_finite(endpoint=(5,'<-',0),maxN=[10,20,30,40,40],tol=0)[-1]
        assert_almost_equal(Emin_exact,EG2*H_serial.nsite,decimal=4)
Example #3
def SCpolarongenerator(Ham,Ham_p,site,alpha):
    """Generate a 'polaron' by letting the site energies be perturbed self-consistently,
    in proportion to the charge density on each site, until the lowest-energy state converges."""

    SCFSTEPS = 2000
    init_diagonal = np.diagonal(Ham)
    
    np.fill_diagonal(Ham_p,init_diagonal)

    Ham_p[site,site]-=alpha

    Evals,Evecs=eigsh(Ham_p,1,which='LM',tol=1E-4)  # Solve Hamiltonian


    for i in range(SCFSTEPS): # Number of SCF steps
        #print i
        polaron=Evecs[:,0]*Evecs[:,0] # Find lowest energy state charge density
        np.fill_diagonal(Ham_p,init_diagonal-alpha*polaron) # Deepen site energies in proportion to density
        pvals,pvecs=eigsh(Ham_p,1,which='LM',tol=1E-4)  # Resolve Hamiltonian
        if np.isclose(pvals[0],Evals[0],rtol=1e-8,atol=1e-8):  # Repeat until convergence of lowest state energy
            break
        Evals=pvals
        Evecs=pvecs


    return Ham_p
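
A hypothetical usage sketch for the function above (the chain Hamiltonian, site index, and alpha below are invented for illustration; it assumes the function and its numpy/scipy imports are in scope). Note that which='LM' only finds the lowest state when that state also has the largest magnitude, which holds here because the site energies sit far below zero:

import numpy as np
from scipy.sparse.linalg import eigsh

N = 20
Ham = -5.0 * np.eye(N) - np.eye(N, k=1) - np.eye(N, k=-1)  # site energy -5, hopping -1
Ham_p = Ham.copy()                                         # workspace for the perturbed matrix
Ham_p = SCpolarongenerator(Ham, Ham_p, site=N // 2, alpha=0.5)
print('polaron ground-state energy:', eigsh(Ham_p, 1, which='LM')[0][0])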
Example #4
def eigenSysCut(n,h,a,numE,cutoff):
	eCut=lambda x: eCutoff(x,cutoff)
	hamilPos,hamilNeg=map(eCut,JisingHParitySplit(n,h,a))
	eigenPos=lin.eigsh(hamilPos,k=numE//2,which="SA",maxiter=10000)
	eigenNeg=lin.eigsh(hamilNeg,k=numE//2,which="SA",maxiter=10000)
	eigenNet=lin.eigsh(isingHInter(n,h,a),k=numE,which="SA",maxiter=10000)
	return (eigenPos[0],eigenNeg[0],eigenPos[0]-eigenNeg[0],[parityToFull(v,0) for v in np.transpose(eigenPos[1])],[parityToFull(v,1) for v in np.transpose(eigenNeg[1])],eigenNet[0],eigenNet[1])
Example #5
def solver(M, _k, _sigma=0., _tol=1e-7):

    #t_start = time()
    try:
        if scipy.__version__.split('.', 2)[1] == '10':
            #
            # eigsh sparse eigensolver, with sigma setting (in scipy>=0.10) 
            #
            eigval, eigvec = SparseLinalg.eigsh(M, k=_k, sigma=_sigma, tol=_tol)
        elif scipy.__version__.split('.', 2)[1] in ('8', '9'):
            #
            # eigsh sparse eigensolver, no sigma setting (in scipy<0.10) 
            # ask for more than _k eigvecs, otherwise the solver is unstable
            #
            eigval, eigvec = SparseLinalg.eigsh(M, k=_k*10, which='SM')
            #_, eigval, eigvec = SparseLinalg.svds(W, k=_k*10)
    except SparseLinalg.arpack.ArpackNoConvergence as excobj:
        print "ARPACK iteration did not converge"
        eigval, eigvec = excobj.eigenvalues, excobj.eigenvectors
        eigval = scipy.hstack((eigval, numpy.zeros(_k-eigval.shape[0])))
        eigvec = scipy.hstack((eigvec, numpy.zeros((n,_k-eigvec.shape[1]))))
        #
        # If eigval/eigvec pairs are not sorted on eigvals value
        #
        #ixEig = numpy.argsort(eigval)
        #eigval = eigval[ixEig]
        #eigvec = eigvec[:,ixEig]
        #print 'Eigen-values/vectors found in %.6fs' % (time()-t_start)
    return eigval, eigvec
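
The recovery branch above, distilled into a standalone sketch for modern scipy (where sigma is always supported); the helper name robust_eigsh is illustrative, not from the project:

import numpy as np
from scipy.sparse.linalg import eigsh, ArpackNoConvergence

def robust_eigsh(M, k, sigma=0.0, tol=1e-7):
    try:
        return eigsh(M, k=k, sigma=sigma, tol=tol)
    except ArpackNoConvergence as excobj:
        # Keep whatever pairs converged and zero-pad out to k of them.
        eigval, eigvec = excobj.eigenvalues, excobj.eigenvectors
        eigval = np.hstack((eigval, np.zeros(k - eigval.shape[0])))
        eigvec = np.hstack((eigvec, np.zeros((M.shape[0], k - eigvec.shape[1]))))
        return eigval, eigvec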
Example #6
 def right_sweep(self,R,M):
     """
     :param R: The right operator
     :param M:
     :return:
     """
     H=np.tensordot(self.MPO[0],R[1],axes=(3,1))
     H=np.squeeze(H)
     H=np.transpose(H,[0,2,1,3])
     d0,d1,d2,d3=H.shape
     H=np.reshape(H,[d0*d1,d2*d3])
     w,v=ssl.eigsh(H,which='SA',k=1,maxiter=5000)
     v=np.reshape(v,[self.d,1,d0*d1//self.d])
     l,u=self.left_cannonical(v)
     M[0]=l
     L=[[] for i in range(len(R))]
     L[0]=np.tensordot(l,self.MPO[0],axes=(0,0))
     L[0]=np.tensordot(L[0],np.conj(l),axes=(2,0))
     L[0]=np.transpose(L[0],[0,2,4,1,3,5])
     for i in range(1,len(R)-1):
         H=np.tensordot(self.MPO[i],R[i+1],axes=(3,1))
         H=np.tensordot(L[i-1],H,axes=(4,2))
         H=H.squeeze()
         H=np.transpose(H,[2,0,4,3,1,5])
         d1,d2,d3,d4,d5,d6=H.shape
         H=np.reshape(H,[d1*d2*d3,d1*d2*d3])
         w,v=ssl.eigsh(H,which='SA',k=1,maxiter=5000)
         v = np.reshape(v, [d1, d2, d3])
         l, u = self.left_cannonical(v)
         M[i] = l
         Li = np.tensordot(L[i-1],l,axes=(3,1))
         Li = np.tensordot(Li, self.MPO[i],axes=([5,3],[0,2]))
         L[i] = np.tensordot(Li, np.conj(l),axes=([3,5],[1,0]))
     M[-1]=np.tensordot(u,M[-1],axes=(1,1))
     return L,M
Example #7
 def left_sweep(self,L,M):
     H = np.tensordot(L[-2], self.MPO[-1], axes=(4, 2))
     H = np.squeeze(H)
     H = np.transpose(H, [2,0,3,1])
     d0,d1,d2,d3=H.shape
     H = np.reshape(H, [d0*d1, d2 * d3])
     w, v = ssl.eigsh(H, which='SA', k=1,maxiter=5000)
     v=np.reshape(v,[self.d,d0*d1//self.d,1])
     v,u=self.right_canonical(v)
     M[-1] = v
     R=[[] for i in range(len(L))]
     R[-1]=np.tensordot(v,self.MPO[-1],axes=(0,0))
     R[-1]=np.tensordot(R[-1],np.conj(v),axes=[2,0])
     R[-1]=np.transpose(R[-1],[0,2,4,1,3,5])
     for i in range(1,len(L)-1):
         H = np.tensordot(L[-(i+2)],self.MPO[-(i+1)], axes=(4, 2))
         H = np.tensordot(H,R[-i], axes=(7, 1))
         H = np.squeeze(H)
         H = np.transpose(H, [2,0, 4, 3, 1, 5])
         d0,d1,d2,d3,d4,d5=H.shape
         H = np.reshape(H, [d0*d1*d2, d3*d4*d5])
         w, v = ssl.eigsh(H, which='SA', k=1,maxiter=5000)
         v=np.reshape(v,[d0,d1,d2])
         v,u=self.right_canonical(v)
         M[-(i+1)] = v
         Ri = np.tensordot(np.conj(v),R[-i],axes=(2,2))
         Ri = np.tensordot(self.MPO[-(i+1)],Ri,axes=([1,3],[0,3]))
         R[-(i+1)] = np.tensordot(v, Ri,axes=([0,2],[0,3]))
     M[0]=np.tensordot(M[0],u,axes=(2,0))
     return R,M
Example #8
def min_max_hessian_eigs(net, dataloader, criterion, rank=0, use_cuda=False, verbose=False):
    """
        Compute the largest and the smallest eigenvalues of the Hessian matrix.

        Args:
            net: the trained model.
            dataloader: dataloader for the dataset, may use a subset of it.
            criterion: loss function.
            rank: rank of the working node.
            use_cuda: use GPU
            verbose: print more information

        Returns:
            maxeig: max eigenvalue
            mineig: min eigenvalue
            hess_vec_prod.count: number of iterations for calculating max and min eigenvalues
    """

    params = [p for p in net.parameters() if len(p.size()) > 1]
    N = sum(p.numel() for p in params)

    def hess_vec_prod(vec):
        hess_vec_prod.count += 1  # simulates a static variable
        vec = npvec_to_tensorlist(vec, params)
        start_time = time.time()
        eval_hess_vec_prod(vec, params, net, criterion, dataloader, use_cuda)
        prod_time = time.time() - start_time
        if verbose and rank == 0: print("   Iter: %d  time: %f" % (hess_vec_prod.count, prod_time))
        return gradtensor_to_npvec(net)

    hess_vec_prod.count = 0
    if verbose and rank == 0: print("Rank %d: computing max eigenvalue" % rank)

    A = LinearOperator((N, N), matvec=hess_vec_prod)
    eigvals, eigvecs = eigsh(A, k=1, tol=1e-2)
    maxeig = eigvals[0]
    if verbose and rank == 0: print('max eigenvalue = %f' % maxeig)

    # If the largest eigenvalue is positive, shift matrix so that any negative eigenvalue is now the largest
    # We assume the smallest eigenvalue is zero or less, and so this shift is more than what we need
    shift = maxeig*.51
    def shifted_hess_vec_prod(vec):
        return hess_vec_prod(vec) - shift*vec

    if verbose and rank == 0: print("Rank %d: Computing shifted eigenvalue" % rank)

    A = LinearOperator((N, N), matvec=shifted_hess_vec_prod)
    eigvals, eigvecs = eigsh(A, k=1, tol=1e-2)
    eigvals = eigvals + shift
    mineig = eigvals[0]
    if verbose and rank == 0: print('min eigenvalue = ' + str(mineig))

    if maxeig <= 0 and mineig > 0:
        maxeig, mineig = mineig, maxeig

    return maxeig, mineig, hess_vec_prod.count
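
The shift trick above, demonstrated on an explicit matrix with a known spectrum (a sketch; the diagonal matrix is a stand-in for the Hessian-vector product):

import numpy as np
from scipy.sparse.linalg import LinearOperator, eigsh

H = np.diag(np.linspace(-1.0, 3.0, 50))          # stand-in Hessian, spectrum [-1, 3]
A = LinearOperator((50, 50), matvec=lambda v: H @ v)
maxeig = eigsh(A, k=1, tol=1e-2)[0][0]           # eigsh defaults to which='LM'
shift = maxeig * .51
A_shifted = LinearOperator((50, 50), matvec=lambda v: H @ v - shift * v)
mineig = eigsh(A_shifted, k=1, tol=1e-2)[0][0] + shift
print(maxeig, mineig)                            # ~3.0 and ~-1.0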
Example #9
File: testdmrg.py  Project: amilacsw/dmrg
 def test_lanczos(self):
     '''test for directly construct and solve the ground state energy.'''
     model=self.get_model(10,1)
     hgen1=SpinHGen(spaceconfig=SpinSpaceConfig([2,1]),evolutor=NullEvolutor(hndim=2))
     hgen2=SpinHGen(spaceconfig=SpinSpaceConfig([2,1]),evolutor=Evolutor(hndim=2))
     dmrgegn=DMRGEngine(hchain=model.H_serial,hgen=hgen1,tol=0)
     H=get_H(H=model.H_serial,hgen=hgen1)
     H2,bm2=get_H_bm(H=model.H_serial,hgen=hgen2,bstr='M')
     Emin=eigsh(H,k=1)[0]
     Emin2=eigsh(bm2.lextract_block(H2,0.),k=1)[0]
     print('The Ground State Energy is %s, tolerance %s.' % (Emin, Emin - Emin2))
     assert_almost_equal(Emin,Emin2)
Example #10
File: testdmrg.py  Project: GiggleLiu/apps
 def test_lanczos(self):
     '''test for directly construct and solve the ground state energy.'''
     model=self.get_model(10,1)
     hgen1=RGHGen(spaceconfig=SpinSpaceConfig([2,1]),H=model.H_serial,evolutor_type='null')
     hgen2=RGHGen(spaceconfig=SpinSpaceConfig([2,1]),H=model.H_serial,evolutor_type='normal')
     dmrgegn=DMRGEngine(hgen=hgen1,tol=0)
     H=get_H(hgen=hgen1)
     H2,bm2=get_H_bm(hgen=hgen2,bstr='M')
     Emin=eigsh(H,k=1)[0]
     Emin2=eigsh(bm2.lextract_block(H2,(0.,0,)),k=1)[0]
     print('The Ground State Energy is %s, tolerance %s.' % (Emin, Emin - Emin2))
     assert_almost_equal(Emin,Emin2)
Example #11
def analyze_eigvects(
    non_normalized_Laplacian, num_first_eigvals_to_analyse, index_chars, permutations_limiter=10000000, fudge=10e-10
):
    # normalize the laplacian
    print "analyzing the laplacian with %s items and %s non-zero elts" % (
        non_normalized_Laplacian.shape[0] ** 2,
        len(non_normalized_Laplacian.nonzero()[0]),
    )
    t = time()
    init = time()
    normalized_Laplacian = Lapl_normalize(non_normalized_Laplacian)
    print(time() - t)
    t = time()
    # compute the eigenvalues and store them
    true_eigenvals, true_eigenvects = eigsh(normalized_Laplacian, num_first_eigvals_to_analyse)
    print(time() - t)
    t = time()
    # permute randomly the off-diagonal terms
    triag_u = lil_matrix(triu(normalized_Laplacian))
    triag_u.setdiag(0)
    tnz = triag_u.nonzero()
    print "reassigning the indexes for %s items, with %s non-zero elts" % (triag_u.shape[0] ** 2, len(tnz[0]))
    eltsuite = zip(tnz[0].tolist(), tnz[1].tolist())
    shuffle(eltsuite)
    if eltsuite > permutations_limiter:
        # pb: we want it to affect any random number with reinsertion
        eltsuite = eltsuite[:permutations_limiter]
    print(time() - t)
    t = time()
    # take a nonzero pair of indexes
    for i, j in eltsuite:
        # select randomly a pair of indexes and permute it
        k = randrange(1, triag_u.shape[0] - 1)
        l = randrange(k + 1, triag_u.shape[0])
        triag_u[i, j], triag_u[k, l] = (triag_u[k, l], triag_u[i, j])
    print(time() - t)
    t = time()
    # recompute the diagonal terms
    fullmat = triag_u + triag_u.T
    diagterms = [-item for sublist in fullmat.sum(axis=0).tolist() for item in sublist]
    fullmat.setdiag(diagterms)
    print(time() - t)
    t = time()
    # recompute the normalized matrix
    normalized_rand = Lapl_normalize(fullmat)
    # recompute the eigenvalues
    rand_eigenvals, rand_eigenvects = eigsh(normalized_rand, num_first_eigvals_to_analyse)
    print(time() - t)
    t = time()
    show_eigenvals_and_eigenvects(true_eigenvals, true_eigenvects, 20, "true laplacian", index_chars)
    show_eigenvals_and_eigenvects(rand_eigenvals, rand_eigenvects, 20, "random")
    print "final", time() - t, time() - init
Example #12
 def laplacian_pca(self, coordinates, num_vecs=None, beta=0.5):
   '''Graph-Laplacian PCA (CVPR 2013).
   Assumes coordinates are mean-centered.
   Parameter beta in [0,1], scales how much PCA/LapEig contributes.
   Returns an approximation of input coordinates, ala PCA.'''
   X = np.atleast_2d(coordinates)
   L = self.laplacian(normed=True)
   kernel = X.dot(X.T)
   kernel /= eigsh(kernel, k=1, which='LM', return_eigenvectors=False)
   L /= eigsh(L, k=1, which='LM', return_eigenvectors=False)
   W = (1-beta)*(np.identity(kernel.shape[0]) - kernel) + beta*L
   vals, vecs = eigh(W, eigvals=(0, num_vecs-1), overwrite_a=True)
   return X.T.dot(vecs).dot(vecs.T).T
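
In isolation, the normalization used above (dividing by the spectral norm so the top eigenvalue becomes 1) looks like this sketch:

import numpy as np
from scipy.sparse.linalg import eigsh

rng = np.random.default_rng(1)
X = rng.standard_normal((30, 5))
kernel = X @ X.T                    # positive semi-definite Gram matrix
kernel /= eigsh(kernel, k=1, which='LM', return_eigenvectors=False)
print(eigsh(kernel, k=1, which='LM', return_eigenvectors=False))  # ~[1.0]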
Example #13
def solve_sparse(Hs, minimal=False, verbose=False, more=False, exact=False,
                 k=None):
    '''Finds a subset of the eigenstates/eigenvalues for a sparse formatted
    Hamiltonian. Note: sparse solver will give inaccurate results if Hs is
    triangular.'''

    N = int(round(np.log2(Hs.shape[0])))    # number of effective cells
    if N > 22:
        print('Problem Hamiltonian larger than advised...')
        return [], []

    factors = {True: 10, False: 2}

    if verbose:
        print('-'*40+'\nEIGSH...\n')

    # select number of eigenstates to solve

    if isinstance(k, numbers.Number):
        K = k
    else:
        if minimal:
            K = 2
        else:
            K = 1 if N==1 else min(pow(2, N)-1, int(factors[more]*N))

    # force K < Hs size
    K = min(K, Hs.shape[0]-1)

    t = clock()

    # run eigsh

    try:
        if exact or N < 5:
            e_vals, e_vecs = eigh(Hs.todense())
        else:
            e_vals, e_vecs = eigsh(Hs, k=K, tol=TOL_EIGSH, which='SA')
    except Exception:
        try:
            e_vals, e_vecs = eigsh(Hs, k=2, tol=TOL_EIGSH, which='SA')
        except Exception:
            if verbose:
                print('Insufficient dim for sparse methods. Running eigh')
            e_vals, e_vecs = eigh(Hs.todense())

    if verbose:
        print('Time elapsed (seconds): {0:.3f}'.format(clock()-t))

    return e_vals, e_vecs
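
Regarding the triangular-storage caveat in the docstring: eigsh treats its input as the full matrix, so a Hamiltonian stored as an upper triangle must be symmetrized first. A sketch:

import scipy.sparse as sp
from scipy.sparse.linalg import eigsh

Hs_triu = sp.triu(sp.random(50, 50, density=0.1, random_state=6))  # upper triangle only
Hs_full = Hs_triu + sp.triu(Hs_triu, 1).T                          # restore symmetry
print(eigsh(Hs_full, k=2, which='SA', return_eigenvectors=False))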
Example #14
File: mbl.py  Project: 1119group/helloworld
def get_random_state(Sx,Sy,Sz,spin,N,h,mode='diag',seed=False):
    D = int(2*spin+1)**N
    j = 0
    redo = True
    while redo:
        if seed:
            H = get_heisenberg_chain_H(Sx,Sy,Sz,N,h,seed)
        else:
            H = get_heisenberg_chain_H(Sx,Sy,Sz,N,h)

        if mode == 'expm':
            E_max,eigvects_max = eigsh(H,k=1,which='LA',maxiter=1e6)
            E_min,eigvects_min = eigsh(H,k=1,which='SA',maxiter=1e6)
            E = np.append(E_min,E_max)
        if mode == 'diag':
            E,bi.eigvects = eigh(H.toarray())

        # Create initial state.
        counter = 0
        while True:
            counter += 1
            # Create random psi with magnetization of 0. Here we first form 
            #  a random binary number which has an equal number of 1's and 
            #  0's.
            index = np.zeros(N)
            for k in range(N//2):
                index[k] = 1
            index = np.random.permutation(index)
            # Then we convert the binary number into decimal and put a 1
            #  at the spot indicated by the binary into a zero vector. That
            #  represents a state with an equal number of up and down spins
            #  -- zero magnetization.
            s = 0
            for k in range(N):
                s += int(index[k] * 2**k)
            psi_0 = dok_matrix((D,1),dtype=complex)
            psi_0[s,0] = 1
            # Make sure psi's energy density is very close to 0.5.
            exp_val = psi_0.conjtransp().dot(H.dot(psi_0))
            e = (exp_val[0,0] - E[0]) / (E[-1] - E[0])
            if abs(e-0.5) < 0.001:
                redo = False
                j += 1
                break
            # Regenerate the Hamiltonian after failing to generate any state
            #  for a number of times.
            elif counter > D:
                j += 1
                break
    return H, E, psi_0
Example #15
    def sparseEigs(self,S):
        """
        compute eigenspectrum in parts for sparse SPD S of size nxn.  sparse symmetric eigen problem should be doable in one quick shot, but not currently possible in scipy.sparse.linalg    
        use krylov based eigensolver here to get full spectrum in two phases (built-in scipy funcs won't return full eigenspectrum)

        this routine is only needed for nonuniform time spacing case
        """ 
        k1 = int(np.ceil(self.__T/2.))
        vals1 = sla.eigsh(S, k=k1,return_eigenvectors=False,which='LM') 
        k2 = int(np.floor(self.__T/2.))
        vals2 = sla.eigsh(S, k=k2,return_eigenvectors=False,which='SM')
        vals=np.concatenate((vals1,vals2))

        return vals
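
A self-contained check of the two-phase idea on a small SPD matrix (sketch; T below plays the role of self.__T above):

import numpy as np
import scipy.sparse.linalg as sla

rng = np.random.default_rng(2)
B = rng.standard_normal((12, 12))
S = B @ B.T + 12 * np.eye(12)   # SPD test matrix
T = S.shape[0]
vals1 = sla.eigsh(S, k=int(np.ceil(T / 2.)), return_eigenvectors=False, which='LM')
vals2 = sla.eigsh(S, k=int(np.floor(T / 2.)), return_eigenvectors=False, which='SM')
vals = np.sort(np.concatenate((vals1, vals2)))
print(np.allclose(vals, np.linalg.eigvalsh(S)))  # True: the two halves give the full spectrum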
Example #16
def energy_density(psi, H):
    """
    This function calculates the energy density (<ψ|H|ψ> - E_0)/(E_max - E_0).

    Args: "psi" is the state which energy density will be calculated.
          "H" is the Hamiltonian in question.
    Returns: 1. Energy density. A float.
             2. Expectation value of |ψ> and H. A float.
    """
    E_max = eigsh(H, k=1, which='LA', maxiter=1e6, return_eigenvectors=False)
    E_min = eigsh(H, k=1, which='SA', maxiter=1e6, return_eigenvectors=False)
    E = np.append(E_min, E_max)
    ev = qm.exp_value(H, psi)
    e = (ev - E[0]) / (E[-1] - E[0])
    return e, ev
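
A worked check (sketch; the project helper qm.exp_value is replaced with an inline expectation value): the ground state must give an energy density of ~0.

import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import eigsh

H = sp.random(64, 64, density=0.1, random_state=3)
H = (H + H.T) / 2                                   # symmetrize
E_min, v_min = eigsh(H, k=1, which='SA')
E_max = eigsh(H, k=1, which='LA', return_eigenvectors=False)
psi = v_min[:, 0]
ev = psi @ (H @ psi)                                # <psi|H|psi>
print((ev - E_min[0]) / (E_max[0] - E_min[0]))      # ~0.0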
Example #17
File: lattice2D.py  Project: orswan/lattice
def eigs2(q,bs,amps,nbands,ys=[0,0,0],n=None,returnM=False,wind=False,DP=[0.,0.]):
	'''Returns nbands number of eigenvectors/values for quasimomentum q.
		bs are reciprocal lattice basis vectors (there should be 2), 
		amps are amplitudes (there should be three), and n (if supplied)
		is the wavevector cutoff (so eigenvectors have length 2n+1).  
		If not supplied, n is taken to be nbands.
		q must be a scalar, not an iterable.
		If returnM is True, then the lattice Hamiltonian matrix is also returned. 
		If wind is True, then the eigenvectors are wound into square matrices.
			Otherwise, they are (unwound) column vectors. 
		'''
	if n is None:
		n = nbands
	M = LHam(q,bs,amps,n,ys,DP)			# Get the Hamiltonian matrix
	# Amin is the bottom of the lattice potential, and a lower bound on the eigenenergies.  This is needed for eigsh
	Amin = -abs(amps[0])-abs(amps[1])-abs(amps[2])
	eigvals,eigvecs = eigsh(M,nbands,sigma=Amin)
	s = argsort(eigvals)
	eigvals = (eigvals[s])[:nbands]
	eigvecs = (eigvecs[:,s])[:,:nbands]
	
	if wind:
		eigmats = zeros((nbands,2*n+1,2*n+1),dtype=complex)
		for i in range(nbands):
			eigmats[i] = reshape(eigvecs[:,i],(2*n+1,2*n+1),'F')
		eigvecs = eigmats
	if returnM:
		return eigvals,eigvecs,M
	else:
		return eigvals,eigvecs
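
The sigma=Amin trick generalizes: for any symmetric matrix whose spectrum is known to lie above some bound, shift-inverting at that bound returns the lowest eigenvalues first. A minimal sketch:

import numpy as np
from scipy.sparse.linalg import eigsh

M = np.diag(np.linspace(0.0, 10.0, 40)) + 0.1 * np.eye(40, k=1) + 0.1 * np.eye(40, k=-1)
Amin = -1.0                       # any value strictly below the spectrum works
vals, vecs = eigsh(M, 5, sigma=Amin)
print(np.sort(vals))              # the 5 lowest eigenvalues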
Example #18
def mvn_ellipsoid(mu, sigma, alpha):
    """
    Calculates the parameters for an ellipsoid assuming
    a multivariate normal distribution

    Parameters
    ----------
    mu : np.array
        Mean vector
    sigma : np.array
        Covariance matrix
    alpha : float
        significance value

    Returns
    -------
    eigvals : np.array
        Eigenvalues of covariance matrix decomposition
    eigvecs : np.array
        Eigenvectors of covariance matrix decomposition
    half_widths : np.array
        Length of ellipsoid along each eigenvector
    """
    D = len(mu)
    eigvals, eigvecs = eigsh(sigma, k=D - 1)  # eigsh can return at most D-1 pairs
    X2 = chi2.interval(alpha, df=D)[1]  # upper bound of the chi-square interval
    half_widths = np.sqrt(eigvals * X2)
    return eigvals, eigvecs, half_widths
Example #19
File: utils.py  Project: gcross/Carcassonne
def computeCompressor(old_dimension,new_dimension,multiplier,dtype,normalize=False): # {{{
    if new_dimension < 0:
        raise ValueError("New dimension ({}) must be non-negative.".format(new_dimension))
    elif new_dimension > old_dimension:
        raise ValueError("New dimension ({}) must be less than or equal to the old dimension ({}).".format(new_dimension,old_dimension))
    elif new_dimension == 0:
        return (zeros((new_dimension,old_dimension),dtype=dtype),)*2
    elif new_dimension >= old_dimension // 2:
        matrix = multiplier.formMatrix()
        if tuple(matrix.shape) != (old_dimension,)*2:
            raise ValueError("Multiplier matrix has shape {} but the old dimension is {}.".format(matrix.shape,old_dimension))
        evals, evecs = eigh(matrix)
        evals = evals[-new_dimension:]
        evecs = evecs[:,-new_dimension:]
    else:
        operator = \
            LinearOperator(
                shape=(old_dimension,)*2,
                matvec=multiplier,
                dtype=dtype
            )
        evals, evecs = eigsh(operator,k=new_dimension)
    evecs = evecs.transpose()
    while new_dimension > 0 and abs(evals[new_dimension-1]) < 1e-15:
        new_dimension -= 1
    if new_dimension == 0:
        raise ValueError("Input is filled with near-zero elements.")
    if normalize:
        evals = sqrt(evals).reshape(new_dimension,1)
        compressor = evecs * evals
        inverse_compressor_conj = evecs / evals
    else:
        compressor = evecs
        inverse_compressor_conj = evecs
    return compressor, inverse_compressor_conj
Example #20
File: FullCI.py  Project: berquist/PyQuante
    def iterate(self):
        eva, eve = spspalin.eigsh(self.LinOp,k=self.k, sigma = self.sigma_eigs, which = self.which, v0 = self.v0, maxiter= self.maxiter, tol=self.tol, return_eigenvectors = self.return_eigenvectors)
        print "diagonalization sucessful"

        self.eva, self.eve = self.sort_and_add_enuke(eva,eve)
        
        return self.eva, self.eve
Example #21
File: spectrum.py  Project: joselado/pygra
def selected_bands2d(h,output_file="BANDS2D_",nindex=[-1,1],
               nk=50,nsuper=1,reciprocal=True,
               operator=None,k0=[0.,0.]):
  """ Calculate a selected bands in a 2d Hamiltonian"""
  if h.dimensionality!=2: raise ValueError("Hamiltonian must be two dimensional")
  hk_gen = h.get_hk_gen() # gets the function to generate h(k)
  kxs = np.linspace(-nsuper,nsuper,nk)+k0[0]  # generate kx
  kys = np.linspace(-nsuper,nsuper,nk)+k0[1]  # generate ky
  kdos = [] # empty list
  kxout = []
  kyout = []
  if reciprocal: R = h.geometry.get_k2K() # get matrix
  else:  R = np.matrix(np.identity(3)) # get identity
  # setup a reasonable value for delta
  # setup the operator
  operator = operator2list(operator) # convert into a list
  os.system("rm -f "+output_file+"*") # delete previous files
  fo = [open(output_file+"_"+str(i)+".OUT","w") for i in nindex] # files        
  for x in kxs:
    for y in kys:
      print("Doing",x,y)
      r = np.matrix([x,y,0.]).T # real space vectors
      k = np.array((R*r).T)[0] # change of basis
      hk = hk_gen(k) # get hamiltonian
      if not h.is_sparse: evals,waves = lg.eigh(hk) # eigenvalues
      else: evals,waves = slg.eigsh(hk,k=max(nindex)*2,sigma=0.0,
             tol=arpack_tol,which="LM") # eigenvalues
      waves = waves.transpose() # transpose
      epos,wfpos = [],[] # positive
      eneg,wfneg = [],[] # negative
      for (e,w) in zip(evals,waves): # loop
        if e>0.0: # positive
          epos.append(e)
          wfpos.append(w)
        else: # negative
          eneg.append(e)
          wfneg.append(w)
      # now sort the waves
      wfpos = [yy for (xx,yy) in sorted(zip(epos,wfpos))] 
      wfneg = [yy for (xx,yy) in sorted(zip(-np.array(eneg),wfneg))] 
      epos = sorted(epos)
      eneg = -np.array(sorted(-np.array(eneg)))
#      epos = sorted(evals[evals>0]) # positive energies
#      eneg = -np.array(sorted(np.abs(evals[evals<0]))) # negative energies
      for (i,j) in zip(nindex,range(len(nindex))): # loop over desired bands
        fo[j].write(str(x)+"     "+str(y)+"   ")
        if i>0: # positive
          fo[j].write(str(epos[i-1])+"  ")
          for op in operator: # loop over operators
            c = braket_wAw(wfpos[i-1],op).real # expectation value
            fo[j].write(str(c)+"  ") # write in file
          fo[j].write("\n") # write in file
          
        if i<0: # negative
          fo[j].write(str(eneg[abs(i)-1])+"  ")
          for op in operator: # loop over operators
            c = braket_wAw(wfneg[abs(i)-1],op).real # expectation value
            fo[j].write(str(c)+"  ") # write in file
          fo[j].write("\n") # write in file
  [f.close() for f in fo] # close file
Example #22
def laplacian_eigenmaps(adjacency_matrix, k):
    """
    Performs spectral graph embedding using the graph symmetric normalized Laplacian matrix.

    Introduced in: Belkin, M., & Niyogi, P. (2003).
                   Laplacian eigenmaps for dimensionality reduction and data representation.
                   Neural computation, 15(6), 1373-1396.

    Inputs:  -   A in R^(nxn): Adjacency matrix of an network represented as a SciPy Sparse COOrdinate matrix.
             -              k: The number of eigenvectors to extract.

    Outputs: - X in R^(nxk): The latent space embedding represented as a NumPy array. We discard the first eigenvector.
    """
    # Calculate sparse graph Laplacian.
    laplacian = get_normalized_laplacian(adjacency_matrix)

    # Calculate bottom k+1 eigenvalues and eigenvectors of normalized Laplacian.
    try:
        eigenvalues, eigenvectors = spla.eigsh(laplacian,
                                               k=k,
                                               which='SM',
                                               return_eigenvectors=True)
    except spla.ArpackNoConvergence as e:
        print("ARPACK has not converged.")
        eigenvalues = e.eigenvalues
        eigenvectors = e.eigenvectors

    # Discard the eigenvector corresponding to the zero-valued eigenvalue.
    eigenvectors = eigenvectors[:, 1:]

    return eigenvectors
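
which='SM' can be very slow for large Laplacians; a common alternative (a sketch, not what the project above does) is shift-invert near zero:

import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spla

n = 100                                          # normalized Laplacian of a path graph
A = sp.diags([np.ones(n - 1), np.ones(n - 1)], [-1, 1], format='csc')
d_inv_sqrt = sp.diags(1.0 / np.sqrt(np.asarray(A.sum(axis=1)).ravel()))
L = sp.identity(n) - d_inv_sqrt @ A @ d_inv_sqrt
# sigma slightly below 0 keeps L - sigma*I nonsingular despite the zero eigenvalue.
vals, vecs = spla.eigsh(L, k=5, sigma=-1e-3, which='LM')
print(vals)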
Example #23
File: linalg_test.py  Project: dseuss/mpnum
def test_eig(nr_sites, local_dim, rank, which, var_sites, rgen, request):
    if nr_sites <= var_sites:
        pt.skip("Nothing to test")
        return  # No local optimization can be defined
    if not (_pytest_want_long(request) or
            (nr_sites, local_dim, rank, var_sites, which) in {
                (3, 2, 4, 1, 'SA'), (4, 3, 5, 1, 'LM'), (5, 2, 1, 2, 'LA'),
                (6, 2, 4, 2, 'SA'),
            }):
        pt.skip("Should only be run in long tests")
    # With startvec_rank = 2 * rank and this seed, eig() gets
    # stuck in a local minimum. With startvec_rank = 3 * rank,
    # it does not.
    mpo = factory.random_mpo(nr_sites, local_dim, rank, randstate=rgen,
                             hermitian=True, normalized=True)
    mpo.canonicalize()
    op = mpo.to_array_global().reshape((local_dim**nr_sites,) * 2)
    v0 = factory._zrandn([local_dim**nr_sites], rgen)
    eigval, eigvec = eigsh(op, k=1, which=which, v0=v0)
    eigval, eigvec = eigval[0], eigvec[:, 0]

    eig_rank = (4 - var_sites) * rank
    eigval_mp, eigvec_mp = mp.eig(
        mpo, num_sweeps=5, var_sites=1, startvec_rank=eig_rank, randstate=rgen,
        eigs=ft.partial(eigsh, k=1, which=which, tol=1e-6, maxiter=250))
    eigvec_mp = eigvec_mp.to_array().flatten()

    overlap = np.inner(eigvec.conj(), eigvec_mp)
    assert_almost_equal(eigval, eigval_mp, decimal=14)
    assert_almost_equal(1, abs(overlap), decimal=14)
Example #24
    def maxEigenValRemoved(self, removeMe):
        """Find the maximum eigen value corresponding to the sparse matrix as if
        the vertices listed in "removeMe" were not in the graph."""

        # Remap the vertices and create a new adjacency matrix
        ignore = set(removeMe)
        vmap = {}
        index = 0
        for u in range(self.n):
            if u not in ignore:
                vmap[u] = index
                index += 1

        alNew = [[] for _ in range(len(vmap))]
        mNew = 0

        for u, neighbors in enumerate(self.al):
            if u not in ignore:
                alNew[vmap[u]] = [vmap[v] for v in neighbors if v not in ignore]
                mNew += len(alNew[vmap[u]])

        rows = [0] * 2 * mNew
        cols = [0] * 2 * mNew
        data = [0] * 2 * mNew
        index = 0

        for u, neighbors in enumerate(alNew):
            for v in neighbors:
                rows[index], cols[index] = u, v
                data[index] = 1
                index += 1

        smNew = csr_matrix((data, (rows, cols)), shape = (len(vmap), len(vmap)), dtype = numpy.float64)
        evals, evecs = eigsh(smNew, k = 1)
        return evals[0]
Example #25
File: embed.py  Project: all-umass/graphs
 def laplacian_pca(self, coordinates, num_dims=None, beta=0.5):
   '''Graph-Laplacian PCA (CVPR 2013).
   coordinates : (n,d) array-like, assumed to be mean-centered.
   beta : float in [0,1], scales how much PCA/LapEig contributes.
   Returns an approximation of input coordinates, ala PCA.'''
   X = np.atleast_2d(coordinates)
   L = self.laplacian(normed=True)
   kernel = X.dot(X.T)
   kernel /= eigsh(kernel, k=1, which='LM', return_eigenvectors=False)
   L /= eigsh(L, k=1, which='LM', return_eigenvectors=False)
   W = (1-beta)*(np.identity(kernel.shape[0]) - kernel) + beta*L
   if num_dims is None:
     vals, vecs = np.linalg.eigh(W)
   else:
     vals, vecs = eigh(W, eigvals=(0, num_dims-1), overwrite_a=True)
   return X.T.dot(vecs).dot(vecs.T).T
Example #26
File: views.py  Project: GiggleLiu/apps
def get_mps(K,Jp,J1=1.,J2=0.2412,nsite=30,which='finite',data_file=None):
    '''
    Run finite-DMRG/Lanczos for the two-impurity Kondo model to get the ground state energy.

    Parameters:
        :J1/J2/K/Jp: float, the parameters defining the model.
        :nsite: integer, the number of sites, must be even.
        :which: 'finite'/'lanczos', select the method to solve the chain, 'finite dmrg' or 'lanczos'.
    '''
    assert(which=='finite' or which=='lanczos')
    assert(nsite%2==0)

    model=TIKM(J1=J1,J2=J2,K=K,Jp=Jp,nsite=nsite)
    hgen=SpinHGen(spaceconfig=model.spaceconfig,evolutor=MaskedEvolutor(hndim=model.spaceconfig.hndim) if which=='finite' else NullEvolutor(hndim=2))
    dmrgegn=DMRGEngine(hchain=model.H_serial,hgen=hgen,tol=0)
    if which=='lanczos':
        H=get_H(H=model.H_serial,hgen=hgen)
        EG,EV=eigsh(H,k=1)
        mps=state2MPS(EV[:,0])
    else:
        EG=dmrgegn.run_finite(endpoint=(2,'<-',0),maxN=[10,20],tol=0)[-1]
        mps=dmrgegn.get_mps(direction='<-')  #right normalized initial state
    if data_file is not None:
        mps.save(data_file)
    return mps
Example #27
File: hamiltonian.py  Project: zerothi/sisl
    def eigsh(self, k=(0,0,0), n=10,
            atoms=None, eigvals_only=True,
            *args,
            **kwargs):
        """ Returns the eigenvalues of the tight-binding model

        Setup the Hamiltonian and overlap matrix with respect to
        the given k-point, then reduce the space to the specified atoms
        and calculate the eigenvalues.

        All subsequent arguments get passed directly to :code:`scipy.sparse.linalg.eigsh`
        """
        
        # We always request the smallest eigenvalues... 
        kwargs.update({'which':kwargs.get('which', 'SM')})
        
        H = self.Hk(k=k)
        if not self.orthogonal:
            raise ValueError("The sparsity pattern is non-orthogonal, you cannot use the Arnoldi procedure with scipy")
        
        # Reduce sparsity pattern
        if atoms is not None:
            orbs = self.a2o(atoms)
            # Reduce space
            H = H[orbs, orbs]

        return ssli.eigsh(H, k=n,
                          *args,
                          return_eigenvectors=not eigvals_only,
                          **kwargs)
Example #28
File: dmaps.py  Project: glinka/dmaps
def _compute_embedding(W, k, symmetric=True):
    """Calculates a partial ('k'-dimensional) eigendecomposition of W by first transforming into a self-adjoint matrix and then using the Lanczos algorithm.

    Args:
        W (array): symmetric, shape (npts, npts) array in which W[i,j] is the DMAPS kernel evaluation for points i and j
        k (int): the number of eigenvectors and eigenvalues to compute
        symmetric (bool): indicates whether the Markov matrix is symmetric or not. During standard usage with the default kernel, this will be true, allowing for accelerated numerics. **However, if using custom_kernel(), this property may not hold.**

    Returns:
        eigvals (array): shape (k) vector with first 'k' eigenvectors of DMAPS embedding sorted from largest to smallest
        eigvects (array): shape ("number of data points", k) array with the k-dimensional DMAPS-embedding eigenvectors. eigvects[:,i] corresponds to the eigenvector of the :math:`i^{th}`-largest eigenvalue, eigval[i].
    """
    m = W.shape[0]
    # diagonal matrix D, inverse, sqrt
    D_half_inv = np.identity(m)/np.sqrt(np.sum(W,1))
    # transform into self-adjoint matrix and find partial eigendecomp of this transformed matrix
    eigvals, eigvects = None, None
    if symmetric:
        eigvals, eigvects = spla.eigsh(np.dot(np.dot(D_half_inv, W), D_half_inv), k=k) # eigsh (eigs hermitian)
    else:
        eigvals, eigvects = spla.eigs(np.dot(np.dot(D_half_inv, W), D_half_inv), k=k) # eigs (plain eigs)
    # transform eigenvectors to match W
    eigvects = np.dot(D_half_inv, eigvects)
    # sort eigvals and corresponding eigvects from largest to smallest magnitude  (reverse order)
    sorted_indices = np.argsort(np.abs(eigvals))[::-1]

    eigvals = eigvals[sorted_indices]
    # also scale eigenvectors to norm one
    eigvects = eigvects[:, sorted_indices]/np.linalg.norm(eigvects[:, sorted_indices], axis=0)
    return eigvals, eigvects
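
Why the symmetrization works: D^{-1/2} W D^{-1/2} is similar to the Markov matrix D^{-1} W, so they share eigenvalues while the former is self-adjoint. A tiny verification sketch:

import numpy as np
import scipy.sparse.linalg as spla

rng = np.random.default_rng(4)
W = rng.random((40, 40))
W = (W + W.T) / 2                               # symmetric kernel matrix
D_half_inv = np.diag(1.0 / np.sqrt(W.sum(1)))
S = D_half_inv @ W @ D_half_inv                 # self-adjoint, similar to D^-1 W
vals_sym = np.sort(spla.eigsh(S, k=3)[0])
vals_markov = np.sort(spla.eigs(np.diag(1.0 / W.sum(1)) @ W, k=3)[0].real)
print(np.allclose(vals_sym, vals_markov))       # True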
Example #29
File: pca.py  Project: glinka/pca
def pca(data, k, corr=False):
    """Calculates the top 'k' principal components of the data array and their corresponding variances. This is accomplished via the explicit calculation of the covariance or correlation matrix, depending on which version is desired, and a subsequent partial eigendecomposition.

    Args:
        data (array): a shape ("number of data points", "dimension of data") or (n, m) array containing the data to be operated on
        k (int): the number of principal components to be found
        corr (bool): determines whether the correlation matrix will be used instead of the more traditional covariance matrix. Useful when dealing with disparate scales in data measurements, as the correlation between two random variables, given by :math:`corr(x,y) = \\frac{cov(x,y)}{\sigma_x \sigma_y}`, includes a natural rescaling of units

    Returns:
        pcs (array): shape (m, k) array in which column 'j' corresponds to the 'j'th principal component and corresponding variance. Thus the projection onto the these coordinates would be given by the (k, n) array np.dot(pcs.T, dat.T).
        variances (array): shape(k,) array containing the 'k' variances corresponding to the 'k' principal components in 'pcs', sorted from largest to smallest

    >>> from pca_test import test_pca
    >>> test_pca()
    """
    n = data.shape[0]
    m = data.shape[1]
    # center the data/subtract means
    data = data - np.average(data, 0)
    # calc experimental covariance matrix
    C = np.dot(data.T, data)/(n-1)
    if corr:
        # use correlation matrix
        # extract experimental std. devs.
        variances = np.sqrt(np.diag(C))
        variances.shape = (m, 1)
        # rescale to obtain correlation matrix
        C = C/np.dot(variances, variances.T)
    # calculate eigendecomp with arnoldi/lanczos if k < m, else use eigh for full decomposition
    if k < m:
        variances, pcs  = spla.eigsh(C, k=k)
    else:
        variances, pcs = np.linalg.eigh(C)
    index_order = np.argsort(variances)[::-1]
    return pcs[:,index_order[:k]], variances[index_order[:k]]
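
A quick consistency check for the function above (sketch; assumes the pca function as defined here, with its numpy/scipy imports, is in scope): the partial decomposition must match the top of a full one.

import numpy as np

rng = np.random.default_rng(5)
data = rng.standard_normal((200, 10)) * np.arange(1, 11)   # anisotropic cloud
pcs, variances = pca(data, k=3)
C = np.cov(data, rowvar=False)
top3 = np.sort(np.linalg.eigvalsh(C))[::-1][:3]
print(np.allclose(variances, top3))                        # True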
Example #30
    def run(self, distance_matrix, num_dimensions_out=10):
        super(Eigsh, self).run(distance_matrix, num_dimensions_out)

        eigenvalues, eigenvectors = eigsh(distance_matrix,
                                          k=num_dimensions_out)

        return eigenvectors, eigenvalues
Example #31
    def dist(
        self,
        G1,
        G2,
        normed=True,
        kernel='normal',
        hwhm=0.011775,
        measure='jensen-shannon',
        k=None,
        which='LM',
    ):
        """Graph distances using different measure between the Laplacian
        spectra of the two graphs

        The spectra of both Laplacian matrices (normalized or not) is
        computed. Then, the discrete spectra are convolved with a kernel to
        produce continuous ones. Finally, these distribution are compared
        using a metric.

        The results dictionary also stores a 2-tuple of the underlying
        adjacency matrices in the key `'adjacency_matrices'`, the Laplacian
        matrices in `'laplacian_matrices'`, the eigenvalues of the
        Laplacians in `'eigenvalues'`. If the networks being compared are
        directed, the augmented adjacency matrices are calculated and
        stored in `'augmented_adjacency_matrices'`.

        Parameters
        ----------

        G1, G2 (nx.Graph)
            two networkx graphs to be compared.

        normed (bool)
            If True, uses the normalized laplacian matrix, otherwise the
            raw laplacian matrix is used.

        kernel (str)
            kernel to obtain a continuous spectrum. Choices available are
            'normal', 'lorentzian', or None. If None is chosen, the
            discrete spectrum is used instead, and the measure is simply
            the euclidean distance between the vector of eigenvalues for
            each graph.

        hwhm (float)
            half-width at half-maximum for the kernel. The default value is
            chosen such that the standard deviation for the normal
            distribution is :math:`0.01`, as in reference [1]_. This option
            is relevant only if kernel is not None.

        measure (str)
            metric between the two continuous spectra. Choices available
            are 'jensen-shannon' or 'euclidean'. This option is relevant
            only if kernel is not None.

        k (int)
            number of eigenvalues kept for the (discrete) spectrum, also
            used to create the continuous spectrum. If None, all the
            eigenvalues are used. k must be smaller (strictly) than the
            size of both graphs.

        which (str)
            if k is not None, this option specifies the eigenvalues that
            are kept. See the choices offered by
            `scipy.sparse.linalg.eigsh`.  The largest eigenvalues in
            magnitude are kept by default.

        Returns
        -------

        dist (float)
            the distance between G1 and G2.

        Notes
        -----
        The methods are usually applied to undirected (unweighted)
        networks. We however relax this assumption using the same method
        proposed for the Hamming-Ipsen-Mikhailov. See [2]_.

        References
        ----------

        .. [1] https://www.sciencedirect.com/science/article/pii/S0303264711001869.

        .. [2] https://ieeexplore.ieee.org/abstract/document/7344816.

        """
        adj1 = nx.to_numpy_array(G1)
        adj2 = nx.to_numpy_array(G2)
        self.results['adjacency_matrices'] = adj1, adj2
        directed = nx.is_directed(G1) or nx.is_directed(G2)

        if directed:
            # create augmented adjacency matrices
            N1 = len(G1)
            N2 = len(G2)
            null_mat1 = np.zeros((N1, N1))
            null_mat2 = np.zeros((N2, N2))
            adj1 = np.block([[null_mat1, adj1.T], [adj1, null_mat1]])
            adj2 = np.block([[null_mat2, adj2.T], [adj2, null_mat2]])
            self.results['augmented_adjacency_matrices'] = adj1, adj2

        # get the laplacian matrices
        lap1 = laplacian(adj1, normed=normed)
        lap2 = laplacian(adj2, normed=normed)
        self.results['laplacian_matrices'] = lap1, lap2

        # get the eigenvalues of the laplacian matrices
        if k is None:
            ev1 = np.abs(eigvalsh(lap1))
            ev2 = np.abs(eigvalsh(lap2))
        else:
            # transform the dense laplacian matrices to sparse representations
            lap1 = csgraph_from_dense(lap1)
            lap2 = csgraph_from_dense(lap2)
            ev1 = np.abs(eigsh(lap1, k=k, which=which)[0])
            ev2 = np.abs(eigsh(lap2, k=k, which=which)[0])
        self.results['eigenvalues'] = ev1, ev2

        if kernel is not None:
            # define the proper support
            a = 0
            if normed:
                b = 2
            else:
                b = np.inf

            # create continuous spectra
            density1 = _create_continuous_spectrum(ev1, kernel, hwhm, a, b)
            density2 = _create_continuous_spectrum(ev2, kernel, hwhm, a, b)

            # compare the spectra
            dist = _spectra_comparison(density1, density2, a, b, measure)
            self.results['dist'] = dist
        else:
            # euclidean distance between the two discrete spectra
            dist = np.linalg.norm(ev1 - ev2)
            self.results['dist'] = dist

        return dist
Example #32
def main():
    #Create the Matrix to be tri-diagonalized
    n = 75  #Size of input matrix (nxn)
    A = SymmMat(n, density=1)  #Input matrix. (Hermitian,Sparse)
    #print(A)

    #Check that the matrix is symmetric. The difference should have no non-zero elements
    assert (A - A.T).nnz == 0

    #Hamiltonian of tV Model for L = 4, N = 2, ell=2
    #A = -1.0*np.array(((0,1,0,1,0,0),
    #                   (1,0,1,0,1,1),
    #                   (0,1,0,1,0,0),
    #                   (1,0,1,0,1,1),
    #                   (0,1,0,1,0,0),
    #                   (0,1,0,1,0,0)))

    #Test Sparse Matrix
    #A = 1.0*np.diag((1,2,3,4,5,6))
    #A[-1,0] = 5
    #A[0,-1] = 5

    #A = -1.0*np.array(((0,0,1,0),
    #                   (0,0,1,0),
    #                   (1,1,0,1),
    #                   (0,0,1,0)))

    #Change print format to decimal instead of scientific notation
    np.set_printoptions(formatter={'float_kind': '{:f}'.format})

    #Transform the matrix A to tridiagonal form via Lanczos
    T = LanczosTri(A)

    #Find Eigenvalues for Real, Symmetric, Tridiagonal Matrix via QR Iteration
    t2 = TicToc()
    t2.tic()
    lam = NSI(T)
    t2.toc()
    print("Eigs(T) (NSI): ", np.sort(lam)[:-1][0], "\n")

    t4 = TicToc()
    t4.tic()
    lam_IPI = IPI(T)
    t4.toc()
    print("Eigs(T) (IPI): ", lam_IPI, "\n")

    #Get eigenpairs of untransformed hermitian matrix A and time the process using blackbox function
    t1 = TicToc()
    t1.tic()
    e_gs_T, gs_T = eigsh(T, k=n - 1, which='SA', maxiter=1000)
    #e_gs_A = NSI(A,maxiter=1000)
    t1.toc()
    print("Eigs(T) (np.eigsh): ", e_gs_T[0], "\n")
    #print("Eigs(A): ",np.sort(e_gs_A[:-1]))

    t3 = TicToc()
    t3.tic()
    e_gs_A, gs_A = eigsh(A, k=n - 1, which='SA', maxiter=1000)
    #e_gs_A = NSI(A,maxiter=1000)
    t3.toc()
    print("Eigs(A) (np.eigsh): ", e_gs_A[0], "\n")
Example #33
def single_dmrg_step(sys, env, m):
    """Performs a single DMRG step using `sys` as the system and `env` as the
    environment, keeping a maximum of `m` states in the new basis.
    """
    assert is_valid_block(sys)
    assert is_valid_block(env)

    # Enlarge each block by a single site.
    sys_enl = enlarge_block(sys)
    if sys is env:  # no need to recalculate a second time
        env_enl = sys_enl
    else:
        env_enl = enlarge_block(env)

    assert is_valid_enlarged_block(sys_enl)
    assert is_valid_enlarged_block(env_enl)

    # Construct the full superblock Hamiltonian.
    m_sys_enl = sys_enl.basis_size
    m_env_enl = env_enl.basis_size
    sys_enl_op = sys_enl.operator_dict
    env_enl_op = env_enl.operator_dict
    superblock_hamiltonian = kron(sys_enl_op["H"], identity(m_env_enl)) + kron(identity(m_sys_enl), env_enl_op["H"]) + \
                             H2(sys_enl_op["conn_Sz"], sys_enl_op["conn_Sp"], env_enl_op["conn_Sz"], env_enl_op["conn_Sp"])

    # Call ARPACK to find the superblock ground state.  ("SA" means find the
    # smallest algebraic eigenvalue, i.e. the lowest energy.)
    (energy,), psi0 = eigsh(superblock_hamiltonian, k=1, which="SA")

    # Construct the reduced density matrix of the system by tracing out the
    # environment
    #
    # We want to make the (sys, env) indices correspond to (row, column) of a
    # matrix, respectively.  Since the environment (column) index updates most
    # quickly in our Kronecker product structure, psi0 is thus row-major ("C
    # style").
    psi0 = psi0.reshape([sys_enl.basis_size, -1], order="C")
    rho = np.dot(psi0, psi0.conjugate().transpose())

    # Diagonalize the reduced density matrix and sort the eigenvectors by
    # eigenvalue.
    evals, evecs = np.linalg.eigh(rho)
    possible_eigenstates = []
    for eval, evec in zip(evals, evecs.transpose()):
        possible_eigenstates.append((eval, evec))
    possible_eigenstates.sort(reverse=True, key=lambda x: x[0])  # largest eigenvalue first

    # Build the transformation matrix from the `m` overall most significant
    # eigenvectors.
    my_m = min(len(possible_eigenstates), m)
    transformation_matrix = np.zeros((sys_enl.basis_size, my_m), dtype='d', order='F')
    for i, (eval, evec) in enumerate(possible_eigenstates[:my_m]):
        transformation_matrix[:, i] = evec

    truncation_error = 1 - sum([x[0] for x in possible_eigenstates[:my_m]])
    print("truncation error:", truncation_error)

    # Rotate and truncate each operator.
    new_operator_dict = {}
    for name, op in sys_enl.operator_dict.items():
        new_operator_dict[name] = rotate_and_truncate(op, transformation_matrix)

    newblock = Block(length=sys_enl.length,
                     basis_size=my_m,
                     operator_dict=new_operator_dict)

    return newblock, energy
Example #34
def null_space(M, k, k_skip=1, eigen_solver='arpack',
               random_state=None, solver_kwds=None):
    """
    Find the null space of a matrix M: eigenvectors associated with 0 eigenvalues

    Parameters
    ----------
    M : {array, matrix, sparse matrix, LinearOperator}
        Input covariance matrix: should be symmetric positive semi-definite
    k : integer
        Number of eigenvalues/vectors to return
    k_skip : integer, optional
        Number of low eigenvalues to skip.
    eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
        'auto' :
            algorithm will attempt to choose the best method for input data
        'dense' :
            use standard dense matrix operations for the eigenvalue decomposition.
            For this method, M must be an array or matrix type.  This method should be avoided for large problems.
        'arpack' :
            use arnoldi iteration in shift-invert mode. For this method,
            M may be a dense matrix, sparse matrix, or general linear operator.
            Warning: ARPACK can be unstable for some problems.  It is best to
            try several random seeds in order to check results.
        'lobpcg' :
            Locally Optimal Block Preconditioned Conjugate Gradient Method.
            A preconditioned eigensolver for large symmetric positive definite
            (SPD) generalized eigenproblems.
        'amg' :
            AMG requires pyamg to be installed. It can be faster on very large,
            sparse problems, but may also lead to instabilities.
    random_state: numpy.RandomState or int, optional
        The generator or seed used to determine the starting vector for arpack
        iterations.  Defaults to numpy.random.
    solver_kwds : any additional keyword arguments to pass to the selected eigen_solver

    Returns
    -------
    null_space : estimated k vectors of the null space
    error : estimated error (sum of eigenvalues)

    Notes
    -----
    dense solver key words: see
        http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.linalg.eigh.html
        for symmetric problems and
        http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.linalg.eig.html#scipy.linalg.eig
        for non symmetric problems.
    arpack solver key words: see
        http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.sparse.linalg.eigsh.html
        for symmetric problems and http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.sparse.linalg.eigs.html#scipy.sparse.linalg.eigs
        for non symmetric problems.
    lobpcg solver keywords: see
        http://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.lobpcg.html
    amg solver keywords: see
        http://pyamg.googlecode.com/svn/branches/1.0.x/Docs/html/pyamg.aggregation.html#module-pyamg.aggregation.aggregation
        (Note amg solver uses lobpcg and also accepts lobpcg keywords)
    """
    eigen_solver, solver_kwds = check_eigen_solver(eigen_solver, solver_kwds,
                                                   size=M.shape[0],
                                                   nvec=k + k_skip)
    random_state = check_random_state(random_state)

    if eigen_solver == 'arpack':
        # This matches the internal initial state used by ARPACK
        v0 = random_state.uniform(-1, 1, M.shape[0])
        try:
            eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
                                                v0=v0,**(solver_kwds or {}))
        except RuntimeError as msg:
            raise ValueError("Error in determining null-space with ARPACK. "
                             "Error message: '%s'. "
                             "Note that method='arpack' can fail when the "
                             "weight matrix is singular or otherwise "
                             "ill-behaved.  method='dense' is recommended. "
                             "See online documentation for more information."
                             % msg)

        return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
    elif eigen_solver == 'dense':
        if hasattr(M, 'toarray'):
            M = M.toarray()
        eigen_values, eigen_vectors = eigh(M, eigvals=(0, k+k_skip),overwrite_a=True,
                                           **(solver_kwds or {}))
        index = np.argsort(np.abs(eigen_values))
        eigen_vectors = eigen_vectors[:, index]
        eigen_values = eigen_values[index]
        return eigen_vectors[:, k_skip:k+1], np.sum(eigen_values[k_skip:k+1])
        # eigen_values, eigen_vectors = eigh(
            # M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
        # index = np.argsort(np.abs(eigen_values))
        # return eigen_vectors[:, index], np.sum(eigen_values)
    elif (eigen_solver == 'amg' or eigen_solver == 'lobpcg'):
        # M should be positive semi-definite. Add 1 to make it pos. def.
        try:
            M = sparse.identity(M.shape[0]) + M
            n_components = min(k + k_skip + 10, M.shape[0])
            eigen_values, eigen_vectors = eigen_decomposition(M, n_components,
                                                              eigen_solver = eigen_solver,
                                                              drop_first = False,
                                                              largest = False,
                                                              random_state=random_state,
                                                              solver_kwds=solver_kwds)
            eigen_values = eigen_values -1
            index = np.argsort(np.abs(eigen_values))
            eigen_values = eigen_values[index]
            eigen_vectors = eigen_vectors[:, index]
            return eigen_vectors[:, k_skip:k+1], np.sum(eigen_values[k_skip:k+1])
        except np.linalg.LinAlgError: # try again with bigger increase
            warnings.warn("LOBPCG failed the first time. Increasing Pos Def adjustment.")
            M = 2.0*sparse.identity(M.shape[0]) + M
            n_components = min(k + k_skip + 10, M.shape[0])
            eigen_values, eigen_vectors = eigen_decomposition(M, n_components,
                                                              eigen_solver = eigen_solver,
                                                              drop_first = False,
                                                              largest = False,
                                                              random_state=random_state,
                                                              solver_kwds=solver_kwds)
            eigen_values = eigen_values - 2
            index = np.argsort(np.abs(eigen_values))
            eigen_values = eigen_values[index]
            eigen_vectors = eigen_vectors[:, index]
            return eigen_vectors[:, k_skip:k+1], np.sum(eigen_values[k_skip:k+1])
    else:
        raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
Example #35
for i in range(L):
    operator_dict["z"+str(i)] = [["z", [[1.0, i]]]]

no_checks={"check_herm":False,"check_pcon":False,"check_symm":False}
H_dict = quantum_operator(operator_dict, basis = sp_basis, **no_checks)
params_dict = dict(H0=1.0)
if dis_flag == 1:
    for j in range(L):
        params_dict["z"+str(j)] = W_i*np.cos(2*math.pi*0.721*j+2*math.pi*phi) # create quasiperiodic fields list
else:
    for j in range(L):
        params_dict["z"+str(j)] = W_i*np.random.uniform(0,1) # create random fields list

HAM = H_dict.tohamiltonian(params_dict) # build initial Hamiltonian through H_dict
HAM = np.real(HAM.tocsc())
psi_0 = linalg.eigsh(HAM, k=1, which='SA')[1].flatten() # initialize system in the ground state of the pre-quench Hamiltonian

params_dict_quench = dict(H0=1.0)
if dis_flag ==1:
    for j in range(L):
        params_dict_quench["z"+str(j)] = W*np.cos(2*math.pi*0.721*j+2*math.pi*phi) # create quench quasiperiodic fields list
else:
    for j in range(L):
        params_dict_quench["z"+str(j)] = W*np.random.uniform(0,1) # create quench random fields list

HAM_quench = H_dict.tohamiltonian(params_dict_quench) # build post-quench Hamiltonian
psi_t = HAM_quench.evolve(psi_0, 0.0, t_tab) # evolve with post-quench Hamiltonian
SxSx_t = hf.SxSxCorr(L, psi_t, sp_basis)

if dis_flag == 1:
    directory = '../DATA/GSQPCorr/L'+str(L)+'/D'+str(W)+'/'

Example #36
from scipy.sparse import diags, eye, kron, vstack
from scipy.sparse.linalg import eigsh
import matplotlib.pyplot as plt


def grid_incidence(h, w):
    x = eye(w - 1, w, 1) - eye(w - 1, w)  # path of length w
    y = eye(h - 1, h, 1) - eye(h - 1, h)  # path of length h
    B = vstack([kron(eye(h), x), kron(y, eye(w))])  # Kronecker sum
    return B


w = 64  # image of size w x w

M = image_of_a_disk(0.7, w)

B = grid_incidence(w, w)
BM = B @ diags(M.flatten())  # Dirichlet conditions: impose 0 outside the disk
L = -BM.T @ BM  # Laplacian of the domain M

D, U = eigsh(-L, k=100, which="SM")

Df = D[D > 1e-10]  # keep the nonzero eigenvalues
Uf = U.T[D > 1e-10]  # keep the associated eigenvectors

# display

plt.set_cmap("bwr")  # blue-white-red palette

num_shown = 25

vv = [Uf[i] for i in range(0, num_shown)]

fig, axes = plt.subplots(nrows=5,
                         ncols=5)  # adjust if num_shown is changed
Example #37
def spectral_embedding(adjacency,
                       *,
                       n_components=8,
                       eigen_solver=None,
                       random_state=None,
                       eigen_tol=0.0,
                       norm_laplacian=True,
                       drop_first=True):
    """Project the sample on the first eigenvectors of the graph Laplacian.

    The adjacency matrix is used to compute a normalized graph Laplacian
    whose spectrum (especially the eigenvectors associated to the
    smallest eigenvalues) has an interpretation in terms of minimal
    number of cuts necessary to split the graph into comparably sized
    components.

    This embedding can also 'work' even if the ``adjacency`` variable is
    not strictly the adjacency matrix of a graph but more generally
    an affinity or similarity matrix between samples (for instance the
    heat kernel of a euclidean distance matrix or a k-NN matrix).

    However, care must be taken to always make the affinity matrix symmetric
    so that the eigenvector decomposition works as expected.

    Note: Laplacian Eigenmaps is the actual algorithm implemented here.

    Read more in the :ref:`User Guide <spectral_embedding>`.

    Parameters
    ----------
    adjacency : {array-like, sparse graph} of shape (n_samples, n_samples)
        The adjacency matrix of the graph to embed.

    n_components : int, default=8
        The dimension of the projection subspace.

    eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities. If None, then ``'arpack'`` is
        used.

    random_state : int, RandomState instance or None, default=None
        Determines the random number generator used for the initialization of
        the lobpcg eigenvectors decomposition when ``solver`` == 'amg'. Pass
        an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    eigen_tol : float, default=0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.

    norm_laplacian : bool, default=True
        If True, then compute normalized Laplacian.

    drop_first : bool, default=True
        Whether to drop the first eigenvector. For spectral embedding, this
        should be True as the first eigenvector should be constant vector for
        connected graph, but for spectral clustering, this should be kept as
        False to retain the first eigenvector.

    Returns
    -------
    embedding : ndarray of shape (n_samples, n_components)
        The reduced samples.

    Notes
    -----
    Spectral Embedding (Laplacian Eigenmaps) is most useful when the graph
    has one connected component. If the graph has many components, the first
    few eigenvectors will simply uncover the connected components of the graph.

    References
    ----------
    * https://en.wikipedia.org/wiki/LOBPCG

    * Toward the Optimal Preconditioned Eigensolver: Locally Optimal
      Block Preconditioned Conjugate Gradient Method
      Andrew V. Knyazev
      https://doi.org/10.1137%2FS1064827500366124
    """
    adjacency = check_symmetric(adjacency)

    try:
        from pyamg import smoothed_aggregation_solver
    except ImportError as e:
        if eigen_solver == "amg":
            raise ValueError("The eigen_solver was set to 'amg', but pyamg is "
                             "not available.") from e

    if eigen_solver is None:
        eigen_solver = 'arpack'
    elif eigen_solver not in ('arpack', 'lobpcg', 'amg'):
        raise ValueError("Unknown value for eigen_solver: '%s'."
                         "Should be 'amg', 'arpack', or 'lobpcg'" %
                         eigen_solver)

    random_state = check_random_state(random_state)

    n_nodes = adjacency.shape[0]
    # Whether to drop the first eigenvector
    if drop_first:
        n_components = n_components + 1

    if not _graph_is_connected(adjacency):
        warnings.warn("Graph is not fully connected, spectral embedding"
                      " may not work as expected.")

    laplacian, dd = csgraph_laplacian(adjacency,
                                      normed=norm_laplacian,
                                      return_diag=True)
    if (eigen_solver == 'arpack' or eigen_solver != 'lobpcg' and
        (not sparse.isspmatrix(laplacian) or n_nodes < 5 * n_components)):
        # lobpcg used with eigen_solver='amg' has bugs for low number of nodes
        # for details see the source code in scipy:
        # https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
        # /lobpcg/lobpcg.py#L237
        # or matlab:
        # https://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
        laplacian = _set_diag(laplacian, 1, norm_laplacian)

        # Here we'll use shift-invert mode for fast eigenvalues
        # (see https://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
        #  for a short explanation of what this means)
        # Because the normalized Laplacian has eigenvalues between 0 and 2,
        # I - L has eigenvalues between -1 and 1.  ARPACK is most efficient
        # when finding eigenvalues of largest magnitude (keyword which='LM')
        # and when these eigenvalues are very large compared to the rest.
        # For very large, very sparse graphs, I - L can have many, many
        # eigenvalues very near 1.0.  This leads to slow convergence.  So
        # instead, we'll use ARPACK's shift-invert mode, asking for the
        # eigenvalues near 1.0.  This effectively spreads-out the spectrum
        # near 1.0 and leads to much faster convergence: potentially an
        # orders-of-magnitude speedup over simply using keyword which='LA'
        # in standard mode.
        try:
            # We are computing the opposite of the laplacian inplace so as
            # to spare a memory allocation of a possibly very large array
            laplacian *= -1
            v0 = _init_arpack_v0(laplacian.shape[0], random_state)
            _, diffusion_map = eigsh(laplacian,
                                     k=n_components,
                                     sigma=1.0,
                                     which='LM',
                                     tol=eigen_tol,
                                     v0=v0)
            embedding = diffusion_map.T[n_components::-1]
            if norm_laplacian:
                embedding = embedding / dd
        except RuntimeError:
            # When submatrices are exactly singular, an LU decomposition
            # in arpack fails. We fallback to lobpcg
            eigen_solver = "lobpcg"
            # Revert the laplacian to its opposite to have lobpcg work
            laplacian *= -1

    elif eigen_solver == 'amg':
        # Use AMG to get a preconditioner and speed up the eigenvalue
        # problem.
        if not sparse.issparse(laplacian):
            warnings.warn("AMG works better for sparse matrices")
        # lobpcg needs double precision floats
        laplacian = check_array(laplacian,
                                dtype=np.float64,
                                accept_sparse=True)
        laplacian = _set_diag(laplacian, 1, norm_laplacian)

        # The Laplacian matrix is always singular, having at least one zero
        # eigenvalue, corresponding to the trivial eigenvector, which is a
        # constant. Using a singular matrix for preconditioning may result in
        # random failures in LOBPCG and is not supported by the existing
        # theory:
        #     see https://doi.org/10.1007/s10208-015-9297-1
        # Shift the Laplacian so its diagonal is not all ones. The shift
        # does change the eigenpairs however, so we'll feed the shifted
        # matrix to the solver and afterward set it back to the original.
        diag_shift = 1e-5 * sparse.eye(laplacian.shape[0])
        laplacian += diag_shift
        ml = smoothed_aggregation_solver(
            check_array(laplacian, accept_sparse='csr'))
        laplacian -= diag_shift

        M = ml.aspreconditioner()
        X = random_state.rand(laplacian.shape[0], n_components + 1)
        X[:, 0] = dd.ravel()
        _, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-5, largest=False)
        embedding = diffusion_map.T
        if norm_laplacian:
            embedding = embedding / dd
        if embedding.shape[0] == 1:
            raise ValueError

    if eigen_solver == "lobpcg":
        # lobpcg needs double precision floats
        laplacian = check_array(laplacian,
                                dtype=np.float64,
                                accept_sparse=True)
        if n_nodes < 5 * n_components + 1:
            # see note above under arpack why lobpcg has problems with small
            # number of nodes
            # lobpcg will fallback to eigh, so we short circuit it
            if sparse.isspmatrix(laplacian):
                laplacian = laplacian.toarray()
            _, diffusion_map = eigh(laplacian)
            embedding = diffusion_map.T[:n_components]
            if norm_laplacian:
                embedding = embedding / dd
        else:
            laplacian = _set_diag(laplacian, 1, norm_laplacian)
            # We increase the number of eigenvectors requested, as lobpcg
            # doesn't behave well in low dimension
            X = random_state.rand(laplacian.shape[0], n_components + 1)
            X[:, 0] = dd.ravel()
            _, diffusion_map = lobpcg(laplacian,
                                      X,
                                      tol=1e-15,
                                      largest=False,
                                      maxiter=2000)
            embedding = diffusion_map.T[:n_components]
            if norm_laplacian:
                embedding = embedding / dd
            if embedding.shape[0] == 1:
                raise ValueError

    embedding = _deterministic_vector_sign_flip(embedding)
    if drop_first:
        return embedding[1:n_components].T
    else:
        return embedding[:n_components].T
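The long comment inside the arpack branch above is the core idea of this function, and it is worth seeing in isolation. A minimal sketch, assuming only NumPy/SciPy (the random affinity matrix is a stand-in for a real k-NN graph, not sklearn's internals), that computes the smallest Laplacian eigenpairs both directly and via shift-invert:

import numpy as np
from scipy import sparse
from scipy.sparse import csgraph
from scipy.sparse.linalg import eigsh

A = sparse.random(300, 300, density=0.05, random_state=0)
A = A + A.T                                     # symmetric affinity matrix
lap = csgraph.laplacian(A, normed=True).tocsc()

# direct: smallest algebraic eigenvalues (can converge slowly on large graphs)
w_sa, v_sa = eigsh(lap, k=4, which='SA')

# shift-invert near zero: the small eigenvalues become the dominant ones of
# (lap - sigma*I)^-1; sigma is offset from 0 because lap itself is singular
w_si, v_si = eigsh(lap, k=4, sigma=-1e-3, which='LM')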
Example #38
def solver(K, M, **solve_time_kwargs):
    params.update(solve_time_kwargs)
    from scipy.sparse.linalg import eigsh
    return eigsh(K, M=M, **params)
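Example #38 wraps a generalized symmetric eigenproblem K x = lambda M x (a stiffness/mass pair, as in modal analysis). A hedged, self-contained sketch of the same eigsh call; the matrices here are made up for illustration:

import numpy as np
from scipy.sparse import diags
from scipy.sparse.linalg import eigsh

n = 200
K = diags([-1.0, 2.0, -1.0], [-1, 0, 1], shape=(n, n), format='csc')  # SPD "stiffness"
M = diags(np.full(n, 0.5), 0, format='csc')                           # diagonal "mass"

# six smallest generalized eigenvalues of K x = lambda M x via shift-invert
w, v = eigsh(K, M=M, k=6, sigma=0, which='LM')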
Example #39
    def _eigsh(self,
               H,
               v0,
               projector=None,
               tol=1e-10,
               sigma=None,
               lc_search_space=1,
               k=1):
        '''
        solve eigenvalue problem.
        '''
        maxiter = 5000
        N = H.shape[0]
        if self.iprint == 10 and projector is not None and check_commute:
            assert (is_commute(H, projector))
        if self.eigen_solver == 'LC':
            k = max(lc_search_space, k)
            if H.shape[0] < 100:
                e, v = eigh(H.toarray())
                e, v = e[:k], v[:, :k]
            else:
                try:
                    e, v = eigsh(H,
                                 k=k,
                                 which='SA',
                                 maxiter=maxiter,
                                 tol=tol,
                                 v0=v0)
                except:
                    e, v = eigsh(H,
                                 k=k + 1,
                                 which='SA',
                                 maxiter=maxiter,
                                 tol=tol,
                                 v0=v0)
            order = argsort(e)
            e, v = e[order], v[:, order]
        else:
            iprint = 0
            maxiter = 500
            if projector is not None:
                e, v = JDh(H,
                           v0=v0,
                           k=k,
                           projector=projector,
                           tol=tol,
                           maxiter=maxiter,
                           sigma=sigma,
                           which='SA',
                           iprint=iprint)
            else:
                if sigma is None:
                    e, v = JDh(H,
                               v0=v0,
                               k=max(lc_search_space, k),
                               projector=projector,
                               tol=tol,
                               maxiter=maxiter,
                               which='SA',
                               iprint=iprint)
                else:
                    e,v=JDh(H,v0=v0,k=k,projector=projector,tol=tol,sigma=sigma,which='SL',\
                            iprint=iprint,converge_bound=1e-10,maxiter=maxiter)

        nstate = len(e)
        if nstate == 0:
            raise Exception('No Converged Pair!!')
        elif nstate == k or k > 1:
            return e, v

        #filter out states meeting projector.
        if projector is not None and lc_search_space != 1:
            overlaps = array([
                abs(projector.dot(v[:, i]).conj().dot(v[:, i]))
                for i in xrange(nstate)
            ])
            mask0 = overlaps > 0.1
            if not any(mask0):
                raise Exception(
                    'Can not find any states meeting specific parity!')
            mask = overlaps > 0.9
            if sum(mask) == 0:
                #check for degeneracy.
                istate = where(mask0)[0][0]
                warnings.warn('Wrong result or degeneracy occurred!')
            else:
                istate = where(mask)[0][0]
            v = projector.dot(v[:, istate:istate + 1])
            v = v / norm(v)
            return e[istate:istate + 1], v
        else:
            #get the state with maximum overlap.
            v0H = v0.conj() / norm(v0)
            overlaps = array([abs(v0H.dot(v[:, i])) for i in xrange(nstate)])
            istate = argmax(overlaps)
            if overlaps[istate] < 0.7:
                warnings.warn(
                    'Did not find any state corresponding to the one from the last iteration! %s'
                    % overlaps)
        e, v = e[istate:istate + 1], v[:, istate:istate + 1]
        return e, v
Example #40
File: E_Phi.py  Project: tbcole/majoranaJJ
if PLOT != 'P':
    eig_arr = np.zeros((phi_steps, k))
    for i in range(phi_steps):
        print(phi_steps - i)
        H = spop.HBDG(coor,
                      ax,
                      ay,
                      NN,
                      NNb=NNb,
                      Wj=Wj,
                      mu=mu,
                      gamx=gx,
                      alpha=alpha,
                      delta=delta,
                      phi=phi[i])
        eigs, vecs = spLA.eigsh(H, k=k, sigma=0, which='LM')
        idx_sort = np.argsort(eigs)
        eigs = eigs[idx_sort]
        eig_arr[i, :] = np.sort(eigs)

    np.save(
        "%s/eig_arr Lx = %.1f Ly = %.1f Wsc = %.1f Wj = %.1f nodx = %.1f nody = %.1f alpha = %.1f delta = %.2f mu = %.1f gx = %.1f.npy"
        % (dirS, Lx * .1, Ly * .1, Wsc, Junc_width, Nod_widthx, Nod_widthy,
           alpha, delta, mu, gx), eig_arr)
    gc.collect()
    sys.exit()
else:
    eig_arr = np.load(
        "%s/eig_arr Lx = %.1f Ly = %.1f Wsc = %.1f Wj = %.1f nodx = %.1f nody = %.1f alpha = %.1f delta = %.2f mu = %.1f gx = %.1f.npy"
        % (dirS, Lx * .1, Ly * .1, Wsc, Junc_width, Nod_widthx, Nod_widthy,
           alpha, delta, mu, gx))
    def _fit_transform(self, K):
        """ Fit's using kernel K"""
        # center kernel
        K = self._centerer.fit_transform(K)

        if self.n_components is None:
            n_components = K.shape[0]
        else:
            n_components = min(K.shape[0], self.n_components)

        # compute eigenvectors
        if self.eigen_solver == 'auto':
            if K.shape[0] > 200 and n_components < 10:
                eigen_solver = 'arpack'
            else:
                eigen_solver = 'dense'
        else:
            eigen_solver = self.eigen_solver

        if eigen_solver == 'dense':
            self.lambdas_, self.alphas_ = linalg.eigh(
                K, eigvals=(K.shape[0] - n_components, K.shape[0] - 1))
        elif eigen_solver == 'arpack':
            v0 = _init_arpack_v0(K.shape[0], self.random_state)
            self.lambdas_, self.alphas_ = eigsh(K,
                                                n_components,
                                                which="LA",
                                                tol=self.tol,
                                                maxiter=self.max_iter,
                                                v0=v0)

        # make sure that the eigenvalues are ok and fix numerical issues
        self.lambdas_ = _check_psd_eigenvalues(self.lambdas_,
                                               enable_warnings=False)

        # flip eigenvectors' sign to enforce deterministic output
        self.alphas_, _ = svd_flip(self.alphas_, np.zeros_like(self.alphas_).T)

        # sort eigenvectors in descending order
        indices = self.lambdas_.argsort()[::-1]
        self.lambdas_ = self.lambdas_[indices]
        self.alphas_ = self.alphas_[:, indices]

        # remove eigenvectors with a zero eigenvalue (null space) if required
        if self.remove_zero_eig or self.n_components is None:
            self.alphas_ = self.alphas_[:, self.lambdas_ > 0]
            self.lambdas_ = self.lambdas_[self.lambdas_ > 0]

        # Maintenance note on Eigenvectors normalization
        # ----------------------------------------------
        # there is a link between
        # the eigenvectors of K=Phi(X)'Phi(X) and the ones of Phi(X)Phi(X)'
        # if v is an eigenvector of K
        #     then Phi(X)v  is an eigenvector of Phi(X)Phi(X)'
        # if u is an eigenvector of Phi(X)Phi(X)'
        #     then Phi(X)'u is an eigenvector of Phi(X)'Phi(X)
        #
        # At this stage our self.alphas_ (the v) have norm 1, we need to scale
        # them so that eigenvectors in kernel feature space (the u) have norm=1
        # instead
        #
        # We COULD scale them here:
        #       self.alphas_ = self.alphas_ / np.sqrt(self.lambdas_)
        #
        # But choose to perform that LATER when needed, in `fit()` and in
        # `transform()`.

        return K
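The maintenance note above is easier to follow with numbers. A minimal sketch, assuming a plain linear kernel on zero-mean data (so K is approximately centered); `scaled` below is an illustrative name for alphas / sqrt(lambdas):

import numpy as np
from scipy.sparse.linalg import eigsh

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 5))                 # roughly zero-mean data
K = X @ X.T                                   # linear kernel, PSD

lambdas, alphas = eigsh(K, k=3, which='LA')   # top components, each ||v|| = 1
scaled = alphas / np.sqrt(lambdas)            # the deferred normalization
# projecting the training data: K @ scaled == sqrt(lambdas) * alphas
proj = K @ scaled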
 def maxEigenVector(self):
     """Get the eigen vector corresponding to the largest eigen value for the
     sparse matrix eigen decomposition of the adjacency matrix."""
     evals, evecs = eigsh(self.sm, k=1)
     return evecs
Example #43
 def _eigevectors(self, X):
     _, vectors = eigsh(X, k=self.k_partition, which="LM")
     return vectors
Example #44
    def matrix_eig(
        self,
        chis=None,
        eps=0,
        print_errors="deprecated",
        hermitian=False,
        break_degenerate=False,
        degeneracy_eps=1e-6,
        sparse=False,
        trunc_err_func=None,
        evenTrunc=False,
    ):
        """Find eigenvalues and eigenvectors of a matrix.

        The input must be a square matrix.

        If `hermitian` is True the matrix is assumed to be hermitian.

        Truncation works like for SVD, see the documentation there for more.

        If `sparse` is True, a sparse eigenvalue decomposition, using iterative
        methods from `scipy.sparse.linalg.eigs` or `eigsh`, is used. This
        decomposition is done to find ``max(chis)`` eigenvalues, after which
        the decomposition may be truncated further if the truncation error so
        allows. Thus ``max(chis)`` should be much smaller than the full size of
        the matrix, if `sparse` is True.

        The return value is ``S, U, rel_err``, where `S` is a vector of
        eigenvalues and `U` is a matrix that has as its columns the
        eigenvectors. `rel_err` is the truncation error.
        """
        if print_errors != "deprecated":
            msg = (
                "The `print_errors` keyword argument has been deprecated, "
                "and has no effect. Rely instead on getting the error as a "
                "return value, and print it yourself."
            )
            warnings.warn(msg)
        chis = self._matrix_decomp_format_chis(chis, eps)
        mindim = min(self.shape)
        maxchi = max(chis)
        if sparse and maxchi < mindim - 1:
            if hermitian:
                S, U = spsla.eigsh(self, k=maxchi, return_eigenvectors=True)
            else:
                S, U = spsla.eigs(self, k=maxchi, return_eigenvectors=True)
            norm_sq = self.norm_sq()
        else:
            if hermitian:
                S, U = np.linalg.eigh(self)
            else:
                S, U = np.linalg.eig(self)
            norm_sq = None
        order = np.argsort(-np.abs(S))
        S = S[order]
        U = U[:, order]
        # Truncate, if truncation dimensions are given.
        chi, rel_err = type(self)._find_trunc_dim(
            S,
            chis=chis,
            eps=eps,
            break_degenerate=break_degenerate,
            degeneracy_eps=degeneracy_eps,
            trunc_err_func=trunc_err_func,
            norm_sq=norm_sq,
        )
        # Truncate
        S = S[:chi]
        U = U[:, :chi]
        if not isinstance(S, TensorCommon):
            S = type(self).from_ndarray(S)
        if not isinstance(U, TensorCommon):
            U = type(self).from_ndarray(U)
        return S, U, rel_err
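The sparse path above asks ARPACK for max(chis) dominant eigenpairs, sorts them by magnitude, and only then truncates. A toy sketch of that decompose-then-truncate pattern (the 10% cutoff is an illustrative stand-in for _find_trunc_dim):

import numpy as np
from scipy.sparse.linalg import eigsh

rng = np.random.default_rng(0)
A = rng.normal(size=(400, 400))
A = (A + A.T) / 2                       # hermitian input

S, U = eigsh(A, k=20)                   # dominant part of the spectrum only
order = np.argsort(-np.abs(S))          # sort by magnitude, as above
S, U = S[order], U[:, order]

keep = np.abs(S) >= 0.1 * np.abs(S[0])  # crude truncation rule (illustrative)
S, U = S[keep], U[:, keep]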
Example #45
def optimize_ols(features, with_momentum=True, warn=True, sparse=False):
    """
  This only applies to ordinary least squares regression -- in terms of a neural
  net: a linear model with MSE loss.  Returns the optimal learning_rate and
  (optionally, 0.0) momentum. Reports the condition number of A = X^T*X where
  X is the design matrix.
  """
    from scipy.linalg import eigh
    from scipy.sparse.linalg import eigsh
    from scipy.sparse import issparse

    problematic = False
    print("optimizing ... ", end='')

    # ADD A COLUMN OF ONES
    features = torch.cat((torch.ones(len(features), 1), features), 1)

    features = features.numpy().astype('float64')
    A = features.transpose() @ features
    eigs = eigh(A, eigvals_only=True)
    if warn and not all(x.imag == 0.0 for x in eigs):
        print("\nwarning: eigenvalues should be real but some are not due to num"+\
            "erical\n"+' '*9+"ill-conditioning (largest imaginary part is "+\
            '{:.3g}'.format(max([x.imag for x in eigs]))+").")
        problematic = True
    eigs = [x.real for x in eigs]
    if warn and not all(x >= 0.0 for x in eigs):
        print("\nwarning: eigenvalues should be positive but some are not due to "+\
            "numerical\n"+' '*9+"ill-conditioning (most negative eigenvalue is "+\
            '{:.3g}'.format(min([x for x in eigs]))+").")
        problematic = True

    if problematic:
        print("checking for sparseness ... ", end='')
        is_sparse = issparse(A)
        print(is_sparse)
        largest = eigsh(A, 1, which='LM', return_eigenvectors=False).item()
        smallest = eigsh(A,
                         1,
                         which='SA',
                         return_eigenvectors=False,
                         sigma=1.0).item()
    else:
        eigs = [0.0 if x.real < 0.0 else x for x in eigs]
        largest = max(eigs)
        smallest = min(eigs)

    if (smallest != 0):
        print("condition number: {:.3g}".format(largest / smallest))
    else:
        print("condition number: infinite")

    if not with_momentum:
        learning_rate = 2 / (smallest + largest)
        momentum = 0.0
    else:
        learning_rate = (2 / (smallest**0.5 + largest**0.5))**2
        momentum = ((largest**0.5 - smallest**0.5) /
                    (largest**0.5 + smallest**0.5))**2

    return learning_rate, momentum
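The closed forms at the end are standard for gradient descent on a quadratic: with mu and L the smallest and largest eigenvalues of A = X^T*X, the plain step size is 2/(mu + L) and the heavy-ball pair is lr = (2/(sqrt(mu) + sqrt(L)))^2, momentum = ((sqrt(L) - sqrt(mu))/(sqrt(L) + sqrt(mu)))^2. A tiny numeric check under those assumptions:

import numpy as np
from scipy.linalg import eigh

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 4))
X = np.hstack([np.ones((50, 1)), X])      # column of ones, as above
A = X.T @ X
eigs = eigh(A, eigvals_only=True)         # ascending, real for symmetric A
mu, L = eigs[0], eigs[-1]

lr_plain = 2 / (mu + L)
lr_momentum = (2 / (mu**0.5 + L**0.5))**2
momentum = ((L**0.5 - mu**0.5) / (L**0.5 + mu**0.5))**2
print("condition number:", L / mu)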
Example #46
def eigen_decomposition(G, n_components=8, eigen_solver='auto',
                        random_state=None,
                        drop_first=True, largest=True, solver_kwds=None):
    """
    Function to compute the eigendecomposition of a square matrix.

    Parameters
    ----------
    G : array_like or sparse matrix
        The square matrix for which to compute the eigen-decomposition.
    n_components : integer, optional
        The number of eigenvectors to return
    eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
        'auto' :
            attempt to choose the best method for input data (default)
        'dense' :
            use standard dense matrix operations for the eigenvalue decomposition.
            For this method, M must be an array or matrix type.
            This method should be avoided for large problems.
        'arpack' :
            use arnoldi iteration in shift-invert mode. For this method,
            M may be a dense matrix, sparse matrix, or general linear operator.
            Warning: ARPACK can be unstable for some problems.  It is best to
            try several random seeds in order to check results.
        'lobpcg' :
            Locally Optimal Block Preconditioned Conjugate Gradient Method.
            A preconditioned eigensolver for large symmetric positive definite
            (SPD) generalized eigenproblems.
        'amg' :
            Algebraic Multigrid solver (requires ``pyamg`` to be installed)
            It can be faster on very large, sparse problems, but may also lead
            to instabilities.
    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        lobpcg eigen vectors decomposition when eigen_solver == 'amg'.
        By default, arpack is used.
    solver_kwds : any additional keyword arguments to pass to the selected eigen_solver

    Returns
    -------
    lambdas, diffusion_map : eigenvalues, eigenvectors
    """
    n_nodes = G.shape[0]
    if drop_first:
        n_components = n_components + 1

    eigen_solver, solver_kwds = check_eigen_solver(eigen_solver, solver_kwds,
                                                   size=n_nodes,
                                                   nvec=n_components)
    random_state = check_random_state(random_state)

    # Convert G to best type for eigendecomposition
    if sparse.issparse(G):
        if G.getformat() != 'csr':
            G = G.tocsr()
    G = G.astype(np.float64)

    # Check for symmetry
    is_symmetric = _is_symmetric(G)

    # Try Eigen Methods:
    if eigen_solver == 'arpack':
        # This matches the internal initial state used by ARPACK
        v0 = random_state.uniform(-1, 1, G.shape[0])
        if is_symmetric:
            if largest:
                which = 'LM'
            else:
                which = 'SM'
            lambdas, diffusion_map = eigsh(G, k=n_components, which=which,
                                           v0=v0,**(solver_kwds or {}))
        else:
            if largest:
                which = 'LR'
            else:
                which = 'SR'
            lambdas, diffusion_map = eigs(G, k=n_components, which=which,
                                          **(solver_kwds or {}))
        lambdas = np.real(lambdas)
        diffusion_map = np.real(diffusion_map)
    elif eigen_solver == 'amg':
        # separate amg & lobpcg keywords:
        if solver_kwds is not None:
            amg_kwds = {}
            lobpcg_kwds = solver_kwds.copy()
            for kwd in AMG_KWDS:
                if kwd in solver_kwds.keys():
                    amg_kwds[kwd] = solver_kwds[kwd]
                    del lobpcg_kwds[kwd]
        else:
            amg_kwds = None
            lobpcg_kwds = None
        if not is_symmetric:
            raise ValueError("lobpcg requires symmetric matrices.")
        if not sparse.issparse(G):
            warnings.warn("AMG works better for sparse matrices")
        # Use AMG to get a preconditioner and speed up the eigenvalue problem.
        ml = smoothed_aggregation_solver(check_array(G, accept_sparse = ['csr']),**(amg_kwds or {}))
        M = ml.aspreconditioner()
        n_find = min(n_nodes, 5 + 2*n_components)
        X = random_state.rand(n_nodes, n_find)
        X[:, 0] = (G.diagonal()).ravel()
        lambdas, diffusion_map = lobpcg(G, X, M=M, largest=largest,**(lobpcg_kwds or {}))
        sort_order = np.argsort(lambdas)
        if largest:
            lambdas = lambdas[sort_order[::-1]]
            diffusion_map = diffusion_map[:, sort_order[::-1]]
        else:
            lambdas = lambdas[sort_order]
            diffusion_map = diffusion_map[:, sort_order]
        lambdas = lambdas[:n_components]
        diffusion_map = diffusion_map[:, :n_components]
    elif eigen_solver == "lobpcg":
        if not is_symmetric:
            raise ValueError("lobpcg requires symmetric matrices.")
        n_find = min(n_nodes, 5 + 2*n_components)
        X = random_state.rand(n_nodes, n_find)
        lambdas, diffusion_map = lobpcg(G, X, largest=largest,**(solver_kwds or {}))
        sort_order = np.argsort(lambdas)
        if largest:
            lambdas = lambdas[sort_order[::-1]]
            diffusion_map = diffusion_map[:, sort_order[::-1]]
        else:
            lambdas = lambdas[sort_order]
            diffusion_map = diffusion_map[:, sort_order]
        lambdas = lambdas[:n_components]
        diffusion_map = diffusion_map[:, :n_components]
    elif eigen_solver == 'dense':
        if sparse.isspmatrix(G):
            G = G.todense()
        if is_symmetric:
            lambdas, diffusion_map = eigh(G,**(solver_kwds or {}))
        else:
            lambdas, diffusion_map = eig(G,**(solver_kwds or {}))
            sort_index = np.argsort(lambdas)
            lambdas = lambdas[sort_index]
            diffusion_map = diffusion_map[:, sort_index]
        if largest:# eigh always returns eigenvalues in ascending order
            lambdas = lambdas[::-1] # reverse order the e-values
            diffusion_map = diffusion_map[:, ::-1] # reverse order the vectors
        lambdas = lambdas[:n_components]
        diffusion_map = diffusion_map[:, :n_components]
    return (lambdas, diffusion_map)
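Condensed, the 'arpack' branch above is a dispatch on symmetry: eigsh with 'LM'/'SM' for symmetric input, eigs with 'LR'/'SR' otherwise, keeping the real parts. A toy sketch of that dispatch:

import numpy as np
from scipy.sparse.linalg import eigs, eigsh

rng = np.random.default_rng(0)
G = rng.normal(size=(200, 200))           # generic, not symmetric

if np.allclose(G, G.T):
    lam, vec = eigsh(G, k=5, which='LM')
else:
    lam, vec = eigs(G, k=5, which='LR')   # largest real part
    lam, vec = np.real(lam), np.real(vec)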
Example #47
def eigenvectors(h, nk=10, kpoints=False, k=None, sparse=False, numw=None):
    import scipy.linalg as lg
    from scipy.sparse import csc_matrix as csc
    shape = h.intra.shape
    if h.dimensionality == 0:
        vv = lg.eigh(h.intra)
        vecs = [v for v in vv[1].transpose()]
        if kpoints: return vv[0], vecs, [[0., 0., 0.] for e in vv[0]]
        else: return vv[0], vecs
    elif h.dimensionality > 0:
        f = h.get_hk_gen()
        if k is None:
            from klist import kmesh
            kp = kmesh(h.dimensionality, nk=nk)  # generate a mesh
        else:
            kp = np.array([k])  # kpoint given on input
        #    vvs = [lg.eigh(f(k)) for k in kp] # diagonalize k hamiltonian
        nkp = len(kp)  # total number of k-points
        if sparse:  # sparse Hamiltonians
            vvs = [
                slg.eigsh(csc(f(k)), k=numw, which="LM", sigma=0.0, tol=1e-10)
                for k in kp
            ]  #

        else:  # dense Hamiltonians
            import parallel
            if parallel.cores > 1:  # in parallel
                vvs = parallel.multieigh([f(k)
                                          for k in kp])  # multidiagonalization
            else:
                vvs = [lg.eigh(f(k)) for k in kp]  #
        nume = sum([len(v[0])
                    for v in vvs])  # number of eigenvalues calculated
        eigvecs = np.zeros((nume, h.intra.shape[0]),
                           dtype=complex)  # eigenvectors
        eigvals = np.zeros(nume)  # eigenvalues

        #### New way ####
        #    eigvals = np.array([iv[0] for iv in vvs]).reshape(nkp*shape[0],order="F")
        #    eigvecs = np.array([iv[1].transpose() for iv in vvs]).reshape((nkp*shape[0],shape[1]),order="F")
        #    if kpoints: # return also the kpoints
        #      kvectors = [] # empty list
        #      for ik in kp:
        #        for i in range(h.intra.shape[0]): kvectors.append(ik) # store
        #      return eigvals,eigvecs,kvectors
        #    else:
        #      return eigvals,eigvecs

        #### Old way, slightly slower but clearer ####
        iv = 0
        kvectors = []  # empty list
        for ik in range(len(kp)):  # loop over kpoints
            vv = vvs[ik]  # get eigenvalues and eigenvectors
            for (e, v) in zip(vv[0], vv[1].transpose()):
                eigvecs[iv] = v.copy()
                eigvals[iv] = e.copy()
                kvectors.append(kp[ik])
                iv += 1
        if kpoints:  # return also the kpoints
            #      for iik in range(len(kp)):
            #        ik = kp[iik] # store kpoint
            #        for e in vvs[iik][0]: kvectors.append(ik) # store
            return eigvals, eigvecs, kvectors
        else:
            return eigvals, eigvecs
    else:
        raise ValueError("unrecognized dimensionality")
def first_eigv(A):
    return eigsh(A, k=1, which='SM')[1][:, 0]
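first_eigv above asks for the smallest-magnitude eigenpair with which='SM', which is ARPACK's slowest mode; several examples in this collection use shift-invert (sigma=...) instead. Both variants on a toy SPD matrix (the small sigma offset is an assumption, used only to keep the factorization nonsingular):

from scipy.sparse import diags
from scipy.sparse.linalg import eigsh

A = diags([1.0, 2.0, 1.0], [-1, 0, 1], shape=(100, 100), format='csc')

v_sm = eigsh(A, k=1, which='SM')[1][:, 0]               # as in first_eigv
v_si = eigsh(A, k=1, sigma=-1e-3, which='LM')[1][:, 0]  # shift-invert, faster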
Example #49
def make_leads(Ny):
    a, b, c, d = subs
    hoppings = [((0, 0), b, a), ((1, 0), b, a), ((0, -1), d, a),
                ((0, 0), c, b), ((0, 0), d, c), ((-1, 0), d, c)]

    sym_left = kwant.lattice.TranslationalSymmetry((-1, 0))
    sym_right = kwant.lattice.TranslationalSymmetry((1, 0))
    lead_left = kwant.Builder(sym_left)
    lead_right = kwant.builder.Builder(sym_right)
    lead_left[[sub(0, ny) for sub in subs for ny in range(Ny)]] = EL
    lead_left[[kwant.builder.HoppingKind(*hopping)
               for hopping in hoppings]] = t
    lead_right[[sub(Nx - 1, ny) for sub in subs for ny in range(Ny)]] = ER
    lead_right[[kwant.builder.HoppingKind(*hopping)
                for hopping in hoppings]] = t
    return lead_left, lead_right


sys, sysvx, sysvy = make_system(Nx, Ny)
lead_left, lead_right = make_leads(Ny)
sys = sys.finalized()
sysvx = sysvx.finalized()
sysvy = sysvy.finalized()
ham = sys.hamiltonian_submatrix()
eig_values, eig_vectors = eigsh(ham, k=15, which="SM")
#N = len(eig_values)
kwant.plotter.map(sys, np.abs(eig_vectors[:, 4])**2)

#kwant.plot(sys)
Example #50
def test_lindblad_zero_eigenvalue():
    lind_mat = lind.to_sparse()
    w, v = linalg.eigsh(lind_mat.H * lind_mat, which="SM")
    assert w[0] <= 10e-10
Example #51
def get_weight(hk):
    es, waves = slg.eigsh(hk, k=num_waves, sigma=e, tol=arpack_tol,
                          which="LM", maxiter=arpack_maxiter)
    return np.sum(delta / ((e - es)**2 + delta**2))  # return weight
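The sum above is a Lorentzian-broadened spectral weight: each eigenvalue near e contributes delta/((e - es)**2 + delta**2), so the result approximates the density of states at energy e using only the few eigenpairs that shift-invert returns around sigma=e. A standalone sketch with made-up parameter values and a toy diagonal Hamiltonian:

import numpy as np
from scipy.sparse import diags
from scipy.sparse.linalg import eigsh

num_waves, delta = 10, 0.05
H = diags(np.linspace(-2.0, 2.0, 800), 0, format='csc')  # toy spectrum

def weight_at(e):
    es = eigsh(H, k=num_waves, sigma=e, which='LM',
               return_eigenvectors=False)
    return np.sum(delta / ((e - es)**2 + delta**2))

print(weight_at(0.3))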
Example #52
def null_space(M,
               k,
               k_skip=1,
               eigen_solver='arpack',
               tol=1E-6,
               max_iter=100,
               random_state=None):
    """
    Find the null space of a matrix M.

    Parameters
    ----------
    M : {array, matrix, sparse matrix, LinearOperator}
        Input covariance matrix: should be symmetric positive semi-definite

    k : int
        Number of eigenvalues/vectors to return

    k_skip : int, default=1
        Number of low eigenvalues to skip.

    eigen_solver : {'auto', 'arpack', 'dense'}, default='arpack'
        auto : algorithm will attempt to choose the best method for input data
        arpack : use arnoldi iteration in shift-invert mode.
                    For this method, M may be a dense matrix, sparse matrix,
                    or general linear operator.
                    Warning: ARPACK can be unstable for some problems.  It is
                    best to try several random seeds in order to check results.
        dense  : use standard dense matrix operations for the eigenvalue
                    decomposition.  For this method, M must be an array
                    or matrix type.  This method should be avoided for
                    large problems.

    tol : float, default=1e-6
        Tolerance for 'arpack' method.
        Not used if eigen_solver=='dense'.

    max_iter : int, default=100
        Maximum number of iterations for 'arpack' method.
        Not used if eigen_solver=='dense'

    random_state : int, RandomState instance, default=None
        Determines the random number generator when ``solver`` == 'arpack'.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.
    """
    if eigen_solver == 'auto':
        if M.shape[0] > 200 and k + k_skip < 10:
            eigen_solver = 'arpack'
        else:
            eigen_solver = 'dense'

    if eigen_solver == 'arpack':
        random_state = check_random_state(random_state)
        # initialize with [-1,1] as in ARPACK
        v0 = random_state.uniform(-1, 1, M.shape[0])
        try:
            eigen_values, eigen_vectors = eigsh(M,
                                                k + k_skip,
                                                sigma=0.0,
                                                tol=tol,
                                                maxiter=max_iter,
                                                v0=v0)
        except RuntimeError as msg:
            raise ValueError("Error in determining null-space with ARPACK. "
                             "Error message: '%s'. "
                             "Note that method='arpack' can fail when the "
                             "weight matrix is singular or otherwise "
                             "ill-behaved.  method='dense' is recommended. "
                             "See online documentation for more information." %
                             msg)

        return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
    elif eigen_solver == 'dense':
        if hasattr(M, 'toarray'):
            M = M.toarray()
        eigen_values, eigen_vectors = eigh(M,
                                           eigvals=(k_skip, k + k_skip - 1),
                                           overwrite_a=True)
        index = np.argsort(np.abs(eigen_values))
        return eigen_vectors[:, index], np.sum(eigen_values)
    else:
        raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
Example #53
File: gap.py  Project: zx-sdu/pygra
def gap2d(h,
          nk=40,
          k0=None,
          rmap=1.0,
          recursive=False,
          iterations=10,
          sparse=True,
          mode="refine"):
    """Calculates the gap for a 2d Hamiltonian by doing
  a kmesh sampling. It will return the positive energy with smaller value"""
    if mode == "optimize":  # using optimize library
        from scipy.optimize import minimize
        hk_gen = h.get_hk_gen()  # generator

        def minfun(k):  # function to minimize
            hk = hk_gen(k)  # Hamiltonian
            if h.is_sparse:
                es, ew = lgs.eigsh(hk, k=10, which="LM", sigma=0.0, tol=1e-06)
            else:
                es = lg.eigvalsh(hk)  # get eigenvalues
            es = es[es > 0.]
            return np.min(es)  # retain positive

        gaps = [
            minimize(minfun,
                     np.random.random(h.dimensionality),
                     method="Powell").fun for i in range(iterations)
        ]
        #    print(gaps)
        return np.min(gaps)

    else:  # classical way
        if k0 is None: k0 = np.random.random(2)  # random shift
        if h.dimensionality != 2: raise ValueError("gap2d requires a 2d Hamiltonian")
        hk_gen = h.get_hk_gen()  # get hamiltonian generator
        emin = 1000.  # initial values
        for ix in np.linspace(-.5, .5, nk):
            for iy in np.linspace(-.5, .5, nk):
                k = np.array([ix, iy])  # generate kvector
                if recursive: k = k0 + k * rmap  # scale vector
                hk = hk_gen(k)  # generate hamiltonian
                if h.is_sparse:
                    es, ew = lgs.eigsh(csc_matrix(hk),
                                       k=4,
                                       which="LM",
                                       sigma=0.0)
                else:
                    es = lg.eigvalsh(hk)  # get eigenvalues
                es = es[es > 0.]  # retain positive
                if min(es) < emin:
                    emin = min(es)  # store new minimum
                    kbest = k.copy()  # store the best k
        if recursive:  # if it has been chosen recursive
            if iterations > 0:  # if still iterations left
                emin = gap2d(h,
                             nk=nk,
                             k0=kbest,
                             rmap=rmap / 4,
                             recursive=recursive,
                             iterations=iterations - 1,
                             sparse=sparse)
        return emin  # gap
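The 'classical' branch is a coarse scan followed by recursive zooming: scan an nk x nk k-grid, keep the best point, then rescan a 4x smaller window around it. The refinement loop in miniature, with a scalar function standing in for the band gap:

import numpy as np

def refine(f, k0, rmap=1.0, nk=10, iterations=5):
    best_val, best_k = np.inf, k0
    for ix in np.linspace(-.5, .5, nk):
        for iy in np.linspace(-.5, .5, nk):
            k = k0 + np.array([ix, iy]) * rmap   # scaled window around k0
            val = f(k)
            if val < best_val:
                best_val, best_k = val, k
    if iterations > 0:                           # zoom in and repeat
        return refine(f, best_k, rmap / 4, nk, iterations - 1)
    return best_val

gap = refine(lambda k: np.sum((k - 0.3)**2), k0=np.random.random(2))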
Example #54
def rescal(X, D, rank, **kwargs):
    """
    RESCAL 

    Factors a three-way tensor X such that each frontal slice 
    X_k = A * R_k * A.T. The frontal slices of a tensor are 
    N x N matrices that correspond to the adjacency matrices 
    of the relational graph for a particular relation.

    For a full description of the algorithm see: 
      Maximilian Nickel, Volker Tresp, Hans-Peter-Kriegel, 
      "A Three-Way Model for Collective Learning on Multi-Relational Data",
      ICML 2011, Bellevue, WA, USA

    Parameters
    ----------
    X : list
        List of frontal slices X_k of the tensor X. The shape of each X_k is ('N', 'N')
    D : matrix
        A sparse matrix involved in the tensor factorization (aims to incorporate
        the entity-term matrix aka document-term matrix)
    rank : int 
        Rank of the factorization
    lmbda : float, optional 
        Regularization parameter for A and R_k factor matrices. 0 by default 
    init : string, optional
        Initialization method of the factor matrices. 'nvecs' (default) 
        initializes A based on the eigenvectors of X. 'random' initializes 
        the factor matrices randomly.
    proj : boolean, optional 
        Whether or not to use the QR decomposition when computing R_k.
        True by default 
    maxIter : int, optional 
        Maximium number of iterations of the ALS algorithm. 50 by default. 
    conv : float, optional 
        Stop when residual of factorization is less than conv. 1e-5 by default    

    Returns 
    -------
    A : ndarray 
        matrix of latent embeddings for entities A
    R : list
        list of 'M' arrays of shape ('rank', 'rank') corresponding to the factor matrices R_k 
    f : float 
        function value of the factorization 
    iter : int 
        number of iterations until convergence 
    exectimes : ndarray 
        execution times to compute the updates in each iteration
    V : ndarray
        matrix of latent embeddings for words V
    """

    # init options
    ainit = kwargs.pop('init', __DEF_INIT)
    proj = kwargs.pop('proj', __DEF_PROJ)
    maxIter = kwargs.pop('maxIter', __DEF_MAXITER)
    conv = kwargs.pop('conv', __DEF_CONV)
    lmbda = kwargs.pop('lmbda', __DEF_LMBDA)
    preheatnum = kwargs.pop('preheatnum', __DEF_PREHEATNUM)

    if not len(kwargs) == 0:
        raise ValueError('Unknown keywords (%s)' % (kwargs.keys()))

    sz = X[0].shape
    dtype = X[0].dtype
    n = sz[0]

    _log.debug('[Config] rank: %d | maxIter: %d | conv: %7.1e | lmbda: %7.1e' %
               (rank, maxIter, conv, lmbda))

    # precompute norms of X
    normX = [squareFrobeniusNormOfSparse(M) for M in X]
    sumNormX = sum(normX)
    normD = squareFrobeniusNormOfSparse(D)
    _log.debug('[Algorithm] The tensor norm: %.5f' % sumNormX)
    _log.debug('[Algorithm] The extended matrix norm: %.5f' % normD)
    # initialize A
    if ainit == 'random':
        _log.debug('[Algorithm] The random initialization will be performed.')
        A = array(rand(n, rank), dtype=np.float64)
    elif ainit == 'nvecs':
        _log.debug(
            '[Algorithm] The eigenvector based initialization will be performed.'
        )
        avgX = lil_matrix((n, n))
        for i in range(len(X)):
            avgX += (X[i] + X[i].T)
        eigvalsX, A = eigsh(avgX, rank)
    else:
        raise ValueError('Unknown init option ("%s")' % ainit)

    # initialize R
    if proj:
        Q, A2 = qr(A)
        X2 = __projectSlices(X, Q)
        R = __updateR(X2, A2, lmbda)
    else:
        raise ValueError('Projection via QR decomposition is required; pass proj=True')

    _log.debug('[Algorithm] Finished initialization.')
    # compute factorization
    fit = fitchange = fitold = 0
    exectimes = []

    for iterNum in xrange(maxIter):
        tic = time.clock()

        V = updateV(A, D, lmbda)

        A = updateA(X, A, R, V, D, lmbda)
        if proj:
            Q, A2 = qr(A)
            X2 = __projectSlices(X, Q)
            R = __updateR(X2, A2, lmbda)
        else:
            raise ValueError('Projection via QR decomposition is required; pass proj=True')

        # compute fit values
        fit = 0
        tensorFit = 0
        regularizedFit = 0
        extRegularizedFit = 0
        regRFit = 0
        fitDAV = 0
        if iterNum >= preheatnum:
            if lmbda != 0:
                for i in xrange(len(R)):
                    regRFit += norm(R[i])**2
                regularizedFit = lmbda * (norm(A)**2) + lmbda * regRFit
            if lmbda != 0:
                extRegularizedFit = lmbda * (norm(V)**2)

            fitDAV = normD + matrixFitNormWithoutNormD(D, A, V)

            for i in xrange(len(R)):
                tensorFit += (normX[i] + fitNormWithoutNormX(X[i], A, R[i]))

            fit = 0.5 * tensorFit
            fit += regularizedFit
            fit /= sumNormX
            fit += (0.5 * fitDAV + extRegularizedFit) / normD

        else:
            _log.debug('[Algorithm] Preheating is going on.')

        toc = time.clock()
        exectimes.append(toc - tic)
        fitchange = abs(fitold - fit)
        _log.debug(
            '[%3d] total fit: %.10f | tensor fit: %.10f | matrix fit: %.10f | delta: %.10f | secs: %.5f'
            % (iterNum, fit, tensorFit, fitDAV, fitchange, exectimes[-1]))

        fitold = fit
        if iterNum > preheatnum and fitchange < conv:
            break
    return A, R, fit, iterNum + 1, array(exectimes), V
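A hedged usage sketch of the call, following the docstring; shapes, density, and rank are illustrative, and the helper functions the algorithm relies on are assumed to be defined in its original module:

from scipy.sparse import rand as sprand

n, n_slices, rank = 50, 3, 5
X = [sprand(n, n, density=0.1, format='csr') for _ in range(n_slices)]  # relation slices
D = sprand(n, 20, density=0.1, format='csr')                            # entity-term matrix

A, R, f, n_iter, exectimes, V = rescal(X, D, rank, lmbda=0.01)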
Example #55
if not os.path.exists(dirS):
    os.makedirs(dirS)
try:
    PLOT = str(sys.argv[1])
except:
    PLOT = 'F'
if PLOT != 'P':
    for i in range(q_steps):
        if i == 0:
            Q = 1e-4*(np.pi/Lx)
        else:
            Q = qx[i]

        H0 = spop.HBDG(coor, ax, ay, NN, NNb=NNb, Wj=Wj, cutx=cutx, cuty=cuty, V=V, mu=mu, alpha=alpha, delta=delta, phi=phi, gamx=1e-4, qx=Q) #gives low energy basis

        eigs_0, vecs_0 = spLA.eigsh(H0, k=k, sigma=0, which='LM')
        vecs_0_hc = np.conjugate(np.transpose(vecs_0)) #hermitian conjugate

        H_G0 = spop.HBDG(coor, ax, ay, NN, NNb=NNb, Wj=Wj, cutx=cutx, cuty=cuty, V=V, mu=mu, gamx=0, alpha=alpha, delta=delta, phi=phi, qx=qx[i]) #Matrix that consists of everything in the Hamiltonian except for the Zeeman energy in the x-direction
        H_G1 = spop.HBDG(coor, ax, ay, NN, NNb=NNb, Wj=Wj, cutx=cutx, cuty=cuty, V=V, mu=mu, gamx=1, alpha=alpha, delta=delta, phi=phi, qx=qx[i]) #Hamiltonian with ones on Zeeman energy along x-direction sites
        HG = H_G1 - H_G0 #the proportionality matrix for gamma-x, it is ones along the sites that have a gamma value
        HG0_DB = np.dot(vecs_0_hc, H_G0.dot(vecs_0))
        HG_DB = np.dot(vecs_0_hc, HG.dot(vecs_0))
        for g in range(gx.shape[0]):
            print(qx.shape[0]-i,  gx.shape[0]-g)
            H_DB = HG0_DB + gx[g]*HG_DB
            eigs_DB, U_DB = LA.eigh(H_DB)
            LE_Bands[i, g] = eigs_DB[int(k/2)]

    gap = np.zeros((gx.shape[0]))
    q_minima = []
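The loop above relies on a reduced-basis trick: diagonalize the full sparse problem once, project the field-independent part and the field-proportionality matrix onto those k low-energy states, then sweep the field by diagonalizing only a k x k matrix. A generic sketch of the same pattern (the matrices here are placeholders, not the BDG Hamiltonian):

import numpy as np
from numpy import linalg as LA
from scipy import sparse
from scipy.sparse.linalg import eigsh

n, k = 1000, 20
H0 = sparse.random(n, n, density=1e-3, random_state=0, format='csc')
H0 = H0 + H0.T + sparse.diags(np.linspace(-1.0, 1.0, n))  # field-independent part
H1 = sparse.random(n, n, density=1e-3, random_state=1, format='csc')
H1 = H1 + H1.T                                            # field proportionality

_, V = eigsh(H0, k=k, sigma=0, which='LM')  # low-energy basis, computed once
H0_k = V.conj().T @ (H0 @ V)                # k x k projected blocks
H1_k = V.conj().T @ (H1 @ V)

for g in np.linspace(0.0, 1.0, 50):         # cheap field sweep
    eigs_g = LA.eigvalsh(H0_k + g * H1_k)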
Example #56
def test_randomized_eigsh_compared_to_others(k):
    """Check that `_randomized_eigsh` is similar to other `eigsh`

    Tests that for a random PSD matrix, `_randomized_eigsh` provides results
    comparable to LAPACK (scipy.linalg.eigh) and ARPACK
    (scipy.sparse.linalg.eigsh).

    Note: some versions of ARPACK do not support k=n_features.
    """

    # make a random PSD matrix
    n_features = 200
    X = make_sparse_spd_matrix(n_features, random_state=0)

    # compare two versions of randomized
    # rough and fast
    eigvals, eigvecs = _randomized_eigsh(X,
                                         n_components=k,
                                         selection="module",
                                         n_iter=25,
                                         random_state=0)
    # more accurate but slow (TODO find realistic settings here)
    eigvals_qr, eigvecs_qr = _randomized_eigsh(
        X,
        n_components=k,
        n_iter=25,
        n_oversamples=20,
        random_state=0,
        power_iteration_normalizer="QR",
        selection="module",
    )

    # with LAPACK
    eigvals_lapack, eigvecs_lapack = linalg.eigh(X,
                                                 eigvals=(n_features - k,
                                                          n_features - 1))
    indices = eigvals_lapack.argsort()[::-1]
    eigvals_lapack = eigvals_lapack[indices]
    eigvecs_lapack = eigvecs_lapack[:, indices]

    # -- eigenvalues comparison
    assert eigvals_lapack.shape == (k, )
    # comparison precision
    assert_array_almost_equal(eigvals, eigvals_lapack, decimal=6)
    assert_array_almost_equal(eigvals_qr, eigvals_lapack, decimal=6)

    # -- eigenvectors comparison
    assert eigvecs_lapack.shape == (n_features, k)
    # flip eigenvectors' sign to enforce deterministic output
    dummy_vecs = np.zeros_like(eigvecs).T
    eigvecs, _ = svd_flip(eigvecs, dummy_vecs)
    eigvecs_qr, _ = svd_flip(eigvecs_qr, dummy_vecs)
    eigvecs_lapack, _ = svd_flip(eigvecs_lapack, dummy_vecs)
    assert_array_almost_equal(eigvecs, eigvecs_lapack, decimal=4)
    assert_array_almost_equal(eigvecs_qr, eigvecs_lapack, decimal=6)

    # comparison ARPACK ~ LAPACK (some ARPACK implems do not support k=n)
    if k < n_features:
        v0 = _init_arpack_v0(n_features, random_state=0)
        # "LA" largest algebraic <=> selection="value" in randomized_eigsh
        eigvals_arpack, eigvecs_arpack = eigsh(X,
                                               k,
                                               which="LA",
                                               tol=0,
                                               maxiter=None,
                                               v0=v0)
        indices = eigvals_arpack.argsort()[::-1]
        # eigenvalues
        eigvals_arpack = eigvals_arpack[indices]
        assert_array_almost_equal(eigvals_lapack, eigvals_arpack, decimal=10)
        # eigenvectors
        eigvecs_arpack = eigvecs_arpack[:, indices]
        eigvecs_arpack, _ = svd_flip(eigvecs_arpack, dummy_vecs)
        assert_array_almost_equal(eigvecs_arpack, eigvecs_lapack, decimal=8)
Example #57
def decompose_laplacian(A, normalized=True, n_eig=100):
    l = adj_to_laplacian(A, normalized)
    D, V = spsl.eigsh(l, n_eig, which='SM')
    return [D, V]
Example #58
    vpp = (e**2 / (4 * np.pi * eps_0 * R))
    vpp_arr = diags([vpp], [0], shape=[M * (N - 1) + 1,
                                       M * (N - 1) + 1]).toarray()

    return vpp_arr


#------------------------------------------------------------

#Initialize Hamiltonian
T = (-hbar**2) / (2 * m_e * dr**2) * Lap()
V_1 = Potential_1()
V_2 = Potential_2()
H = T + V_1 + V_2

eigvals, eigvecs = eigsh(H, k=1, which='SA')

print("R/a_0:", R / a_0, "Numeric Energy:", eigvals[0])

fig = plt.figure(figsize=(10, 7))
ax = plt.axes(projection="3d")

u = eigvecs[:, 0]
ax.scatter3D(0, 0, u[0], color='blue')

k = 1
for rad in range(1, N):
    for theta in range(M):
        ax.scatter3D(rad * dr, theta * dtheta, u[k], color='blue')
        k += 1
Example #59
def asalsan(X, rank, **kwargs):
    """
    ASALSAN algorithm to compute the three-way DEDICOM decomposition
    of a tensor

    See
    ---
    .. [1] Brett W. Bader, Richard A. Harshman, Tamara G. Kolda
       "Temporal analysis of semantic graphs using ASALSAN"
       7th International Conference on Data Mining, 2007

    .. [2] Brett W. Bader, Richard A. Harshman, Tamara G. Kolda
       "Temporal analysis of Social Networks using Three-way DEDICOM"
       Technical Report, 2006
    """
    # init options
    ainit = kwargs.pop('init', _DEF_INIT)
    proj = kwargs.pop('proj', _DEF_PROJ)
    maxIter = kwargs.pop('maxIter', _DEF_MAXITER)
    conv = kwargs.pop('conv', _DEF_CONV)
    nne = kwargs.pop('nne', _DEF_NNE)
    optfunc = kwargs.pop('optfunc', _DEF_OPTFUNC)
    if not len(kwargs) == 0:
        raise BaseException('Unknown keywords (%s)' % (kwargs.keys()))

    # init starting points
    D = ones((len(X), rank))
    sz = X[0].shape
    n = sz[0]
    R = rand(rank, rank)
    if ainit == 'random':
        A = rand(n, rank)
    elif ainit == 'nvecs':
        S = zeros((n, n))
        T = zeros((n, n))
        for i in range(len(X)):
            T = X[i]
            S = S + T + T.T
        evals, A = eigsh(S, rank)
        if nne > 0:
            A[A < 0] = 0
        if proj:
            Q, A2 = qr(A)
            X2 = __projectSlices(X, Q)
            R = __updateR(X2, A2, D, R, nne)
        else:
            R = __updateR(X, A, D, R, nne)
    elif isinstance(ainit, np.ndarray):
        A = ainit
    else:
        raise ValueError('Unknown init option ("%s")' % ainit)

    # perform decomposition
    if issparse(X[0]):
        normX = [norm(M.data)**2 for M in X]
        Xflat = [M.tolil().reshape((1, prod(M.shape))).tocsr() for M in X]
    else:
        normX = [norm(M)**2 for M in X]
        Xflat = [M.flatten() for M in X]
    M = zeros((n, n))
    normXSum = sum(normX)
    #normX = norm(X)**2
    fit = fitold = f = fitchange = 0
    exectimes = []
    for iters in xrange(maxIter):
        tic = time.clock()
        fitold = fit
        A = __updateA(X, A, D, R, nne)
        if proj:
            Q, A2 = qr(A)
            X2 = __projectSlices(X, Q)
            R = __updateR(X2, A2, D, R, nne)
            D, f = __updateD(X2, A2, D, R, nne, optfunc)
        else:
            R = __updateR(X, A, D, R, nne)
            D, f = __updateD(X, A, D, R, nne, optfunc)

        # compute fit
        f = 0
        for i in xrange(len(X)):
            AD = dot(A, diag(D[i, :]))
            M = dot(dot(AD, R), AD.T)
            f += normX[i] + norm(M)**2 - 2 * Xflat[i].dot(M.flatten())
        f *= 0.5
        fit = 1 - (f / normXSum)
        fitchange = abs(fitold - fit)

        exectimes.append(time.clock() - tic)

        # print iter info when debugging is enabled
        _log.debug('[%3d] fit: %.5f | delta: %7.1e | secs: %.5f' %
                   (iters, fit, fitchange, exectimes[-1]))

        if iters > 1 and fitchange < conv:
            break
    return A, R, D, fit, iters, array(exectimes)
Example #60
File: exact.py  Project: chrisrothUT/netket
def steady_state(lindblad, *, sparse=None, method="ed", rho0=None, **kwargs):
    r"""Computes the numerically exact steady-state of a lindblad master equation.
    The computation is performed either through the exact diagonalization of the
    hermitian :math:`L^\dagger L` matrix, or by means of an iterative solver (bicgstabl)
    targeting the solution of the non-hermitian system :math:`L\rho = 0`
    and :math:`\mathrm{Tr}[\rho] = 1`.

    Note that for systems with 7 or more sites it is usually computationally impossible
    to build the full lindblad operator and therefore only `iterative` will work.

    Note that for systems with hilbert spaces with dimensions above 40k, tol
    should be set to a lower value if the steady state has non-trivial correlations.

    Args:
        lindblad: The lindbladian encoding the master equation.
        sparse: Whether to use sparse matrices (default: False for ed, True for iterative)
        method: 'ed' (exact diagonalization) or 'iterative' (iterative bicgstabl)
        rho0: starting density matrix for the iterative diagonalization (default: None)
        kwargs...: additional kwargs passed to bicgstabl

    For full docs please consult SciPy documentation at
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.bicgstab.html

    Keyword Args:
        maxiter: maximum number of iterations for the iterative solver (default: None)
        tol: The precision for the calculation (default: 1e-05)
        callback: User-supplied function to call after each iteration. It is called as callback(xk),
                  where xk is the current solution vector

    Returns:
        The steady-state density matrix.
    """
    from numpy import sqrt, array

    if sparse is None:
        sparse = True

    M = lindblad.hilbert.physical.n_states

    if method == "ed":
        if not sparse:
            from numpy.linalg import eigh
            from warnings import warn

            warn(
                """For reasons unknown to me, using dense diagonalisation on this
                matrix results in very low precision of the resulting steady-state
                since the update to numpy 1.9.
                We suggest using sparse=True, however, if you wish not to, you have
                been warned.
                Your digits are your responsibility now.""")

            lind_mat = lindblad.to_dense()

            ldagl = lind_mat.H * lind_mat
            w, v = eigh(ldagl)

        else:
            from scipy.sparse.linalg import eigsh

            lind_mat = lindblad.to_sparse()
            ldagl = lind_mat.H * lind_mat

            w, v = eigsh(ldagl, which="SM", k=2)

        print("Minimum eigenvalue is: ", w[0])
        rho = v[:, 0].reshape((M, M))
        rho = rho / rho.trace()

    elif method == "iterative":
        # An extra row, holding the trace of the density matrix, is appended
        # at the bottom, making the array M^2+1 long. This is needed to
        # enforce the trace-1 condition.
        L = lindblad.to_linear_operator(sparse=sparse, append_trace=True)

        # Initial density matrix ( + trace condition)
        Lrho_start = np.zeros((M**2 + 1), dtype=L.dtype)
        if rho0 is None:
            Lrho_start[0] = 1.0
            Lrho_start[-1] = 1.0
        else:
            Lrho_start[:-1] = rho0.reshape(-1)
            Lrho_start[-1] = rho0.trace()

        # Target residual (everything 0 and trace 1)
        Lrho_target = np.zeros((M**2 + 1), dtype=L.dtype)
        Lrho_target[-1] = 1.0

        # Iterative solver
        print("Starting iterative solver...")
        res, info = bicgstab(L, Lrho_target, x0=Lrho_start, **kwargs)

        rho = res[:-1].reshape((M, M))
        if info == 0:
            print("Converged trace is ", rho.trace())
        elif info > 0:
            print("Failed to converge after ", info, " ( trace is ",
                  rho.trace(), " )")
        elif info < 0:
            print("An error occured: ", info)

    else:
        raise ValueError("method must be 'ed' or 'iterative'")

    return rho
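The 'ed' branch boils down to a few lines: the steady state is the null vector of the Hermitian matrix L†L, reshaped to a matrix and normalized to unit trace. A toy sketch of just that mechanic, with a random sparse matrix standing in for lindblad.to_sparse() (a real Lindbladian has an exact zero mode; this stand-in only has a smallest one):

import numpy as np
from scipy import sparse
from scipy.sparse.linalg import eigsh

M = 8                                           # toy Hilbert-space dimension
lind = sparse.random(M * M, M * M, density=0.2, random_state=0, format='csc')

ldagl = lind.conj().T @ lind                    # hermitian L^dagger L
w, v = eigsh(ldagl, which="SM", k=2)            # eigenpairs nearest zero
rho = v[:, 0].reshape((M, M))
rho = rho / np.trace(rho)                       # enforce trace one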