Example #1
from numpy import shape, dot, conj, diag, amax, zeros, eye, append, ones, flatnonzero
from numpy.linalg import matrix_rank
from scipy.linalg import eigh
import warnings

def geneigh(A,B,tol=1e-12):
    """
    Solves the generalized eigenvalue problem also in the case where A and B share a common
    null-space. The eigenvalues corresponding to the null-space are given a NaN value.
    The null-space is defined with the tolerance tol.
    """
    # first check if there is a null-space issue
    if matrix_rank(B,tol)==shape(B)[0]:
        return eigh(A,B)
    # first diagonalize the overlap matrix B
    Be,Bv=eigh(B)
    # rewrite the A matrix in the B-matrix eigenspace
    At=dot(conj(Bv.T),dot(A,Bv))
    Bt=diag(Be)
    # detect shared null-space. that is given by the first n null eigenvalues of B
    idx=flatnonzero(Be>tol)[0]
    # check that the B matrix null-space is shared by A.
    m=amax(abs(At[0:idx,:].flatten()))
    if m>tol:
        warnings.warn('Maximum non-diagonal element in A written in B null-space is bigger than the tolerance \''+str(tol)+'\'.',UserWarning)
    # diagonalize the non-null-space part of the problem
    Et,Vt=eigh(At[idx:,idx:],Bt[idx:,idx:])
    # define Ut, the change of basis in the non-truncated space
    Ut=zeros(shape(A),A.dtype)
    Ut[0:idx,0:idx]=eye(idx)
    Ut[idx:,idx:]=Vt
    U=dot(Bv,Ut)
    E=append(float('NaN')*ones(idx),Et)
    return E,U
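A minimal usage sketch, assuming the imports above: the two matrices below share the null direction (1, -1, 0), so the first returned eigenvalue should come back as NaN.

import numpy as np

P = np.array([[1., 1., 0.],
              [1., 1., 0.],
              [0., 0., 2.]])
A, B = 3.0*P, P        # both annihilate the vector (1, -1, 0)
E, U = geneigh(A, B)
print(E)               # roughly [nan, 3., 3.]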
Example #2
import logging
import numpy as SP        # numpy provides trace/eye (older code imported scipy as SP)
from scipy import linalg

def jitEigh(A,maxTries=10,warning=True):
    """
    Perform an eigenvalue decomposition with jitter;

    works like jitChol.
    """
    jitter = 0
    i = 0

    while i < maxTries:
        if jitter == 0:
            jitter = abs(SP.trace(A))/A.shape[0]*1e-6
            S,U = linalg.eigh(A)
        else:
            if warning:
                logging.error("Adding jitter of %f in jitEigh()." % jitter)
            S,U = linalg.eigh(A+jitter*SP.eye(A.shape[0]))

        if S.min() > 1E-10:
            return S,U

        jitter = jitter*10
        i += 1

    raise linalg.LinAlgError("Matrix non positive definite, jitter of " + str(jitter) + " added but failed after " + str(i) + " trials.")
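A quick sketch of a call, assuming the imports added above; the rank-deficient Gram matrix forces the jitter branch.

import numpy as np

np.random.seed(0)
x = np.random.randn(5, 2)
A = x.dot(x.T)            # 5x5 but only rank 2, so not positive definite
S, U = jitEigh(A)
print(S.min() > 1e-10)    # True once enough jitter has been added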
Example #3
def eigenvectors(h,nk=10):
  import numpy as np
  import scipy.linalg as lg
  if h.dimensionality==0:
    vv = lg.eigh(h.intra)
    vecs = [v for v in vv[1].transpose()]
    return vv[0],vecs
  elif h.dimensionality>0:
    f = h.get_hk_gen()
    if h.dimensionality==1: kp = np.linspace(0.,1.0,nk,endpoint=False)
    if h.dimensionality==2: 
      kp = []
      for k1 in np.linspace(0.,1.0,nk,endpoint=False):
        for k2 in np.linspace(0.,1.0,nk,endpoint=False):
          kp.append([k1,k2]) # store
    eigvecs = np.zeros((len(kp)*h.intra.shape[0],h.intra.shape[0]),dtype=complex) # eigenvectors
    eigvals = np.zeros((len(kp)*h.intra.shape[0])) # eigenvalues
    iv = 0
    for k in kp: # loop over kpoints
      hk = f(k)  # kdependent hamiltonian
      vv = lg.eigh(hk) # diagonalize k hamiltonian
      for (e,v) in zip(vv[0],vv[1].transpose()):
        eigvecs[iv] = v.copy()
        eigvals[iv] = e.copy()
        iv += 1
    return eigvals,eigvecs
  else:
    raise ValueError('dimensionality not supported')
Example #4
import logging
import numpy as SP        # numpy provides trace/eye (older code imported scipy as SP)
from scipy import linalg

def jitEigh(A,maxTries=10,warning=True):
    """
    Perform an eigenvalue decomposition with jitter;

    works like jitChol.
    """
    jitter = 0
    i = 0

    while i < maxTries:
        try:
            if jitter == 0:
                jitter = abs(SP.trace(A))/A.shape[0]*1e-6
                S,U = linalg.eigh(A)
            else:
                if warning:
                    logging.error("Adding jitter of %f in jitEigh()." % jitter)
                S,U = linalg.eigh(A+jitter*SP.eye(A.shape[0]))

            if S.min() > 1E-10:
                return S,U

        except linalg.LinAlgError:
            pass

        jitter = jitter*10
        i += 1

    raise linalg.LinAlgError("Matrix non positive definite, jitter of " + str(jitter) + " added but failed after " + str(i) + " trials.")
Example #5
def numpy_matrix_operator_with_arrays_and_products_factory(dim_source, dim_range, count_source, count_range, seed,
                                                           source_id=None, range_id=None):
    from scipy.linalg import eigh
    op, _, U, V = numpy_matrix_operator_with_arrays_factory(dim_source, dim_range, count_source, count_range, seed,
                                                            source_id=source_id, range_id=range_id)
    if dim_source > 0:
        while True:
            sp = np.random.random((dim_source, dim_source))
            sp = sp.T.dot(sp)
            evals = eigh(sp, eigvals_only=True)
            if np.min(evals) > 1e-6:
                break
        sp = NumpyMatrixOperator(sp, source_id=source_id, range_id=source_id)
    else:
        sp = NumpyMatrixOperator(np.zeros((0, 0)), source_id=source_id, range_id=source_id)
    if dim_range > 0:
        while True:
            rp = np.random.random((dim_range, dim_range))
            rp = rp.T.dot(rp)
            evals = eigh(rp, eigvals_only=True)
            if np.min(evals) > 1e-6:
                break
        rp = NumpyMatrixOperator(rp, source_id=range_id, range_id=range_id)
    else:
        rp = NumpyMatrixOperator(np.zeros((0, 0)), source_id=range_id, range_id=range_id)
    return op, None, U, V, sp, rp
Example #6
 def getEig1(self):
     '''
     '''
     if self.eig1 is None:
         #compute eig1
         if self.K1 is not None:
             if self.K1rot is None:
                 self.K1rot = rotSymm(self.K1, eig = self.eig0, exponent = -0.5, gamma=self.gamma0,delta = self.delta,forceSymm = False)
                 self.K1rot = rotSymm(self.K1rot.T, eig = self.eig0, exponent = -0.5, gamma=self.gamma0,delta = self.delta,forceSymm = False)
             self.eig1 = LA.eigh(self.K1rot)
         elif self.G1 is not None:
             [N,k] = self.G1.shape
             if self.G1rot is None:
                 self.G1rot = rotSymm(self.G1, eig = self.eig0, exponent = -0.5, gamma=self.gamma0,delta = self.delta,forceSymm = False)
             
             try:
                 [U,S,V] = LA.svd(self.G1rot,full_matrices = False)
                 self.eig1 = [S*S,U]
             except LA.LinAlgError:  # revert to Eigenvalue decomposition
                 print "Got SVD exception, trying eigenvalue decomposition of square of G. Note that this is a little bit less accurate"
                 [S_,V_] = LA.eigh(self.G1rot.T.dot(self.G1rot))
                 S_nonz=(S_>0.0)
                 S1 = S_[S_nonz]
                 U1=self.G1rot.dot(V_[:,S_nonz]/SP.sqrt(S1))
                 self.eig1=[S1,U1]
     return self.eig1
Example #7
def _eval_cov_learner(X, train_ix, test_ix, model_prec, model_cov,
                      cov_learner, ips_flag=True):
    X_train = X[train_ix, ...]
    alpha_max_ = alpha_max(X_train)
    if model_prec is None and model_cov is None:
        X_test = X[test_ix, ...]
    elif model_cov is None:
        eigvals, eigvecs = linalg.eigh(model_prec)
        X_test = np.diag(1. / np.sqrt(eigvals)).dot(eigvecs.T)
    else:
        eigvals, eigvecs = linalg.eigh(model_prec)
        X_test = np.diag(np.sqrt(eigvals)).dot(eigvecs.T)
    cov_learner_ = clone(cov_learner)
    setattr(cov_learner_, 'alpha', cov_learner_.alpha * alpha_max_)
    if not ips_flag:
        score = cov_learner_.fit(X_train).score(X_test)
    elif cov_learner.score_norm != "ell0":
        # dual split variable contains exact zeros!
        aux_prec = cov_learner_.fit(X_train).auxiliary_prec_
        mask = np.abs(aux_prec) > machine_eps(0.)
        ips = IPS(support=mask, score_norm=cov_learner_.score_norm)
        score = ips.fit(X_train).score(X_test)
    else:
        raise ValueError('ell0 scoring in CV_loop and IPS are incompatible')

    # make scores maximal at optimum
    if cov_learner_.score_norm not in {'loglikelihood', None}:
        score *= -1.
    return score
Example #8
def test_hermitian():
    np.random.seed(1234)

    sizes = [3, 10, 50]
    ks = [1, 3, 10, 50]
    gens = [True, False]

    for size, k, gen in itertools.product(sizes, ks, gens):
        if k > size:
            continue

        H = np.random.rand(size, size) + 1.j * np.random.rand(size, size)
        H = 10 * np.eye(size) + H + H.T.conj()

        X = np.random.rand(size, k)

        if not gen:
            B = np.eye(size)
            w, v = lobpcg(H, X, maxiter=5000)
            w0, v0 = eigh(H)
        else:
            B = np.random.rand(size, size) + 1.j * np.random.rand(size, size)
            B = 10 * np.eye(size) + B.dot(B.T.conj())
            w, v = lobpcg(H, X, B, maxiter=5000)
            w0, v0 = eigh(H, B)

        for wx, vx in zip(w, v.T):
            # Check eigenvector
            assert_allclose(np.linalg.norm(H.dot(vx) - B.dot(vx) * wx) / np.linalg.norm(H.dot(vx)),
                            0, atol=5e-4, rtol=0)

            # Compare eigenvalues
            j = np.argmin(abs(w0 - wx))
            assert_allclose(wx, w0[j], rtol=1e-4)
Example #9
    def set(self,gse):
        '''
        Set the Lambda matrix, Q matrix and QT matrix of the block.

        Parameters
        ----------
        gse : number
            The groundstate energy.
        '''
        sign=self.controllers['sign']
        if self.method=='S':
            lczs,Qs=self.controllers['lczs'],self.controllers['Qs']
            self.data['niters']=np.zeros(Qs.shape[0],dtype=np.int32)
            self.data['Lambdas']=np.zeros((Qs.shape[0],Qs.shape[2]),dtype=np.float64)
            self.data['Qs']=np.zeros(Qs.shape,dtype=Qs.dtype)
            self.data['QTs']=np.zeros((Qs.shape[0],Qs.shape[2]),dtype=Qs.dtype)
            for i,(lcz,Q) in enumerate(zip(lczs,Qs)):
                if lcz.niter>0:
                    E,V=sl.eigh(lcz.T,eigvals_only=False)
                    self.data['niters'][i]=lcz.niter
                    self.data['Lambdas'][i,0:lcz.niter]=sign*(E-gse)
                    self.data['Qs'][i,:,0:lcz.niter]=Q[:,0:lcz.niter].dot(V)
                    self.data['QTs'][i,0:lcz.niter]=lcz.P[0,0]*V[0,:].conjugate()
        else:
            lanczos=self.controllers['lanczos']
            E,V=sl.eigh(lanczos.T,eigvals_only=False)
            self.data['Lambda']=sign*(E-gse)
            self.data['Q']=lanczos.P[:min(lanczos.nv0,lanczos.niter),:].T.conjugate().dot(V[:min(lanczos.nv0,lanczos.niter),:])
            self.data['QT']=HM.dagger(self.data['Q'])
Example #10
from numpy import array, savetxt, zeros
from scipy.linalg import eigh
import matplotlib.pyplot as plt

def TBAEB(engine,app):
    nmatrix=len(engine.generators['h'].table)
    if app.path is not None:
        key=next(iter(app.path.mesh))
        result=zeros((app.path.rank[key],nmatrix+1))
        if len(app.path.mesh[key].shape)==1:
            result[:,0]=app.path.mesh[key]
        else:
            result[:,0]=array(range(app.path.rank[key]))
        for i,parameter in enumerate(list(app.path.mesh[key])):
            result[i,1:]=eigh(engine.matrix(**{key:parameter}),eigvals_only=True)
    else:
        result=zeros((2,nmatrix+1))
        result[:,0]=array(range(2))
        result[0,1:]=eigh(engine.matrix(),eigvals_only=True)
        result[1,1:]=result[0,1:]
    if app.save_data:
        savetxt(engine.dout+'/'+engine.name.full+'_EB.dat',result)
    if app.plot:
        plt.title(engine.name.full+'_EB')
        plt.plot(result[:,0],result[:,1:])
        if app.show:
            plt.show()
        else:
            plt.savefig(engine.dout+'/'+engine.name.full+'_EB.png')
        plt.close()
Example #11
File: proc.py Project: EPFL-LQM/gpvmc
import warnings
import numpy as np
import numpy.linalg as lg
from scipy.linalg import eigh

def geneigh(A,B,tol=1e-12):
    """
    Solves the generalized eigenvalue problem also in the case where A and B share a common
    null-space. The eigenvalues corresponding to the null-space are given a NaN value.
    The null-space is defined with the tolerance tol.
    """
    # first check if there is a null-space issue
    if lg.matrix_rank(B,tol)==np.shape(B)[0]:
        return eigh(A,B)
    # first diagonalize the overlap matrix B
    Be,Bv=eigh(B)
    # rewrite the A matrix in the B-matrix eigenspace
    At=np.dot(np.conj(Bv.T),np.dot(A,Bv))
    Bt=np.diag(Be)
    # detect shared null-space. that is given by the first n null eigenvalues of B
    try:
        idx=next(i for i,v in enumerate(Be) if v>tol)
    except StopIteration:
        raise(RuntimeError('geneigh: Rank of B < B.shape[0] but null-space could not be found!'))
    # check that the B matrix null-space is shared by A.
    m=np.amax(abs(At[0:idx,:].flatten()))
    if m>tol:
        warnings.warn('Maximum non-diagonal element in A written in B null-space is bigger than the tolerance \''+str(tol)+'\'.',UserWarning)
    # diagonalize the non-null-space part of the problem
    Et,Vt=eigh(At[idx:,idx:],Bt[idx:,idx:])
    # define Ut, the change of basis in the non-truncated space
    Ut=np.zeros(np.shape(A),A.dtype)
    Ut[0:idx,0:idx]=np.eye(idx)
    Ut[idx:,idx:]=Vt
    U=np.dot(Bv,Ut)
    E=np.concatenate((float('NaN')*np.ones(idx),Et))
    return E,U
Example #12
 def simulate(self,standardize=True):
     self._update_cache()
     RV = SP.zeros((self.N,self.P))
     # region
     Z = SP.randn(self.S,self.P)
     Sc,Uc = LA.eigh(self.Cr.K())
     Sc[Sc<1e-9] = 0
     USh_c = Uc*Sc[SP.newaxis,:]**0.5 
     RV += SP.dot(SP.dot(self.Xr,Z),USh_c.T)
     # background
     Z = SP.randn(self.N,self.P)
     USh_r = self.cache['Lr'].T*self.cache['Srstar'][SP.newaxis,:]**0.5
     Sc,Uc = LA.eigh(self.Cg.K())
     Sc[Sc<1e-9] = 0
     USh_c = Uc*Sc[SP.newaxis,:]**0.5
     RV += SP.dot(SP.dot(USh_r,Z),USh_c.T)
     # noise
     Z = SP.randn(self.N,self.P)
     Sc,Uc = LA.eigh(self.Cn.K())
     Sc[Sc<1e-9] = 0
     USh_c = Uc*Sc[SP.newaxis,:]**0.5 
     RV += SP.dot(Z,USh_c.T)
     # standardize
     if standardize:
         RV-=RV.mean(0)
         RV/=RV.std(0) 
     return RV
Example #13
File: ica.py Project: edamaraju/ica
def pca_whiten(x2d, n_comp, verbose=True):
    """ data Whitening
    *Input
    x2d : 2d data matrix of observations by variables
    n_comp: Number of components to retain
    *Output
    Xwhite : Whitened X
    white : whitening matrix (Xwhite = np.dot(white,X))
    dewhite : dewhitening matrix (X = np.dot(dewhite,Xwhite))
    """
    x2d_demean = x2d - x2d.mean(axis=1).reshape((-1, 1))
    samples, features = x2d_demean.shape
    M = min((samples, features))
    if samples > features:
        cov = dot(x2d_demean.T, x2d_demean) / (x2d.shape[0] - 1)
        w, v = eigh(cov, eigvals=(M-n_comp, M-1))
        D, Di = diagsqrts(w)
        u = dot(dot(x2d_demean,v),Di)
        x_white = v.T
        white = dot(Di, u.T)
        dewhite = dot(u, D)
    else:
        cov = dot(x2d_demean, x2d_demean.T) / (x2d.shape[1] - 1)
        w, u = eigh(cov, eigvals=(M-n_comp, M-1))
        D, Di = diagsqrts(w)        
        white = dot(Di, u.T)
        x_white = dot(white, x2d_demean)
        dewhite = dot(u, D)
    return (x_white, white, dewhite)
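For context, a self-contained sketch of the same whitening idea; diagsqrts is not shown above, so the scaling matrices are built directly, and subset_by_index is the modern SciPy spelling of the legacy eigvals= keyword used in the function.

import numpy as np
from scipy.linalg import eigh

np.random.seed(0)
x = np.random.randn(50, 200)                 # 50 observations, 200 variables
xd = x - x.mean(axis=1, keepdims=True)
cov = xd.dot(xd.T) / (x.shape[1] - 1)        # 50x50 sample covariance
M, n_comp = cov.shape[0], 10
w, u = eigh(cov, subset_by_index=(M - n_comp, M - 1))  # top n_comp eigenpairs
white = np.diag(1.0 / np.sqrt(w)).dot(u.T)   # whitening matrix
x_white = white.dot(xd)
print(np.allclose(np.cov(x_white), np.eye(n_comp)))    # ~identity covariance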
Example #14
File: fit_utils.py Project: PMBio/mtSet
def fitPairwiseModel(Y,XX=None,S_XX=None,U_XX=None,verbose=False):
    """ initializes parameters """
    N,P = Y.shape
    RV = fitSingleTraitModel(Y,XX=XX,S_XX=S_XX,U_XX=U_XX,verbose=verbose)
    Cg = covariance.freeform(2)
    Cn = covariance.freeform(2)
    gp = gp2kronSum(mean(Y[:,0:2]),Cg,Cn,XX=XX,S_XX=S_XX,U_XX=U_XX)
    conv2 = SP.ones((P,P),dtype=bool)
    rho_g = SP.ones((P,P))
    rho_n = SP.ones((P,P))
    for p1 in range(P):
        for p2 in range(p1):
            if verbose:
                print('.. fitting correlation (%d,%d)'%(p1,p2))
            gp.setY(Y[:,[p1,p2]])
            Cg_params0 = SP.array([SP.sqrt(RV['varST'][p1,0]),1e-6*SP.randn(),SP.sqrt(RV['varST'][p2,0])])
            Cn_params0 = SP.array([SP.sqrt(RV['varST'][p1,1]),1e-6*SP.randn(),SP.sqrt(RV['varST'][p2,1])])
            params0 = {'Cg':Cg_params0,'Cn':Cn_params0}
            conv2[p1,p2],info = OPT.opt_hyper(gp,params0,factr=1e3)
            rho_g[p1,p2] = Cg.K()[0,1]/SP.sqrt(Cg.K().diagonal().prod())
            rho_n[p1,p2] = Cn.K()[0,1]/SP.sqrt(Cn.K().diagonal().prod())
            conv2[p2,p1] = conv2[p1,p2]; rho_g[p2,p1] = rho_g[p1,p2]; rho_n[p2,p1] = rho_n[p1,p2]
    RV['Cg0'] = rho_g*SP.dot(SP.sqrt(RV['varST'][:,0:1]),SP.sqrt(RV['varST'][:,0:1].T))
    RV['Cn0'] = rho_n*SP.dot(SP.sqrt(RV['varST'][:,1:2]),SP.sqrt(RV['varST'][:,1:2].T))
    RV['conv2'] = conv2
    #3. regularizes covariance matrices
    offset_g = abs(SP.minimum(LA.eigh(RV['Cg0'])[0].min(),0))+1e-4
    offset_n = abs(SP.minimum(LA.eigh(RV['Cn0'])[0].min(),0))+1e-4
    RV['Cg0_reg'] = RV['Cg0']+offset_g*SP.eye(P)
    RV['Cn0_reg'] = RV['Cn0']+offset_n*SP.eye(P)
    RV['params0_Cg']=LA.cholesky(RV['Cg0_reg'])[SP.tril_indices(P)]
    RV['params0_Cn']=LA.cholesky(RV['Cn0_reg'])[SP.tril_indices(P)]
    return RV
Example #15
File: cca.py Project: osdf/utils
import numpy as np
import scipy.linalg as la

def cca(X, Y, k, SMALL=1e-5):
    """Standard CCA.

    Views _X_ and _Y_ are per *row*.

    For an explanation of the algorithm, 
    see Section 6.4 and 6.5 in
    Kernel Methods for Pattern Analysis.
    """
    n, dx = X.shape
    C = np.cov(X.T, Y.T)
    Cxy = C[:dx, dx:]
    Cxx = C[:dx, :dx] + SMALL * np.eye(dx)
    Cyy = C[dx:, dx:] + SMALL * np.eye(Y.shape[1])

    # Do not use la.sqrtm.
    # This can be done by hand...
    xeval, xevec = la.eigh(Cxx)
    yeval, yevec = la.eigh(Cyy)

    # ... because the inverses are then simple
    isqrtx = np.dot(xevec, (xevec/np.sqrt(xeval)).T)
    isqrty = np.dot(yevec, (yevec/np.sqrt(yeval)).T)

    tmp = np.dot(isqrtx, Cxy)
    tmp = np.dot(tmp, isqrty)
    [U, S, V] = la.svd(tmp, full_matrices=False)

    ccX = np.dot(isqrtx, U[:,:k])
    ccY = np.dot(isqrty, V[:k].T)
    
    return ccX, ccY
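A small sketch of calling this on two correlated views, assuming the imports added above; the leading canonical pair should recover the shared signal.

import numpy as np

np.random.seed(0)
n = 500
z = np.random.randn(n, 1)                    # shared latent signal
X = np.hstack([z, np.random.randn(n, 3)])    # view 1: signal + noise dims
Y = np.hstack([-z, np.random.randn(n, 2)])   # view 2: signal + noise dims
ccX, ccY = cca(X, Y, k=1)
u, v = X.dot(ccX), Y.dot(ccY)
print(abs(np.corrcoef(u.ravel(), v.ravel())[0, 1]))  # close to 1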
Example #16
File: csp.py Project: rajul/mne-python
    def _fit(self, cov_a, cov_b):
        """Aux Function (modifies cov_a and cov_b in-place)."""
        cov_a /= np.trace(cov_a)
        cov_b /= np.trace(cov_b)
        # computes the eigen values
        lambda_, u = linalg.eigh(cov_a + cov_b)
        # sort them
        ind = np.argsort(lambda_)[::-1]
        lambda2_ = lambda_[ind]

        u = u[:, ind]
        p = np.dot(np.sqrt(linalg.pinv(np.diag(lambda2_))), u.T)

        # Compute the generalized eigen value problem
        w_a = np.dot(np.dot(p, cov_a), p.T)
        w_b = np.dot(np.dot(p, cov_b), p.T)
        # and solve it
        vals, vecs = linalg.eigh(w_a, w_b)
        # sort vectors by discriminative power using eigen values
        ind = np.argsort(np.maximum(vals, 1.0 / vals))[::-1]
        vecs = vecs[:, ind]
        # and project
        w = np.dot(vecs.T, p)

        self.filters_ = w
        self.patterns_ = linalg.pinv(w).T
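A sketch of exercising this helper outside its class: it only reads np/linalg and writes two attributes, so a bare namespace object can stand in for self (an illustration, not the library's intended API).

import types
import numpy as np
from scipy import linalg

np.random.seed(0)
cov_a = np.cov(np.random.randn(4, 100))   # two full-rank 4x4 covariances
cov_b = np.cov(np.random.randn(4, 100))
obj = types.SimpleNamespace()
_fit(obj, cov_a, cov_b)                   # note: modifies cov_a/cov_b in place
print(obj.filters_.shape)                 # (4, 4)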
Example #17
 def __init__(self,matrix,error,overlap=None,overlap_err=None):
   if overlap_err is None:
     self.func = lambda mat:lin.eigh(mat,overlap)[0]
     self.resample = lambda: gaussian_matrix_resample(matrix,error)
   else:
     self.func = lambda mats:lin.eigh(mats[0],mats[1])[0]
     self.resample = lambda: (gaussian_matrix_resample(matrix,error),
                                gaussian_matrix_resample(overlap,overlap_err))
Example #18
 def time_sakurai(self, n, solver):
     m = 3
     if solver == 'lobpcg':
         X = rand(n, m)
         eigs, vecs, resnh = lobpcg(self.A, X, self.B, tol=1e-6, maxiter=500,
                                    retResidualNormsHistory=1)
     else:
         eigh(self.A_dense, self.B_dense, eigvals_only=True, eigvals=(0, m - 1))
Example #19
import re
import numpy as np
import scipy.linalg as la

def eigh_gen(A, B):
    """Solve the generalised eigenvalue problem. :math:`\mathbf{A} \mathbf{v} =
    \lambda \mathbf{B} \mathbf{v}`
    
    This routine will attempt to correct for when `B` is not positive definite
    (usually due to numerical precision), by adding a constant diagonal to make
    all of its eigenvalues positive.
    
    Parameters
    ----------
    A, B : np.ndarray
        Matrices to operate on.
        
    Returns
    -------
    evals : np.ndarray
        Eigenvalues of the problem.
    evecs : np.ndarray
        2D array of eigenvectors (packed column by column).
    add_const : scalar
        The constant added on the diagonal to regularise.
    """
    add_const = 0.0

    if (A == 0).all():
        evals, evecs = np.zeros(A.shape[0], dtype=A.real.dtype), np.identity(A.shape[0], dtype=A.dtype)

    else:
    
        try:
            evals, evecs = la.eigh(A, B, overwrite_a=True, overwrite_b=True)
        except la.LinAlgError as e:
            print("Error occurred in eigenvalue solve.")
            # Get error number
            mo = re.search('order (\\d+)', str(e))

            # If exception unrecognised then re-raise.
            if mo is None:
                raise e

            errno = int(mo.group(1))

            if errno < (A.shape[0]+1):

                print("Matrix probably not positive definite due to numerical issues. "
                      "Trying to add a constant diagonal....")

                evb = la.eigvalsh(B)
                add_const = 1e-15 * evb[-1] - 2.0 * evb[0] + 1e-60

                B[np.diag_indices(B.shape[0])] += add_const
                evals, evecs = la.eigh(A, B, overwrite_a=True, overwrite_b=True)

            else:
                print "Strange convergence issue. Trying non divide and conquer routine."
                evals, evecs = la.eigh(A, B, overwrite_a=True, overwrite_b=True, turbo=False)

    return evals, evecs, add_const
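Since the solve is done with overwrite_a/overwrite_b, passing copies keeps the inputs intact; a sketch of the ordinary, well-conditioned path, assuming the imports added above:

import numpy as np

np.random.seed(0)
A = np.diag([1.0, 2.0, 3.0])
v = np.random.randn(3, 3)
B = v.dot(v.T) + 3.0 * np.eye(3)   # comfortably positive definite
evals, evecs, add_const = eigh_gen(A.copy(), B.copy())
print(evals, add_const)            # add_const stays 0.0 on this path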
Example #20
def diagonalize(correlator_pannel, t0, td, generalized=False):
    length = correlator_pannel.shape[0]
    n = int(np.sqrt(length))
    assert t0 is not None
    # Here we access the panel; major_xs gives time(n), mean in case it
    # was a multi-correlator; should have no effect on an already averaged one
    A = np.matrix(np.reshape(correlator_pannel.major_xs(td).mean().values, (n, n)))
    B = np.matrix(np.reshape(correlator_pannel.major_xs(t0).mean().values, (n, n)))
    # Require A and B to be hermitian for our generalized eigenvalue
    # problem method to work. Here we force the matrices to be
    # hermitian. This is justified since it is just using the other
    # measurement of the same value and averaging them.
    A = hermitionize(A)
    B = hermitionize(B)
    logging.debug("A = {} \n B = {}".format(A, B))

    if generalized:
        logging.info("running generalized eigensolver")
        evals, evecs = LA.eigh(A, b=B)  # generalized eig problem; eigh works only if hermitian
        evecs = np.matrix(evecs)
        V = evecs
    else:
        # Instead of using the generalized eigen problem, we could solve a
        # regular eigen problem involving Binvsqrt
        Binvsqrt =  LA.inv(LA.sqrtm(B))
        logging.info("condition number: {}".format(cond(Binvsqrt*A*Binvsqrt)))
        evals, evecs = LA.eigh(Binvsqrt*A*Binvsqrt)
        evecs = np.matrix(evecs)
        V = np.matrix(Binvsqrt)*evecs

    if min(evals) < 0.05:
        logging.warn("Warning, low eigenvalue detected. Eval={}".format(min(evals)))
    else:
        logging.info("lowest eigenvalue={}".format(min(evals)))
    logging.debug("eigen values are {}".format(evals))
    logging.debug("eigen vectors are {}".format(evecs))
    n = len(evecs)
    logging.debug("Matrix size {N}x{N}".format(N=n))

    def rotate(x):
        M = np.matrix(np.resize(x, (n, n)))
        M = hermitionize(M)
        D = V.H * M * V
        R = np.array(D).flatten()
        P = pd.Series(R)
        return P

    diag = correlator_pannel.apply(rotate, "items")
    diag.items = ["{}{}".format(i,j) for i in reversed(range(n)) for j in reversed(range(n))]

    # This method simultaneously diagonalizes at t0 and td. Should be
    # identity at t0 and the eigenvalues at td
    assert compare_matrix(np.reshape(diag.major_xs(t0).mean().values, (n, n)),
                          np.identity(n)), "Rotation error: is not ~identity at t0"
    assert compare_matrix(np.reshape(diag.major_xs(td).mean().values, (n, n)),
                          np.diag(evals)), "Rotation error: Is not ~Lambda at td"

    return diag
Example #21
def generate_multiKron(C,R):
    S_c,U_c = LA.eigh(C+1E-6*SP.eye(C.shape[0]))
    S_r,U_r = LA.eigh(R+1E-6*SP.eye(R.shape[0]))
    US_c = SP.sqrt(S_c) * U_c
    US_r = SP.sqrt(S_r) * U_r
    # kron(US_c,US_r) vec(Y) = vec(US_r.T*Y*US_c)
    Y = SP.random.randn(R.shape[0],C.shape[0])
    Y = SP.dot(US_r,SP.dot(Y,US_c.T))
    return Y
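LA and SP are not imported in the snippet above; a sketch that supplies plausible aliases (an assumption: numpy for SP, scipy.linalg for LA) and draws one matrix-normal sample:

import numpy as SP        # assumption: SP plays the numpy/scipy role here
import scipy.linalg as LA

C = SP.eye(3)             # column covariance
R = 2.0 * SP.eye(4)       # row covariance
Y = generate_multiKron(C, R)
print(Y.shape)            # (4, 3)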
Example #22
File: Solver.py Project: bebopsan/peyeQM
 def solve_spectral(self, simulation, equation):
     """
     This method solves for the eigen-values (lam) and eigen_vectors 
     ({u}) of an equation in the frequency domain of the form:
         [K]{u}=lam[M]{u}.
     """
     from numpy import zeros
     from scipy.linalg import eigh, eigvalsh
     n = simulation.domain.nodes.n 
     solver_param = simulation.solver_param
     g = equation['sol_vec']
     remove = equation['dir_positions']
      print('Solving eigenvalue problem...\n')
      if 'y' in solver_param[0] and 'n' in solver_param[1]:
         n_vals = int(solver_param[2])
         v = eigvalsh(equation['left_side'], equation['right_side'], \
                                 eigvals = (0, n_vals-1))
 #                v = v/2
          print('The Eigenvalues are:\n', v)
         return v
 
      elif 'y' in solver_param[0] and 'y' in solver_param[1]:
         n_vals = int(solver_param[2])
         n_vects = int(solver_param[3])
         n_solutions = max(n_vals,n_vects)
         v, dir_solution = eigh(equation['left_side'], equation['right_side'], \
                                  eigvals = (0, n_solutions-1))
 #                v = v/2
         if equation['vectorial']:
             solution = []
             for i in range(n_vects):
                 solution.append(self.build_solution(dir_solution[:, i], g, remove, True))
         else:
             solution = zeros((n, n_vects))
             for i in range(n_vects):
                 solution[:,i] = self.build_solution(dir_solution[:, i], g, remove)
         
         return v, solution
 
      elif 'n' in solver_param[0] and 'y' in solver_param[1]:
         n_vects = int(solver_param[3])
         v, dir_solution = eigh(equation['left_side'], equation['right_side'], \
                                 eigvals = (0, n_vects-1))
         
         if equation['vectorial']:
             solution = zeros((n/2, n_vects))
             for i in range(n_vects):
                 solution[:,i] = self.build_solution(dir_solution[:, i], g, remove, True)
         else:
             solution = zeros((n, n_vects))
             for i in range(n_vects):
                 solution[:,i] = self.build_solution(dir_solution[:, i], g, remove)
         
         return solution
     else:
          raise IOError("If you don't want anything, why do you solve?")
Example #23
File: sparse.py Project: mil52603/qutip
def _dense_eigs(data, isherm, vecs, N, eigvals, num_large, num_small):
    """
    Internal functions for computing eigenvalues and eigenstates for a dense
    matrix.
    """
    if debug:
        logger.debug(inspect.stack()[0][3] + ": vectors = " + str(vecs))

    evecs = None

    if vecs:
        if isherm:
            if eigvals == 0:
                evals, evecs = la.eigh(data)
            else:
                if num_small > 0:
                    evals, evecs = la.eigh(
                        data, eigvals=[0, num_small - 1])
                if num_large > 0:
                    evals, evecs = la.eigh(
                        data, eigvals=[N - num_large, N - 1])
        else:
            evals, evecs = la.eig(data)
    else:
        if isherm:
            if eigvals == 0:
                evals = la.eigvalsh(data)
            else:
                if num_small > 0:
                    evals = la.eigvalsh(data, eigvals=[0, num_small - 1])
                if num_large > 0:
                    evals = la.eigvalsh(data, eigvals=[N - num_large, N - 1])
        else:
            evals = la.eigvals(data)

    _zipped = list(zip(evals, range(len(evals))))
    _zipped.sort()
    evals, perm = list(zip(*_zipped))

    if vecs:
        evecs = np.array([evecs[:, k] for k in perm])

    if not isherm and eigvals > 0:
        if vecs:
            if num_small > 0:
                evals, evecs = evals[:num_small], evecs[:num_small]
            elif num_large > 0:
                evals, evecs = evals[(N - num_large):], evecs[(N - num_large):]
        else:
            if num_small > 0:
                evals = evals[:num_small]
            elif num_large > 0:
                evals = evals[(N - num_large):]

    return np.array(evals), np.array(evecs)
Example #24
File: iTdvp.py Project: xrincon/tdvp
def symmNormalization(MPS, chir, chic):
    omega, R = getLargestW(MPS, 'R')
    R = fixPhase(R)
    if np.isreal(R).all(): omega, R = omega.real, R.real
    print "wR", omega, np.isreal(R).all(), "R\n", R

    assym = np.linalg.norm(R - R.T.conj())
    print "assym R", assym

    Rvals, Rvecs = spla.eigh(R)
    Rvals_s = np.sqrt(abs(Rvals))
    Rvals_si = 1. / Rvals_s
    print "Rvals", Rvals

    Rs = np.dot(Rvecs, np.dot(np.diag(Rvals_s), Rvecs.T.conj()))
    ARs = np.tensordot(MPS, Rs, axes=([1,0]))
    Rsi = np.dot(Rvecs, np.dot(np.diag(Rvals_si), Rvecs.T.conj()))
    A1 = np.tensordot(Rsi, ARs, axes=([1,0]))
    A1 = np.transpose(A1, (0, 2, 1))

    omega, L = getLargestW(A1, 'L')
    L = fixPhase(L)
    if np.isreal(L).all(): omega, L = omega.real, L.real
    print "wL", omega, np.isreal(L).all(), "L\n", L

    assym = np.linalg.norm(L - L.T.conj())
    print "assym L", assym

    Lambda2, U = spla.eigh(L)
    A1U = np.tensordot(A1, U, axes=([1,0]))
    A2 = np.tensordot(U.T.conj(), A1U, axes=([1,0]))
    A2 = np.transpose(A2, (0, 2, 1))
    print "Lambda**2", Lambda2#, "\n", U

    Lambda = np.sqrt(abs(Lambda2))
    Lambdas = np.sqrt(Lambda)
    Lambdasi = 1. / Lambdas
    A2Lsi = np.tensordot(A2, np.diag(Lambdasi), axes=([1,0]))
    A3 = np.tensordot(np.diag(Lambdas), A2Lsi, axes=([1,0]))
    A3 = np.transpose(A3, (0, 2, 1))
    print "Lambda", Lambda

    nMPS = A3 / np.sqrt(omega)
    RealLambda = fixPhase(np.diag(Lambda))
    #print "nMPS", nMPS.shape, "\n", nMPS
    print "RealLambda", RealLambda.shape, "\n", RealLambda

    ######### CHECKING RESULT #########
    Trace = np.linalg.norm(RealLambda)
    ELambda = linearOpForR(nMPS, RealLambda).reshape(chir, chic)
    LambdaE = linearOpForL(nMPS, RealLambda).reshape(chir, chic)
    print "Trace(RealLambda)", Trace, "\nE|r)\n", ELambda, "\n(l|E\n", LambdaE

    return RealLambda, nMPS
Example #25
 def time_mikota(self, n, solver):
     m = 10
     if solver == 'lobpcg':
         X = rand(n, m)
         X = orth(X)
         LorU, lower = cho_factor(self.A, lower=0, overwrite_a=0)
         M = LinearOperator(self.shape,
                            matvec=partial(_precond, LorU, lower),
                            matmat=partial(_precond, LorU, lower))
         eigs, vecs = lobpcg(self.A, X, self.B, M, tol=1e-4, maxiter=40)
     else:
         eigh(self.A, self.B, eigvals_only=True, eigvals=(0, m - 1))
Example #26
def grad_geometric_mean(mats, init=None, max_iter=10, tol=1e-7):
    """Return the norm of the covariant derivative at each iteration step of
    geometric_mean. See its docstring for details.

    Norm is intrinsic norm on the tangent space of the manifold of symmetric
    positive definite matrices.

    Returns
    -------
    grad_norm : list of float
        Norm of the covariant derivative in the tangent space at each step.
    """
    mats = np.array(mats)

    # Initialization
    if init is None:
        gmean = np.mean(mats, axis=0)
    else:
        gmean = init
    norm_old = np.inf
    step = 1.
    grad_norm = []
    for n in range(max_iter):
        # Computation of the gradient
        vals_gmean, vecs_gmean = linalg.eigh(gmean)
        gmean_inv_sqrt = _form_symmetric(np.sqrt, 1. / vals_gmean, vecs_gmean)
        whitened_mats = [gmean_inv_sqrt.dot(mat).dot(gmean_inv_sqrt)
                         for mat in mats]
        logs = [_map_eigenvalues(np.log, w_mat) for w_mat in whitened_mats]
        logs_mean = np.mean(logs, axis=0)  # Covariant derivative is
                                           # - gmean.dot(logms_mean)
        norm = np.linalg.norm(logs_mean)  # Norm of the covariant derivative on
                                          # the tangent space at point gmean

        # Update of the minimizer
        vals_log, vecs_log = linalg.eigh(logs_mean)
        gmean_sqrt = _form_symmetric(np.sqrt, vals_gmean, vecs_gmean)
        gmean = gmean_sqrt.dot(
            _form_symmetric(np.exp, vals_log * step, vecs_log)).dot(gmean_sqrt)

        # Update the norm and the step size
        if norm < norm_old:
            norm_old = norm
        if norm > norm_old:
            step = step / 2.
            norm = norm_old

        grad_norm.append(norm / gmean.size)
        if tol is not None and norm / gmean.size < tol:
            break

    return grad_norm
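A sketch of driving this function directly; the two private helpers are not shown above, so minimal stand-ins are defined from how they are used (an assumption about the library's internals, not its actual code):

import numpy as np
from scipy import linalg

def _form_symmetric(function, eigenvalues, eigenvectors):
    # assumed helper: rebuild function(M) from M's eigendecomposition
    return np.dot(eigenvectors * function(eigenvalues), eigenvectors.T)

def _map_eigenvalues(function, symmetric):
    # assumed helper: apply function to the eigenvalues of a symmetric matrix
    vals, vecs = linalg.eigh(symmetric)
    return _form_symmetric(function, vals, vecs)

spds = [np.eye(3), 2.0 * np.eye(3)]           # two SPD matrices
print(grad_geometric_mean(spds, max_iter=5))  # gradient norms shrinking toward 0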
Example #27
    def train(self, X, ncomps=None, energy=None):
        """
        Solve the eigenproblem and stores the basis for future predictions.

        Parameters
        ----------
        X: ndarray or matrix
            Data matrix (nfeatures x nsamples).

        ncomps: int
            Number of components to retain (0 < ncomps <= nsamples).

        energy: float
            Energy level for which to retain components (0 < energy <= 1).
        """
        # k(x,y) = <x,y> + <x,y>^2 + ... + <x,y>^d
        K = np.matrix(np.polyval(np.ones(self._d+1), np.dot(X.T, X)) - 1)

        # center in the feature space
        m = X.shape[1]
        ones = 1./m * np.ones([m,m])
        K = K - (ones*K + K*ones) + ones*K*ones

        # enforce symmetry
        K = (K + K.T) / 2.

        # The kernel Gramian matrix is positive semidefinite,
        # all eigenvalues are nonnegative.
        if ncomps is None and energy is not None:
            # retain eigenvalues till reach specified energy level
            assert 0 < energy <= 1, "energy must be in range (0,1]"
            lambdas, V = eigh(K, overwrite_a=True)

            levels = np.cumsum(np.flipud(lambdas)) / sum(lambdas)
            ncomps = np.argmax(levels >= energy) + 1

            lambdas = lambdas[-ncomps:]
            V = V[:,-ncomps:]
        elif ncomps is not None and energy is None:
            # retain the desired number of components
            assert 0 < ncomps <= m, "ncomps must be in range (0,nsamples]"
            lambdas, V = eigh(K, overwrite_a=True, eigvals=(m-ncomps,m-1))
        else:
            raise TypeError("Exactly one of 'ncomps' and 'energy' arguments must be specified")

        # normalize
        self.eigbasis = np.dot(V, np.diag(1./np.sqrt(lambdas)))

        # stores a reference to the data
        self.X = X

        return self.eigbasis.shape[1]
Example #28
def align_PCA(vertices0, vertices1):
    """
     it accepts and returns lists of lists
     vertices0 can be either MH mesh
     or the concatenation of a few manually aligned scans
    """
    vertices0 = np.array(vertices0)
    vertices1 = np.array(vertices1)

    # Computes the barycenter
    mean0 = np.mean(vertices0,0)
    mean1 = np.mean(vertices1,0)

    # computes covariance matrix and principal axes
    cov0 = np.cov(vertices0.T)
    w0,u0 = eigh(cov0)

    cov1 = np.cov(vertices1.T)
    w1,u1 = eigh(cov1)

    # Computes rotation matrix to go from 1 to 0
    R = np.dot(u0,pinv(u1))

    #apply the transformation
    vertices = w0[-1]/w1[-1]*np.dot(vertices1-mean1,R.T)
    
    # axis can be arbitrarily oriented so we have to check if some rotations
    # around main axes are needed
    dmin = None
    amin = bmin = cmin = None
    
    for a in [0,np.pi]:
        if a != 0 : va = turn_around(vertices[::10],u0[:,0],a)
        else : va = vertices[::2]
        for b in [0,np.pi] :
            if b != 0 : vb = turn_around(va,u0[:,1],b)
            else : vb = va
            for c in [0,np.pi] :
                if c != 0 : vc = turn_around(vb,u0[:,2],c)
                else : vc = vb
                dist = compute_distance(vc+mean0,vertices0)
                if dmin is None or dist < dmin :
                    dmin = dist
                    amin = a
                    bmin = b
                    cmin = c

    if amin : vertices = turn_around(vertices,u0[:,0],amin)
    if bmin : vertices = turn_around(vertices,u0[:,1],bmin)
    if cmin : vertices = turn_around(vertices,u0[:,2],cmin)
        
    return vertices + mean0
Example #29
 def test2by2(self):
     a = 2e0
     S11 = 1e0
     S12 = (1 + a) * exp(-a)
     S = array([[S11, S12], [S12, S11]])
     H11 = -(0.5e0 + exp(-2 * a))
     H12 = -exp(-a) / 2e0 * (3 + a)
     H22 = -(0.5e0 + exp(-2 * a))
     H = array([[H11, H12], [H12, H22]])
     E, C = part_b(H, S)
      print(E)
      print(C)
      print(eigh(H, S))
Example #30
def inner_product_embedding2(C, ndim):
    n = C.shape[0]
    eigvals = (n - ndim, n - 1)
    from scipy.linalg import eigh

    S, V = eigh(C, eigvals=eigvals)
    coords = V.T
    for i in range(ndim):
        coords[i, :] = coords[i, :] * np.sqrt(S[i])

    S, V = eigh(C)  # eigh returns the eigenvalues in ascending order
    S = S[::-1]     # reverse to descending
    return coords, S
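A sketch of recovering coordinates from a Gram matrix with it, assuming a SciPy old enough to accept the legacy eigvals= keyword used above (newer releases call it subset_by_index):

import numpy as np

np.random.seed(0)
P = np.random.randn(2, 6)          # six points in the plane
C = P.T.dot(P)                     # their 6x6 Gram matrix, rank 2
coords, S = inner_product_embedding2(C, 2)
print(np.allclose(coords.T.dot(coords), C))   # Gram matrix is reproduced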
Example #31
def _initialize_metric_mahalanobis(input,
                                   init='identity',
                                   random_state=None,
                                   return_inverse=False,
                                   strict_pd=False,
                                   matrix_name='matrix'):
    """Returns a PSD matrix that can be used as a prior or an initialization
  for the Mahalanobis distance

  Parameters
  ----------
  input : array-like
    The input samples (can be tuples or regular samples).

  init : string or numpy array, optional (default='identity')
    Specification for the matrix to initialize. Possible options are
    'identity', 'covariance', 'random', and a numpy array of shape
    (n_features, n_features).

    'identity'
      An identity matrix of shape (n_features, n_features).

    'covariance'
      The (pseudo-)inverse covariance matrix (raises an error if the
      covariance matrix is not definite and `strict_pd == True`)

    'random'
      A random positive definite (PD) matrix of shape
      `(n_features, n_features)`, generated using
      `sklearn.datasets.make_spd_matrix`.

    numpy array
      A PSD matrix (or strictly PD if strict_pd==True) of
      shape (n_features, n_features), that will be used as such to
      initialize the metric, or set the prior.

  random_state : int or `numpy.RandomState` or None, optional (default=None)
    A pseudo random number generator object or a seed for it if int. If
    ``init='random'``, ``random_state`` is used to set the random Mahalanobis
    matrix. If ``init='pca'``, ``random_state`` is passed as an
    argument to PCA when initializing the matrix.

  return_inverse : bool, optional (default=False)
    Whether to return the inverse of the specified matrix. This
    can be sometimes useful. It will return the pseudo-inverse (which is the
    same as the inverse if the matrix is definite (i.e. invertible)). If
    `strict_pd == True` and the matrix is not definite, it will return an
    error.

  strict_pd : bool, optional (default=False)
    Whether to enforce that the provided matrix is definite (in addition to
    being PSD).

  matrix_name : str, optional (default='matrix')
    The name of the matrix used (example: 'init', 'prior'). Will be used in
    error messages.

  Returns
  -------
  M, or (M, M_inv) : `numpy.ndarray`
    The initial matrix to use M, and its inverse if `return_inverse=True`.
  """
    n_features = input.shape[-1]
    if isinstance(init, np.ndarray):
        # we copy the array, so that if we update the metric, we don't
        # also update the init
        init = check_array(init, copy=True)

        # Assert that init.shape[1] = n_features
        if init.shape != (n_features, ) * 2:
            raise ValueError('The input dimensionality {} of the given '
                             'mahalanobis matrix `{}` must match the '
                             'dimensionality of the given inputs ({}).'.format(
                                 init.shape, matrix_name, n_features))

        # Assert that the matrix is symmetric
        if not np.allclose(init, init.T):
            raise ValueError("`{}` is not symmetric.".format(matrix_name))

    elif init not in ['identity', 'covariance', 'random']:
        raise ValueError(
            "`{}` must be 'identity', 'covariance', 'random' "
            "or a numpy array of shape (n_features, n_features).".format(
                matrix_name))

    random_state = check_random_state(random_state)
    M = init
    if isinstance(M, np.ndarray):
        w, V = eigh(M, check_finite=False)
        init_is_definite = _check_sdp_from_eigen(w)
        if strict_pd and not init_is_definite:
            raise LinAlgError(
                "You should provide a strictly positive definite "
                "matrix as `{}`. This one is not definite. Try another"
                " {}, or an algorithm that does not "
                "require the {} to be strictly positive definite.".format(
                    *((matrix_name, ) * 3)))
        elif return_inverse and not init_is_definite:
            warnings.warn('The initialization matrix is not invertible: '
                          'using the pseudo-inverse instead.')
        if return_inverse:
            M_inv = _pseudo_inverse_from_eig(w, V)
            return M, M_inv
        else:
            return M
    elif init == 'identity':
        M = np.eye(n_features, n_features)
        if return_inverse:
            M_inv = M.copy()
            return M, M_inv
        else:
            return M
    elif init == 'covariance':
        if input.ndim == 3:
            # if the input are tuples, we need to form an X by deduplication
            X = np.unique(np.vstack(input), axis=0)
        else:
            X = input
        # atleast2d is necessary to deal with scalar covariance matrices
        M_inv = np.atleast_2d(np.cov(X, rowvar=False))
        w, V = eigh(M_inv, check_finite=False)
        cov_is_definite = _check_sdp_from_eigen(w)
        if strict_pd and not cov_is_definite:
            raise LinAlgError(
                "Unable to get a true inverse of the covariance "
                "matrix since it is not definite. Try another "
                "`{}`, or an algorithm that does not "
                "require the `{}` to be strictly positive definite.".format(
                    *((matrix_name, ) * 2)))
        elif not cov_is_definite:
            warnings.warn(
                'The covariance matrix is not invertible: '
                'using the pseudo-inverse instead. '
                'To make the covariance matrix invertible'
                ' you can remove any linearly dependent features and/or '
                'reduce the dimensionality of your input, '
                'for instance using `sklearn.decomposition.PCA` as a '
                'preprocessing step.')
        M = _pseudo_inverse_from_eig(w, V)
        if return_inverse:
            return M, M_inv
        else:
            return M
    elif init == 'random':
        # we need to create a random symmetric matrix
        M = make_spd_matrix(n_features, random_state=random_state)
        if return_inverse:
            # we use pinvh even if we know the matrix is definite, just because
            # we need the returned matrix to be symmetric (and sometimes
            # np.linalg.inv returns not symmetric inverses of symmetric matrices)
            # TODO: there might be a more efficient method to do so
            M_inv = pinvh(M)
            return M, M_inv
        else:
            return M
Example #32
def get_response_content(fs):
    # get the tree
    tree = Newick.parse(fs.tree, Newick.NewickTree)
    tree.assert_valid()
    # get the lexicographically ordered tip names
    states = list(sorted(node.name for node in tree.gen_tips()))
    # start to prepare the response
    out = StringIO()
    # create the dictionary distance matrix
    dictionary_distance_matrix = {}
    for ta in tree.gen_tips():
        for tb in tree.gen_tips():
            key = (ta.name, tb.name)
            distance = get_distance(ta, tb)
            dictionary_distance_matrix[key] = distance
    # show the distance matrix
    print('distance matrix:', file=out)
    for a in states:
        for b in states:
            print(a, b, dictionary_distance_matrix[(a, b)], file=out)
    print('', file=out)
    # create the off diagonals of a rate matrix from the distance matrix
    unnormalized_rate_matrix = {}
    for key, distance in dictionary_distance_matrix.items():
        a, b = key
        if a == b:
            rate = 0
        else:
            rate = 1.0 / distance
        unnormalized_rate_matrix[key] = rate
    # create the diagonals of the rate matrix
    for a in states:
        row_rate = sum(unnormalized_rate_matrix[(a, b)] for b in states)
        unnormalized_rate_matrix[(a, a)] = -row_rate
    # show the unnormalized rate matrix
    print('unnormalized rate matrix:', file=out)
    for a in states:
        for b in states:
            print(a, b, unnormalized_rate_matrix[(a, b)], file=out)
    print('', file=out)
    """
    # normalize the off diagonals and put negative ones on the diagonal
    rate_matrix = dict(unnormalized_rate_matrix.items())
    for a in states:
        normalizing_factor = -1.0 / rate_matrix[(a, a)]
        for b in states:
            rate_matrix[(a, b)] *= normalizing_factor
    # show the rate matrix
    print >> out, 'normalized rate matrix:'
    for a in states:
        for b in states:
            print >> out, a, b, rate_matrix[(a, b)]
    print >> out, ''
    """
    # create the numpy rate matrix
    row_major = []
    for a in states:
        row = [unnormalized_rate_matrix[(a, b)] for b in states]
        row_major.append(row)
    L = np.array(row_major)
    D = np.diag(
        [math.sqrt(-1 / unnormalized_rate_matrix[(a, a)]) for a in states])
    np_matrix = np.dot(D, np.dot(L, D))
    print('symmetrically normalized matrix:', file=out)
    print(np_matrix, file=out)
    print('', file=out)
    # show the eigendecomposition of the matrix
    w, vl, vr = linalg.eig(np_matrix, left=True, right=True)
    print('eigenvalues:', file=out)
    print(w, file=out)
    print('left eigenvectors:', file=out)
    print(vl, file=out)
    print('right eigenvectors:', file=out)
    print(vr, file=out)
    # get the eigenvalues sorted by absolute value
    ordered_eigenvalue_info = list(sorted(
        (abs(x), i) for i, x in enumerate(w)))
    # get the index of the eigenvector with the second smallest absolute value
    fiedler_eigenvalue, fiedler_eigenvalue_index = ordered_eigenvalue_info[1]
    fiedler_vector = vl.T[fiedler_eigenvalue_index]
    print('second smallest absolute value of any eigenvalue:', file=out)
    print(fiedler_eigenvalue, file=out)
    print('corresponding fiedler vector:', file=out)
    print(fiedler_vector, file=out)
    print('corresponding partition:', file=out)
    print([
        name for name, value in zip(states, fiedler_vector) if value < 0
    ], file=out)
    print([
        name for name, value in zip(states, fiedler_vector) if value >= 0
    ], file=out)
    # get the laplacian according to the method of balaji and bapat
    # create the numpy distance matrix
    row_major = []
    for a in states:
        row = [dictionary_distance_matrix[(a, b)] for b in states]
        row_major.append(row)
    np_distance_matrix = np.array(row_major)
    # get the inverse of the numpy distance matrix,
    # using the notation of balaji and bapat
    E_inv = linalg.inv(np_distance_matrix)
    ones = np.ones((len(states), len(states)))
    numerator = np.dot(E_inv, np.dot(ones, E_inv))
    denominator = sum(sum(E_inv))
    R = E_inv - numerator / denominator
    print('R:', file=out)
    print(R, file=out)
    print('sum(R):', file=out)
    print(sum(R), file=out)
    print('eigenvalues of R:', file=out)
    w, v = linalg.eig(R)
    print(w, file=out)
    # get the eigenvalues sorted by absolute value
    ordered_eigenvalue_info = list(sorted(
        (abs(x), i) for i, x in enumerate(w)))
    # get the index of the eigenvector with the second smallest absolute value
    fiedler_eigenvalue, fiedler_eigenvalue_index = ordered_eigenvalue_info[1]
    fiedler_vector = v.T[fiedler_eigenvalue_index]
    print('second smallest absolute value of any eigenvalue:', file=out)
    print(fiedler_eigenvalue, file=out)
    print('corresponding fiedler vector:', file=out)
    print(fiedler_vector, file=out)
    print('corresponding partition:', file=out)
    print([
        name for name, value in zip(states, fiedler_vector) if value < 0
    ], file=out)
    print([
        name for name, value in zip(states, fiedler_vector) if value >= 0
    ], file=out)
    # try an alternative formulation for R
    n = len(states)
    P = np.eye(n) - np.ones((n, n)) / n
    R_inv = np.dot(P, np.dot(np_distance_matrix, P))
    print('alternative formulation of R inverse:', file=out)
    print(R_inv, file=out)
    w, v = linalg.eigh(R_inv)
    ordered_eigenvalue_info = list(sorted(
        (abs(x), i) for i, x in enumerate(w)))
    fiedler_eigenvalue, fiedler_eigenvalue_index = ordered_eigenvalue_info[1]
    fiedler_vector = v.T[fiedler_eigenvalue_index]
    print('eigensystem:', file=out)
    print(w, file=out)
    print(v, file=out)
    print('corresponding fiedler vector:', file=out)
    print(fiedler_vector, file=out)
    print('alternative formulation of R:', file=out)
    R = linalg.pinv(R_inv)
    print(R, file=out)
    w, v = linalg.eigh(R)
    ordered_eigenvalue_info = list(sorted(
        (abs(x), i) for i, x in enumerate(w)))
    fiedler_eigenvalue, fiedler_eigenvalue_index = ordered_eigenvalue_info[1]
    fiedler_vector = v.T[fiedler_eigenvalue_index]
    print('eigensystem:', file=out)
    print(w, file=out)
    print(v, file=out)
    print('corresponding fiedler vector:', file=out)
    print(fiedler_vector, file=out)
    # return the response
    return out.getvalue()
Example #33
def plot_bic(n_components_range):
    # Number of samples per component
    n_samples = 500

    # Generate random sample, two components
    np.random.seed(56)
    X, y = create_dataset()

    lowest_bic = np.inf
    bic = []
    cv_types = ['spherical', 'tied', 'diag', 'full']
    for cv_type in cv_types:
        for n_components in n_components_range:
            # Fit a Gaussian mixture with EM
            gmm = mixture.GaussianMixture(n_components=n_components,
                                          covariance_type=cv_type)
            gmm.fit(X)
            bic.append(gmm.bic(X))
            if bic[-1] < lowest_bic:
                lowest_bic = bic[-1]
                best_gmm = gmm

    bic = np.array(bic)
    color_iter = itertools.cycle(
        ['navy', 'turquoise', 'cornflowerblue', 'darkorange'])
    clf = best_gmm
    bars = []

    # Plot the BIC scores
    plt.figure(figsize=(8, 6))
    spl = plt.subplot(2, 1, 1)
    for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
        xpos = np.array(n_components_range) + .2 * (i - 2)
        bars.append(
            plt.bar(xpos,
                    bic[i * len(n_components_range):(i + 1) *
                        len(n_components_range)],
                    width=.2,
                    color=color))
    plt.xticks(n_components_range)
    plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
    plt.title('BIC score per model')
    xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
        .2 * np.floor(bic.argmin() / len(n_components_range))
    plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
    spl.set_xlabel('Number of components')
    spl.legend([b[0] for b in bars], cv_types)

    # Plot the winner
    splot = plt.subplot(2, 1, 2)
    Y_ = clf.predict(X)
    for i, (mean, cov,
            color) in enumerate(zip(clf.means_, clf.covariances_, color_iter)):
        v, w = linalg.eigh(cov)
        if not np.any(Y_ == i):
            continue
        plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)

        # Plot an ellipse to show the Gaussian component
        angle = np.arctan2(w[0][1], w[0][0])
        angle = 180. * angle / np.pi  # convert to degrees
        v = 2. * np.sqrt(2.) * np.sqrt(v)
        ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
        ell.set_clip_box(splot.bbox)
        ell.set_alpha(.5)
        splot.add_artist(ell)

    plt.xticks(())
    plt.yticks(())
    plt.title('Selected GMM: full model, 2 components')
    plt.subplots_adjust(hspace=.35, bottom=.02)
    plt.show()
Example #34
def make_lcmv(info,
              forward,
              data_cov,
              reg=0.05,
              noise_cov=None,
              label=None,
              pick_ori=None,
              rank=None,
              weight_norm='unit-noise-gain',
              reduce_rank=False,
              verbose=None):
    """Compute LCMV spatial filter.

    Parameters
    ----------
    info : dict
        The measurement info to specify the channels to include.
        Bad channels in info['bads'] are not used.
    forward : dict
        Forward operator.
    data_cov : Covariance
        The data covariance.
    reg : float
        The regularization for the whitened data covariance.
    noise_cov : Covariance
        The noise covariance. If provided, whitening will be done. Providing a
        noise covariance is mandatory if you mix sensor types, e.g.
        gradiometers with magnetometers or EEG with MEG.
    label : Label
        Restricts the LCMV solution to a given label.
    pick_ori : None | 'normal' | 'max-power' | 'vector'
        For forward solutions with fixed orientation, None (default) must be
        used and a scalar beamformer is computed. For free-orientation forward
        solutions, a vector beamformer is computed and:

            None
                Pools the orientations by taking the norm.
            'normal'
                Keeps only the radial component.
            'max-power'
                Selects orientations that maximize output source power at
                each location.
            'vector'
                Keeps the currents for each direction separate

    rank : None | int | dict
        Specified rank of the noise covariance matrix. If None, the rank is
        detected automatically. If int, the rank is specified for the MEG
        channels. A dictionary with entries 'eeg' and/or 'meg' can be used
        to specify the rank for each modality.
    weight_norm : 'unit-noise-gain' | 'nai' | None
        If 'unit-noise-gain', the unit-noise gain minimum variance beamformer
        will be computed (Borgiotti-Kaplan beamformer) [2]_,
        if 'nai', the Neural Activity Index [1]_ will be computed,
        if None, the unit-gain LCMV beamformer [2]_ will be computed.
    reduce_rank : bool
        If True, the rank of the leadfield will be reduced by 1 for each
        spatial location. Setting reduce_rank to True is typically necessary
        if you use a single sphere model for MEG.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Returns
    -------
    filters : dict
        Dictionary containing filter weights from LCMV beamformer.
        Contains the following keys:

            'weights' : {array}
                The filter weights of the beamformer.
            'data_cov' : {instance of Covariance}
                The data covariance matrix used to compute the beamformer.
            'noise_cov' : {instance of Covariance | None}
                The noise covariance matrix used to compute the beamformer.
            'whitener' : {None | array}
                Whitening matrix, provided if whitening was applied to the
                covariance matrix and leadfield during computation of the
                beamformer weights.
            'weight_norm' : {'unit-noise-gain'| 'nai' | None}
                Type of weight normalization used to compute the filter
                weights.
            'pick_ori' : {None | 'normal'}
                Orientation selection used in filter computation.
            'ch_names' : {list}
                Channels used to compute the beamformer.
            'proj' : {array}
                Projections used to compute the beamformer.
            'is_ssp' : {bool}
                If True, projections were applied prior to filter computation.
            'vertices' : {list}
                Vertices for which the filter weights were computed.
            'is_free_ori' : {bool}
                If True, the filter was computed with free source orientation.
            'src' : {instance of SourceSpaces}
                Source space information.

    Notes
    -----
    The original reference is [1]_.

    References
    ----------
    .. [1] Van Veen et al. Localization of brain electrical activity via
           linearly constrained minimum variance spatial filtering.
           Biomedical Engineering (1997) vol. 44 (9) pp. 867--880
    .. [2] Sekihara & Nagarajan. Adaptive spatial filters for electromagnetic
           brain imaging (2008) Springer Science & Business Media
    """
    picks = _setup_picks(info, forward, data_cov, noise_cov)

    is_free_ori, ch_names, proj, vertno, G = \
        _prepare_beamformer_input(info, forward, label, picks, pick_ori)

    data_cov = pick_channels_cov(data_cov, include=ch_names)
    Cm = data_cov['data']

    # check number of sensor types present in the data
    _check_one_ch_type(info, picks, noise_cov)

    # apply SSPs
    is_ssp = False
    if info['projs']:
        Cm = np.dot(proj, np.dot(Cm, proj.T))
        is_ssp = True

    if noise_cov is not None:
        # Handle whitening + data covariance
        whitener, _ = compute_whitener(noise_cov, info, picks, rank=rank)
        # whiten the leadfield
        G = np.dot(whitener, G)
        # whiten the data covariance
        Cm = np.dot(whitener, np.dot(Cm, whitener.T))
    else:
        whitener = None

    # Tikhonov regularization using reg parameter d to control for
    # trade-off between spatial resolution and noise sensitivity
    Cm_inv, d = _reg_pinv(Cm.copy(), reg)

    if weight_norm is not None:
        # estimate noise level based on covariance matrix, taking the
        # smallest eigenvalue that is not zero
        noise, _ = linalg.eigh(Cm)
        if rank is not None:
            rank_Cm = rank
        else:
            rank_Cm = estimate_rank(Cm,
                                    tol='auto',
                                    norm=False,
                                    return_singular=False)
        noise = noise[len(noise) - rank_Cm]

        # use either noise floor or regularization parameter d
        noise = max(noise, d)

        # Compute square of Cm_inv used for weight normalization
        Cm_inv_sq = np.dot(Cm_inv, Cm_inv)

    del Cm

    # leadfield rank and optional rank reduction
    if reduce_rank:
        if not pick_ori == 'max-power':
            raise NotImplementedError('The computation of spatial filters '
                                      'with rank reduction using reduce_rank '
                                      'parameter is only implemented with '
                                      'pick_ori=="max-power".')
        if not isinstance(reduce_rank, bool):
            raise ValueError('reduce_rank has to be True or False '
                             ' (got %s).' % reduce_rank)

    # Compute spatial filters
    W = np.dot(G.T, Cm_inv)
    n_orient = 3 if is_free_ori else 1
    n_sources = G.shape[1] // n_orient
    for k in range(n_sources):
        Wk = W[n_orient * k:n_orient * k + n_orient]
        Gk = G[:, n_orient * k:n_orient * k + n_orient]
        if np.all(Gk == 0.):
            continue
        Ck = np.dot(Wk, Gk)

        # XXX This should be de-duplicated with DICS

        # Compute scalar beamformer by finding the source orientation which
        # maximizes output source power
        if pick_ori == 'max-power':
            # weight normalization and orientation selection:
            if weight_norm is not None and pick_ori == 'max-power':
                # finding optimal orientation for NAI and unit-noise-gain
                # based on [2]_, Eq. 4.47
                tmp = np.dot(Gk.T, np.dot(Cm_inv_sq, Gk))

                if reduce_rank:
                    # use pseudo inverse computation setting smallest component
                    # to zero if the leadfield is not full rank
                    tmp_inv = _eig_inv(tmp, tmp.shape[0] - 1)
                else:
                    # use straight inverse with full rank leadfield
                    try:
                        tmp_inv = linalg.inv(tmp)
                    except np.linalg.linalg.LinAlgError:
                        raise ValueError('Singular matrix detected when '
                                         'estimating LCMV filters. Consider '
                                         'reducing the rank of the leadfield '
                                         'by using reduce_rank=True.')

                eig_vals, eig_vecs = linalg.eig(np.dot(tmp_inv, np.dot(Wk,
                                                                       Gk)))

                if np.iscomplex(eig_vecs).any():
                    raise ValueError('The eigenspectrum of the leadfield at '
                                     'this voxel is complex. Consider '
                                     'reducing the rank of the leadfield by '
                                     'using reduce_rank=True.')

                idx_max = eig_vals.argmax()
                max_ori = eig_vecs[:, idx_max]
                Wk[:] = np.dot(max_ori, Wk)
                Gk = np.dot(Gk, max_ori)

                # compute spatial filter for NAI or unit-noise-gain
                tmp = np.dot(Gk.T, np.dot(Cm_inv_sq, Gk))
                denom = np.sqrt(tmp)
                Wk /= denom
                if weight_norm == 'nai':
                    Wk /= np.sqrt(noise)

                is_free_ori = False

            # no weight-normalization and max-power is not implemented yet:
            else:
                raise NotImplementedError('The max-power orientation '
                                          'selection is not yet implemented '
                                          'with weight_norm set to None.')

        else:  # do vector beamformer
            # compute the filters:
            if is_free_ori:
                # Free source orientation
                Wk[:] = np.dot(linalg.pinv(Ck, 0.1), Wk)
            else:
                # Fixed source orientation
                Wk /= Ck

            # handle noise normalization with free/normal source orientation:
            if weight_norm == 'nai':
                raise NotImplementedError('Weight normalization with neural '
                                          'activity index is not implemented '
                                          'yet with free or fixed '
                                          'orientation.')

            if weight_norm == 'unit-noise-gain':
                noise_norm = np.sum(Wk**2, axis=1)
                if is_free_ori:
                    noise_norm = np.sum(noise_norm)
                noise_norm = np.sqrt(noise_norm)
                if noise_norm == 0.:
                    noise_norm_inv = 0  # avoid division by 0
                else:
                    noise_norm_inv = 1. / noise_norm
                Wk[:] *= noise_norm_inv

    # Pick source orientation maximizing output source power
    if pick_ori == 'max-power':
        W = W[0::3]
    elif pick_ori == 'normal':
        W = W[2::3]
        is_free_ori = False

    filters = dict(weights=W,
                   data_cov=data_cov,
                   noise_cov=noise_cov,
                   whitener=whitener,
                   weight_norm=weight_norm,
                   pick_ori=pick_ori,
                   ch_names=ch_names,
                   proj=proj,
                   is_ssp=is_ssp,
                   vertices=vertno,
                   is_free_ori=is_free_ori,
                   nsource=forward['nsource'],
                   src=deepcopy(forward['src']),
                   source_nn=forward['source_nn'].copy())

    return filters
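
# --- Added usage sketch (illustrative, not from the original source) ---
# Applying the returned weights to sensor data is a plain matrix product;
# `sensor_data` is a hypothetical (n_channels, n_times) array whose rows
# match filters['ch_names'].  In MNE-Python the high-level equivalent is
# mne.beamformer.apply_lcmv on an Evoked object.
import numpy as np

def _apply_lcmv_weights(filters, sensor_data):
    source_data = np.dot(filters['weights'], sensor_data)
    if filters['is_free_ori']:
        # pool the three orientations per source location by the vector norm
        source_data = np.linalg.norm(
            source_data.reshape(-1, 3, source_data.shape[-1]), axis=1)
    return source_data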
Example #35
0
# plt.xticks(n_components_range)
# plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
# plt.title('BIC score per model')
# xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
#     .2 * np.floor(bic.argmin() / len(n_components_range))
# plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
# spl.set_xlabel('Number of components')
# spl.legend([b[0] for b in bars], cv_types)

# Plot the winner
splot = plt.subplot()
Y_ = clf.predict(X)

for i, (mean, color) in enumerate(zip(clf.means_, color_iter)):
    #v, w = linalg.eigh(covar)
    v, w = linalg.eigh(clf._get_covars()[i][:2, :2])
    u = w[0] / np.linalg.norm(w[0])
    angle = np.arctan2(u[1], u[0])
    angle = 180 * angle / np.pi  # convert to degrees
    ell = mpl.patches.Ellipse(clf.means_[i, :2],
                              v[0],
                              v[1],
                              180 + angle,
                              color=color)
    if not np.any(Y_ == i):
        continue
    plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], 10, color=color)

    # Plot an ellipse to show the Gaussian component
    # angle = np.arctan2(w[0][1], w[0][0])
    # angle = 180 * angle / np.pi  # convert to degrees
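
# --- Added sketch (illustrative): the covariance-to-ellipse math above ---
# For a 2x2 covariance C, eigh returns variances v along the principal axes
# (columns of w), so an ellipse with diameters proportional to sqrt(v),
# rotated to the first principal axis, outlines the Gaussian.  All names
# here are local to this example.
import numpy as np
from scipy import linalg

C = np.array([[3.0, 1.0], [1.0, 2.0]])
v, w = linalg.eigh(C)               # v ascending; columns of w orthonormal
width, height = 2.0 * np.sqrt(v)    # 1-sigma ellipse diameters
angle = np.degrees(np.arctan2(w[1, 0], w[0, 0]))  # orientation of first axis
print(width, height, angle)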
示例#36
0
def lobpcg(
    A,
    X,
    B=None,
    M=None,
    Y=None,
    tol=None,
    maxiter=None,
    largest=True,
    verbosityLevel=0,
    retLambdaHistory=False,
    retResidualNormsHistory=False,
):
    """Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG)

    LOBPCG is a preconditioned eigensolver for large symmetric positive
    definite (SPD) generalized eigenproblems.

    Parameters
    ----------
    A : {sparse matrix, dense matrix, LinearOperator}
        The symmetric linear operator of the problem, usually a
        sparse matrix.  Often called the "stiffness matrix".
    X : ndarray, float32 or float64
        Initial approximation to the ``k`` eigenvectors (non-sparse). If `A`
        has ``shape=(n,n)`` then `X` should have shape ``shape=(n,k)``.
    B : {dense matrix, sparse matrix, LinearOperator}, optional
        The right hand side operator in a generalized eigenproblem.
        By default, ``B = Identity``.  Often called the "mass matrix".
    M : {dense matrix, sparse matrix, LinearOperator}, optional
        Preconditioner to `A`; by default ``M = Identity``.
        `M` should approximate the inverse of `A`.
    Y : ndarray, float32 or float64, optional
        n-by-sizeY matrix of constraints (non-sparse), sizeY < n
        The iterations will be performed in the B-orthogonal complement
        of the column-space of Y. Y must be full rank.
    tol : scalar, optional
        Solver tolerance (stopping criterion).
        The default is ``tol=n*sqrt(eps)``.
    maxiter : int, optional
        Maximum number of iterations.  The default is ``maxiter = 20``.
    largest : bool, optional
        When True, solve for the largest eigenvalues, otherwise the smallest.
    verbosityLevel : int, optional
        Controls solver output.  The default is ``verbosityLevel=0``.
    retLambdaHistory : bool, optional
        Whether to return eigenvalue history.  Default is False.
    retResidualNormsHistory : bool, optional
        Whether to return history of residual norms.  Default is False.

    Returns
    -------
    w : ndarray
        Array of ``k`` eigenvalues
    v : ndarray
        An array of ``k`` eigenvectors.  `v` has the same shape as `X`.
    lambdas : list of ndarray, optional
        The eigenvalue history, if `retLambdaHistory` is True.
    rnorms : list of ndarray, optional
        The history of residual norms, if `retResidualNormsHistory` is True.

    Notes
    -----
    If both ``retLambdaHistory`` and ``retResidualNormsHistory`` are True,
    the return tuple has the following format
    ``(lambda, V, lambda history, residual norms history)``.

    In the following ``n`` denotes the matrix size and ``m`` the number
    of required eigenvalues (smallest or largest).

    The LOBPCG code internally solves eigenproblems of the size ``3m`` on every
    iteration by calling the "standard" dense eigensolver, so if ``m`` is not
    small enough compared to ``n``, it does not make sense to call the LOBPCG
    code, but rather one should use the "standard" eigensolver, e.g. numpy or
    scipy function in this case.
    If one calls the LOBPCG algorithm for ``5m > n``, it will most likely break
    internally, so the code tries to call the standard function instead.

    It is not that ``n`` should be large for the LOBPCG to work, but rather
    the ratio ``n / m`` should be large. If you call LOBPCG with ``m=1``
    and ``n=10``, it works even though ``n`` is small. The method is intended
    for extremely large ``n / m`` [4]_.

    The convergence speed depends basically on two factors:

    1. How well the sought eigenvalues are separated from the rest of the
       eigenvalues. One can try to vary ``m`` to make this better.

    2. How well conditioned the problem is. This can be changed by using proper
       preconditioning. For example, a rod vibration test problem (under tests
       directory) is ill-conditioned for large ``n``, so convergence will be
       slow, unless efficient preconditioning is used. For this specific
       problem, a good simple preconditioner function would be a linear solve
       for `A`, which is easy to code since A is tridiagonal.

    References
    ----------
    .. [1] A. V. Knyazev (2001),
           Toward the Optimal Preconditioned Eigensolver: Locally Optimal
           Block Preconditioned Conjugate Gradient Method.
           SIAM Journal on Scientific Computing 23, no. 2,
           pp. 517-541. :doi:`10.1137/S1064827500366124`

    .. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov
           (2007), Block Locally Optimal Preconditioned Eigenvalue Xolvers
           (BLOPEX) in hypre and PETSc. :arxiv:`0705.2626`

    .. [3] A. V. Knyazev's C and MATLAB implementations:
           https://bitbucket.org/joseroman/blopex

    .. [4] S. Yamada, T. Imamura, T. Kano, and M. Machida (2006),
           High-performance computing for exact numerical approaches to
           quantum many-body problems on the earth simulator. In Proceedings
           of the 2006 ACM/IEEE Conference on Supercomputing.
           :doi:`10.1145/1188455.1188504`

    Examples
    --------

    Solve ``A x = lambda x`` with constraints and preconditioning.

    >>> import numpy as np
    >>> from scipy.sparse import spdiags, issparse
    >>> from scipy.sparse.linalg import lobpcg, LinearOperator
    >>> n = 100
    >>> vals = np.arange(1, n + 1)
    >>> A = spdiags(vals, 0, n, n)
    >>> A.toarray()
    array([[  1.,   0.,   0., ...,   0.,   0.,   0.],
           [  0.,   2.,   0., ...,   0.,   0.,   0.],
           [  0.,   0.,   3., ...,   0.,   0.,   0.],
           ...,
           [  0.,   0.,   0., ...,  98.,   0.,   0.],
           [  0.,   0.,   0., ...,   0.,  99.,   0.],
           [  0.,   0.,   0., ...,   0.,   0., 100.]])

    Constraints:

    >>> Y = np.eye(n, 3)

    Initial guess for eigenvectors, should have linearly independent
    columns. Column dimension = number of requested eigenvalues.

    >>> rng = np.random.default_rng()
    >>> X = rng.random((n, 3))

    The preconditioner is the inverse of A in this example:

    >>> invA = spdiags([1./vals], 0, n, n)

    The preconditioner must be defined by a function:

    >>> def precond( x ):
    ...     return invA @ x

    The argument x of the preconditioner function is a matrix inside `lobpcg`,
    thus the use of matrix-matrix product ``@``.

    The preconditioner function is passed to lobpcg as a `LinearOperator`:

    >>> M = LinearOperator(matvec=precond, matmat=precond,
    ...                    shape=(n, n), dtype=float)

    Let us now solve the eigenvalue problem for the matrix A:

    >>> eigenvalues, _ = lobpcg(A, X, Y=Y, M=M, largest=False)
    >>> eigenvalues
    array([4., 5., 6.])

    Note that the vectors passed in Y are the eigenvectors of the 3 smallest
    eigenvalues. The results returned are orthogonal to those.

    """
    blockVectorX = X
    blockVectorY = Y
    residualTolerance = tol
    if maxiter is None:
        maxiter = 20

    if blockVectorY is not None:
        sizeY = blockVectorY.shape[1]
    else:
        sizeY = 0

    # Block size.
    if len(blockVectorX.shape) != 2:
        raise ValueError("expected rank-2 array for argument X")

    n, sizeX = blockVectorX.shape

    if verbosityLevel:
        aux = "Solving "
        if B is None:
            aux += "standard"
        else:
            aux += "generalized"
        aux += " eigenvalue problem with"
        if M is None:
            aux += "out"
        aux += " preconditioning\n\n"
        aux += "matrix size %d\n" % n
        aux += "block size %d\n\n" % sizeX
        if blockVectorY is None:
            aux += "No constraints\n\n"
        else:
            if sizeY > 1:
                aux += "%d constraints\n\n" % sizeY
            else:
                aux += "%d constraint\n\n" % sizeY
        print(aux)

    A = _makeOperator(A, (n, n))
    B = _makeOperator(B, (n, n))
    M = _makeOperator(M, (n, n))

    if (n - sizeY) < (5 * sizeX):
        # warn('The problem size is small compared to the block size.' \
        #        ' Using dense eigensolver instead of LOBPCG.')

        sizeX = min(sizeX, n)

        if blockVectorY is not None:
            raise NotImplementedError("The dense eigensolver "
                                      "does not support constraints.")

        # Define the closed range of indices of eigenvalues to return.
        if largest:
            eigvals = (n - sizeX, n - 1)
        else:
            eigvals = (0, sizeX - 1)

        A_dense = A(np.eye(n, dtype=A.dtype))
        B_dense = None if B is None else B(np.eye(n, dtype=B.dtype))

        vals, vecs = eigh(A_dense,
                          B_dense,
                          eigvals=eigvals,
                          check_finite=False)
        if largest:
            # Reverse order to be compatible with eigs() in 'LM' mode.
            vals = vals[::-1]
            vecs = vecs[:, ::-1]

        return vals, vecs

    if (residualTolerance is None) or (residualTolerance <= 0.0):
        residualTolerance = np.sqrt(1e-15) * n

    # Apply constraints to X.
    if blockVectorY is not None:

        if B is not None:
            blockVectorBY = B(blockVectorY)
        else:
            blockVectorBY = blockVectorY

        # gramYBY is a dense array.
        gramYBY = np.dot(blockVectorY.T.conj(), blockVectorBY)
        try:
            # gramYBY is a Cholesky factor from now on...
            gramYBY = cho_factor(gramYBY)
        except LinAlgError as e:
            raise ValueError("linearly dependent constraints") from e

        _applyConstraints(blockVectorX, gramYBY, blockVectorBY, blockVectorY)

    ##
    # B-orthonormalize X.
    blockVectorX, blockVectorBX = _b_orthonormalize(B, blockVectorX)

    ##
    # Compute the initial Ritz vectors: solve the eigenproblem.
    blockVectorAX = A(blockVectorX)
    gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)

    _lambda, eigBlockVector = eigh(gramXAX, check_finite=False)
    ii = _get_indx(_lambda, sizeX, largest)
    _lambda = _lambda[ii]

    eigBlockVector = np.asarray(eigBlockVector[:, ii])
    blockVectorX = np.dot(blockVectorX, eigBlockVector)
    blockVectorAX = np.dot(blockVectorAX, eigBlockVector)
    if B is not None:
        blockVectorBX = np.dot(blockVectorBX, eigBlockVector)

    ##
    # Active index set.
    activeMask = np.ones((sizeX, ), dtype=bool)

    lambdaHistory = [_lambda]
    residualNormsHistory = []

    previousBlockSize = sizeX
    ident = np.eye(sizeX, dtype=A.dtype)
    ident0 = np.eye(sizeX, dtype=A.dtype)

    ##
    # Main iteration loop.

    blockVectorP = None  # set during iteration
    blockVectorAP = None
    blockVectorBP = None

    iterationNumber = -1
    restart = True
    explicitGramFlag = False
    while iterationNumber < maxiter:
        iterationNumber += 1
        if verbosityLevel > 0:
            print("iteration %d" % iterationNumber)

        if B is not None:
            aux = blockVectorBX * _lambda[np.newaxis, :]
        else:
            aux = blockVectorX * _lambda[np.newaxis, :]

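        # residual block R = A @ X - B @ X * diag(lambda); the column norms
        # computed next decide which eigenpairs remain in the active set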
        blockVectorR = blockVectorAX - aux

        aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
        residualNorms = np.sqrt(aux)

        residualNormsHistory.append(residualNorms)

        ii = np.where(residualNorms > residualTolerance, True, False)
        activeMask = activeMask & ii
        if verbosityLevel > 2:
            print(activeMask)

        currentBlockSize = activeMask.sum()
        if currentBlockSize != previousBlockSize:
            previousBlockSize = currentBlockSize
            ident = np.eye(currentBlockSize, dtype=A.dtype)

        if currentBlockSize == 0:
            break

        if verbosityLevel > 0:
            print("current block size:", currentBlockSize)
            print("eigenvalue:", _lambda)
            print("residual norms:", residualNorms)
        if verbosityLevel > 10:
            print(eigBlockVector)

        activeBlockVectorR = _as2d(blockVectorR[:, activeMask])

        if iterationNumber > 0:
            activeBlockVectorP = _as2d(blockVectorP[:, activeMask])
            activeBlockVectorAP = _as2d(blockVectorAP[:, activeMask])
            if B is not None:
                activeBlockVectorBP = _as2d(blockVectorBP[:, activeMask])

        if M is not None:
            # Apply preconditioner T to the active residuals.
            activeBlockVectorR = M(activeBlockVectorR)

        ##
        # Apply constraints to the preconditioned residuals.
        if blockVectorY is not None:
            _applyConstraints(activeBlockVectorR, gramYBY, blockVectorBY,
                              blockVectorY)

        ##
        # B-orthogonalize the preconditioned residuals to X.
        if B is not None:
            activeBlockVectorR = activeBlockVectorR - (
                blockVectorX @ (blockVectorBX.T.conj() @ activeBlockVectorR))
        else:
            activeBlockVectorR = activeBlockVectorR - (
                blockVectorX @ (blockVectorX.T.conj() @ activeBlockVectorR))

        ##
        # B-orthonormalize the preconditioned residuals.
        aux = _b_orthonormalize(B, activeBlockVectorR)
        activeBlockVectorR, activeBlockVectorBR = aux

        if activeBlockVectorR is None:
            warnings.warn(
                f"Iteration {iterationNumber} failed at tolerance "
                f"{np.max(residualNorms)} not reaching {residualTolerance}.",
                UserWarning,
                stacklevel=3)
            break
        activeBlockVectorAR = A(activeBlockVectorR)

        if iterationNumber > 0:
            if B is not None:
                aux = _b_orthonormalize(B,
                                        activeBlockVectorP,
                                        activeBlockVectorBP,
                                        retInvR=True)
                activeBlockVectorP, activeBlockVectorBP, invR, normal = aux
            else:
                aux = _b_orthonormalize(B, activeBlockVectorP, retInvR=True)
                activeBlockVectorP, _, invR, normal = aux
            # Function _b_orthonormalize returns None if Cholesky fails
            if activeBlockVectorP is not None:
                activeBlockVectorAP = activeBlockVectorAP / normal
                activeBlockVectorAP = np.dot(activeBlockVectorAP, invR)
                restart = False
            else:
                restart = True

        ##
        # Perform the Rayleigh Ritz Procedure:
        # Compute symmetric Gram matrices:

        if activeBlockVectorAR.dtype == "float32":
            myeps = 1
        elif activeBlockVectorR.dtype == "float32":
            myeps = 1e-4
        else:
            myeps = 1e-8

        if residualNorms.max() > myeps and not explicitGramFlag:
            explicitGramFlag = False
        else:
            # Once explicitGramFlag, forever explicitGramFlag.
            explicitGramFlag = True

        # Shared memory assignments to simplify the code
        if B is None:
            blockVectorBX = blockVectorX
            activeBlockVectorBR = activeBlockVectorR
            if not restart:
                activeBlockVectorBP = activeBlockVectorP

        # Common submatrices:
        gramXAR = np.dot(blockVectorX.T.conj(), activeBlockVectorAR)
        gramRAR = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAR)

        if explicitGramFlag:
            gramRAR = (gramRAR + gramRAR.T.conj()) / 2
            gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)
            gramXAX = (gramXAX + gramXAX.T.conj()) / 2
            gramXBX = np.dot(blockVectorX.T.conj(), blockVectorBX)
            gramRBR = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBR)
            gramXBR = np.dot(blockVectorX.T.conj(), activeBlockVectorBR)
        else:
            gramXAX = np.diag(_lambda)
            gramXBX = ident0
            gramRBR = ident
            gramXBR = np.zeros((sizeX, currentBlockSize), dtype=A.dtype)

        def _handle_gramA_gramB_verbosity(gramA, gramB):
            if verbosityLevel > 0:
                _report_nonhermitian(gramA, "gramA")
                _report_nonhermitian(gramB, "gramB")
            if verbosityLevel > 10:
                # Note: not documented, but leave it in here for now
                np.savetxt("gramA.txt", gramA)
                np.savetxt("gramB.txt", gramB)

        if not restart:
            gramXAP = np.dot(blockVectorX.T.conj(), activeBlockVectorAP)
            gramRAP = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAP)
            gramPAP = np.dot(activeBlockVectorP.T.conj(), activeBlockVectorAP)
            gramXBP = np.dot(blockVectorX.T.conj(), activeBlockVectorBP)
            gramRBP = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBP)
            if explicitGramFlag:
                gramPAP = (gramPAP + gramPAP.T.conj()) / 2
                gramPBP = np.dot(activeBlockVectorP.T.conj(),
                                 activeBlockVectorBP)
            else:
                gramPBP = ident

            gramA = bmat([
                [gramXAX, gramXAR, gramXAP],
                [gramXAR.T.conj(), gramRAR, gramRAP],
                [gramXAP.T.conj(), gramRAP.T.conj(), gramPAP],
            ])
            gramB = bmat([
                [gramXBX, gramXBR, gramXBP],
                [gramXBR.T.conj(), gramRBR, gramRBP],
                [gramXBP.T.conj(), gramRBP.T.conj(), gramPBP],
            ])

            _handle_gramA_gramB_verbosity(gramA, gramB)

            try:
                _lambda, eigBlockVector = eigh(gramA,
                                               gramB,
                                               check_finite=False)
            except LinAlgError:
                # try again after dropping the direction vectors P from RR
                restart = True

        if restart:
            gramA = bmat([[gramXAX, gramXAR], [gramXAR.T.conj(), gramRAR]])
            gramB = bmat([[gramXBX, gramXBR], [gramXBR.T.conj(), gramRBR]])

            _handle_gramA_gramB_verbosity(gramA, gramB)

            try:
                _lambda, eigBlockVector = eigh(gramA,
                                               gramB,
                                               check_finite=False)
            except LinAlgError as e:
                raise ValueError("eigh has failed in lobpcg iterations") from e

        ii = _get_indx(_lambda, sizeX, largest)
        if verbosityLevel > 10:
            print(ii)
            print(_lambda)

        _lambda = _lambda[ii]
        eigBlockVector = eigBlockVector[:, ii]

        lambdaHistory.append(_lambda)

        if verbosityLevel > 10:
            print("lambda:", _lambda)
        #         # Normalize eigenvectors!
        #         aux = np.sum( eigBlockVector.conj() * eigBlockVector, 0 )
        #         eigVecNorms = np.sqrt( aux )
        #         eigBlockVector = eigBlockVector / eigVecNorms[np.newaxis, :]
        #         eigBlockVector, aux = _b_orthonormalize( B, eigBlockVector )

        if verbosityLevel > 10:
            print(eigBlockVector)

        # Compute Ritz vectors.
        if B is not None:
            if not restart:
                eigBlockVectorX = eigBlockVector[:sizeX]
                eigBlockVectorR = eigBlockVector[sizeX:sizeX +
                                                 currentBlockSize]
                eigBlockVectorP = eigBlockVector[sizeX + currentBlockSize:]

                pp = np.dot(activeBlockVectorR, eigBlockVectorR)
                pp += np.dot(activeBlockVectorP, eigBlockVectorP)

                app = np.dot(activeBlockVectorAR, eigBlockVectorR)
                app += np.dot(activeBlockVectorAP, eigBlockVectorP)

                bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)
                bpp += np.dot(activeBlockVectorBP, eigBlockVectorP)
            else:
                eigBlockVectorX = eigBlockVector[:sizeX]
                eigBlockVectorR = eigBlockVector[sizeX:]

                pp = np.dot(activeBlockVectorR, eigBlockVectorR)
                app = np.dot(activeBlockVectorAR, eigBlockVectorR)
                bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)

            if verbosityLevel > 10:
                print(pp)
                print(app)
                print(bpp)

            blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
            blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app
            blockVectorBX = np.dot(blockVectorBX, eigBlockVectorX) + bpp

            blockVectorP, blockVectorAP, blockVectorBP = pp, app, bpp

        else:
            if not restart:
                eigBlockVectorX = eigBlockVector[:sizeX]
                eigBlockVectorR = eigBlockVector[sizeX:sizeX +
                                                 currentBlockSize]
                eigBlockVectorP = eigBlockVector[sizeX + currentBlockSize:]

                pp = np.dot(activeBlockVectorR, eigBlockVectorR)
                pp += np.dot(activeBlockVectorP, eigBlockVectorP)

                app = np.dot(activeBlockVectorAR, eigBlockVectorR)
                app += np.dot(activeBlockVectorAP, eigBlockVectorP)
            else:
                eigBlockVectorX = eigBlockVector[:sizeX]
                eigBlockVectorR = eigBlockVector[sizeX:]

                pp = np.dot(activeBlockVectorR, eigBlockVectorR)
                app = np.dot(activeBlockVectorAR, eigBlockVectorR)

            if verbosityLevel > 10:
                print(pp)
                print(app)

            blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
            blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app

            blockVectorP, blockVectorAP = pp, app

    if B is not None:
        aux = blockVectorBX * _lambda[np.newaxis, :]

    else:
        aux = blockVectorX * _lambda[np.newaxis, :]

    blockVectorR = blockVectorAX - aux

    aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
    residualNorms = np.sqrt(aux)

    # Future work: Need to add Postprocessing here:
    # Making sure eigenvectors "exactly" satisfy the blockVectorY constraints?
    # Making sure eigenvectors are "exactly" orthonormalized by final "exact" RR
    # Computing the actual true residuals

    if verbosityLevel > 0:
        print("Final eigenvalue(s):", _lambda)
        print("Final residual norm(s):", residualNorms)

    if retLambdaHistory:
        if retResidualNormsHistory:
            return _lambda, blockVectorX, lambdaHistory, residualNormsHistory
        else:
            return _lambda, blockVectorX, lambdaHistory
    else:
        if retResidualNormsHistory:
            return _lambda, blockVectorX, residualNormsHistory
        else:
            return _lambda, blockVectorX
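
# --- Added sketch (assumption-laden, not from the original source) ---
# The Notes above suggest that for a tridiagonal stiffness matrix A a good
# preconditioner is simply a linear solve with A.  A minimal way to wire
# that into lobpcg, assuming SciPy's sparse machinery is available:
import numpy as np
from scipy.sparse import diags
from scipy.sparse.linalg import LinearOperator, factorized

n = 500
A = diags([-np.ones(n - 1), 2.0 * np.ones(n), -np.ones(n - 1)],
          [-1, 0, 1], format="csc")
solve_A = factorized(A)  # pre-factorized sparse LU solve with A
M = LinearOperator(matvec=solve_A, shape=(n, n), dtype=float)

rng = np.random.default_rng(0)
X = rng.random((n, 4))
vals, vecs = lobpcg(A, X, M=M, tol=1e-8, maxiter=200, largest=False)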
Example #37
0
File: test5.py Project: fagan2888/MFMCG
from time import perf_counter as clock  # time.clock was removed in Python 3.8

t = clock()
print(t)  #------------------------------------------------------
#m[s] = array([ h.Hall(s,c,[i,j]) for j in range(h.dim) for i in range(h.dim) ])
#m[s].shape = h.dim,h.dim
#print('m[s] = array([ h.Hall(s,c,[i,j]) for j in range(h.dim) for i in range(h.dim) ])')
#print(clock()-t); t = clock()  #------------------------------------------------------
mcoo[s] = h.matcoo(s, c)
print(clock() - t)
t = clock()  #------------------------------------------------------
mc[s] = mcoo[s].todense()
print('mc[s] = h.matcoo(s,c).todense()')
print(clock() - t)
t = clock()  #------------------------------------------------------
s = 1
#m[s] = array([ h.Hall(s,c,[i,j]) for j in range(h.dim) for i in range(h.dim) ])
#m[s].shape = h.dim,h.dim
#print('m[s] = array([ h.Hall(s,c,[i,j]) for j in range(h.dim) for i in range(h.dim) ])')
#print(clock()-t); t = clock()  #------------------------------------------------------
mcoo[s] = h.matcoo(s, c)
print(clock() - t)
t = clock()  #------------------------------------------------------
mc[s] = mcoo[s].todense()
print('mc[s] = h.matcoo(s,c).todense()')
print(clock() - t)
t = clock()  #------------------------------------------------------
#print('product(m[0]==mc[0]):', product(m[0]==mc[0]))
#print('product(m[1]==mc[1]):', product(m[1]==mc[1]))
w, v = eigh(mc[0])
#print(w)
print(clock() - t)  #------------------------------------------------------
Example #38
0
def test_eigs(self):
    Ne = 1
    if self.lanczos.matrix.size < 1000**2:
        exacteigs = sl.eigh(self.lanczos.matrix, eigvals_only=True)[:Ne]
    else:
        exacteigs = eigsh(self.lanczos.matrix, k=Ne,
                          return_eigenvectors=False, which='SA')[::-1]
    Leigs = self.lanczos.eigs()[:Ne]
    self.assertAlmostEqual(sl.norm(exacteigs - Leigs), 0.0)
Example #39
0
def plot_state_qsphere(rho, figsize=None):
    """Plot the qsphere representation of a quantum state.

    Args:
        rho (ndarray): State vector or density matrix representation
            of a quantum state.
        figsize (tuple): Figure size in inches.

    Returns:
        Figure: A matplotlib figure instance.

    Raises:
        ImportError: Requires matplotlib.
    """
    if not HAS_MATPLOTLIB:
        raise ImportError('Must have Matplotlib installed.')
    rho = _validate_input_state(rho)
    if figsize is None:
        figsize = (7, 7)
    num = int(np.log2(len(rho)))
    # get the eigenvectors and eigenvalues
    we, stateall = linalg.eigh(rho)
    for _ in range(2**num):
        # start with the max
        probmix = we.max()
        prob_location = we.argmax()
        if probmix > 0.001:
            # get the max eigenvalue
            state = stateall[:, prob_location]
            loc = np.absolute(state).argmax()
            # get the element location closest to the lowest bin representation.
            for j in range(2**num):
                test = np.absolute(
                    np.absolute(state[j]) - np.absolute(state[loc]))
                if test < 0.001:
                    loc = j
                    break
            # remove the global phase
            angles = (np.angle(state[loc]) + 2 * np.pi) % (2 * np.pi)
            angleset = np.exp(-1j * angles)
            # print(state)
            # print(angles)
            state = angleset * state
            # print(state)
            state.flatten()
            # start the plotting
            fig = plt.figure(figsize=figsize)
            ax = fig.add_subplot(111, projection='3d')
            ax.axes.set_xlim3d(-1.0, 1.0)
            ax.axes.set_ylim3d(-1.0, 1.0)
            ax.axes.set_zlim3d(-1.0, 1.0)
            ax.set_aspect("equal")
            ax.axes.grid(False)
            # Plot semi-transparent sphere
            u = np.linspace(0, 2 * np.pi, 25)
            v = np.linspace(0, np.pi, 25)
            x = np.outer(np.cos(u), np.sin(v))
            y = np.outer(np.sin(u), np.sin(v))
            z = np.outer(np.ones(np.size(u)), np.cos(v))
            ax.plot_surface(x,
                            y,
                            z,
                            rstride=1,
                            cstride=1,
                            color='k',
                            alpha=0.05,
                            linewidth=0)
            # wireframe
            # Get rid of the panes
            ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
            ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
            ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))

            # Get rid of the spines
            ax.w_xaxis.line.set_color((1.0, 1.0, 1.0, 0.0))
            ax.w_yaxis.line.set_color((1.0, 1.0, 1.0, 0.0))
            ax.w_zaxis.line.set_color((1.0, 1.0, 1.0, 0.0))
            # Get rid of the ticks
            ax.set_xticks([])
            ax.set_yticks([])
            ax.set_zticks([])

            d = num
            for i in range(2**num):
                # get x,y,z points
                element = bin(i)[2:].zfill(num)
                weight = element.count("1")
                zvalue = -2 * weight / d + 1
                number_of_divisions = n_choose_k(d, weight)
                weight_order = bit_string_index(element)
                # if weight_order >= number_of_divisions / 2:
                #    com_key = compliment(element)
                #    weight_order_temp = bit_string_index(com_key)
                #    weight_order = np.floor(
                #        number_of_divisions / 2) + weight_order_temp + 1
                angle = weight_order * 2 * np.pi / number_of_divisions
                xvalue = np.sqrt(1 - zvalue**2) * np.cos(angle)
                yvalue = np.sqrt(1 - zvalue**2) * np.sin(angle)
                ax.plot([xvalue], [yvalue], [zvalue],
                        markerfacecolor=(.5, .5, .5),
                        markeredgecolor=(.5, .5, .5),
                        marker='o',
                        markersize=10,
                        alpha=1)
                # get prob and angle - prob will be shade and angle color
                prob = np.real(np.dot(state[i], state[i].conj()))
                colorstate = phase_to_color_wheel(state[i])
                a = Arrow3D([0, xvalue], [0, yvalue], [0, zvalue],
                            mutation_scale=20,
                            alpha=prob,
                            arrowstyle="-",
                            color=colorstate,
                            lw=10)
                ax.add_artist(a)
            # add weight lines
            for weight in range(d + 1):
                theta = np.linspace(-2 * np.pi, 2 * np.pi, 100)
                z = -2 * weight / d + 1
                r = np.sqrt(1 - z**2)
                x = r * np.cos(theta)
                y = r * np.sin(theta)
                ax.plot(x, y, z, color=(.5, .5, .5))
            # add center point
            ax.plot([0], [0], [0],
                    markerfacecolor=(.5, .5, .5),
                    markeredgecolor=(.5, .5, .5),
                    marker='o',
                    markersize=10,
                    alpha=1)
            we[prob_location] = 0
        else:
            break
    plt.tight_layout()
    plt.close(fig)
    return fig
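
# --- Added sketch (illustrative only): the eigendecomposition step above
# splits a density matrix into pure components; the dominant eigenvector is
# the first state drawn on the qsphere.  Synthetic two-qubit example:
import numpy as np
from scipy import linalg

psi = np.array([1, 0, 0, 1]) / np.sqrt(2)             # Bell state
rho = 0.9 * np.outer(psi, psi.conj()) + 0.1 * np.eye(4) / 4
we, stateall = linalg.eigh(rho)
print(we.max(), stateall[:, we.argmax()])             # dominant pure component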
Example #40
0
        required=False,
        type=bool,
        default=False,
        help='Output also second most likely speaker of VB-HMM')

    args = parser.parse_args()
    assert 0 <= args.loopP <= 1, f'Expecting loopP between 0 and 1, got {args.loopP} instead.'

    # segments file with x-vector timing information
    segs_dict = read_xvector_timing_dict(args.segments_file)

    kaldi_plda = read_plda(args.plda_file)
    plda_mu, plda_tr, plda_psi = kaldi_plda
    W = np.linalg.inv(plda_tr.T.dot(plda_tr))
    B = np.linalg.inv((plda_tr.T / plda_psi).dot(plda_tr))
    acvar, wccn = eigh(B, W)
    plda_psi = acvar[::-1]
    plda_tr = wccn.T[::-1]
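    # Note (added): eigh(B, W) solves B v = lam W v with eigenvectors scaled
    # so that wccn.T @ W @ wccn = I and wccn.T @ B @ wccn = diag(acvar),
    # i.e. a joint whitening/PCA of the between- and within-class covariances.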

    # Open ark file with x-vectors and in each iteration of the following for-loop
    # read a batch of x-vectors corresponding to one recording
    arkit = kaldi_io.read_vec_flt_ark(args.xvec_ark_file)
    recit = itertools.groupby(arkit, lambda e: e[0].rsplit('_', 1)[0]
                              )  # group xvectors in ark by recording name
    for file_name, segs in recit:
        print(file_name)
        seg_names, xvecs = zip(*segs)
        x = np.array(xvecs)

        with h5py.File(args.xvec_transform, 'r') as f:
            mean1 = np.array(f['mean1'])
Example #41
0
def from_rotation_matrix(rot, nonorthogonal=True):
    """Convert input 3x3 rotation matrix to unit quaternion

    By default, if scipy.linalg is available, this function uses
    Bar-Itzhack's algorithm to allow for non-orthogonal matrices.
    [J. Guidance, Vol. 23, No. 6, p. 1085 <http://dx.doi.org/10.2514/2.4654>]
    This will almost certainly be quite a bit slower than simpler versions,
    though it will be more robust to numerical errors in the rotation matrix.
    Also note that Bar-Itzhack uses some pretty weird conventions.  The last
    component of the quaternion appears to represent the scalar, and the
    quaternion itself is conjugated relative to the convention used
    throughout this module.

    If scipy.linalg is not available or if the optional
    `nonorthogonal` parameter is set to `False`, this function falls
    back to the possibly faster, but less robust, algorithm of Markley
    [J. Guidance, Vol. 31, No. 2, p. 440
    <http://dx.doi.org/10.2514/1.31730>].

    Parameters
    ----------
    rot: (...Nx3x3) float array
        Each 3x3 matrix represents a rotation by multiplying (from the left)
        a column vector to produce a rotated column vector.  Note that this
        input may actually have ndims>3; it is just assumed that the last
        two dimensions have size 3, representing the matrix.
    nonorthogonal: bool, optional
        If scipy.linalg is available, use the more robust algorithm of
        Bar-Itzhack.  Default value is True.

    Returns
    -------
    q: array of quaternions
        Unit quaternions resulting in rotations corresponding to input
        rotations.  Output shape is rot.shape[:-2].

    Raises
    ------
    LinAlgError
        If any of the eigenvalue solutions does not converge

    """
    try:
        from scipy import linalg
    except ImportError:
        linalg = False

    rot = np.array(rot, copy=False)
    shape = rot.shape[:-2]

    if linalg and nonorthogonal:
        from operator import mul
        from functools import reduce

        K3 = np.empty(shape + (4, 4))
        K3[..., 0,
           0] = (rot[..., 0, 0] - rot[..., 1, 1] - rot[..., 2, 2]) / 3.0
        K3[..., 0, 1] = (rot[..., 1, 0] + rot[..., 0, 1]) / 3.0
        K3[..., 0, 2] = (rot[..., 2, 0] + rot[..., 0, 2]) / 3.0
        K3[..., 0, 3] = (rot[..., 1, 2] - rot[..., 2, 1]) / 3.0
        K3[..., 1, 0] = K3[..., 0, 1]
        K3[..., 1,
           1] = (rot[..., 1, 1] - rot[..., 0, 0] - rot[..., 2, 2]) / 3.0
        K3[..., 1, 2] = (rot[..., 2, 1] + rot[..., 1, 2]) / 3.0
        K3[..., 1, 3] = (rot[..., 2, 0] - rot[..., 0, 2]) / 3.0
        K3[..., 2, 0] = K3[..., 0, 2]
        K3[..., 2, 1] = K3[..., 1, 2]
        K3[..., 2,
           2] = (rot[..., 2, 2] - rot[..., 0, 0] - rot[..., 1, 1]) / 3.0
        K3[..., 2, 3] = (rot[..., 0, 1] - rot[..., 1, 0]) / 3.0
        K3[..., 3, 0] = K3[..., 0, 3]
        K3[..., 3, 1] = K3[..., 1, 3]
        K3[..., 3, 2] = K3[..., 2, 3]
        K3[..., 3,
           3] = (rot[..., 0, 0] + rot[..., 1, 1] + rot[..., 2, 2]) / 3.0

        if not shape:
            q = zero.copy()
            eigvals, eigvecs = linalg.eigh(K3.T, eigvals=(3, 3))
            q.components[0] = eigvecs[-1]
            q.components[1:] = -eigvecs[:-1].flatten()
            return q
        else:
            q = np.empty(shape + (4, ), dtype=float)  # np.float was removed in modern NumPy
            for flat_index in range(reduce(mul, shape)):
                multi_index = np.unravel_index(flat_index, shape)
                eigvals, eigvecs = linalg.eigh(K3[multi_index], eigvals=(3, 3))
                q[multi_index + (0, )] = eigvecs[-1]
                q[multi_index + (slice(1, None), )] = -eigvecs[:-1].flatten()
            return as_quat_array(q)

    else:  # No scipy.linalg or not `nonorthogonal`
        diagonals = np.empty(shape + (4, ))
        diagonals[..., 0] = rot[..., 0, 0]
        diagonals[..., 1] = rot[..., 1, 1]
        diagonals[..., 2] = rot[..., 2, 2]
        diagonals[..., 3] = rot[..., 0, 0] + rot[..., 1, 1] + rot[..., 2, 2]

        indices = np.argmax(diagonals, axis=-1)

        q = diagonals  # reuse storage space
        indices_i = (indices == 0)
        if np.any(indices_i):
            if indices_i.shape == ():
                indices_i = Ellipsis
            rot_i = rot[indices_i, :, :]
            q[indices_i, 0] = rot_i[..., 2, 1] - rot_i[..., 1, 2]
            q[indices_i,
              1] = 1 + rot_i[..., 0, 0] - rot_i[..., 1, 1] - rot_i[..., 2, 2]
            q[indices_i, 2] = rot_i[..., 0, 1] + rot_i[..., 1, 0]
            q[indices_i, 3] = rot_i[..., 0, 2] + rot_i[..., 2, 0]
        indices_i = (indices == 1)
        if np.any(indices_i):
            if indices_i.shape == ():
                indices_i = Ellipsis
            rot_i = rot[indices_i, :, :]
            q[indices_i, 0] = rot_i[..., 0, 2] - rot_i[..., 2, 0]
            q[indices_i, 1] = rot_i[..., 1, 0] + rot_i[..., 0, 1]
            q[indices_i,
              2] = 1 - rot_i[..., 0, 0] + rot_i[..., 1, 1] - rot_i[..., 2, 2]
            q[indices_i, 3] = rot_i[..., 1, 2] + rot_i[..., 2, 1]
        indices_i = (indices == 2)
        if np.any(indices_i):
            if indices_i.shape == ():
                indices_i = Ellipsis
            rot_i = rot[indices_i, :, :]
            q[indices_i, 0] = rot_i[..., 1, 0] - rot_i[..., 0, 1]
            q[indices_i, 1] = rot_i[..., 2, 0] + rot_i[..., 0, 2]
            q[indices_i, 2] = rot_i[..., 2, 1] + rot_i[..., 1, 2]
            q[indices_i,
              3] = 1 - rot_i[..., 0, 0] - rot_i[..., 1, 1] + rot_i[..., 2, 2]
        indices_i = (indices == 3)
        if np.any(indices_i):
            if indices_i.shape == ():
                indices_i = Ellipsis
            rot_i = rot[indices_i, :, :]
            q[indices_i,
              0] = 1 + rot_i[..., 0, 0] + rot_i[..., 1, 1] + rot_i[..., 2, 2]
            q[indices_i, 1] = rot_i[..., 2, 1] - rot_i[..., 1, 2]
            q[indices_i, 2] = rot_i[..., 0, 2] - rot_i[..., 2, 0]
            q[indices_i, 3] = rot_i[..., 1, 0] - rot_i[..., 0, 1]

        q /= np.linalg.norm(q, axis=-1)[..., np.newaxis]

        return as_quat_array(q)
Example #42
0
def isPSD(A, tol=1e-8):
    E, V = linalg.eigh(A)
    return np.all(E > -tol)
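
# Quick illustrative check (added example; assumes `linalg` above resolves to
# numpy.linalg or scipy.linalg): a Gram matrix X X^T is always positive
# semidefinite, so isPSD should report True for one.
import numpy as np
from numpy import linalg

X = np.random.randn(5, 3)
print(isPSD(X @ X.T))  # expected: True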
Example #43
0
def high_variance_confounds(series,
                            n_confounds=5,
                            percentile=2.,
                            detrend=True):
    """ Return confounds time series extracted from series with highest
    variance.

    Parameters
    ----------
    series: numpy.ndarray
        Timeseries. A timeseries is a column in the "series" array.
        shape (sample number, feature number)

    n_confounds: int, optional
        Number of confounds to return

    percentile: float, optional
        Highest-variance series percentile to keep before computing the
        singular value decomposition, 0. <= `percentile` <= 100.
        series.shape[0] * percentile / 100 must be greater than n_confounds

    detrend: bool, optional
        If True, detrend timeseries before processing.

    Returns
    -------
    v: numpy.ndarray
        highest variance confounds. Shape: (samples, n_confounds)

    Notes
    -----
    This method is related to what has been published in the literature
    as 'CompCor' (Behzadi NeuroImage 2007).

    The implemented algorithm does the following:

    - compute sum of squares for each time series (no mean removal)
    - keep a given percentile of series with highest variances (percentile)
    - compute an svd of the extracted series
    - return a given number (n_confounds) of series from the svd with
      highest singular values.

    See also
    --------
    nilearn.image.high_variance_confounds
    """

    if detrend:
        series = _detrend(series)  # copy

    # Retrieve the voxels|features with highest variance

    # Compute variance without mean removal.
    var = _mean_of_squares(series)

    var_thr = stats.scoreatpercentile(var, 100. - percentile)
    series = series[:, var > var_thr]  # extract columns (i.e. features)
    # Return the singular vectors with largest singular values
    # We solve the symmetric eigenvalue problem here, increasing stability
    s, u = linalg.eigh(series.dot(series.T) / series.shape[0])
    ix_ = np.argsort(s)[::-1]
    u = u[:, ix_[:n_confounds]].copy()
    return u
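
# Illustrative usage (added sketch, assuming the helper functions referenced
# above, such as _mean_of_squares and scipy's `stats`, are importable from
# the same module).  Synthetic data; names are local to this example.
import numpy as np

rng = np.random.default_rng(0)
series = rng.standard_normal((200, 1000))   # (n_samples, n_features)
series[:, :10] *= 20.0                      # plant a few high-variance columns
confounds = high_variance_confounds(series, n_confounds=5, detrend=False)
print(confounds.shape)                      # expected: (200, 5)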
Example #44
0
# Redo loop for better performance

for i in range(nx - 1):
    T[i, i + 1] = -0.5
    T[i + 1, i] = -0.5

T = T / (dx**2) * hbar**2 / m

# <codecell>

H = T + Vext  # Non-interacting Hamiltonian

# <codecell>

En, Psi0 = LA.eigh(H, eigvals=(0, 5))
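# Note (added): newer SciPy deprecates eigh's `eigvals` keyword in favour of
# `subset_by_index=(0, 5)`; both select the six lowest eigenpairs here.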

# <codecell>
plt.figure(1)
for i in range(6):
    plt.plot(x, Psi0[:, i])
plt.title("Non-interacting wavefunctions")
plt.xlabel("Position")
plt.ylabel("Wavefunction")
plt.legend(["GS", "ES1", "ES2", "ES3", "ES4", "ES5"])

# <codecell>
# Start of Hartree part
count = 1
psi_diff = 100
tol = 1e-3  # convergence criterion for the difference in wave functions
Example #45
0
def main():

    # Parse command line arguements
    ###################################
    parser = argparse.ArgumentParser(
        description=("Exact numerical diagonalization of "
                     "transverse field Ising Models of the form:\n"
                     "H = -\sum_{ij} J_{ij}\sigma^z_i \sigma^z_j"
                     "- h \sum_i \sigma^x_i"))
    parser.add_argument('lattice_specifier',
                        help=("Either: L (linear dimensions of the system)"
                              " or the filename base of matrix files"))
    parser.add_argument('-D',
                        type=int,
                        default=1,
                        help='Number of spatial dimensions')
    parser.add_argument('--obc',
                        action='store_true',
                        help='Open boundary conditions (default is PBC)')
    parser.add_argument('--h_min',
                        type=float,
                        default=0.0,
                        help='Minimum value of the transverse field')
    parser.add_argument('--h_max',
                        type=float,
                        default=4.0,
                        help='Maximum value of the transverse field')
    parser.add_argument('--dh',
                        type=float,
                        default=0.5,
                        help='Transverse field step size')
    parser.add_argument('-J',
                        type=float,
                        default=1.0,
                        help='Nearest neighbor Ising coupling')
    parser.add_argument('-k',
                        type=int,
                        default=3,
                        help='Number of eigenvalues to resolve')
    parser.add_argument('-o', default='output', help='output filename base')
    parser.add_argument('--full',
                        action='store_true',
                        help='Full (rather than Lanczos) diagonalization')
    parser.add_argument('--save_state',
                        action='store_true',
                        help='Save ground state to file')
    parser.add_argument('--init_v0',
                        action='store_true',
                        help='Start Lanczos with previous ground state')
    parser.add_argument('--load',
                        action='store_true',
                        help='Load matrices from file')
    parser.add_argument('--fidelity',
                        action='store_true',
                        help='Compute fidelities')
    parser.add_argument('--delta_h_F0',
                        type=float,
                        default=1E-4,
                        help='Initial \Delta h for fidelity')
    parser.add_argument('--N_F_steps',
                        type=int,
                        default=3,
                        help='Number of steps for fidelity')
    parser.add_argument('--overlap',
                        action='store_true',
                        help='Compute the overlap distribution')
    parser.add_argument('--N_ovlp_samples',
                        type=int,
                        default=10**4,
                        help='Number of samples of the overlap distribution')
    parser.add_argument('--SK',
                        action='store_true',
                        help='SK model with infinite range ZZ interactions')

    args = parser.parse_args()
    ###################################

    # Load matrices from file
    ###################################
    load_matrices = args.load
    if load_matrices:
        loaded_params, JZZ, ZZ, Mz, Ms = tfim.load_diag_ME(
            args.lattice_specifier)
        Mx = tfim.load_Mx(args.lattice_specifier)
    ###################################

    # Set calculation Parameters
    ###################################
    out_filename = args.o + '.dat'
    if load_matrices:
        L = loaded_params['L']
        D = len(L)
        PBC = loaded_params['PBC']
        J = loaded_params['J']
    else:
        D = args.D
        L = [int(args.lattice_specifier) for d in range(D)]
        PBC = not args.obc
        J = args.J
    k = args.k
    init_v0 = args.init_v0
    full_diag = args.full
    SK = args.SK
    save_state = args.save_state
    if save_state:
        state_filename = args.o + '_psi0.dat'

    fidelity_on = args.fidelity
    if fidelity_on:
        delta_h_F0 = args.delta_h_F0
        N_F_steps = args.N_F_steps
        dhf = np.flip(delta_h_F0 / (2**(np.arange(N_F_steps))), axis=0)
        F2 = np.zeros(dhf.shape)
        F2_filename = args.o + '_F2.dat'

    overlap_on = args.overlap
    if overlap_on:
        N_ovlp_samples = args.N_ovlp_samples
        Pq_filename = args.o + '_Pq.dat'

    h_arr = np.arange(args.h_min, args.h_max + args.dh / 2, args.dh)
    parameter_string = ("D = {}, L = {}, PBC = {}, J = {},"
                        " k = {}".format(D, L, PBC, J, k))
    print('\tStarting tfim_diag using parameters:\t' + parameter_string)
    ###################################

    # Setup physical quantities
    ##################################
    # Quantities to write to the output file
    phys_keys = ['h', 'e0', 'Delta_1', 'Delta_2', 'Mx', 'Mz2', 'Cnn', 'Ms2']
    phys = {}  # Dictionary for values
    ##################################

    # Build lattice and basis
    ###################################
    lattice = tfim.Lattice(L, PBC)
    N = lattice.N
    basis = tfim.IsingBasis(lattice)
    ###################################

    # Setup output data files
    ##################################
    width = 25
    precision = 16
    header_list = [tfim.phys_labels[key] for key in phys_keys]
    header = ''.join(
        ['{:>{width}}'.format(head, width=width) for head in header_list])
    out_file = open(out_filename, 'w')
    print("\tData will write to {}".format(out_filename))
    out_file.write('#\ttfim_diag parameters:\t' + parameter_string + '\n' +
                   '#' + header[1:] + '\n')

    if save_state:
        state_file = open(state_filename, 'w')
        print("\tGround state will write to {}".format(state_filename))
        state_file.write(
            "# tfim_diag parameters:\t{}\n".format(parameter_string) +
            "#{:>{width_h}}{:>{width_psi}}\n".format(
                'h', '\psi_0', width_h=(width - 1), width_psi=(width + 1)))

    if fidelity_on:
        F2_header = ("#{:>{width}}".format('h', width=(width - 1)) + ''.join([
            '{:{width}.{prec}e}'.format(
                dhfi, width=(width + 1), prec=(precision - 1)) for dhfi in dhf
        ]))
        F2_file = open(F2_filename, 'w')
        print("\tFidelities will write to {}".format(F2_filename))
        F2_file.write('#\ttfim_diag parameters:\t' + parameter_string + '\n' +
                      '#' + F2_header[1:] + '\n')

    if overlap_on:
        q = np.arange(-N, N + 1, 2) / float(N)
        Pq_header = ("#{:>{width}}".format('h', width=(width - 1)) + ''.join([
            '{:{width}.{prec}e}{:>{width}}'.format(
                qi, 'error', width=(width + 1), prec=(precision - 1))
            for qi in q
        ]))
        Pq_file = open(Pq_filename, 'w')
        print("\tOverlap distributions will write to {}".format(Pq_filename))
        Pq_file.write('#\ttfim_diag parameters:\t' + parameter_string + '\n' +
                      '#' + Pq_header[1:] + '\n')
    ##################################

    # Build Matrices
    ###################################
    if not load_matrices:
        print('\tBuilding matrices...')
        JZZ, ZZ = tfim.z_correlations_NN(lattice, basis, J)
        Mz, Ms = tfim.z_magnetizations(lattice, basis)
        Mx = tfim.build_Mx(lattice, basis)

        if SK:
            Jij = tfim.Jij_instance(N, J)
            #Jij = np.ones((N/2,N))/N
            JZZ = tfim.JZZ_SK(basis, Jij)
    ###################################

    # Main Diagonalization Loop
    #######################################################
    if full_diag:
        print("\tStarting full diagaonalization with h in ({},{}), "
              "dh = {}".format(h_arr[0], h_arr[-1], args.dh))
    else:
        print("\tStarting sparse diagaonalization with k={} and "
              "h in ({},{}), dh ={}".format(k, h_arr[0], h_arr[-1], args.dh))
    v0 = None
    for h in h_arr:

        H = -JZZ - h * Mx
        if full_diag:
            # Full diagonalize
            E, v = linalg.eigh(H.todense())
        else:
            # Sparse diagonalize
            E, v = spla.eigsh(H, k=k, which='SA', v0=v0)

        # Sort eigenvalues/vectors
        sort_order = np.argsort(E)
        E = E[sort_order]
        v = v[:, sort_order]

        # Grab Energies & ground state
        e0 = E[0] / N
        Delta = E - E[0]
        psi0 = v[:, 0]

        # Set starting vector for Lanczos:
        if not full_diag and init_v0:
            v0 = psi0

        # Compute expectation values
        ###################################
        Mx0 = np.real((psi0.conj().T).dot(Mx.dot(psi0))) / N
        Mz20 = np.real((psi0.conj().T).dot((Mz.power(2)).dot(psi0))) / (N**2)
        Cnn = np.real((psi0.conj().T).dot(ZZ.dot(psi0))) / lattice.N_links
        Ms20 = np.real((psi0.conj().T).dot((Ms.power(2)).dot(psi0))) / (N**2)
        ###################################

        # Compute fidelities
        ###################################
        if fidelity_on:
            for i, dhfi in enumerate(dhf):
                H_F = -JZZ - (h + dhfi) * Mx
                E_F, v_F = spla.eigsh(H_F, k=2, which='SA', v0=psi0)
                # Sort eigenvalues/vectors
                sort_order_F = np.argsort(E_F)
                E_F = E_F[sort_order_F]
                v_F = v_F[:, sort_order_F]
                F2[i] = (np.absolute(np.vdot(v_F[:, 0], psi0)))**2
        ###################################

        # Overlap distribution
        ###################################
        if overlap_on:
            Pq, Pq_err, q = basis.sample_overlap_distribution(
                psi0, N_ovlp_samples)
        ###################################

        # Put physical values in phys dictionary
        ###################################
        phys['h'] = h
        phys['e0'] = e0
        phys['Delta_1'] = Delta[1]
        phys['Delta_2'] = Delta[2]
        phys['Mx'] = Mx0
        phys['Mz2'] = Mz20
        phys['Cnn'] = Cnn
        phys['Ms2'] = Ms20
        ###################################

        # Write data to output files
        ###################################
        data_list = [phys[key] for key in phys_keys]
        data_line = ''.join([
            '{:{width}.{prec}e}'.format(data, width=width, prec=precision)
            for data in data_list
        ])
        out_file.write(data_line + '\n')

        # Write psi0 to file
        if save_state:
            np.savetxt(state_file,
                       np.concatenate(([h], psi0)).reshape(
                           (1, psi0.shape[0] + 1)),
                       fmt='%{}.{}e'.format(width, precision - 1))

        # Write fidelities to file
        if fidelity_on:
            np.savetxt(F2_file,
                       np.concatenate(([h], F2)).reshape((1, F2.shape[0] + 1)),
                       fmt='%{}.{}e'.format(width, precision - 1))

        # Write overlap distribution to file
        if overlap_on:
            Pq_line = np.zeros(1 + 2 * len(Pq))
            Pq_line[0] = h
            Pq_line[1::2] = Pq
            Pq_line[2::2] = Pq_err
            np.savetxt(Pq_file,
                       Pq_line.reshape((1, Pq_line.shape[0])),
                       fmt='%{}.{}e'.format(width, precision - 1))

    #######################################################

    # Close files
    out_file.close()
    if save_state:
        state_file.close()
    if fidelity_on:
        F2_file.close()
    if overlap_on:
        Pq_file.close()
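
The v0 warm start above is the reusable idea here: seeding each Lanczos run with the previous ground state typically converges in far fewer iterations along a smooth parameter sweep. A minimal self-contained sketch with a toy sparse Hamiltonian (my own setup, not the tfim module):

import numpy as np
import scipy.sparse as sparse
import scipy.sparse.linalg as spla

rng = np.random.default_rng(0)
A = sparse.random(400, 400, density=0.01, random_state=0)
H0 = (A + A.T).tocsr()                       # fixed symmetric part
V = sparse.diags(rng.standard_normal(400))   # "field" term

v0 = None
for h in np.linspace(0.0, 2.0, 11):
    E, v = spla.eigsh(H0 + h * V, k=2, which='SA', v0=v0)
    order = np.argsort(E)
    E, v = E[order], v[:, order]
    v0 = v[:, 0]                             # warm start the next h
    print(h, E[0])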
Example #46
def locally_linear_embedding(X,
                             n_neighbors,
                             n_components,
                             reg=1e-3,
                             eigen_solver='auto',
                             tol=1e-6,
                             max_iter=100,
                             method='standard',
                             hessian_tol=1E-4,
                             modified_tol=1E-12,
                             random_state=None,
                             n_jobs=None):
    """Perform a Locally Linear Embedding analysis on the data.

    Read more in the :ref:`User Guide <locally_linear_embedding>`.

    Parameters
    ----------
    X : {array-like, NearestNeighbors}
        Sample data, shape = (n_samples, n_features), in the form of a
        numpy array or a NearestNeighbors object.

    n_neighbors : integer
        number of neighbors to consider for each point.

    n_components : integer
        number of coordinates for the manifold.

    reg : float
        regularization constant, multiplies the trace of the local covariance
        matrix of the distances.

    eigen_solver : string, {'auto', 'arpack', 'dense'}
        auto : algorithm will attempt to choose the best method for input data

        arpack : use arnoldi iteration in shift-invert mode.
                    For this method, M may be a dense matrix, sparse matrix,
                    or general linear operator.
                    Warning: ARPACK can be unstable for some problems.  It is
                    best to try several random seeds in order to check results.

        dense  : use standard dense matrix operations for the eigenvalue
                    decomposition.  For this method, M must be an array
                    or matrix type.  This method should be avoided for
                    large problems.

    tol : float, optional
        Tolerance for 'arpack' method
        Not used if eigen_solver=='dense'.

    max_iter : integer
        maximum number of iterations for the arpack solver.

    method : {'standard', 'hessian', 'modified', 'ltsa'}
        standard : use the standard locally linear embedding algorithm.
                   see reference [1]_
        hessian  : use the Hessian eigenmap method.  This method requires
                   n_neighbors > n_components * (n_components + 3) / 2.
                   see reference [2]_
        modified : use the modified locally linear embedding algorithm.
                   see reference [3]_
        ltsa     : use local tangent space alignment algorithm
                   see reference [4]_

    hessian_tol : float, optional
        Tolerance for Hessian eigenmapping method.
        Only used if method == 'hessian'

    modified_tol : float, optional
        Tolerance for modified LLE method.
        Only used if method == 'modified'

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`. Used when ``solver`` == 'arpack'.

    n_jobs : int or None, optional (default=None)
        The number of parallel jobs to run for neighbors search.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    Returns
    -------
    Y : array-like, shape [n_samples, n_components]
        Embedding vectors.

    squared_error : float
        Reconstruction error for the embedding vectors. Equivalent to
        ``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.

    References
    ----------

    .. [1] Roweis, S. & Saul, L. Nonlinear dimensionality reduction
        by locally linear embedding.  Science 290:2323 (2000).
    .. [2] Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
        linear embedding techniques for high-dimensional data.
        Proc Natl Acad Sci U S A.  100:5591 (2003).
    .. [3] Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
        Embedding Using Multiple Weights.
        http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
    .. [4] Zhang, Z. & Zha, H. Principal manifolds and nonlinear
        dimensionality reduction via tangent space alignment.
        Journal of Shanghai Univ.  8:406 (2004)
    """
    if eigen_solver not in ('auto', 'arpack', 'dense'):
        raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)

    if method not in ('standard', 'hessian', 'modified', 'ltsa'):
        raise ValueError("unrecognized method '%s'" % method)

    nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs)
    nbrs.fit(X)
    X = nbrs._fit_X

    N, d_in = X.shape

    if n_components > d_in:
        raise ValueError("output dimension must be less than or equal "
                         "to input dimension")
    if n_neighbors >= N:
        raise ValueError("Expected n_neighbors <= n_samples, "
                         " but n_samples = %d, n_neighbors = %d" %
                         (N, n_neighbors))

    if n_neighbors <= 0:
        raise ValueError("n_neighbors must be positive")

    M_sparse = (eigen_solver != 'dense')

    if method == 'standard':
        W = barycenter_kneighbors_graph(nbrs,
                                        n_neighbors=n_neighbors,
                                        reg=reg,
                                        n_jobs=n_jobs)

        # we'll compute M = (I-W)'(I-W)
        # depending on the solver, we'll do this differently
        if M_sparse:
            M = eye(*W.shape, format=W.format) - W
            M = (M.T * M).tocsr()
        else:
            M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # add I to the diagonal: M = (I - W)' (I - W)

    elif method == 'hessian':
        dp = n_components * (n_components + 1) // 2

        if n_neighbors <= n_components + dp:
            raise ValueError("for method='hessian', n_neighbors must be "
                             "greater than "
                             "[n_components * (n_components + 3) / 2]")

        neighbors = nbrs.kneighbors(X,
                                    n_neighbors=n_neighbors + 1,
                                    return_distance=False)
        neighbors = neighbors[:, 1:]

        Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float64)
        Yi[:, 0] = 1

        M = np.zeros((N, N), dtype=np.float64)

        use_svd = (n_neighbors > d_in)

        for i in range(N):
            Gi = X[neighbors[i]]
            Gi -= Gi.mean(0)

            # build Hessian estimator
            if use_svd:
                U = svd(Gi, full_matrices=0)[0]
            else:
                Ci = np.dot(Gi, Gi.T)
                U = eigh(Ci)[1][:, ::-1]

            Yi[:, 1:1 + n_components] = U[:, :n_components]

            j = 1 + n_components
            for k in range(n_components):
                Yi[:, j:j + n_components - k] = (U[:, k:k + 1] *
                                                 U[:, k:n_components])
                j += n_components - k

            Q, R = qr(Yi)

            w = Q[:, n_components + 1:]
            S = w.sum(0)

            S[np.where(abs(S) < hessian_tol)] = 1
            w /= S

            nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
            M[nbrs_x, nbrs_y] += np.dot(w, w.T)

        if M_sparse:
            M = csr_matrix(M)

    elif method == 'modified':
        if n_neighbors < n_components:
            raise ValueError("modified LLE requires "
                             "n_neighbors >= n_components")

        neighbors = nbrs.kneighbors(X,
                                    n_neighbors=n_neighbors + 1,
                                    return_distance=False)
        neighbors = neighbors[:, 1:]

        # find the eigenvectors and eigenvalues of each local covariance
        # matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
        # where the columns are eigenvectors
        V = np.zeros((N, n_neighbors, n_neighbors))
        nev = min(d_in, n_neighbors)
        evals = np.zeros([N, nev])

        # choose the most efficient way to find the eigenvectors
        use_svd = (n_neighbors > d_in)

        if use_svd:
            for i in range(N):
                X_nbrs = X[neighbors[i]] - X[i]
                V[i], evals[i], _ = svd(X_nbrs, full_matrices=True)
            evals **= 2
        else:
            for i in range(N):
                X_nbrs = X[neighbors[i]] - X[i]
                C_nbrs = np.dot(X_nbrs, X_nbrs.T)
                evi, vi = eigh(C_nbrs)
                evals[i] = evi[::-1]
                V[i] = vi[:, ::-1]

        # find regularized weights: this is like normal LLE.
        # because we've already computed the SVD of each covariance matrix,
        # it's faster to use this rather than np.linalg.solve
        reg = 1E-3 * evals.sum(1)

        tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
        tmp[:, :nev] /= evals + reg[:, None]
        tmp[:, nev:] /= reg[:, None]

        w_reg = np.zeros((N, n_neighbors))
        for i in range(N):
            w_reg[i] = np.dot(V[i], tmp[i])
        w_reg /= w_reg.sum(1)[:, None]

        # calculate eta: the median of the ratio of small to large eigenvalues
        # across the points.  This is used to determine s_i, below
        rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
        eta = np.median(rho)

        # find s_i, the size of the "almost null space" for each point:
        # this is the size of the largest set of eigenvalues
        # such that Sum[v; v in set]/Sum[v; v not in set] < eta
        s_range = np.zeros(N, dtype=int)
        evals_cumsum = stable_cumsum(evals, 1)
        eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
        for i in range(N):
            s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
        s_range += n_neighbors - nev  # number of zero eigenvalues

        # Now calculate M.
        # This is the [N x N] matrix whose null space is the desired embedding
        M = np.zeros((N, N), dtype=np.float64)
        for i in range(N):
            s_i = s_range[i]

            # select bottom s_i eigenvectors and calculate alpha
            Vi = V[i, :, n_neighbors - s_i:]
            alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)

            # compute Householder matrix which satisfies
            #  Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
            # using prescription from paper
            h = np.full(s_i, alpha_i) - np.dot(Vi.T, np.ones(n_neighbors))

            norm_h = np.linalg.norm(h)
            if norm_h < modified_tol:
                h *= 0
            else:
                h /= norm_h

            # Householder matrix is
            #  >> Hi = np.identity(s_i) - 2*np.outer(h,h)
            # Then the weight matrix is
            #  >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
            # We do this much more efficiently:
            Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h) +
                  (1 - alpha_i) * w_reg[i, :, None])

            # Update M as follows:
            # >> W_hat = np.zeros( (N,s_i) )
            # >> W_hat[neighbors[i],:] = Wi
            # >> W_hat[i] -= 1
            # >> M += np.dot(W_hat,W_hat.T)
            # We can do this much more efficiently:
            nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
            M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
            Wi_sum1 = Wi.sum(1)
            M[i, neighbors[i]] -= Wi_sum1
            M[neighbors[i], i] -= Wi_sum1
            M[i, i] += s_i

        if M_sparse:
            M = csr_matrix(M)

    elif method == 'ltsa':
        neighbors = nbrs.kneighbors(X,
                                    n_neighbors=n_neighbors + 1,
                                    return_distance=False)
        neighbors = neighbors[:, 1:]

        M = np.zeros((N, N))

        use_svd = (n_neighbors > d_in)

        for i in range(N):
            Xi = X[neighbors[i]]
            Xi -= Xi.mean(0)

            # compute n_components largest eigenvalues of Xi * Xi^T
            if use_svd:
                v = svd(Xi, full_matrices=True)[0]
            else:
                Ci = np.dot(Xi, Xi.T)
                v = eigh(Ci)[1][:, ::-1]

            Gi = np.zeros((n_neighbors, n_components + 1))
            Gi[:, 1:] = v[:, :n_components]
            Gi[:, 0] = 1. / np.sqrt(n_neighbors)

            GiGiT = np.dot(Gi, Gi.T)

            nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
            M[nbrs_x, nbrs_y] -= GiGiT
            M[neighbors[i], neighbors[i]] += 1

    return null_space(M,
                      n_components,
                      k_skip=1,
                      eigen_solver=eigen_solver,
                      tol=tol,
                      max_iter=max_iter,
                      random_state=random_state)
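
Assuming this is sklearn's ``sklearn.manifold.locally_linear_embedding`` (the signature matches), a minimal usage sketch that unrolls a swiss roll into two dimensions:

import numpy as np
from sklearn.datasets import make_swiss_roll
from sklearn.manifold import locally_linear_embedding

X, color = make_swiss_roll(n_samples=1000, random_state=0)
Y, err = locally_linear_embedding(X, n_neighbors=12, n_components=2)
print(Y.shape, err)   # (1000, 2) embedding and its reconstruction error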
Example #47
    k_out, Q_out, mu_out = splitpars(res.x)

    axk = plt.subplot(211)
    axk.plot(k_in, label='k_in')
    axk.plot(k_out, label='k_out')
    axk.legend()

    axk.text(.8,
             .2,
             'mu_in:  {:4.2f}\nmu_out: {:4.2f}'.format(mu_in, mu_out),
             transform=axk.transAxes)

    axq1 = plt.subplot(223)
    axq2 = plt.subplot(224)
    imq1 = axq1.imshow(Q_in)
    plt.colorbar(imq1, ax=axq1, format='%.0e')
    imq2 = axq2.imshow(Q_out)
    plt.colorbar(imq2, ax=axq2, format='%.0e')
    savepath = '/home/ycan/Documents/meeting_notes/2018-12-05/'
    #plt.savefig(savepath+'simulatedsuccess.pdf')
    #plt.savefig(savepath+'simulatedsuccess.png')
    plt.show()

    w_in, v_in = eigh(Q_in)
    w_out, v_out = eigh(Q_out)

    [plt.plot(Qk * Qw, color='C1') for Qk, Qw in zip(Qks, Qws)]
    plt.plot(v_in[:, [0, -2, -1]], color='C0')
    plt.plot(v_out[:, [0, -2, -1]], color='C2')
    plt.show()
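
    # A possible quantitative companion to the eigenvector plots above (my
    # addition; assumes numpy is imported as np in this file): absolute
    # overlaps between the plotted eigenvectors of Q_in and Q_out, where
    # values near 1 on the diagonal indicate matching directions.
    overlap = np.abs(v_in[:, [0, -2, -1]].T @ v_out[:, [0, -2, -1]])
    print(np.round(overlap, 3))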
Example #48
def null_space(M,
               k,
               k_skip=1,
               eigen_solver='arpack',
               tol=1E-6,
               max_iter=100,
               random_state=None):
    """
    Find the null space of a matrix M.

    Parameters
    ----------
    M : {array, matrix, sparse matrix, LinearOperator}
        Input covariance matrix: should be symmetric positive semi-definite

    k : integer
        Number of eigenvalues/vectors to return

    k_skip : integer, optional
        Number of low eigenvalues to skip.

    eigen_solver : string, {'auto', 'arpack', 'dense'}
        auto : algorithm will attempt to choose the best method for input data
        arpack : use arnoldi iteration in shift-invert mode.
                    For this method, M may be a dense matrix, sparse matrix,
                    or general linear operator.
                    Warning: ARPACK can be unstable for some problems.  It is
                    best to try several random seeds in order to check results.
        dense  : use standard dense matrix operations for the eigenvalue
                    decomposition.  For this method, M must be an array
                    or matrix type.  This method should be avoided for
                    large problems.

    tol : float, optional
        Tolerance for 'arpack' method.
        Not used if eigen_solver=='dense'.

    max_iter : int
        Maximum number of iterations for 'arpack' method.
        Not used if eigen_solver=='dense'

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`. Used when ``solver`` == 'arpack'.

    """
    if eigen_solver == 'auto':
        if M.shape[0] > 200 and k + k_skip < 10:
            eigen_solver = 'arpack'
        else:
            eigen_solver = 'dense'

    if eigen_solver == 'arpack':
        random_state = check_random_state(random_state)
        # initialize with [-1,1] as in ARPACK
        v0 = random_state.uniform(-1, 1, M.shape[0])
        try:
            eigen_values, eigen_vectors = eigsh(M,
                                                k + k_skip,
                                                sigma=0.0,
                                                tol=tol,
                                                maxiter=max_iter,
                                                v0=v0)
        except RuntimeError as msg:
            raise ValueError("Error in determining null-space with ARPACK. "
                             "Error message: '%s'. "
                             "Note that method='arpack' can fail when the "
                             "weight matrix is singular or otherwise "
                             "ill-behaved.  method='dense' is recommended. "
                             "See online documentation for more information." %
                             msg)

        return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
    elif eigen_solver == 'dense':
        if hasattr(M, 'toarray'):
            M = M.toarray()
        eigen_values, eigen_vectors = eigh(M,
                                           eigvals=(k_skip, k + k_skip - 1),
                                           overwrite_a=True)
        index = np.argsort(np.abs(eigen_values))
        return eigen_vectors[:, index], np.sum(eigen_values)
    else:
        raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
Example #49
File: qubit.py  Project: qfizik/pennylane
    def _construct_metric_tensor(self, *, diag_approx=False):
        """Construct metric tensor subcircuits for qubit circuits.

        Constructs a set of quantum circuits for computing a block-diagonal approximation of the
        Fubini-Study metric tensor on the parameter space of the variational circuit represented
        by the QNode, using the Quantum Geometric Tensor.

        If the parameter appears in a gate :math:`G`, the subcircuit contains
        all gates which precede :math:`G`, and :math:`G` is replaced by the variance
        value of its generator.

        Args:
            diag_approx (bool): iff True, use the diagonal approximation

        Raises:
            QuantumFunctionError: if a metric tensor cannot be generated because no generator
                was defined

        """
        # pylint: disable=too-many-statements, too-many-branches

        self._metric_tensor_subcircuits = {}
        for queue, curr_ops, param_idx, _ in self.circuit.iterate_parametrized_layers(
        ):
            obs = []
            scale = []

            Ki_matrices = []
            KiKj_matrices = []
            Ki_ev = []
            KiKj_ev = []
            V = None

            # for each operation in the layer, get the generator and convert it to a variance
            for n, op in enumerate(curr_ops):
                gen, s = op.generator
                w = op.wires

                if gen is None:
                    raise QuantumFunctionError(
                        "Can't generate metric tensor, operation {}"
                        "has no defined generator".format(op))

                # get the observable corresponding to the generator of the current operation
                if isinstance(gen, np.ndarray):
                    # generator is a Hermitian matrix
                    variance = var(qml.Hermitian(gen, w, do_queue=False))

                    if not diag_approx:
                        Ki_matrices.append((n, expand(gen, w, self.num_wires)))

                elif issubclass(gen, Observable):
                    # generator is an existing PennyLane operation
                    variance = var(gen(w, do_queue=False))

                    if not diag_approx:
                        if issubclass(gen, qml.PauliX):
                            mat = np.array([[0, 1], [1, 0]])
                        elif issubclass(gen, qml.PauliY):
                            mat = np.array([[0, -1j], [1j, 0]])
                        elif issubclass(gen, qml.PauliZ):
                            mat = np.array([[1, 0], [0, -1]])

                        Ki_matrices.append((n, expand(mat, w, self.num_wires)))

                else:
                    raise QuantumFunctionError(
                        "Can't generate metric tensor, generator {}"
                        "has no corresponding observable".format(gen))

                obs.append(variance)
                scale.append(s)

            if not diag_approx:
                # In order to compute the block diagonal portion of the metric tensor,
                # we need to compute 'second order' <psi|K_i K_j|psi> terms.

                for i, j in itertools.product(range(len(Ki_matrices)),
                                              repeat=2):
                    # compute the matrices representing all K_i K_j terms
                    obs1 = Ki_matrices[i]
                    obs2 = Ki_matrices[j]
                    KiKj_matrices.append(
                        ((obs1[0], obs2[0]), obs1[1] @ obs2[1]))

                V = np.identity(2**self.num_wires, dtype=np.complex128)

                # generate the unitary operation to rotate to
                # the shared eigenbasis of all observables
                for _, term in Ki_matrices:
                    _, S = linalg.eigh(V.conj().T @ term @ V)
                    V = np.round(V @ S, 15)

                V = V.conj().T

                # calculate the eigenvalues for
                # each observable in the shared eigenbasis
                for idx, term in Ki_matrices:
                    eigs = np.diag(V @ term @ V.conj().T).real
                    Ki_ev.append((idx, eigs))

                for idx, term in KiKj_matrices:
                    eigs = np.diag(V @ term @ V.conj().T).real
                    KiKj_ev.append((idx, eigs))

            self._metric_tensor_subcircuits[param_idx] = {
                "queue": queue,
                "observable": obs,
                "Ki_expectations": Ki_ev,
                "KiKj_expectations": KiKj_ev,
                "eigenbasis_matrix": V,
                "result": None,
                "scale": scale,
            }
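
The successive-eigh loop above is a generic trick for rotating into the shared eigenbasis of commuting observables. A standalone sketch (my own construction, with nondegenerate spectra assumed, so one pass through the loop already suffices):

import numpy as np
from scipy import linalg

rng = np.random.default_rng(1)
Q, _ = np.linalg.qr(rng.standard_normal((4, 4)))   # common eigenbasis
K1 = Q @ np.diag([1., 2., 3., 4.]) @ Q.T           # commuting Hermitian pair
K2 = Q @ np.diag([4., -1., 7., 2.]) @ Q.T

V = np.identity(4, dtype=np.complex128)
for term in (K1, K2):
    _, S = linalg.eigh(V.conj().T @ term @ V)
    V = np.round(V @ S, 15)
V = V.conj().T

for term in (K1, K2):
    # both conjugations are now diagonal; print the shared-basis eigenvalues
    print(np.round(np.diag(V @ term @ V.conj().T).real, 10))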
Example #50
def lobpcg(A,
           X,
           B=None,
           M=None,
           Y=None,
           tol=None,
           maxiter=20,
           largest=True,
           verbosityLevel=0,
           retLambdaHistory=False,
           retResidualNormsHistory=False):
    """Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG)

    LOBPCG is a preconditioned eigensolver for large symmetric positive
    definite (SPD) generalized eigenproblems.

    Parameters
    ----------
    A : {sparse matrix, dense matrix, LinearOperator}
        The symmetric linear operator of the problem, usually a
        sparse matrix.  Often called the "stiffness matrix".
    X : array_like
        Initial approximation to the k eigenvectors. If A has
        shape=(n,n) then X should have shape shape=(n,k).
    B : {dense matrix, sparse matrix, LinearOperator}, optional
        the right hand side operator in a generalized eigenproblem.
        by default, B = Identity
        often called the "mass matrix"
    M : {dense matrix, sparse matrix, LinearOperator}, optional
        preconditioner to A; by default M = Identity
        M should approximate the inverse of A
    Y : array_like, optional
        n-by-sizeY matrix of constraints, sizeY < n
        The iterations will be performed in the B-orthogonal complement
        of the column-space of Y. Y must be full rank.
    tol : scalar, optional
        Solver tolerance (stopping criterion)
        by default: tol=n*sqrt(eps)
    maxiter : integer, optional
        maximum number of iterations
        by default: maxiter=min(n,20)
    largest : bool, optional
        when True, solve for the largest eigenvalues, otherwise the smallest
    verbosityLevel : integer, optional
        controls solver output.  default: verbosityLevel = 0.
    retLambdaHistory : boolean, optional
        whether to return eigenvalue history
    retResidualNormsHistory : boolean, optional
        whether to return history of residual norms

    Returns
    -------
    w : array
        Array of k eigenvalues
    v : array
        An array of k eigenvectors.  V has the same shape as X.
    lambdas : list of arrays, optional
        The eigenvalue history, if `retLambdaHistory` is True.
    rnorms : list of arrays, optional
        The history of residual norms, if `retResidualNormsHistory` is True.

    Examples
    --------

    Solve A x = lambda B x with constraints and preconditioning.

    >>> import numpy as np
    >>> from scipy.sparse import spdiags, issparse
    >>> from scipy.sparse.linalg import lobpcg, LinearOperator
    >>> n = 100
    >>> vals = [np.arange(n, dtype=np.float64) + 1]
    >>> A = spdiags(vals, 0, n, n)
    >>> A.toarray()
    array([[  1.,   0.,   0., ...,   0.,   0.,   0.],
           [  0.,   2.,   0., ...,   0.,   0.,   0.],
           [  0.,   0.,   3., ...,   0.,   0.,   0.],
           ...,
           [  0.,   0.,   0., ...,  98.,   0.,   0.],
           [  0.,   0.,   0., ...,   0.,  99.,   0.],
           [  0.,   0.,   0., ...,   0.,   0., 100.]])

    Constraints.

    >>> Y = np.eye(n, 3)

    Initial guess for eigenvectors, should have linearly independent
    columns. Column dimension = number of requested eigenvalues.

    >>> X = np.random.rand(n, 3)

    Preconditioner -- inverse of A (as an abstract linear operator).

    >>> invA = spdiags([1./vals[0]], 0, n, n)
    >>> def precond( x ):
    ...     return invA  * x
    >>> M = LinearOperator(matvec=precond, shape=(n, n), dtype=float)

    Here, ``invA`` could of course have been used directly as a preconditioner.
    Let us then solve the problem:

    >>> eigs, vecs = lobpcg(A, X, Y=Y, M=M, largest=False)
    >>> eigs
    array([4., 5., 6.])

    Note that the vectors passed in Y are the eigenvectors of the 3 smallest
    eigenvalues. The results returned are orthogonal to those.

    Notes
    -----
    If both retLambdaHistory and retResidualNormsHistory are True,
    the return tuple has the following format
    (lambda, V, lambda history, residual norms history).

    In the following ``n`` denotes the matrix size and ``m`` the number
    of required eigenvalues (smallest or largest).

    The LOBPCG code internally solves eigenproblems of the size 3``m`` on every
    iteration by calling the "standard" dense eigensolver, so if ``m`` is not
    small enough compared to ``n``, it does not make sense to call the LOBPCG
    code, but rather one should use the "standard" eigensolver,
    e.g. numpy or scipy function in this case.
    If one calls the LOBPCG algorithm for 5``m``>``n``,
    it will most likely break internally, so the code tries to call
    the standard function instead.

    It is not that n should be large for the LOBPCG to work, but rather the
    ratio ``n``/``m`` should be large. If you call LOBPCG with ``m``=1
    and ``n``=10, it works though ``n`` is small. The method is intended
    for extremely large ``n``/``m``, see e.g., reference [28] in
    https://arxiv.org/abs/0705.2626

    The convergence speed depends basically on two factors:

    1. How well the sought eigenvalues are separated from the rest
       of the spectrum. One can try to vary ``m`` to make this better.

    2. How well conditioned the problem is. This can be changed by using proper
       preconditioning. For example, a rod vibration test problem (under tests
       directory) is ill-conditioned for large ``n``, so convergence will be
       slow, unless efficient preconditioning is used. For this specific
       problem, a good simple preconditioner function would be a linear solve
       for A, which is easy to code since A is tridiagonal.

    *Acknowledgements*

    lobpcg.py code was written by Robert Cimrman.
    Many thanks belong to Andrew Knyazev, the author of the algorithm,
    for lots of advice and support.

    References
    ----------
    .. [1] A. V. Knyazev (2001),
           Toward the Optimal Preconditioned Eigensolver: Locally Optimal
           Block Preconditioned Conjugate Gradient Method.
           SIAM Journal on Scientific Computing 23, no. 2,
           pp. 517-541. http://dx.doi.org/10.1137/S1064827500366124

    .. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov
           (2007), Block Locally Optimal Preconditioned Eigenvalue Xolvers
           (BLOPEX) in hypre and PETSc. https://arxiv.org/abs/0705.2626

    .. [3] A. V. Knyazev's C and MATLAB implementations:
           https://bitbucket.org/joseroman/blopex
    """
    blockVectorX = X
    blockVectorY = Y
    residualTolerance = tol
    maxIterations = maxiter

    if blockVectorY is not None:
        sizeY = blockVectorY.shape[1]
    else:
        sizeY = 0

    # Block size.
    if len(blockVectorX.shape) != 2:
        raise ValueError('expected rank-2 array for argument X')

    n, sizeX = blockVectorX.shape

    if verbosityLevel:
        aux = "Solving "
        if B is None:
            aux += "standard"
        else:
            aux += "generalized"
        aux += " eigenvalue problem with"
        if M is None:
            aux += "out"
        aux += " preconditioning\n\n"
        aux += "matrix size %d\n" % n
        aux += "block size %d\n\n" % sizeX
        if blockVectorY is None:
            aux += "No constraints\n\n"
        else:
            if sizeY > 1:
                aux += "%d constraints\n\n" % sizeY
            else:
                aux += "%d constraint\n\n" % sizeY
        print(aux)

    A = _makeOperator(A, (n, n))
    B = _makeOperator(B, (n, n))
    M = _makeOperator(M, (n, n))

    if (n - sizeY) < (5 * sizeX):
        # warn('The problem size is small compared to the block size.' \
        #        ' Using dense eigensolver instead of LOBPCG.')

        sizeX = min(sizeX, n)

        if blockVectorY is not None:
            raise NotImplementedError('The dense eigensolver '
                                      'does not support constraints.')

        # Define the closed range of indices of eigenvalues to return.
        if largest:
            eigvals = (n - sizeX, n - 1)
        else:
            eigvals = (0, sizeX - 1)

        A_dense = A(np.eye(n, dtype=A.dtype))
        B_dense = None if B is None else B(np.eye(n, dtype=B.dtype))

        vals, vecs = eigh(A_dense,
                          B_dense,
                          eigvals=eigvals,
                          check_finite=False)
        if largest:
            # Reverse order to be compatible with eigs() in 'LM' mode.
            vals = vals[::-1]
            vecs = vecs[:, ::-1]

        return vals, vecs

    if (residualTolerance is None) or (residualTolerance <= 0.0):
        residualTolerance = np.sqrt(1e-15) * n

    # Apply constraints to X.
    if blockVectorY is not None:

        if B is not None:
            blockVectorBY = B(blockVectorY)
        else:
            blockVectorBY = blockVectorY

        # gramYBY is a dense array.
        gramYBY = np.dot(blockVectorY.T.conj(), blockVectorBY)
        try:
            # gramYBY is a Cholesky factor from now on...
            gramYBY = cho_factor(gramYBY)
        except LinAlgError:
            raise ValueError('cannot handle linearly dependent constraints')

        _applyConstraints(blockVectorX, gramYBY, blockVectorBY, blockVectorY)

    ##
    # B-orthonormalize X.
    blockVectorX, blockVectorBX = _b_orthonormalize(B, blockVectorX)

    ##
    # Compute the initial Ritz vectors: solve the eigenproblem.
    blockVectorAX = A(blockVectorX)
    gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)

    _lambda, eigBlockVector = eigh(gramXAX, check_finite=False)
    ii = _get_indx(_lambda, sizeX, largest)
    _lambda = _lambda[ii]

    eigBlockVector = np.asarray(eigBlockVector[:, ii])
    blockVectorX = np.dot(blockVectorX, eigBlockVector)
    blockVectorAX = np.dot(blockVectorAX, eigBlockVector)
    if B is not None:
        blockVectorBX = np.dot(blockVectorBX, eigBlockVector)

    ##
    # Active index set.
    activeMask = np.ones((sizeX, ), dtype=bool)

    lambdaHistory = [_lambda]
    residualNormsHistory = []

    previousBlockSize = sizeX
    ident = np.eye(sizeX, dtype=A.dtype)
    ident0 = np.eye(sizeX, dtype=A.dtype)

    ##
    # Main iteration loop.

    blockVectorP = None  # set during iteration
    blockVectorAP = None
    blockVectorBP = None

    iterationNumber = -1
    while iterationNumber < maxIterations:
        iterationNumber += 1
        if verbosityLevel > 0:
            print('iteration %d' % iterationNumber)

        if B is not None:
            aux = blockVectorBX * _lambda[np.newaxis, :]

        else:
            aux = blockVectorX * _lambda[np.newaxis, :]

        blockVectorR = blockVectorAX - aux

        aux = np.sum(blockVectorR.conjugate() * blockVectorR, 0)
        residualNorms = np.sqrt(aux)

        residualNormsHistory.append(residualNorms)

        ii = np.where(residualNorms > residualTolerance, True, False)
        activeMask = activeMask & ii
        if verbosityLevel > 2:
            print(activeMask)

        currentBlockSize = activeMask.sum()
        if currentBlockSize != previousBlockSize:
            previousBlockSize = currentBlockSize
            ident = np.eye(currentBlockSize, dtype=A.dtype)

        if currentBlockSize == 0:
            break

        if verbosityLevel > 0:
            print('current block size:', currentBlockSize)
            print('eigenvalue:', _lambda)
            print('residual norms:', residualNorms)
        if verbosityLevel > 10:
            print(eigBlockVector)

        activeBlockVectorR = _as2d(blockVectorR[:, activeMask])

        if iterationNumber > 0:
            activeBlockVectorP = _as2d(blockVectorP[:, activeMask])
            activeBlockVectorAP = _as2d(blockVectorAP[:, activeMask])
            if B is not None:
                activeBlockVectorBP = _as2d(blockVectorBP[:, activeMask])

        if M is not None:
            # Apply preconditioner T to the active residuals.
            activeBlockVectorR = M(activeBlockVectorR)

        ##
        # Apply constraints to the preconditioned residuals.
        if blockVectorY is not None:
            _applyConstraints(activeBlockVectorR, gramYBY, blockVectorBY,
                              blockVectorY)

        ##
        # B-orthonormalize the preconditioned residuals.

        aux = _b_orthonormalize(B, activeBlockVectorR)
        activeBlockVectorR, activeBlockVectorBR = aux

        activeBlockVectorAR = A(activeBlockVectorR)

        if iterationNumber > 0:
            if B is not None:
                aux = _b_orthonormalize(B,
                                        activeBlockVectorP,
                                        activeBlockVectorBP,
                                        retInvR=True)
                activeBlockVectorP, activeBlockVectorBP, invR = aux
                activeBlockVectorAP = np.dot(activeBlockVectorAP, invR)

            else:
                aux = _b_orthonormalize(B, activeBlockVectorP, retInvR=True)
                activeBlockVectorP, _, invR = aux
                activeBlockVectorAP = np.dot(activeBlockVectorAP, invR)

        ##
        # Perform the Rayleigh Ritz Procedure:
        # Compute symmetric Gram matrices:

        if B is not None:
            xaw = np.dot(blockVectorX.T.conj(), activeBlockVectorAR)
            waw = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAR)
            xbw = np.dot(blockVectorX.T.conj(), activeBlockVectorBR)

            if iterationNumber > 0:
                xap = np.dot(blockVectorX.T.conj(), activeBlockVectorAP)
                wap = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAP)
                pap = np.dot(activeBlockVectorP.T.conj(), activeBlockVectorAP)
                xbp = np.dot(blockVectorX.T.conj(), activeBlockVectorBP)
                wbp = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBP)

                gramA = np.bmat([[np.diag(_lambda), xaw, xap],
                                 [xaw.T.conj(), waw, wap],
                                 [xap.T.conj(),
                                  wap.T.conj(), pap]])

                gramB = np.bmat([[ident0, xbw, xbp],
                                 [xbw.T.conj(), ident, wbp],
                                 [xbp.T.conj(),
                                  wbp.T.conj(), ident]])
            else:
                gramA = np.bmat([[np.diag(_lambda), xaw], [xaw.T.conj(), waw]])
                gramB = np.bmat([[ident0, xbw], [xbw.T.conj(), ident]])

        else:
            xaw = np.dot(blockVectorX.T.conj(), activeBlockVectorAR)
            waw = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAR)
            xbw = np.dot(blockVectorX.T.conj(), activeBlockVectorR)

            if iterationNumber > 0:
                xap = np.dot(blockVectorX.T.conj(), activeBlockVectorAP)
                wap = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAP)
                pap = np.dot(activeBlockVectorP.T.conj(), activeBlockVectorAP)
                xbp = np.dot(blockVectorX.T.conj(), activeBlockVectorP)
                wbp = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorP)

                gramA = np.bmat([[np.diag(_lambda), xaw, xap],
                                 [xaw.T.conj(), waw, wap],
                                 [xap.T.conj(),
                                  wap.T.conj(), pap]])

                gramB = np.bmat([[ident0, xbw, xbp],
                                 [xbw.T.conj(), ident, wbp],
                                 [xbp.T.conj(),
                                  wbp.T.conj(), ident]])
            else:
                gramA = np.bmat([[np.diag(_lambda), xaw], [xaw.T.conj(), waw]])
                gramB = np.bmat([[ident0, xbw], [xbw.T.conj(), ident]])

        if verbosityLevel > 0:
            _report_nonhermitian(gramA, 3, -1, 'gramA')
            _report_nonhermitian(gramB, 3, -1, 'gramB')

        if verbosityLevel > 10:
            _save(gramA, 'gramA')
            _save(gramB, 'gramB')

        # Solve the generalized eigenvalue problem.
        _lambda, eigBlockVector = eigh(gramA, gramB, check_finite=False)
        ii = _get_indx(_lambda, sizeX, largest)

        if verbosityLevel > 10:
            print(ii)
            print(_lambda)

        _lambda = _lambda[ii]
        eigBlockVector = eigBlockVector[:, ii]

        lambdaHistory.append(_lambda)

        if verbosityLevel > 10:
            print('lambda:', _lambda)
#         # Normalize eigenvectors!
#         aux = np.sum( eigBlockVector.conjugate() * eigBlockVector, 0 )
#         eigVecNorms = np.sqrt( aux )
#         eigBlockVector = eigBlockVector / eigVecNorms[np.newaxis, :]
#         eigBlockVector, aux = _b_orthonormalize( B, eigBlockVector )

        if verbosityLevel > 10:
            print(eigBlockVector)

        # Compute Ritz vectors.
        if B is not None:
            if iterationNumber > 0:
                eigBlockVectorX = eigBlockVector[:sizeX]
                eigBlockVectorR = eigBlockVector[sizeX:sizeX +
                                                 currentBlockSize]
                eigBlockVectorP = eigBlockVector[sizeX + currentBlockSize:]

                pp = np.dot(activeBlockVectorR, eigBlockVectorR)
                pp += np.dot(activeBlockVectorP, eigBlockVectorP)

                app = np.dot(activeBlockVectorAR, eigBlockVectorR)
                app += np.dot(activeBlockVectorAP, eigBlockVectorP)

                bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)
                bpp += np.dot(activeBlockVectorBP, eigBlockVectorP)
            else:
                eigBlockVectorX = eigBlockVector[:sizeX]
                eigBlockVectorR = eigBlockVector[sizeX:]

                pp = np.dot(activeBlockVectorR, eigBlockVectorR)
                app = np.dot(activeBlockVectorAR, eigBlockVectorR)
                bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)

            if verbosityLevel > 10:
                print(pp)
                print(app)
                print(bpp)

            blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
            blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app
            blockVectorBX = np.dot(blockVectorBX, eigBlockVectorX) + bpp

            blockVectorP, blockVectorAP, blockVectorBP = pp, app, bpp

        else:
            if iterationNumber > 0:
                eigBlockVectorX = eigBlockVector[:sizeX]
                eigBlockVectorR = eigBlockVector[sizeX:sizeX +
                                                 currentBlockSize]
                eigBlockVectorP = eigBlockVector[sizeX + currentBlockSize:]

                pp = np.dot(activeBlockVectorR, eigBlockVectorR)
                pp += np.dot(activeBlockVectorP, eigBlockVectorP)

                app = np.dot(activeBlockVectorAR, eigBlockVectorR)
                app += np.dot(activeBlockVectorAP, eigBlockVectorP)
            else:
                eigBlockVectorX = eigBlockVector[:sizeX]
                eigBlockVectorR = eigBlockVector[sizeX:]

                pp = np.dot(activeBlockVectorR, eigBlockVectorR)
                app = np.dot(activeBlockVectorAR, eigBlockVectorR)

            if verbosityLevel > 10:
                print(pp)
                print(app)

            blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
            blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app

            blockVectorP, blockVectorAP = pp, app

    if B is not None:
        aux = blockVectorBX * _lambda[np.newaxis, :]

    else:
        aux = blockVectorX * _lambda[np.newaxis, :]

    blockVectorR = blockVectorAX - aux

    aux = np.sum(blockVectorR.conjugate() * blockVectorR, 0)
    residualNorms = np.sqrt(aux)

    if verbosityLevel > 0:
        print('final eigenvalue:', _lambda)
        print('final residual norms:', residualNorms)

    if retLambdaHistory:
        if retResidualNormsHistory:
            return _lambda, blockVectorX, lambdaHistory, residualNormsHistory
        else:
            return _lambda, blockVectorX, lambdaHistory
    else:
        if retResidualNormsHistory:
            return _lambda, blockVectorX, residualNormsHistory
        else:
            return _lambda, blockVectorX
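
A quick cross-check of this solver (my own toy setup, using the packaged ``scipy.sparse.linalg.lobpcg``, which is this code): the smallest eigenpairs of a diagonal operator should match a dense ``eigh`` reference.

import numpy as np
from scipy.linalg import eigh as dense_eigh
from scipy.sparse import spdiags
from scipy.sparse.linalg import lobpcg

n = 200
A_op = spdiags([np.arange(1., n + 1.)], 0, n, n).tocsr()
rng = np.random.default_rng(0)
X0 = rng.standard_normal((n, 3))                   # 3 requested eigenpairs
vals, vecs = lobpcg(A_op, X0, largest=False, maxiter=100, tol=1e-8)
ref = dense_eigh(A_op.toarray(), eigvals_only=True)[:3]
print(np.sort(vals), ref)                          # both approximately [1., 2., 3.]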
Example #51
def eigenvalues(k):
    h = h0 + h1 * np.exp(1j * k) + h1t * np.exp(-1j * k)
    return eigh(h, eigvals_only=True)
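
The snippet takes ``h0``, ``h1``, ``h1t`` from its enclosing scope; a self-contained stand-in (my assumption: the Bloch blocks of an SSH chain) to exercise it:

import numpy as np
from scipy.linalg import eigh
import matplotlib.pyplot as plt

t1, t2 = 1.0, 0.6                      # intra- / inter-cell hopping
h0 = np.array([[0., t1], [t1, 0.]])    # on-site (unit-cell) block
h1 = np.array([[0., 0.], [t2, 0.]])    # hop to the neighboring cell
h1t = h1.conj().T

ks = np.linspace(-np.pi, np.pi, 201)
bands = np.array([eigenvalues(k) for k in ks])   # +/- |t1 + t2*exp(ik)|
plt.plot(ks, bands)
plt.xlabel('k')
plt.ylabel('E(k)')
plt.show()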
Example #52
def RfBandStructure(omega,
                    delta,
                    epsilon,
                    kList=np.linspace(-2.0, 2.0, 600),
                    plot=True):
    Energies = np.zeros((kList.size, 3))
    mF = np.array([-1.0, 0.0, 1.0])
    magnetization = np.zeros((kList.size, 3))
    for ind, k in enumerate(kList):
        H = RfHamiltonian(k, omega, delta, epsilon)
        EigE, EigV = sLA.eigh(H)
        sort = np.argsort(EigE)
        Esorted, eVsorted = EigE[sort], EigV[:, sort]
        Energies[ind] = Esorted
        magnetization[ind] = np.dot(mF, eVsorted * np.conjugate(eVsorted))

    if plot:
        figure = plt.figure()
        pan = figure.add_subplot(1, 1, 1)
        pan.set_title('Omega = ' + str(omega) + ' E_L/hbar, delta = ' +
                      str(delta) + ' E_L/hbar, epsilon = ' + str(epsilon) +
                      ' E_L')
        for i in range(3):
            pan.scatter(kList,
                        Energies[:, i],
                        c=magnetization[:, i],
                        vmin=-1,
                        vmax=1,
                        cmap='jet',
                        marker='.')
        pan.set_xlabel(r'q [$k_R$]')

    return Energies, magnetization
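
``RfHamiltonian`` is defined elsewhere in the source file. A plausible stand-in for a three-level RF-coupled F=1 system in recoil units (my assumption, for illustration only), after which the function above can be called directly:

def RfHamiltonian(k, omega, delta, epsilon):
    # kinetic energy on the diagonal, +/- detuning delta on the mF = +/-1
    # states, quadratic Zeeman shift epsilon on mF = 0, RF coupling omega/2
    return np.array([[k**2 - delta, omega / 2.0, 0.0],
                     [omega / 2.0, k**2 - epsilon, omega / 2.0],
                     [0.0, omega / 2.0, k**2 + delta]])

Energies, magnetization = RfBandStructure(omega=4.0, delta=0.0, epsilon=0.02)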


#roiList=[[545, 585, 390, 430], [545, 585, 440, 480] ,[545, 585,490,530]]
##
#filestart=34
#filestop=63
#fileroot = 'C:/Users/swooty/Documents/Thesis Data/2017Jan27 rf calibrations/PIXIS_27Jan2017'
##counts, fractions, waveDict, probeAvg = readIgor.batchCountMultipleROIs(fileroot,filestart,filestop,roiList,bgndLoc='top')
#tList=waveDict['pulseDelay']
#tRecoils = tList*Erecoil/hbar
#fractions=np.array(fractions)
#
#a=np.array(tRecoils)
#
#popt,pcov = optimize.curve_fit(propagateRfHamiltonian,a,fractions.flatten(), p0=(4.2,0.0))
#print popt,pcov
#tForFit=np.linspace(np.min(tRecoils),np.max(tRecoils),200)
#pops_fitted=propagateRfHamiltonian(tForFit,*popt)
#sort=np.argsort(tRecoils)
#tSorted=tRecoils[sort]
#pop0 = np.array([pops_fitted[i*3] for i in range(tForFit.size)])
#pop1 = np.array([pops_fitted[i*3+1] for i in range(tForFit.size)])
#pop2 = np.array([pops_fitted[i*3+2] for i in range(tForFit.size)])
#
#figure=plt.figure()
#panel=figure.add_subplot(1,1,1)
#panel.set_title(r'$\Omega$ = ' + str(np.round(popt[0],2)) + r' $E_L/\hbar$, $\delta$ = '+str(np.round(popt[1],3))+r' $E_L/\hbar$')#, epsilon = ' + str(np.round(popt[2],3))+ ' Er')
#panel.plot(tList*1e6,fractions[:,0],'ro', label='mF=-1') #tRecoils*hbar*1e6/Erecoil
#panel.plot(tList*1e6,fractions[:,1],'go', label='mF=0')
#panel.plot(tList*1e6,fractions[:,2],'bo', label='mF=+1')
#panel.plot(tForFit*hbar*1e6/Erecoil,pop0,'r-')
#panel.plot(tForFit*hbar*1e6/Erecoil,pop1,'g-')
#panel.plot(tForFit*hbar*1e6/Erecoil,pop2,'b-')
#panel.set_xlabel(r'pulse time [$\mu s$]')
#legend()
Example #53
# Get integrals from files
Vnn, Vne, T, S, ERI = getIntegrals(mol)

# Number of basis functions dim
dim = len(S)

# Build Core Hamiltonian
h = T + Vne

# Set up initial Fock with core guess, and density at 0
F = h
P = np.zeros((dim,dim))
C = np.zeros((dim,dim))

# Form transformation matrix
s, Y = eigh(S)
s = np.diag(s**(-0.5))
X = np.dot(Y, np.dot(s, Y.T))

# Initialize variables
delta = 1.0
conver = 1.0e-10
count = 0

# Start main SCF loop
while delta > conver and count < 256:
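    # --- assumed reconstruction: the scraped example is truncated at the
    # loop header above.  The body below is a generic restricted
    # Hartree-Fock iteration sketch, not the original source.  Chemists'
    # notation ERI[i,j,k,l] = (ij|kl) is assumed.
    nocc = dim // 2    # placeholder: half filling; use the molecule's electron pair count
    J = np.einsum('kl,ijkl->ij', P, ERI)      # Coulomb
    K = np.einsum('kl,ikjl->ij', P, ERI)      # exchange
    F = h + J - 0.5 * K
    Fp = np.dot(X.T, np.dot(F, X))            # Fock in the orthonormalized basis
    e, Cp = eigh(Fp)
    C = np.dot(X, Cp)
    P_new = 2.0 * np.dot(C[:, :nocc], C[:, :nocc].T)
    delta = np.max(np.abs(P_new - P))         # density change as the convergence metric
    P = P_new
    count += 1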
Example #54
    def __init__(self, unitary_matrix):
        """The flip into the Weyl Chamber is described in B. Kraus and J. I. Cirac,
        Phys. Rev. A 63, 062309 (2001).

        FIXME: There's a cleaner-seeming method based on choosing branch cuts carefully, in
        Andrew M. Childs, Henry L. Haselgrove, and Michael A. Nielsen, Phys. Rev. A 68, 052311,
        but I wasn't able to get that to work.

        The overall decomposition scheme is taken from Drury and Love, arXiv:0806.4015 [quant-ph].
        """
        pi2 = np.pi / 2
        pi4 = np.pi / 4

        # Make U be in SU(4)
        U = unitary_matrix.copy()
        U *= la.det(U)**(-0.25)

        Up = _Bd.dot(U).dot(_B)
        M2 = Up.T.dot(Up)

        # M2 is a symmetric complex matrix. We need to decompose it as M2 = P D P^T where
        # P ∈ SO(4), D is diagonal with unit-magnitude elements.
        # D, P = la.eig(M2)  # this can fail for certain kinds of degeneracy
        for _ in range(100):  # FIXME: this randomized algorithm is horrendous
            M2real = np.random.randn() * M2.real + np.random.randn() * M2.imag
            _, P = la.eigh(M2real)
            D = P.T.dot(M2).dot(P).diagonal()
            if np.allclose(P.dot(np.diag(D)).dot(P.T),
                           M2,
                           rtol=1.0e-13,
                           atol=1.0e-13):
                break
        else:
            raise QiskitError(
                "TwoQubitWeylDecomposition: failed to diagonalize M2")

        d = -np.angle(D) / 2
        d[3] = -d[0] - d[1] - d[2]
        cs = np.mod((d[:3] + d[3]) / 2, 2 * np.pi)

        # Reorder the eigenvalues to get in the Weyl chamber
        cstemp = np.mod(cs, pi2)
        np.minimum(cstemp, pi2 - cstemp, cstemp)
        order = np.argsort(cstemp)[[1, 2, 0]]
        cs = cs[order]
        d[:3] = d[order]
        P[:, :3] = P[:, order]

        # Fix the sign of P to be in SO(4)
        if np.real(la.det(P)) < 0:
            P[:, -1] = -P[:, -1]

        # Find K1, K2 so that U = K1.A.K2, with K being product of single-qubit unitaries
        K1 = _B.dot(Up).dot(P).dot(np.diag(np.exp(1j * d))).dot(_Bd)
        K2 = _B.dot(P.T).dot(_Bd)

        K1l, K1r = decompose_two_qubit_product_gate(K1)
        K2l, K2r = decompose_two_qubit_product_gate(K2)

        K1l = K1l.copy()

        # Flip into Weyl chamber
        if cs[0] > pi2:
            cs[0] -= 3 * pi2
            K1l = K1l.dot(_ipy)
            K1r = K1r.dot(_ipy)
        if cs[1] > pi2:
            cs[1] -= 3 * pi2
            K1l = K1l.dot(_ipx)
            K1r = K1r.dot(_ipx)
        conjs = 0
        if cs[0] > pi4:
            cs[0] = pi2 - cs[0]
            K1l = K1l.dot(_ipy)
            K2r = _ipy.dot(K2r)
            conjs += 1
        if cs[1] > pi4:
            cs[1] = pi2 - cs[1]
            K1l = K1l.dot(_ipx)
            K2r = _ipx.dot(K2r)
            conjs += 1
        if cs[2] > pi2:
            cs[2] -= 3 * pi2
            K1l = K1l.dot(_ipz)
            K1r = K1r.dot(_ipz)
        if conjs == 1:
            cs[2] = pi2 - cs[2]
            K1l = K1l.dot(_ipz)
            K2r = _ipz.dot(K2r)
        if cs[2] > pi4:
            cs[2] -= pi2
            K1l = K1l.dot(_ipz)
            K1r = K1r.dot(_ipz)
        self.a = cs[1]
        self.b = cs[0]
        self.c = cs[2]
        self.K1l = K1l
        self.K1r = K1r
        self.K2l = K2l
        self.K2r = K2r
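
The randomized diagonalization loop above works because Re(M2) and Im(M2) share the orthogonal eigenbasis P, so a generic real combination of them is nondegenerate and ``eigh`` recovers P. A standalone check on a synthetic M2 (my own construction):

import numpy as np
import scipy.linalg as la

rng = np.random.default_rng(2)
P_true, _ = np.linalg.qr(rng.standard_normal((4, 4)))   # P in O(4)
D_true = np.exp(1j * rng.uniform(0, 2 * np.pi, 4))      # unit-modulus diagonal
M2 = P_true @ np.diag(D_true) @ P_true.T                # complex symmetric

for _ in range(100):
    M2real = rng.standard_normal() * M2.real + rng.standard_normal() * M2.imag
    _, P = la.eigh(M2real)
    D = (P.T @ M2 @ P).diagonal()
    if np.allclose(P @ np.diag(D) @ P.T, M2, rtol=1e-13, atol=1e-13):
        break
print(np.allclose(P @ np.diag(D) @ P.T, M2, rtol=1e-13, atol=1e-13))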
Example #55
def HF(N,
       K,
       S,
       Hc,
       G,
       Vnn,
       oS,
       SCF_MAX_iteration,
       SCF_ERROR,
       debug=0,
       HFArchive=None):
    '''
    oS = 1 RHF
    oS = 2 UHF
    '''
    print('\n========== Begin {} =========='.format(['RHF', 'UHF'][oS - 1]))

    if oS == 2 and len(N) == 1:
        N = [N[0] - N[0] // 2, N[0] // 2]
    elif oS == 1 and len(N) == 1:
        N = [N[0] // 2]
    else:
        raise RuntimeError()

    print('\nElectrons: ' + str(N) + '\n')

    X = linalg.sqrtm(linalg.inv(S))
    C = np.zeros((oS, K, K))
    P = np.zeros((oS, K, K))

    inisial_gasse(oS, K, P)

    E = 0.0
    count = 0

    t = timer()
    for iteration in range(SCF_MAX_iteration):
        E_old = E
        E = 0.0
        F = np.zeros((oS, K, K))
        nP = np.zeros((oS, K, K))
        for s in range(oS):
            nE = 0.0
            for i in range(K):
                for j in range(K):
                    F[s, i, j] = Hc[i, j]
                    for k in range(K):
                        for l in range(K):
                            F[s, i, j] -= 1 / 2 * P[s, k, l] * G[i, k, j, l]
                            for sp in range(oS):  # renamed from t: avoid clobbering the timer variable
                                F[s, i, j] += 1 / oS * P[sp, k, l] * G[i, j, k, l]

            Fp = X.T @ F[s] @ X
            e, Cp = linalg.eigh(Fp)
            C[s] = X @ Cp

            for i in range(K):
                for j in range(K):
                    for a in range(N[s]):
                        nP[s, i, j] += C[s, i, a] * C[s, j, a] * 2 / oS

            P[s] = nP[s] * Rate + P[s] * (1 - Rate)

            if (P > 2).any():
                print('Warning: Density Matrix Overflow')
            # print('\nDensity Matrix (iteration {:2d})'.format(count))
            # print(str(P))

            for i in range(K):
                for j in range(K):
                    nE += 1 / 2 * P[s, j, i] * (Hc[i, j] + F[s, i, j])
            # print('s: ', s, 'E: ', nE)
            E += nE

        if count % 10 == 0:
            t, dt = timer(), timer() - t
            print('E (iteration {:2d}) = {:12.6f} \t 10 iteration in {:.4f} s'.
                  format(count, E, dt))
        if (abs(E - E_old) < SCF_ERROR) and (iteration > 0):
            print('\n====== SCF converged in {} steps ======'.format(count))
            print('\nE = Eel + Vnn')
            print('  = {:.6f} + {:.6f}'.format(E, Vnn))
            print('  = {:.6f} Hartrees ({} iterations)\n'.format(
                E + Vnn, count))
            # P_ = np.zeros((K,K))
            # for s in range(oS):
            #     P_ += P[s,:,:]
            # print( P_ )
            print('e: ', e)
            # print(str(P))
            if HFArchive:
                HFArchive.e = e
                HFArchive.X = X
                HFArchive.C = C
                HFArchive.F = F
            return E + Vnn

        count += 1

    print('!!!SCF iteration does not converge!!!')
    return -1
Example #56
def spectral_embedding(adjacency, n_components=8, eigen_solver=None,
                       random_state=None, eigen_tol=0.0,
                       norm_laplacian=True, drop_first=True):
    """Project the sample on the first eigenvectors of the graph Laplacian.

    The adjacency matrix is used to compute a normalized graph Laplacian
    whose spectrum (especially the eigenvectors associated to the
    smallest eigenvalues) has an interpretation in terms of minimal
    number of cuts necessary to split the graph into comparably sized
    components.

    This embedding can also 'work' even if the ``adjacency`` variable is
    not strictly the adjacency matrix of a graph but more generally
    an affinity or similarity matrix between samples (for instance the
    heat kernel of a euclidean distance matrix or a k-NN matrix).

    However, care must be taken to always make the affinity matrix symmetric
    so that the eigenvector decomposition works as expected.

    Note : Laplacian Eigenmaps is the actual algorithm implemented here.

    Read more in the :ref:`User Guide <spectral_embedding>`.

    Parameters
    ----------
    adjacency : array-like or sparse matrix, shape: (n_samples, n_samples)
        The adjacency matrix of the graph to embed.

    n_components : integer, optional, default 8
        The dimension of the projection subspace.

    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}, default None
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities.

    random_state : int, RandomState instance or None, optional, default: None
        A pseudo random number generator used for the initialization of the
        lobpcg eigenvectors decomposition.  If int, random_state is the seed
        used by the random number generator; If RandomState instance,
        random_state is the random number generator; If None, the random number
        generator is the RandomState instance used by `np.random`. Used when
        ``eigen_solver`` == 'amg'.

    eigen_tol : float, optional, default=0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.

    drop_first : bool, optional, default=True
        Whether to drop the first eigenvector. For spectral embedding, this
        should be True as the first eigenvector should be constant vector for
        connected graph, but for spectral clustering, this should be kept as
        False to retain the first eigenvector.

    norm_laplacian : bool, optional, default=True
        If True, then compute normalized Laplacian.

    Returns
    -------
    embedding : array, shape=(n_samples, n_components)
        The reduced samples.

    Notes
    -----
    Spectral Embedding (Laplacian Eigenmaps) is most useful when the graph
    has one connected component. If the graph has many components, the first
    few eigenvectors will simply uncover the connected components of the graph.

    References
    ----------
    * https://en.wikipedia.org/wiki/LOBPCG

    * Toward the Optimal Preconditioned Eigensolver: Locally Optimal
      Block Preconditioned Conjugate Gradient Method
      Andrew V. Knyazev
      http://dx.doi.org/10.1137%2FS1064827500366124
    """
    adjacency = check_symmetric(adjacency)

    try:
        from pyamg import smoothed_aggregation_solver
    except ImportError:
        if eigen_solver == "amg":
            raise ValueError("The eigen_solver was set to 'amg', but pyamg is "
                             "not available.")

    if eigen_solver is None:
        eigen_solver = 'arpack'
    elif eigen_solver not in ('arpack', 'lobpcg', 'amg'):
        raise ValueError("Unknown value for eigen_solver: '%s'. "
                         "Should be 'amg', 'arpack', or 'lobpcg'."
                         % eigen_solver)

    random_state = check_random_state(random_state)

    n_nodes = adjacency.shape[0]
    # Whether to drop the first eigenvector
    if drop_first:
        n_components = n_components + 1

    if not _graph_is_connected(adjacency):
        warnings.warn("Graph is not fully connected, spectral embedding"
                      " may not work as expected.")

    laplacian, dd = sparse.csgraph.laplacian(adjacency, normed=norm_laplacian,
                                             return_diag=True)
    if (eigen_solver == 'arpack' or eigen_solver != 'lobpcg' and
       (not sparse.isspmatrix(laplacian) or n_nodes < 5 * n_components)):
        # lobpcg used with eigen_solver='amg' has bugs for low number of nodes
        # for details see the source code in scipy:
        # https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
        # /lobpcg/lobpcg.py#L237
        # or matlab:
        # http://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
        laplacian = _set_diag(laplacian, 1, norm_laplacian)

        # Here we'll use shift-invert mode for fast eigenvalues
        # (see http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
        #  for a short explanation of what this means)
        # Because the normalized Laplacian has eigenvalues between 0 and 2,
        # I - L has eigenvalues between -1 and 1.  ARPACK is most efficient
        # when finding eigenvalues of largest magnitude (keyword which='LM')
        # and when these eigenvalues are very large compared to the rest.
        # For very large, very sparse graphs, I - L can have many, many
        # eigenvalues very near 1.0.  This leads to slow convergence.  So
        # instead, we'll use ARPACK's shift-invert mode, asking for the
        # eigenvalues near 1.0.  This effectively spreads-out the spectrum
        # near 1.0 and leads to much faster convergence: potentially an
        # orders-of-magnitude speedup over simply using keyword which='LA'
        # in standard mode.
        try:
            # We are computing the opposite of the laplacian inplace so as
            # to spare a memory allocation of a possibly very large array
            laplacian *= -1
            v0 = random_state.uniform(-1, 1, laplacian.shape[0])
            lambdas, diffusion_map = eigsh(laplacian, k=n_components,
                                           sigma=1.0, which='LM',
                                           tol=eigen_tol, v0=v0)
            embedding = diffusion_map.T[n_components::-1] * dd
        except RuntimeError:
            # When submatrices are exactly singular, an LU decomposition
            # in arpack fails. We fallback to lobpcg
            eigen_solver = "lobpcg"
            # Revert the laplacian to its opposite to have lobpcg work
            laplacian *= -1

    if eigen_solver == 'amg':
        # Use AMG to get a preconditioner and speed up the eigenvalue
        # problem.
        if not sparse.issparse(laplacian):
            warnings.warn("AMG works better for sparse matrices")
        # lobpcg needs double precision floats
        laplacian = check_array(laplacian, dtype=np.float64,
                                accept_sparse=True)
        laplacian = _set_diag(laplacian, 1, norm_laplacian)
        ml = smoothed_aggregation_solver(check_array(laplacian, 'csr'))
        M = ml.aspreconditioner()
        X = random_state.rand(laplacian.shape[0], n_components + 1)
        X[:, 0] = dd.ravel()
        lambdas, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-12,
                                        largest=False)
        embedding = diffusion_map.T * dd
        if embedding.shape[0] == 1:
            raise ValueError

    elif eigen_solver == "lobpcg":
        # lobpcg needs double precision floats
        laplacian = check_array(laplacian, dtype=np.float64,
                                accept_sparse=True)
        if n_nodes < 5 * n_components + 1:
            # see note above under arpack why lobpcg has problems with small
            # number of nodes
            # lobpcg will fallback to eigh, so we short circuit it
            if sparse.isspmatrix(laplacian):
                laplacian = laplacian.toarray()
            lambdas, diffusion_map = eigh(laplacian)
            embedding = diffusion_map.T[:n_components] * dd
        else:
            laplacian = _set_diag(laplacian, 1, norm_laplacian)
            # We increase the number of eigenvectors requested, as lobpcg
            # doesn't behave well in low dimension
            X = random_state.rand(laplacian.shape[0], n_components + 1)
            X[:, 0] = dd.ravel()
            lambdas, diffusion_map = lobpcg(laplacian, X, tol=1e-15,
                                            largest=False, maxiter=2000)
            embedding = diffusion_map.T[:n_components] * dd
            if embedding.shape[0] == 1:
                raise ValueError

    embedding = _deterministic_vector_sign_flip(embedding)
    if drop_first:
        return embedding[1:n_components].T
    else:
        return embedding[:n_components].T
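
As a minimal standalone illustration, separate from the scikit-learn code above, of the shift-invert trick described in its comments; the graph here is a hypothetical ring, invented for the example:

import numpy as np
from scipy import sparse
from scipy.sparse import csgraph
from scipy.sparse.linalg import eigsh

n = 100                                  # ring graph with n nodes (hypothetical)
rows = np.arange(n)
adj = sparse.coo_matrix((np.ones(n), (rows, (rows + 1) % n)), shape=(n, n))
adj = (adj + adj.T).tocsc()
lap = csgraph.laplacian(adj, normed=True).tocsc()

# The eigenvalues of -L lie in [-2, 0]; those closest to sigma=1.0 are the
# ones near 0, i.e. the smallest eigenvalues of L, which is exactly what
# shift-invert mode makes ARPACK converge to quickly.
vals, vecs = eigsh(-lap, k=4, sigma=1.0, which='LM')
print(np.sort(-vals))                    # smallest eigenvalues of the normalized Laplacian
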
Example #57
    for dd in range(3):
        axs[dd].plot(markers[:, 3 * mm + dd], color=color_list[mm])

# Set up model
ds = 2 * d
do = d

# Initial estimate
est_params = dict()
Imat = np.identity(d)
Zmat = np.zeros((d, d))

est_params['F'] = np.vstack((np.hstack((Imat, Imat)), np.hstack((Zmat, Imat))))
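# F encodes a constant-velocity model: the state stacks position and velocity,
# and position advances by one velocity step per transition.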
est_params['Q'] = 0.001 * np.vstack((np.hstack((Imat, Imat)),
                                     np.hstack((Imat, Imat))))
est_params['val'], est_params['vec'] = la.eigh(est_params['Q'])
est_params['rank'] = np.array([ds])
est_params['H'] = np.hstack((np.identity(d), np.zeros((d, d))))
est_params['R'] = 0.001 * np.identity(d)

prior = GaussianDensity(np.zeros(ds), 1000 * np.identity(ds))
est_degenerate_model = DegenerateLinearModel(ds, do, prior, est_params)
est_basic_model = BasicLinearModel(ds, do, prior, est_params)
est_naive_model = BasicLinearModel(ds, do, prior, est_params)

# Hyperparameters
hyperparams = dict()
hyperparams['rPsi0'] = 0.001 * np.identity(ds)
hyperparams['M0'] = np.zeros((ds, ds))
hyperparams['V0'] = 100 * np.identity(ds)
hyperparams['a0'] = 1
Example #58
ma = 200
ka = ma * w_w1**2
ca = 0
mb = 200
kb = mb * w_w2**2
cb = 0
wna = sqrt(ka / ma)

M = np.array([[m1, 0, 0, 0], [0, ma, 0, 0], [0, 0, m2, 0], [0, 0, 0, mb]])
C = np.array([[c1 + c2 + ca, -ca, -c2, 0], [-ca, ca, 0, 0],
              [-c2, 0, c2 + cb, -cb], [0, 0, -cb, cb]])
K = np.array([[k1 + k2 + ka, -ka, -k2, 0], [-ka, ka, 0, 0],
              [-k2, 0, k2 + kb, -kb], [0, 0, -kb, kb]])

### Natural modes & Natural frequencies:
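# eigh(K, M) solves the generalized symmetric problem K v = w**2 M v;
# eigenvalues return in ascending order, so wn1 <= wn2 <= wn3 <= wn4.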
evals, evecs = eigh(K, M)
wn1 = np.sqrt(evals[0])
wn2 = np.sqrt(evals[1])
wn3 = np.sqrt(evals[2])
wn4 = np.sqrt(evals[3])

#************************************************************************************************

## Simulation:

dt = 1 / (8 * freq_w1)  # Time step
end_time = 1  # Finish time
time = np.arange(0, end_time, dt)  # time range

### Initial Conditions:
x1o = 0.0
Example #59
    def profile_g_properties(
        self,
        gmes: TimeInvariantSolution,
        gmeq: EquationsGeodesic,
        sub: Dict,
        name: str,
        fig_size: Optional[Tuple[float, float]] = None,
        dpi: Optional[int] = None,
        y_limits: Optional[Tuple[Optional[float], Optional[float]]] = None,
        # n_points=121,
        # do_pub_label=False, pub_label='',
        do_gstar: bool = False,
        do_det: bool = False,
        do_eigenvectors: bool = False,
        eta_label_xy: Optional[Tuple[float, float]] = None,
        do_etaxi_label: bool = True,
        legend_loc: str = "lower left",
        # do_mod_v=False,
        # do_recompute=False
        do_pv: bool = False,
    ) -> None:
        r"""
        Plot properties of the metric tensor :math:`g` (or the co-metric
        :math:`g^*`) along a ray: its eigenvectors, its determinant, its
        eigenvalues, or the inner products :math:`p_i v^i`.

        Args:
            gmes:
                instance of single ray solution class defined in
                :mod:`gme.ode.single_ray`
            gmeq:
                GME model equations class instance defined in
                :mod:`gme.core.equations`
        """
        _ = self.create_figure(name, fig_size=fig_size, dpi=dpi)
        y_limits_: Tuple[Optional[float],
                         Optional[float]] = ((None, None)
                                             if y_limits is None else y_limits)
        axes = plt.gca()

        # HACK
        # self.prep_g_arrays(gmes, gmeq, n_points, do_recompute)

        if do_gstar:
            g_matrices_array = self.gstar_matrices_array
        else:
            g_matrices_array = self.g_matrices_array
        x_array = self.x_array
        # t_array  = self.t_array
        rz_array = self.rz_array
        vx_array = self.vx_array
        vz_array = self.vz_array

        if do_gstar:
            # Use of lambdified g matrix here fails for eta=1/4, sin(beta)
            #    for some reason
            # g_matrices_list = [gmeq.gstar_ij_mat_lambdified(x_,vx_,vz_)
            #                for x_,vx_,vz_ in zip(x_array,vx_array,vz_array)]
            g_label = "{g^*}"
            m_label = "co-metric"
            h_label = "H"
            eta_label_xy_: Tuple[float,
                                 float] = ((0.5, 0.2) if eta_label_xy is None
                                           else eta_label_xy)
        else:
            # Use of lambdified g* matrix here fails for eta=1/4, sin(beta)
            #   for some reason
            # g_matrices_list = [gmeq.g_ij_mat_lambdified(x_,vx_,vz_)
            #               for x_,vx_,vz_ in zip(x_array,vx_array,vz_array)]
            g_label = "{g}"
            m_label = "metric"
            h_label = "L"
            eta_label_xy_ = ((0.5,
                              0.85) if eta_label_xy is None else eta_label_xy)
        # g_eigenvalues_array
        #  = np.array([np.real(eig(g_)[0]) for g_ in g_matrices_array])
        # The metric tensor matrices are symmetric therefore Hermitian
        #  so we can use 'eigh'
        # print(f'g_matrices_array = {g_matrices_array}')
        if g_matrices_array is not None:
            g_eigh_array: Optional[List] = [
                eigh(g_) for g_ in g_matrices_array
            ]
            g_det_array = np.array([det(g_) for g_ in g_matrices_array])
        else:
            g_eigh_array = None
            g_det_array = None
        if g_eigh_array is not None:
            g_eigenvalues_array = np.real(
                np.array([g_eigh_[0] for g_eigh_ in g_eigh_array]))
            g_eigenvectors_array = np.real(
                np.array([g_eigh_[1] for g_eigh_ in g_eigh_array]))
        else:
            g_eigenvalues_array = None
            g_eigenvectors_array = None
        if do_eigenvectors and g_eigenvectors_array is not None:
            plt.plot(x_array, rz_array, "0.6", ls="-", lw=3, label=r"ray")
            plt.ylabel(r"Eigenvectors of $" + g_label + "$", fontsize=14)
            arrow_sf = 0.5
            my_arrow_style = mpatches.ArrowStyle.Fancy(
                head_length=0.99 * arrow_sf,
                head_width=0.6 * arrow_sf,
                tail_width=0.01 * arrow_sf,
            )
            step = 8
            off = 0  # sampling offset along the ray
            ev_sf = 0.04
            zipped_arrays = zip(
                x_array[off::step],
                rz_array[off::step],
                g_eigenvectors_array[off::step],
            )
            for x_, rz_, evs_ in zipped_arrays:
                xy_ = np.array([x_, rz_])
                for pm in (-1, +1):
                    axes.annotate(
                        "",
                        xy=xy_ + pm * evs_[0] * ev_sf,
                        xytext=xy_,
                        arrowprops={
                            "arrowstyle": my_arrow_style,
                            "color": "magenta",
                        },
                    )
                    axes.annotate(
                        "",
                        xy=xy_ + pm * evs_[1] * ev_sf,
                        xytext=xy_,
                        arrowprops={
                            "arrowstyle": my_arrow_style,
                            "color": "DarkGreen",
                        },
                    )
            plt.plot(0, 0, "DarkGreen", ls="-", lw=1.5, label="eigenvector 0")
            plt.plot(0, 0, "magenta", ls="-", lw=1.5, label=r"eigenvector 1")
            axes.set_aspect(1)
        elif do_det and g_det_array is not None:
            plt.plot(
                x_array,
                g_det_array,
                "DarkBlue",
                ls="-",
                lw=1.5,
                label=r"$\det(" + g_label + ")$",
            )
            plt.ylabel(
                r"Det of $" + g_label + "$ (Hessian of $" + h_label + "$)",
                fontsize=14,
            )
        elif do_pv:
            px_array = gmes.px_interp(x_array)
            pz_array = gmes.pz_interp(x_array)
            pv_array = px_array * vx_array + pz_array * vz_array
            plt.plot(x_array, pv_array, "r", ls="-", lw=2, label=r"$p_i v^i$")
            if self.gstar_matrices_array is not None:
                gstarpp_array = [
                    np.dot(
                        np.dot(gstar_, np.array([px_, pz_])),
                        np.array([px_, pz_]),
                    ) for gstar_, px_, pz_ in zip(self.gstar_matrices_array,
                                                  px_array, pz_array)
                ]
                plt.plot(
                    x_array,
                    gstarpp_array,
                    "0.5",
                    ls="--",
                    lw=3,
                    label=r"$g^{ij} p_i p_j$",
                )
            if self.g_matrices_array is not None:
                gvv_array = [
                    np.dot(np.dot(g_, np.array([vx_, vz_])),
                           np.array([vx_, vz_])) for g_, vx_, vz_ in zip(
                               self.g_matrices_array, vx_array, vz_array)
                ]
                plt.plot(x_array,
                         gvv_array,
                         "k",
                         ls=":",
                         lw=4,
                         label=r"$g_{ij} v^i v^j$")
            plt.ylabel(
                r"Inner product of $\mathbf{\widetilde{p}}$" +
                r" and $\mathbf{{v}}$",
                fontsize=14,
            )
            legend_loc = "upper left"
        elif g_eigenvalues_array is not None:
            sign_ev0, label_ev0 = ((-1, "negative  ")
                                   if g_eigenvalues_array[0, 0] < 0
                                   else (1, "positive  "))
            sign_ev1, label_ev1 = ((-1, "negative  ")
                                   if g_eigenvalues_array[0, 1] < 0
                                   else (1, "positive  "))
            plt.yscale("log")
            plt.plot(
                x_array,
                sign_ev1 * (g_eigenvalues_array[:, 1]),
                "DarkGreen",
                ls="-",
                lw=1.5,
                label=f"{label_ev1}" + rf"$\lambda_{g_label}(1)$",
            )
            plt.plot(
                x_array,
                sign_ev0 * (g_eigenvalues_array[:, 0]),
                "magenta",
                ls="-",
                lw=1.5,
                label=f"{label_ev0}" + rf"$\lambda_{g_label}(0)$",
            )
            plt.ylabel(
                f"Eigenvalues of {m_label} tensor " + rf"${g_label}$",
                fontsize=12,
            )
        else:
            return

        if do_eigenvectors:
            axes.set_ylim(*y_limits_)
        elif do_det:
            ylim = plt.ylim()
            if ylim[1] < 0:
                axes.set_ylim(ylim[0], 0)
            if ylim[0] > 0:
                axes.set_ylim(0, ylim[1])
            # axes.set_ylim( -(ylim[1]-ylim[0])/20,ylim[1] )
        elif do_pv:
            axes.set_ylim(0, 2)
        plt.grid(True, ls=":")
        plt.xlabel(r"Distance, $x/L_{\mathrm{c}}$  [-]", fontsize=14)
        # axes.set_ylim(ylim[0]*1.1,-0)
        plt.legend(loc=legend_loc, fontsize=12, framealpha=0.95)
        if do_etaxi_label:
            plt.text(
                *eta_label_xy_,
                rf"$\eta={gmeq.eta_}$" + r"$\quad\mathsf{Ci}=$" +
                rf"${round(float(deg(Ci.subs(sub))))}\degree$",
                transform=axes.transAxes,
                horizontalalignment="center",
                verticalalignment="center",
                fontsize=14,
                color="k",
            )
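
The eigh calls above rely on the metric tensor matrices being symmetric (hence Hermitian), as the in-code comment notes. A tiny standalone check with an invented 2x2 tensor, not taken from the source:

import numpy as np
from scipy.linalg import eigh

g = np.array([[2.0, 0.3], [0.3, 1.0]])   # hypothetical symmetric metric tensor
evals, evecs = eigh(g)                    # real ascending eigenvalues, orthonormal columns
assert np.allclose(evecs @ np.diag(evals) @ evecs.T, g)
assert np.allclose(evecs.T @ evecs, np.eye(2))
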
Example #60
signal2 = signal2 / 255.0 - 0.5  # uint8 takes values from 0 to 255

# x is our initial data matrix.
x = [signal1, signal2]

# Plot the signals from both sources to show correlations in the data.
plt.figure()
plt.plot(x[0], x[1], '*b')
plt.ylabel('Signal 2')
plt.xlabel('Signal 1')
plt.title("Original data")

# Calculate the covariance matrix of the initial data.
cov = np.cov(x)
# Calculate eigenvalues and eigenvectors of the covariance matrix.
d, E = LA.eigh(cov)
# Generate a diagonal matrix with the eigenvalues as diagonal elements.
D = np.diag(d)

Di = LA.sqrtm(LA.inv(D))
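# Di is D^(-1/2): it rescales each principal direction to unit variance.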
# Perform whitening. xn is the whitened matrix.
xn = np.dot(Di, np.dot(np.transpose(E), x))
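# Sanity check: the whitened data should now have (approximately) identity
# covariance, i.e. np.cov(xn) ~ np.eye(2).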

# Plot whitened data to show new structure of the data.
plt.figure()
plt.plot(xn[0], xn[1], '*b')
plt.ylabel('Signal 2')
plt.xlabel('Signal 1')
plt.title("Whitened data")

# Perform FOBI.