Example #1
def create_subspace(M, k):
    [size, images] = M.shape
    # calculate the mean
    mean = np.dot(M, np.ones((images, 1))) / images
    if images > size:
        covariance = np.dot((M - mean), (M - mean).T)
        # note: eigh returns (eigenvalues, eigenvectors); the original unpacked them swapped
        [eigenvalues, eigenvectors] = la.eigh(covariance)

    # this should usually be the case, since the number of pixels in a picture is probably
    # greater than the number of input pictures. Instead of building the potentially huge
    # DxD covariance matrix, we calculate the eigenvectors of the smaller NxN matrix
    # and then use them to recover the N eigenvectors of the DxD matrix.
    else:
        L = np.dot((M - mean).T, (M - mean))
        [eigenvalues, eigenvectors] = la.eigh(L)
        eigenvectors = np.dot((M - mean), eigenvectors)
    # note: Python blocks do not introduce a new scope, so `eigenvectors` is visible here

    # to make the eigenvectors unit length or orthonormal
    for i in range(eigenvectors.shape[1]):  # the column count may differ from `images` in the first branch
        eigenvectors[:, i] = eigenvectors[:, i] / la.norm(eigenvectors[:, i])

    sorted_order = np.argsort(eigenvalues)
    sorted_order = np.flipud(sorted_order)

    eigenvalues = eigenvalues[sorted_order]
    eigenvectors = eigenvectors[:, sorted_order]

    principle_eigenvalues = eigenvalues[0:k]
    principle_eigenvectors = eigenvectors[:, 0:k]

    return principle_eigenvalues, principle_eigenvectors, mean
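A minimal usage sketch for create_subspace (assuming `import numpy as np` and `import numpy.linalg as la`, as the calls above imply; the data is hypothetical):

M = np.random.rand(1024, 64)                # 64 images of 32x32 = 1024 pixels, one per column
vals, vecs, mean = create_subspace(M, k=10)
coords = np.dot(vecs.T, M[:, [0]] - mean)   # project the first (centered) image onto the subspace
print(coords.shape)                         # (10, 1)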
Example #2
    def get_msig_pos( self, sctx, eps_app_eng, *args, **kw ):
        '''
        Get the biggest positive principal stress.
        @param sctx:
        @param eps_app_eng:
        '''
        sig_eng, D_mtx = self.get_corr_pred( sctx, eps_app_eng, 0, 0, 0 )
        ms_vct = zeros( 3 )
        shape = sig_eng.shape[0]
        if shape == 3:
            s_mtx = self.map_sig_eng_to_mtx( sig_eng )
            m_sig, m_vct = linalg.eigh( s_mtx )

            # @todo: - this must be written in a more readable way
            # 
            if m_sig[-1] > 0:
                # scale the corresponding eigenvector (a column of m_vct) by the biggest positive stress
                ms_vct[:2] = m_sig[-1] * m_vct[:, -1]
        elif shape == 6:
            s_mtx = self.map_sig_eng_to_mtx( sig_eng )
            m_sig, m_vct = linalg.eigh( s_mtx )
            if m_sig[-1] > 0:
                # scale the corresponding eigenvector by the biggest positive stress
                ms_vct = m_sig[-1] * m_vct[:, -1]
        return ms_vct
Example #3
 def solve_kernel(self, regparam):
     self.regparam = regparam
     K1, K2 = self.K1, self.K2
     Y = self.Y.reshape((K1.shape[0], K2.shape[0]), order='F')
     #assert self.Y.shape == (self.K1.shape[0], self.K2.shape[0]), 'Y.shape!=(K1.shape[0],K2.shape[0]). Y.shape=='+str(Y.shape)+', K1.shape=='+str(self.K1.shape)+', K2.shape=='+str(self.K2.shape)
     if not self.trained:
         self.trained = True
         evals1, V  = la.eigh(K1)
         evals1 = mat(evals1).T
         V = mat(V)
         self.evals1 = evals1
         self.V = V
         
         evals2, U = la.eigh(K2)
         evals2 = mat(evals2).T
         U = mat(U)
         self.evals2 = evals2
         self.U = U
         self.VTYU = V.T * Y * U  # use the reshaped Y from above
     
     newevals = 1. / (self.evals1 * self.evals2.T + regparam)
     
     self.A = multiply(self.VTYU, newevals)
     self.A = self.V * self.A * self.U.T
     self.model = KernelPairwiseModel(self.A)
Example #4
def get_red_rho_A(Sx,Sz,b,N):
    '''
    Form the reduced ground state density matrix.
    Only defined when N >= 2.
    '''
    
    if N < 2:
        raise ValueError("N must be greater than or equal to 2.")
    
    H = get_tran_ising_H(Sx,Sz,b,N)
    E,V = eigh(H.toarray())
    
    rho_A_B = np.outer(V[:,0],V[:,0])
    l,basis = eigh(Sz.toarray())
    rho_A_k = rho_A_B
    for k in range(N-1,0,-1):
        rho_A_m = np.zeros([D**k,D**k])
        for m in range(D):
            # Rewrite basis states in the full N space.
            basis_m = get_full_matrix(dok_matrix(basis)[m].transpose(),k,k)
            basis_m = np.kron(np.eye(D),basis_m.toarray())
            
            # Trace the density matrix over the k-th particle.
            rho_A_m += np.dot(np.dot(np.transpose(basis_m),rho_A_k),basis_m)
        rho_A_k = rho_A_m
    rho_A = rho_A_k
    
    return rho_A
Example #5
def chi1d(h,energies = [0.],t=0.0001,delta=0.01,q=0.001,nk=1000,U=None,adaptive=True,ks=None):
  hkgen = h.get_hk_gen() # get the generating hamiltonian
  n = len(h.geometry.x) # initialize response
  m = np.zeros((len(energies),n*n),dtype=complex) # initialize
  if not adaptive: # full algorithm  
    if ks is None: ks = np.linspace(0.,1.,nk)  # create klist
    for k in ks:
#      print "Doing k=",k
      hk = hkgen(k) # first point
      e1,ev1 = lg.eigh(hk) # get eigenvalues
      hk = hkgen(k+q) # second point
      e2,ev2 = lg.eigh(hk) # get eigenvalues
      ct = calculate_xychi(ev1.T,e1,ev2.T,e2,energies,t,delta) # contribution
      m += ct
    m = m/nk # normalize by the number of kpoints
    ms = [m[i,:].reshape(n,n) for i in range(len(m))] # convert to matrices
  if adaptive: # adaptive algorithm 
    from integration import integrate_matrix
    def get_chik(k): # function which returns a matrix
      """ Get response at a cetain energy"""
      hk = hkgen(k) # first point
      e1,ev1 = lg.eigh(hk) # get eigenvalues
      hk = hkgen(k+q) # second point
      e2,ev2 = lg.eigh(hk) # get eigenvalues
      ct = calculate_xychi(ev1.T,e1,ev2.T,e2,energies,t,delta) # contribution
      return ct # return response
    ms = []
    m = integrate_matrix(get_chik,xlim=[0.,1.],eps=.1,only_imag=False) # add energy
    ms = [m[i,:].reshape(n,n) for i in range(len(m))] # convert to matrices

  if U is None: # raw calculation
    return ms 
  else: # RPA calculation
    return rpachi(ms,U=U)
Example #6
def HEG2(n,V):
    """\
    Does the diagonalization in the discrete variable representation
    """
    X = zeros((n,n),'d')
    for i in range(n):
        if i > 0:
            X[i,i-1] = X[i-1,i] = sqrt(i)/sqrt(2)
    # Eq 1 from HEG
    lam,T = eigh(X)

    KEho = zeros((n,n),'d')
    for i in range(n):
        KEho[i,i] = 0.25*(2*i+1)
        if i > 1:
            KEho[i,i-2] = KEho[i-2,i] = -0.25*sqrt(i-1)*sqrt(i)

    KEx = matmul(transpose(T),matmul(KEho,T))
    #KEx = matmul(T,matmul(KEho,transpose(T)))

    # Form the potential matrix
    # Eq 2 from HEG
    Vx = diag([V(li) for li in lam])

    Hx = KEx + Vx
    print "x\n",lam[:5]
    #from scipy.special.orthogonal import h_roots
    #print h_roots(n)
    
    matprint(KEx,label="T")
    matprint(Vx,label="V")
    matprint(Hx,label="H")
    
    E,U = eigh(Hx)
    return lam,E,U
Example #7
def HEG(n,V):
    X = zeros((n,n),'d')
    for i in range(n):
        if i > 0:
            X[i,i-1] = X[i-1,i] = sqrt(i)/sqrt(2)
    # Eq 1 from HEG
    lam,T = eigh(X)
    print(lam)

    # Form the potential matrix
    # Eq 2 from HEG
    Vx = [V(li) for li in lam]
    Vho = matmul(T,matmul(diag(Vx),transpose(T)))

    KEho = zeros((n,n),'d')
    for i in range(n):
        KEho[i,i] = 0.25*(2*i+1)
        if i > 1:
            KEho[i,i-2] = KEho[i-2,i] = -0.25*sqrt(i-1)*sqrt(i)
    Hho = KEho+Vho
    Hx = matmul(transpose(T),matmul(Hho,T))
    E,U = eigh(Hho)

    # The eigenvectors are in terms of the HO eigenvectors, so
    # we have to multiply by X before returning
    return lam,E,matmul(transpose(T),U)
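A usage sketch for the two DVR routines above (assuming `from numpy import *`-style imports, as their bodies imply): with a harmonic potential the spectrum should approach the harmonic-oscillator ladder n + 1/2.

V = lambda x: 0.5 * x * x
lam, E, U = HEG(30, V)
print(E[:5])   # approximately [0.5, 1.5, 2.5, 3.5, 4.5]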
Example #8
    def set(self,matrix):
        '''
        Set the basis.

        Parameters
        ----------
        matrix : callable
            The function to get the single particle matrix.
        '''
        Eup,Uup,Edw,Udw=[],[],[],[]
        for k in [()] if self.BZ is None else self.BZ.mesh('k'):
            m=matrix(k)
            es,us=nl.eigh(m[:m.shape[0]//2,:m.shape[0]//2])
            Edw.append(es)
            Udw.append(us)
            es,us=nl.eigh(m[m.shape[0]//2:,m.shape[0]//2:])
            Eup.append(es)
            Uup.append(us)
        Eup,Uup=np.asarray(Eup),np.asarray(Uup).transpose((1,0,2))
        Edw,Udw=np.asarray(Edw),np.asarray(Udw).transpose((1,0,2))
        if self.polarization=='up':
            self._E1_=Edw
            self._E2_=Eup
            self._U1_=Udw
            self._U2_=Uup
        else:
            self._E1_=Eup
            self._E2_=Edw
            self._U1_=Uup
            self._U2_=Udw
Example #9
    def diag_H(self):
        '''Diagonalize the Hamiltonians of spin a and spin b.
        '''
        e_a, w_a = nl.eigh(self.Ha)
        e_b, w_b = nl.eigh(self.Hb)
        e_gs = np.sum(e_a[:self.Na]) + np.sum(e_b[:self.Nb]) + self.C
        tmp_a = (w_a[:, :self.Na].dot(w_a.conj().T[:self.Na, :])).diagonal()
        print "tmpa:", tmp_a
        tmp_b = (w_b[:, :self.Nb].dot(w_b.conj().T[:self.Nb, :])).diagonal()
        print "tmpb:", tmp_b
        def vec_equal(v1, v2, Min = 10e-5):
            a = abs(v1-v2)
            a = (a < Min)*1.
            one = np.ones(len(v1))
            return np.array_equal(a, one)

        not_conv = True
        if vec_equal(tmp_a, self.lattice_a) and vec_equal(tmp_b,\
                self.lattice_b):
            not_conv = False
            return e_gs, not_conv

        self.lattice_a = tmp_a.copy() # update new density
        print("lattice a", self.lattice_a)
        self.lattice_b = tmp_b.copy()
        print("lattice b", self.lattice_b)
        return e_gs, not_conv #ground state energy
Example #10
    def __calcEigChan(self,A1,G2,Left,channels=10):
        # Calculate Eigenchannels using recipe from PRB
        # For right eigenchannels, A1=A2, G2=G1 !!!
        if isinstance(A1,MM.SpectralMatrix):
            ev, U = LA.eigh(MM.mm(A1.L,A1.R))
        else:
            ev, U = LA.eigh(A1)

        # This small trick will remove all zero contribution vectors
        # and will diagonalize the tt matrix in the subspace where there
        # are values.
        idx = (ev > 0).nonzero()[0]
        ev = N.sqrt(ev[idx] / ( 2 * N.pi ))
        ev.shape = (1, -1)
        Utilde = ev * U[:, idx]
        
        nuo,nuoL,nuoR = self.nuo, self.nuoL, self.nuoR
        if Left:
            tt=MM.mm(MM.dagger(Utilde[nuo-nuoR:nuo,:]),2*N.pi*G2,Utilde[nuo-nuoR:nuo,:])
        else:
            tt=MM.mm(MM.dagger(Utilde[:nuoL,:]),2*N.pi*G2,Utilde[:nuoL,:])

        # Diagonalize (note that this is on a reduced tt matrix with no 0-contributing columns)
        evF, UF = LA.eigh(tt)
        EC = MM.mm(Utilde, UF[:,-channels:]).T
        return EC[::-1, :], evF[::-1] # reverse eigenvalues
Example #11
 def get_chik(k): # function which returns a matrix
   """ Get response at a cetain energy"""
   hk = hkgen(k) # first point
   e1,ev1 = lg.eigh(hk) # get eigenvalues
   hk = hkgen(k+q) # second point
   e2,ev2 = lg.eigh(hk) # get eigenvalues
   ct = calculate_xychi(ev1.T,e1,ev2.T,e2,energies,t,delta) # contribution
   return ct # return response
Example #12
def linear_algebra():
    """ Use the `numpy.linalg` library to do Linear Algebra 
        For a reference on math, see 'Linear Algebra explained in four pages'
        http://minireference.com/static/tutorials/linear_algebra_in_4_pages.pdf
    """

    ### Setup two vectors
    x = np.array([1, 2, 3, 4])
    y = np.array([5, 6, 7, 8])

    ### Vector Operations include addition, subtraction, scaling, norm (length),
    # dot product, and cross product
    print(np.vdot(x, y))  # Dot product of two vectors


    ### Setup two arrays / matrices
    a = np.array([[1, 2],
                  [3, 9]])
    b = np.array([[2, 4],
                  [5, 6]])


    ### Dot Product of two arrays
    print(np.dot(a, b))


    ### Solving system of equations (i.e. 2 different equations with x and y)
    print(LA.solve(a, b))


    ### Inverse of a matrix undoes the effects of the matrix
    # The matrix multiplied by its inverse returns the
    # 'identity matrix' (ones on the diagonal and zeroes everywhere else);
    # the identity matrix is useful for getting rid of the matrix in some equations
    print(LA.inv(a))  # return inverse of the matrix
    print("\n")


    ### Determinant of a matrix is a special way to combine the entries of a
    # matrix that serves to check whether the matrix is invertible (!=0) or not (=0)
    print(LA.det(a))  # returns the determinant of the array
    print("\n")  # e.g. 3 means that it is invertible


    ### Eigenvectors are a special set of input vectors for which the action of
    # the matrix is described as simple 'scaling'.  When a matrix is multiplied
    # by one of its eigenvectors, the output is the same eigenvector multiplied
    # by a constant (that constant is the 'eigenvalue' of the matrix)
    print(LA.eigvals(a))  # compute the eigenvalues of a general matrix
    print("\n")
    print(LA.eigvalsh(a))  # compute the eigenvalues of a Hermitian or real symmetric matrix
    print("\n")
    print(LA.eig(a))  # return the eigenvalues and eigenvectors of a square matrix
    print("\n")
    print(LA.eigh(a))  # return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix
    print("\n")
Example #13
 def get_chik(k): # function which returns a matrix
   """ Get response at a cetain energy"""
   hk = hkgen(k) # first point
   if collinear: hk = np.matrix([[hk[2*i,2*j] for i in range(len(hk)//2)] for j in range(len(hk)//2)])  # up component
   e1,ev1 = lg.eigh(hk) # get eigenvalues
   hk = hkgen(k+q) # second point
   if collinear: hk = np.matrix([[hk[2*i+1,2*j+1] for i in range(len(hk)//2)] for j in range(len(hk)//2)])  # down component
   e2,ev2 = lg.eigh(hk) # get eigenvalues
   ct = collinear_xychi(ev1.T,e1,ev2.T,e2,energies,t,delta) # contribution
   return ct # return response
Example #14
def energy(s,n = None, scaleEnergy = 1):
  values = []
  if n is None:
    for q in numpy.arange(0,1.+qstep,qstep):
      evalues, evectors = linalg.eigh(ham(q,s,scaleEnergy))
      values.append(evalues[:5])
  else:
    for q in numpy.arange(0,1.+qstep,qstep):
      evalues, evectors = linalg.eigh(ham(q,s,scaleEnergy))
      values.append(evalues[n])
  return values
Example #15
def pca(X, d_prime):
    d,n = X.shape
    # mu: mean vector (columns are samples, so average along axis=1;
    # the original averaged along axis=0, which mixes features and samples)
    mu = X.mean(axis=1, keepdims=True)
    # subtract the mean
    X -= mu
    A = X.copy()

    if d>200 and n<3*d:
        if d_prime > n:
            d_prime = n
        # C: covariance matrix
        C_prime = 1.0/d * np.dot(A.T,A)
        # D_prime: eigenvalues (Delta), B_prime: eigenvectors
        D_prime,B_prime = la.eigh(C_prime)
        #print "B prime: ", B_prime.shape, "- delta: ",  D_prime.shape

        for i in range(n):
            B_prime[:,i] = B_prime[:,i]/np.linalg.norm(B_prime[:,i])

        B = np.dot(A, B_prime)
        D = d/n * D_prime
        #print "B complete: ", B.shape, "- delta: ",  D.shape
        # Sort the eigenvectors, those capturing the most variance first
        order = np.argsort(D, axis=0)[::-1]
        # Reorder the eigenvectors & the eigenvalues
        B = B[:,order]
        D = D[order]

    else:
        C = 1.0/n * np.dot(A,A.T)
        D,B = la.eigh(C) 
        # Sort the eigenvectors, those capturing the most variance first
        order = np.argsort(D)[::-1] # sorting the eigenvalues
        # Reorder the eigenvectors & the eigenvalues
        B = B[:,order]
        D = D[order]

    # B_dprime (d'xn)
    #print "B: ", B.shape, " - ", B[:,:d_prime].shape
    #print "D: ", D.shape
    #print "X: ", X.shape
    #print "d': ",d_prime
    #print "mu: ", mu.shape
    # Project the data onto the first d' components
    B_dprime = B[:,:d_prime]
    y = np.dot(B_dprime.T,X)
    #print y[0]
    #print 
    #print
    #return ['B_dprime':B_dprime,D,B,mu,X]
    return {'B':B, 'B_dprime':B_dprime,'mu':mu,'y':y}, d_prime
Example #16
def test_eig_vs_eigh_above_560():
    # gh-6896
    N = 560

    A = np.arange(N*N).reshape(N, N)
    A = A + A.T

    w1 = np.sort(linalg.eig(A)[0])
    w2 = np.sort(linalg.eigh(A, UPLO='U')[0])
    w3 = np.sort(linalg.eigh(A, UPLO='L')[0])
    assert_array_almost_equal(w1, w2)
    assert_array_almost_equal(w1, w3)
Example #17
def eig_psd(K):
    """Returns the reduced eigendecomposition of the kernel matrix K, so that only the
    eigenvectors corresponding to the nonzero eigenvalues are returned.

    @param K: a positive semi-definite kernel matrix whose rows and columns are indexed by the data points.
    @type K: numpy matrix of floats
    @return: the square roots of the nonzero eigenvalues and the corresponding eigenvectors of K. The square roots of the eigenvalues are contained in an r*1 matrix, where r is the number of nonzero eigenvalues.
    @rtype: a tuple of two numpy matrices"""
    try:
        evals, evecs = la.eigh(K)
    except LinAlgError as e:
        print('Warning, caught a LinAlgError while eigen decomposing: ' + str(e))
        K = K + np.eye(K.shape[0]) * 0.0000000001
        evals, evecs = la.eigh(K)
    # the original snippet is truncated here; a plausible completion, per the
    # docstring, keeps only the components with nonzero (positive) eigenvalues:
    nonzero = np.where(evals > 0)[0]
    return np.mat(np.sqrt(evals[nonzero])).T, np.mat(evecs)[:, nonzero]
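Given the hedged completion above, a quick smoke test (assuming `import numpy as np` and `import numpy.linalg as la`):

X = np.random.rand(6, 3)
K = np.mat(np.dot(X, X.T))    # rank-3 positive semi-definite kernel
sq, vecs = eig_psd(K)
print(sq.shape, vecs.shape)   # typically (3, 1) and (6, 3); round-off may keep a few near-zero modes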
Example #18
 def get_chik(k):
   """Function to integrate"""
   hk = hkgen(k) # first point
   if collinear: hk = np.matrix([[hk[2*i,2*j] for i in range(len(hk)//2)] for j in range(len(hk)//2)])  # up component
   e1,ev1 = lg.eigh(hk) # get eigenvalues
   hk = hkgen(k+q) # second point
   if collinear: hk = np.matrix([[hk[2*i+1,2*j+1] for i in range(len(hk)//2)] for j in range(len(hk)//2)])  # down component
   e2,ev2 = lg.eigh(hk) # get eigenvalues
   if collinear:
     ct = collinear_xychi(ev1.T,e1,ev2.T,e2,energies,t,delta) # contribution
   else:
     ct = calculate_xychi(ev1.T,e1,ev2.T,e2,energies,t,delta) # contribution
   return ct
Example #19
def SVD(mat):
    matT = mat.transpose()
    matmatT = mat.dot(matT)
    matTmat = matT.dot(mat)
    egnvalU, egnvecU = LA.eigh(matmatT)  # unused; U is recovered from V below
    egnvalV, egnvecV = LA.eigh(matTmat)
    V = np.fliplr(egnvecV)               # eigh sorts ascending, so flip to descending
    VT = V.transpose()
    egnvalV = egnvalV[::-1]
    S = np.zeros(mat.shape)
    for i in range(min(mat.shape)):
        S[i][i] = math.sqrt(max(egnvalV[i], 0.0))  # clip tiny negative values from round-off
    U = mat.dot(V).dot(LA.pinv(S))       # recover U from A = U S V^T
    return U, S, VT
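A quick consistency check for the routine above (assuming `import numpy as np`, `import numpy.linalg as LA`, and `import math`): the factors should reconstruct the input.

A = np.random.rand(5, 3)
U, S, VT = SVD(A)
print(np.allclose(np.dot(np.dot(U, S), VT), A))   # expect True up to round-off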
Example #20
def PCA(data_mat, p):
    'reduce the data dimensionality to p; this function subtracts the \
    mean from the original data'
    d, N=data_mat.shape
    m=matlib.mean(data_mat, 1)
    data_mat-=m
    if d<N:
        AAT=data_mat*data_mat.T
        w, v=linalg.eigh(AAT)
        return v[:,-p:], m
    else:
        ATA=data_mat.T*data_mat
        w, v=linalg.eigh(ATA)
        return data_mat*v[:, -p:], m
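A usage sketch (assuming `from numpy import matlib, linalg`, matching the calls above; the sizes are hypothetical): reduce 50 samples of dimension 10 to 3 components.

data = matlib.rand(10, 50)    # columns are samples (d=10, N=50)
W, m = PCA(data, 3)
projected = W.T * data        # note: PCA already centered `data` in place
print(projected.shape)        # (3, 50)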
Example #21
 def get_msig_pm( self, sctx, eps_app_eng, *args, **kw ):
     sig_eng, D_mtx = self.get_corr_pred( sctx, eps_app_eng, 0, 0, 0 )
     t_field = zeros( 9 )
     shape = sig_eng.shape[0]
     if shape == 3:
         s_mtx = self.map_sig_eng_to_mtx( sig_eng )
         m_sig = linalg.eigh( s_mtx )
         if m_sig[0][-1] > 0:
             t_field[0] = m_sig[0][-1]  # biggest positive stress
     elif shape == 6:
         s_mtx = self.map_sig_eng_to_mtx( sig_eng )
         m_sig = linalg.eigh( s_mtx )
         if m_sig[0][-1] > 0:
             t_field[0] = m_sig[0][-1]
     return t_field
Example #22
    def calculate_edf(self, useibl=True):
        """Calculate the coefficients b_il in the expansion of the EDF.

        ``|phi_l> = sum_i b_il |f^u_i>``, in terms of ``|f^u_i> = P^u|f_i>``.

        To use the infinite band limit set useibl=True.
        N is the total number of bands to use.
        """
        
        for k, L in enumerate(self.L_k):
            if L == 0:
                raise NotImplementedError('L_k=0 for k=%i. Not implemented' % k)
        
        self.Vo_kni = [V_ni[:M] for V_ni, M in zip(self.V_kni, self.M_k)]
        
        self.Fo_kii = np.asarray([np.dot(dagger(Vo_ni), Vo_ni) 
                                  for Vo_ni in self.Vo_kni])
        
        if useibl:
            self.Fu_kii = self.s_lcao_kii - self.Fo_kii
        else:
            self.Vu_kni = [V_ni[M:self.N] 
                           for V_ni, M in zip(self.V_kni, self.M_k)]
            self.Fu_kii = np.asarray([np.dot(dagger(Vu_ni), Vu_ni) 
                                     for Vu_ni in self.Vu_kni])
        self.b_kil = [] 
        for Fu_ii, L in zip(self.Fu_kii, self.L_k):
            b_i, b_ii = la.eigh(Fu_ii)
            ls = b_i.real.argsort()[-L:]
            b_il = b_ii[:, ls]  # pick out the eigenvectors with the largest eigenvalues
            normalize2(b_il, Fu_ii)  # normalize the EDF: <phi_l|phi_l> = 1
            self.b_kil.append(b_il)
Example #23
def get_vdw_potential_hessian(atoms, vdw, spectral=False):

    Bx = np.array([[1, 0, 0, -1, 0, 0], [0, 1, 0, 0, -1, 0], [0, 0, 1, 0, 0, -1]])

    i = vdw.atomi
    j = vdw.atomj

    rij = rel_pos_pbc(atoms, i, j)
    dij = linalg.norm(rij)
    eij = rij / dij

    Pij = np.tensordot(eij, eij, axes=0)
    Qij = np.eye(3) - Pij

    Hr = (156.0 * vdw.Aij / dij ** 14 - 42.0 * vdw.Bij / dij ** 8) * Pij + (
        -12.0 * vdw.Aij / dij ** 13 + 6.0 * vdw.Bij / dij ** 7
    ) / dij * Qij

    Hx = np.dot(Bx.T, np.dot(Hr, Bx))

    if spectral:
        eigvals, eigvecs = linalg.eigh(Hx)
        D = np.diag(np.abs(eigvals))
        U = eigvecs
        Hx = np.dot(U, np.dot(D, np.transpose(U)))

    vdw.r = dij

    return i, j, Hx
Example #24
def mpower(M, p):
    """
    Matrix power M**p, works for Hermitian matrices
    """
    e, EV = linalg.eigh(M)
    # reconstruct as EV . diag(e**p) . EV^T; the columns of EV are the eigenvectors,
    # so EV must be on the left (the original had the transposes swapped)
    return dot(EV, dot(diag((e + 0j)**p), transp(EV)))
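A sanity check for mpower (assuming `from numpy import dot, diag` plus `import numpy as np`, and `transp` as a transpose alias, per the body): the half power should square back to M.

M = np.array([[2.0, 1.0], [1.0, 3.0]])   # symmetric positive definite
R = mpower(M, 0.5)
print(np.allclose(dot(R, R), M))         # expect True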
Example #25
def get_coulomb_potential_hessian(atoms, coulomb, spectral=False):

    Bx = np.array([[1, 0, 0, -1, 0, 0], [0, 1, 0, 0, -1, 0], [0, 0, 1, 0, 0, -1]])

    i = coulomb.atomi
    j = coulomb.atomj

    rij = rel_pos_pbc(atoms, i, j)
    dij = linalg.norm(rij)
    eij = rij / dij

    Pij = np.tensordot(eij, eij, axes=0)
    Qij = np.eye(3) - Pij

    Hr = (2.0 * coulomb.chargeij / dij ** 3) * Pij + (-coulomb.chargeij / dij / dij) / dij * Qij

    Hx = np.dot(Bx.T, np.dot(Hr, Bx))

    if spectral:
        eigvals, eigvecs = linalg.eigh(Hx)
        D = np.diag(np.abs(eigvals))
        U = eigvecs
        Hx = np.dot(U, np.dot(D, np.transpose(U)))

    coulomb.r = dij

    return i, j, Hx
Example #26
 def music_3d(self,complexList, f):
     #Frequency, f in Hertz
     gamma = [-45, 45, 135, 225]
     v = 1500
     lamda = v / f
     d = 0.015
     r = math.sqrt(2 * math.pow(d / 2, 2))
     A = zeros((4, 1), dtype=complex)
     pmusic = zeros((360, 90))
     
     (eigval, eigvec) = LA.eigh(self.computeCovarianceMatrix(complexList))
     Vn = eigvec[:, 0:3]
     
     for theta in range(90):		#Theta is altitude
         for phi in range(360):	#Phi is azimuth
             for i in range(4):	#Hydrophone positions
                 pd = 2*math.pi/lamda*r*math.sin(np.deg2rad(theta))*math.cos(np.deg2rad(phi - gamma[i]))
                 A[i] = cmath.exp((1j)*pd)
         
             Ahat = np.matrix(A)
             num = Ahat.T.conj() * Ahat
             denom = (Ahat.T.conj() * Vn) * (Vn.T.conj() * Ahat)
             pmusic[phi, theta] = num.real / denom.real
             #plot(pmusic[:,theta],theta)
             #writeToFile('pmusic.txt',pmusic[:,theta],theta)
     
     [Music_phiCap,Music_thetaCap] = self.getMax(pmusic)
     rospy.loginfo("Music DOA calculated: " + str(Music_phiCap))    
     rospy.loginfo("Music elevation calculated: " + str(Music_thetaCap))	
     return [Music_phiCap,Music_thetaCap]
Example #27
def get_morse_potential_hessian(atoms, morse, spectral=False):

    Mx = np.array([[1, 0, 0, -1, 0, 0], [0, 1, 0, 0, -1, 0], [0, 0, 1, 0, 0, -1]])

    i = morse.atomi
    j = morse.atomj

    rij = rel_pos_pbc(atoms, i, j)
    dij = linalg.norm(rij)
    eij = rij / dij

    Pij = np.tensordot(eij, eij, axes=0)
    Qij = np.eye(3) - Pij

    exp = np.exp(-morse.alpha * (dij - morse.r0))

    Hr = 2.0 * morse.D * morse.alpha * exp * (morse.alpha * (2.0 * exp - 1.0) * Pij + (1.0 - exp) / dij * Qij)

    Hx = np.dot(Mx.T, np.dot(Hr, Mx))

    if spectral:
        eigvals, eigvecs = linalg.eigh(Hx)
        D = np.diag(np.abs(eigvals))
        U = eigvecs
        Hx = np.dot(U, np.dot(D, np.transpose(U)))

    morse.r = dij

    return i, j, Hx
Example #28
def whiten(df, epsilon=1E-5):
    """
    Takes a data matrix whose rows are samples, as a pandas DataFrame.
    The data are assumed to be normalized.
    """
    from numpy import dot, sqrt, diag
    from numpy.linalg import eigh

    # covariance matrix (samples are in rows, so normalize by the number of rows;
    # the original divided by df.shape[1], the number of columns)
    Xcov = dot(df.T, df) / df.shape[0]

    # eigenvalue decomposition of the covariance matrix
    d, V = eigh(Xcov)

    # an epsilon factor can be used so that eigenvectors associated with
    # small eigenvalues do not get overamplified
    D = diag(1./sqrt(d + epsilon))

    # whitening matrix
    W = dot(dot(V, D), V.T)

    # multiply by the whitening matrix
    X = dot(df, W)

    return X, W
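A minimal sketch of calling whiten (assumes pandas and numpy; the centering step mirrors the normalization the docstring asks for):

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(200, 5))
df = df - df.mean(axis=0)                              # center the columns first
Xw, W = whiten(df)
print(np.cov(np.asarray(Xw), rowvar=False).round(2))   # approximately the identity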
Example #29
	def fit(self, X):
		if self.components==0:
			self.components=X.shape[0]

		self.X = X
		N = X.shape[0]
		K = np.zeros((N,N))
		for row in range(N):
			for col in range(N):
				K[row,col] = self._kernel_func(X[row,:], X[col,:])

		self._K_sum = np.sum(K)
		self._K_cached_sumcols = np.sum(K, axis=0)
		K_c = K - repmat(np.reshape(np.sum(K, axis=1), (N,1)), 1, N)/N - repmat(self._K_cached_sumcols, N, 1)/N + self._K_sum/N**2

		# kernel matrix must be symmetric, so using symmetric matrix eigenvalue solver
		self._eigenvalues, self._eigenvectors = eigh(K_c)
		self._eigenvalues = np.real(self._eigenvalues)
		self._eigenvectors = np.real(self._eigenvectors)
		key = np.argsort(self._eigenvalues)
		key = key[::-1]
		self._eigenvalues = self._eigenvalues[key]
		self._eigenvectors = self._eigenvectors[:,key]
		self.X = self.X[key,:]
		self._K_cached_sumcols = self._K_cached_sumcols[key]
Example #30
def _pca1 (X, verbose=False):
    """
    Simple principal component decomposition (PCA)
    X as Npix by Nt matrix
    X should be normalized and centered beforehand

    returns:
    - EV (Nt by Nesq, esq > 0), matrix of PC 'signals'
      (eigenvectors of the temporal covariance matrix); signals are in columns
    - esq, vector of eigenvalues
    print "Please don't use this, it's not ready"
    #return
    n_data, n_dimension = X.shape # (m x n)
    Y = X - X.mean(axis=0)[np.newaxis,:] # remove mean
    #C = dot(Y, Y.T) # (n x n)  covariance matrix
    C = dot(Y.T, Y)
    print(C.shape)
    es, EV = eigh(C)  # eigenvalues, eigenvectors

    ## take non-negative eigenvalues
    non_neg, = where(es>=0)
    neg, = where(es<0)
    if len(neg)>0:
        if verbose:
            print("pca1: Warning, C has %d negative eigenvalues" % len(neg))
        es = es[non_neg]
        EV = EV[:,non_neg]
    #tmp = dot(Y.T, EV).T
    #V1 = tmp[::-1]
    #S1 = sqrt(es)[::-1]
    return EV
Example #31
def svd_wrapper(matrix,
                mode,
                ncomp,
                debug,
                verbose,
                usv=False,
                random_state=None,
                to_numpy=True):
    """ Wrapper for different SVD libraries (CPU and GPU). 
      
    Parameters
    ----------
    matrix : array_like, 2d
        2d input matrix.
    mode : {'lapack', 'arpack', 'eigen', 'randsvd', 'cupy', 'eigencupy',
            'randcupy', 'pytorch', 'eigenpytorch', 'randpytorch'}, str, optional
        Switch for the SVD method/library to be used. ``lapack`` uses the LAPACK 
        linear algebra library through Numpy and it is the most conventional way 
        of computing the SVD (deterministic result computed on CPU). ``arpack`` 
        uses the ARPACK Fortran libraries accessible through Scipy (computation
        on CPU). ``eigen`` computes the singular vectors through the 
        eigendecomposition of the covariance M.M' (computation on CPU).
        ``randsvd`` uses the randomized_svd algorithm implemented in Sklearn 
        (computation on CPU). ``cupy`` uses the Cupy library for GPU computation
        of the SVD as in the LAPACK version. ``eigencupy`` offers the same 
        method as with the ``eigen`` option but on GPU (through Cupy). 
        ``randcupy`` is an adaptation of the randomized_svd algorithm, where all
        the computations are done on a GPU (through Cupy). ``pytorch`` uses the
        Pytorch library for GPU computation of the SVD. ``eigenpytorch`` offers
        the same method as with the ``eigen`` option but on GPU (through
        Pytorch). ``randpytorch`` is an adaptation of the randomized_svd
        algorithm, where all the linear algebra computations are done on a GPU
        (through Pytorch).
    ncomp : int
        Number of singular vectors to be obtained. In the cases when the full
        SVD is computed (LAPACK, ARPACK, EIGEN, CUPY), the matrix of singular 
        vectors is truncated. 
    debug : bool
        If True the explained variance ratio is computed and displayed.
    verbose: bool
        If True intermediate information is printed out.
    usv : bool, optional
        If True the 3 terms of the SVD factorization are returned.
    random_state : int, RandomState instance or None, optional
        If int, random_state is the seed used by the random number generator.
        If RandomState instance, random_state is the random number generator.
        If None, the random number generator is the RandomState instance used
        by np.random. Used for ``randsvd`` mode.
    to_numpy : bool, optional
        If True (by default) the arrays computed in GPU are transferred from
        VRAM and converted to numpy ndarrays.

    Returns
    -------
    V : array_like
        The right singular vectors of the input matrix. If ``usv`` is True it
        returns the left and right singular vectors and the singular values of
        the input matrix.
    
    References
    ----------
    * For ``lapack`` SVD mode see:
        https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.linalg.svd.html
        http://www.netlib.org/lapack/
    * For ``eigen`` mode see:
        https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.linalg.eigh.html
    * For ``arpack`` SVD mode see:
        https://docs.scipy.org/doc/scipy-0.19.1/reference/generated/scipy.sparse.linalg.svds.html
        http://www.caam.rice.edu/software/ARPACK/
    * For ``randsvd`` SVD mode see:
        https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py
        Finding structure with randomness: Stochastic algorithms for constructing
        approximate matrix decompositions
        Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
    * For ``cupy`` SVD mode see:
        https://docs-cupy.chainer.org/en/stable/reference/generated/cupy.linalg.svd.html
    * For ``eigencupy`` mode see:
        https://docs-cupy.chainer.org/en/master/reference/generated/cupy.linalg.eigh.html
    * For ``pytorch`` SVD mode see:
        http://pytorch.org/docs/master/torch.html#torch.svd
    * For ``eigenpytorch`` mode see:
        http://pytorch.org/docs/master/torch.html#torch.eig

    """
    def reconstruction(ncomp, U, S, V, var=1):
        if mode == 'lapack':
            rec_matrix = np.dot(U[:, :ncomp],
                                np.dot(np.diag(S[:ncomp]), V[:ncomp]))
            rec_matrix = rec_matrix.T
            print('  Matrix reconstruction with {} PCs:'.format(ncomp))
            print('  Mean Absolute Error =', MAE(matrix, rec_matrix))
            print('  Mean Squared Error =', MSE(matrix, rec_matrix))

            # see https://github.com/scikit-learn/scikit-learn/blob/c3980bcbabd9d2527548820581725df2904e4a0d/sklearn/decomposition/pca.py
            exp_var = (S**2) / (S.shape[0] - 1)
            full_var = np.sum(exp_var)
            explained_variance_ratio = exp_var / full_var  # % of variance explained by each PC
            ratio_cumsum = np.cumsum(explained_variance_ratio)
        elif mode == 'eigen':
            exp_var = (S**2) / (S.shape[0] - 1)
            full_var = np.sum(exp_var)
            explained_variance_ratio = exp_var / full_var  # % of variance explained by each PC
            ratio_cumsum = np.cumsum(explained_variance_ratio)
        else:
            rec_matrix = np.dot(U, np.dot(np.diag(S), V))
            print('  Matrix reconstruction MAE =', MAE(matrix, rec_matrix))
            exp_var = (S**2) / (S.shape[0] - 1)
            full_var = np.var(matrix, axis=0).sum()
            explained_variance_ratio = exp_var / full_var  # % of variance explained by each PC
            if var == 1:
                pass
            else:
                explained_variance_ratio = explained_variance_ratio[::-1]
            ratio_cumsum = np.cumsum(explained_variance_ratio)
            msg = '  This info makes sense when the matrix is mean centered '
            msg += '(temp-mean scaling)'
            print(msg)

        lw = 2
        alpha = 0.4
        fig = plt.figure(figsize=vip_figsize)
        fig.subplots_adjust(wspace=0.4)
        ax1 = plt.subplot2grid((1, 3), (0, 0), colspan=2)
        ax1.step(range(explained_variance_ratio.shape[0]),
                 explained_variance_ratio,
                 alpha=alpha,
                 where='mid',
                 label='Individual EVR',
                 lw=lw)
        ax1.plot(ratio_cumsum,
                 '.-',
                 alpha=alpha,
                 label='Cumulative EVR',
                 lw=lw)
        ax1.legend(loc='best', frameon=False, fontsize='medium')
        ax1.set_ylabel('Explained variance ratio (EVR)')
        ax1.set_xlabel('Principal components')
        ax1.grid(linestyle='solid', alpha=0.2)
        ax1.set_xlim(-10, explained_variance_ratio.shape[0] + 10)
        ax1.set_ylim(0, 1)

        trunc = 20
        ax2 = plt.subplot2grid((1, 3), (0, 2), colspan=1)
        # plt.setp(ax2.get_yticklabels(), visible=False)
        ax2.step(range(trunc),
                 explained_variance_ratio[:trunc],
                 alpha=alpha,
                 where='mid',
                 lw=lw)
        ax2.plot(ratio_cumsum[:trunc], '.-', alpha=alpha, lw=lw)
        ax2.set_xlabel('Principal components')
        ax2.grid(linestyle='solid', alpha=0.2)
        ax2.set_xlim(-2, trunc + 2)
        ax2.set_ylim(0, 1)

        msg = '  Cumulative explained variance ratio for {} PCs = {:.5f}'
        # plt.savefig('figure.pdf', dpi=300, bbox_inches='tight')
        print(msg.format(ncomp, ratio_cumsum[ncomp - 1]))

    # --------------------------------------------------------------------------

    if matrix.ndim != 2:
        raise TypeError('Input matrix is not a 2d array')

    if usv:
        if mode not in ('lapack', 'arpack', 'randsvd', 'cupy', 'randcupy',
                        'pytorch', 'randpytorch'):
            msg = "Returning USV is supported with modes lapack, arpack, "
            msg += "randsvd, cupy, randcupy, pytorch or randpytorch"
            raise ValueError(msg)

    if ncomp > min(matrix.shape[0], matrix.shape[1]):
        msg = '{} PCs cannot be obtained from a matrix with size [{},{}].'
        msg += ' Increase the size of the patches or request fewer PCs'
        raise RuntimeError(msg.format(ncomp, matrix.shape[0], matrix.shape[1]))

    if mode == 'eigen':
        # building the covariance as np.dot(matrix.T,matrix) is slower and takes more memory
        C = np.dot(matrix, matrix.T)  # covariance matrix
        e, EV = linalg.eigh(C)  # eigenvalues and eigenvectors
        pc = np.dot(EV.T, matrix)  # PCs using a compact trick when cov is MM'
        V = pc[::-1]  # reverse since last eigenvectors are the ones we want
        S = np.sqrt(e)[::-1]  # reverse since eigenvalues are in increasing order
        if debug:
            reconstruction(ncomp, None, S, None)
        for i in range(V.shape[1]):
            V[:, i] /= S  # scaling by the square root of eigenvalues
        V = V[:ncomp]
        if verbose:
            print('Done PCA with numpy linalg eigh functions')

    elif mode == 'lapack':
        # n_frames is usually smaller than n_pixels. In this setting taking the SVD of M'
        # and keeping the left (transposed) SVs is faster than taking the SVD of M (right SVs)
        U, S, V = linalg.svd(matrix.T, full_matrices=False)
        if debug:
            reconstruction(ncomp, U, S, V)
        V = V[:ncomp]  # we cut projection matrix according to the # of PCs
        U = U[:, :ncomp]
        S = S[:ncomp]
        if verbose:
            print('Done SVD/PCA with numpy SVD (LAPACK)')

    elif mode == 'arpack':
        U, S, V = svds(matrix, k=ncomp)
        if debug:
            reconstruction(ncomp, U, S, V, -1)
        if verbose:
            print('Done SVD/PCA with scipy sparse SVD (ARPACK)')

    elif mode == 'randsvd':
        U, S, V = randomized_svd(matrix,
                                 n_components=ncomp,
                                 n_iter=2,
                                 transpose='auto',
                                 random_state=random_state)
        if debug:
            reconstruction(ncomp, U, S, V)
        if verbose:
            print('Done SVD/PCA with randomized SVD')

    elif mode == 'cupy':
        if no_cupy:
            raise RuntimeError('Cupy is not installed')
        a_gpu = cupy.array(matrix)
        a_gpu = cupy.asarray(a_gpu)  # move the data to the current device
        u_gpu, s_gpu, vh_gpu = cupy.linalg.svd(a_gpu,
                                               full_matrices=True,
                                               compute_uv=True)
        V = vh_gpu[:ncomp]
        if to_numpy:
            V = cupy.asnumpy(V)
        if usv:
            S = s_gpu[:ncomp]
            if to_numpy:
                S = cupy.asnumpy(S)
            U = u_gpu[:, :ncomp]
            if to_numpy:
                U = cupy.asnumpy(U)
        if verbose:
            print('Done SVD/PCA with cupy (GPU)')

    elif mode == 'randcupy':
        if no_cupy:
            raise RuntimeError('Cupy is not installed')
        U, S, V = randomized_svd_gpu(matrix, ncomp, n_iter=2, lib='cupy')
        if to_numpy:
            V = cupy.asnumpy(V)
            S = cupy.asnumpy(S)
            U = cupy.asnumpy(U)
        if debug:
            reconstruction(ncomp, U, S, V)
        if verbose:
            print('Done randomized SVD/PCA with cupy (GPU)')

    elif mode == 'eigencupy':
        if no_cupy:
            raise RuntimeError('Cupy is not installed')
        a_gpu = cupy.array(matrix)
        a_gpu = cupy.asarray(a_gpu)  # move the data to the current device
        C = cupy.dot(a_gpu, a_gpu.T)  # covariance matrix
        e, EV = cupy.linalg.eigh(C)  # eigenvalues and eigenvectors
        pc = cupy.dot(EV.T, a_gpu)  # PCs using a compact trick when cov is MM'
        V = pc[::-1]  # reverse since last eigenvectors are the ones we want
        S = cupy.sqrt(e)[::-1]  # reverse since eigenvalues are in increasing order
        if debug:
            reconstruction(ncomp, None, S, None)
        for i in range(V.shape[1]):
            V[:, i] /= S  # scaling by the square root of eigenvalues
        V = V[:ncomp]
        if to_numpy:
            V = cupy.asnumpy(V)
        if verbose:
            print('Done PCA with cupy eigh function (GPU)')

    elif mode == 'pytorch':
        if no_torch:
            raise RuntimeError('Pytorch is not installed')
        a_gpu = torch.Tensor.cuda(torch.from_numpy(matrix.astype('float32').T))
        u_gpu, s_gpu, vh_gpu = torch.svd(a_gpu)
        V = vh_gpu[:ncomp]
        S = s_gpu[:ncomp]
        U = torch.transpose(u_gpu, 0, 1)[:ncomp]
        if to_numpy:
            V = np.array(V)
            S = np.array(S)
            U = np.array(U)
        if verbose:
            print('Done SVD/PCA with pytorch (GPU)')

    elif mode == 'eigenpytorch':
        if no_torch:
            raise RuntimeError('Pytorch is not installed')
        a_gpu = torch.Tensor.cuda(torch.from_numpy(matrix.astype('float32')))
        C = torch.mm(a_gpu, torch.transpose(a_gpu, 0, 1))
        e, EV = torch.eig(C, eigenvectors=True)
        V = torch.mm(torch.transpose(EV, 0, 1), a_gpu)
        S = torch.sqrt(e[:, 0])
        if debug:
            reconstruction(ncomp, None, S, None)
        for i in range(V.shape[1]):
            V[:, i] /= S
        V = V[:ncomp]
        if to_numpy:
            V = np.array(V)
        if verbose:
            print('Done PCA with pytorch eig function')

    elif mode == 'randpytorch':
        if no_torch:
            raise RuntimeError('Pytorch is not installed')
        U, S, V = randomized_svd_gpu(matrix, ncomp, n_iter=2, lib='pytorch')
        if to_numpy:
            V = np.array(V)
            S = np.array(S)
            U = np.array(U)
        if debug:
            reconstruction(ncomp, U, S, V)
        if verbose:
            print('Done randomized SVD/PCA with randomized pytorch (GPU)')

    else:
        raise ValueError('The SVD mode is not available')

    if usv:
        if mode == 'lapack':
            return V.T, S, U.T
        elif mode == 'pytorch':
            if to_numpy:
                return V.T, S, U.T
            else:
                return torch.transpose(V, 0, 1), S, torch.transpose(U, 0, 1)
        else:
            return U, S, V
    else:
        if mode == 'lapack':
            return U.T
        elif mode == 'pytorch':
            return U
        else:
            return V
Example #32
            -0.0000000, 0.0000000, 0.0000000, -0.0000000, 1.0000000,
            -0.0000000, -0.0000000
        ],
        [
            0.0384056, 0.3861388, 0.2684382, 0.2097269, -0.0000000, 1.0000000,
            0.1817599
        ],
        [
            0.0384056, 0.3861388, -0.2684382, 0.2097269, -0.0000000, 0.1817599,
            1.0000000
        ]]

w, v = LA.eig(mat)
print(w)
print_mat(v)
w, v = LA.eigh(mat)
print(w)
print_mat(v)
for i in range(len(w)):
    print(w[i])
    print(v[i])
w, v = LA.eigh(mat)
for i in range(len(w)):
    print(w[i])
    print(v[i])
v2 = np.transpose(v)
vv = np.matmul(v2, v)
print_mat(vv)
w3, v3 = LA.eig(vv)
print(w3)
print_mat(v3)
Example #33
def getPCAVideo(I):
    ICov = I.dot(I.T)
    [lam, V] = linalg.eigh(ICov)
    lam[lam < 0] = 0
    V = V * np.sqrt(lam[None, :])
    return V
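A brief sketch for getPCAVideo (assuming `import numpy as np` and a `linalg` module providing `eigh`, e.g. scipy's, per the call): each row of the result embeds one frame.

I = np.random.rand(20, 5000)   # 20 frames, 5000 pixels each
V = getPCAVideo(I)
print(V.shape)                 # (20, 20): one PCA embedding per frame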
Example #34
    def AF_MUSIC2D(self,
                   focusing_freq=-1,
                   signals=1,
                   xrange=(-50, 50),
                   yrange=(-50, 50),
                   xstep=False,
                   ystep=False,
                   colormap="gist_heat",
                   shw=True,
                   block_run=True,
                   no_fig=False,
                   chunks=10):
        """Displays a heatmap for visual inspection of AF-MUSIC-based location estimation.

        Generates a grid of provided dimension/resolution, and evaluates the AF-MUSIC algorithm at
        each point on the grid.

        Arguments:
            focusing_freq (float): The frequency (in Hz) at which to perform the calculation. If <0, will default to 0.9*(spatial Nyquist frequency)
            signals (int): The number of signals to locate.
            xrange (float, float): The lower and upper bound in the x-direction.
            yrange (float, float): The lower and upper bound in the y-direction.
            xstep (float): If given, determines the size of the steps in the x-direction. Otherwise defaults to 1000 steps.
            ystep (float): If given, determines the size of the steps in the y-direction. Otherwise defaults to 1000 steps.
            colormap (str): The colour map for the heatmap. See https://matplotlib.org/examples/color/colormaps_reference.html
            shw (bool): If False, return the axis object rather than display.
            block_run (bool): Pause execution of the file while the figure is open? Set to True for running in the command-line.
            no_fig (bool): If True, return the heatmap grid rather than plot it.

        Returns:
            np.array: Returns EITHER the current (filled) heatmap domain if no_fig == True, OR a handle to the displayed figure.
        """
        raise NotImplementedError

        self.dataFFT = fft_pack.rfft(self.data, axis=0).T
        self.numbins = self.dataFFT.shape[1]
        pos = fft_pack.rfftfreq(self.data.shape[0]) * self.sample_rate

        if focusing_freq < 0:
            focusing_freq = self.spatial_nyquist_freq * 0.45
            # focusing_freq = pos[np.argmax(self.dataFFT[0, :])]
        else:
            focusing_freq = 1000

        idxs = np.array(np.arange(pos.shape[0]))

        refidx = np.argmin(abs(pos - focusing_freq))
        Rcoh = np.zeros((self.dataFFT.shape[0], self.dataFFT.shape[0]),
                        dtype='complex128')
        ul, self.Uf0 = la.eigh(
            dot(self.dataFFT[:, refidx:refidx + 1],
                self.dataFFT[:, refidx:refidx + 1].conj().T) /
            self.dataFFT.shape[0])
        ul = ul.real
        self.Uf0 = self.Uf0[:, argsort(abs(ul))[::-1]]

        pool = Pool(processes=7)
        res = pool.map(self._UfitoRyy_, idxs)
        pool.close()
        for r in res:
            Rcoh += r

        if xstep and ystep:
            xdom = np.linspace(start=xrange[0],
                               stop=xrange[1],
                               num=int((xrange[1] - xrange[0]) // xstep))
            ydom = np.linspace(start=yrange[0],
                               stop=yrange[1],
                               num=int((yrange[1] - yrange[0]) // ystep))
        else:
            xdom = np.linspace(start=xrange[0], stop=xrange[1], num=1000)
            ydom = np.linspace(start=yrange[0], stop=yrange[1], num=1000)
        self._hm_domain_ = np.zeros((len(ydom), len(xdom)))

        xdom, ydom = np.meshgrid(xdom, ydom)
        self._hm_domain_ = self._MUSIC2D_((pos[refidx], refidx),
                                          xdom,
                                          ydom,
                                          numsignals=signals,
                                          SI=Rcoh)

        if no_fig:
            return self._hm_domain_

        f = plt.figure()
        plt.imshow(self._hm_domain_,
                   cmap=colormap,
                   interpolation='none',
                   origin='lower',
                   extent=[xrange[0], xrange[1], yrange[0], yrange[1]])
        plt.colorbar()
        plt.xlabel("Horiz. Dist. from Center of Array [m]")
        plt.ylabel("Vert. Dist. from Center of Array [m]")

        if shw:
            plt.show(block=block_run)
            return
        else:
            return f
Example #35
    # Do global fit on all data
    pg_1step = get_params_global(data)
    print "     Parameters from 1 step global fitting (D0, K0, D1, K1)=\n",\
          pg_1step

    # Likelihood of fits
    p1 = lnlike(pg_1step, data)
    p2 = lnlike(pg_2step, data)
    print "   Likelihood for fits: 2step=", p2, " 1step=", p1

    # Evaluate Hessian by FD
    FDH = FD_Hessian(lnlike, pg_2step, fcnargs=[data])
    print "Hesssian: ", FDH
    Hinv = LA.inv(FDH)
    evals, evecs = LA.eigh(Hinv)
    print "evals:", evals
    print "evecs:", evecs

    samples = np.random.multivariate_normal(pg_2step, Hinv, 50000)
    fig = triangle.corner(samples, labels=["D0", "K0", "D1", "K1"])
    plt.savefig("triangle_Hinv.png")
    plt.close()

    # Now reweight the samples taking into account the likelihood of exp. 3
    F0 = np.zeros(len(samples))
    F1 = np.zeros(len(samples))
    w = np.zeros(len(samples))
    #from scipy.stats import multivariate_normal
    #NOS = len(samples)
    #i = 0
Example #36
def mmr_multic_label(ilabmode,Y,X,kk,lpar):
## It labels the outputs for multiclass classification
## Input:    ilabmode  labeling mode
##                 =0 indicators
##                 =1 class mean
##                 =2 class median
##                 =3 tetrahedron
##                 =31 weighted simplex  
##           Y     output categories 
##                 column vector with components =1,...,kk  
##           X     corresponding input vectors, 
##                 the rows contain the input vectors
##           kk    number of possible categories
##           lpar  optional parameter used by method 31
## Output:   YL  label vectors in its rows to all sample items
##           Y0  all possible labels, it has kk rows and in the rows the
##               possible labels  

## number of items and input dimension  
  (m,nx)=X.shape
  if ilabmode==0:
## the indicator case for multiclass learning
    Y0=eye(kk)
    ## setting the label vectors 
    YL=zeros((m,kk))
    for i in range(m):
      YL[i,Y[i]]=1
  elif ilabmode==1:
## class mean
    Y0=zeros((kk,nx))
    xmm=zeros((kk,nx))
    xnn=zeros(kk)
    for i in range(m):
      iy=Y[i]
      xmm[iy,:]=xmm[iy,:]+X[i,:]
      xnn[iy]+=1
    for k in range(kk):
      if xnn[k]>0:
        Y0[k,:]=xmm[k,:]/xnn[k]
    YL=zeros((m,nx))
    for i in range(m):
      YL[i,:]=Y0[Y[i],:]
  elif ilabmode==2:
## class median
    Y0=zeros((kk,nx))
    for k in range(kk):
      inx=where(Y==k)[0]
      if len(inx)>0:
        xmm=median(X[inx,:],axis=0)
        Y0[k,:]=xmm/sqrt(sum(xmm**2))
    YL=zeros((m,nx))
    for i in range(m):
      YL[i,:]=Y0[Y[i],:]
  elif ilabmode==3:
## tetrahedron, minimum correlation
    Y0=eye(kk)
    Y0=Y0+Y0/(kk-1)-ones((kk,kk))/(kk-1)
    (S,U)=linalg.eigh(Y0)
    SS=dot(U,diag(sqrt(abs(S))))
    ix=argmin(S)
    Y0=zeros((kk,kk-1))
    j=0
    for k in range(kk):
      if k!=ix:
        Y0[:,j]=SS[:,k]
        j+=1
    YL=zeros((m,kk-1))
    for i in range(m):
      YL[i,:]=Y0[Y[i],:]
  elif ilabmode==31:
    if kk>1:
      lpar=float(1)/(kk-1)
    else:
      lpar=1.0
    Y0=(1+lpar)*eye(kk)-lpar*ones((kk,kk))
    YL=zeros((m,kk))
    for i in range(m):
      YL[i,:]=Y0[Y[i,0],:]
  else:
    pass
  
  return(YL,Y0)
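A small illustration of the indicator mode of mmr_multic_label (assuming `from numpy import *`, per the body; the data is hypothetical):

Y = array([0, 2, 1, 2])                       # categories of 4 samples
X = zeros((4, 2))                             # inputs are unused by mode 0
YL, Y0 = mmr_multic_label(0, Y, X, 3, None)
print(YL)                                     # one-hot rows: [[1,0,0],[0,0,1],[0,1,0],[0,0,1]]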
Example #37
def debug(coord1, coord2):
    natom = len(coord1)
    com1 = numpy.mean(coord1, 0)
    coord1 -= com1
    com2 = numpy.mean(coord2, 0)
    coord2 -= com2

    cormat = numpy.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
    for i in range(natom):
        cormat += numpy.outer(coord1[i], coord2[i])

    #print cormat
    Fmat = [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0]]
    Fmat[0][0] = cormat[0][0] + cormat[1][1] + cormat[2][2]
    Fmat[0][1] = cormat[1][2] - cormat[2][1]
    Fmat[0][2] = cormat[2][0] - cormat[0][2]
    Fmat[0][3] = cormat[0][1] - cormat[1][0]
    Fmat[1][1] = cormat[0][0] - cormat[1][1] - cormat[2][2]
    Fmat[1][2] = cormat[0][1] + cormat[1][0]
    Fmat[1][3] = cormat[0][2] + cormat[2][0]
    Fmat[2][2] = -cormat[0][0] + cormat[1][1] - cormat[2][2]
    Fmat[2][3] = cormat[1][2] + cormat[2][1]
    Fmat[3][3] = -cormat[0][0] - cormat[1][1] + cormat[2][2]

    for i in range(4):
        for j in range(i):
            Fmat[i][j] = Fmat[j][i]

    Fmat = numpy.array(Fmat)
    #print Fmat
    #print linalg.eigh(Fmat)

    (eigval, eigvec) = linalg.eigh(Fmat)
    #print eigvec[3]

    diag_error = numpy.dot(Fmat, eigvec[:, 3]) - eigval[3] * eigvec[:, 3]
    print(diag_error)

    quat = eigvec[:, 3]
    q0 = quat[0]
    q1 = quat[1]
    q2 = quat[2]
    q3 = quat[3]

    rot_mat = numpy.zeros((3, 3))
    # Well, this format is used for atom coord as column, we need a transpose
    rot_mat[0][0] = q0**2 + q1**2 - q2**2 - q3**2
    rot_mat[0][1] = 2.0 * (q1 * q2 - q0 * q3)
    rot_mat[0][2] = 2.0 * (q1 * q3 + q0 * q2)
    rot_mat[1][0] = 2.0 * (q1 * q2 + q0 * q3)
    rot_mat[1][1] = q0**2 - q1**2 + q2**2 - q3**2
    rot_mat[1][2] = 2.0 * (q2 * q3 - q0 * q1)
    rot_mat[2][0] = 2.0 * (q1 * q3 - q0 * q2)
    rot_mat[2][1] = 2.0 * (q2 * q3 + q0 * q1)
    rot_mat[2][2] = q0**2 - q1**2 - q2**2 + q3**2
    rot_mat = rot_mat.transpose()

    coord_rot = numpy.dot(coord1, rot_mat)
    print(coord1)
    print(coord2)
    print(coord_rot)
    print(linalg.norm(coord1 - coord2))
    print(linalg.norm(coord_rot - coord2))
Example #38
        G = snp_data.val

        K = G.dot(G.T)
        K /= K.diagonal().mean()
    else:
        K = snp_intersect.kernel(standardizer=standardizer,
                                 blocksize=blocksize)
        K /= K.diagonal().mean()
    t1 = time.time()
    print("done building kernel after %.4fs" % (t1 - t0))

    if 0:
        print("computing Eigenvalue decomposition of K")
        t0 = time.time()
        S, U = la.eigh(K)
        t1 = time.time()
        print("done computing eigenvalue decomposition of kernel after %.4fs" %
              (t1 - t0))

    if 1:
        print("running GWAS")
        t0 = time.time()
        if 1:  #LMM with pre-built kernel K
            mygwas = GWAS(K=K,
                          snps_K=None,
                          snps_test=snp_intersect,
                          phenotype=pheno,
                          covariates=covariates,
                          h2=None,
                          interact_with_snp=None,
Example #39
def wigner_distribution(x0,
                        hess,
                        masses,
                        zero_threshold=1.0e-9,
                        is_molecule=True):
    """
    compute wigner distribution for quadratic potential. 

    The hessian can have zero vibrational frequencies (translation and rotation) which
    would result in a very broad wave packet along those zero modes. Therefore only
    the non-zero modes are transformed to the Wigner representation while the zero modes
    are constrained by delta functions:

      W(Q1,...,QN;P1,...,PN) ~ Prod(vib.modes i) exp(-Omega_i*Qi^2 - 1/Omega_i * Pi^2)
                              *Prod(zero modes j) delta(Qj)*delta(Pj)


    Returns:
    ========
    Aw, Bw
    """
    # convert Hessian to mass-weighted coordinates
    hess_mwc = hess / np.sqrt(np.outer(masses, masses))
    # mass weighted coordinates are now qi = sqrt(mi) dxi
    # compute eigen values of hess_mwc
    omega2, modes = la.eigh(hess_mwc)

    # modes that are zero within numerical accuracy
    zero_modes = np.where((omega2 < zero_threshold))[0]
    vib_modes = np.where((omega2 >= zero_threshold))[0]
    Nzero = len(zero_modes)

    if is_molecule:
        Nzero_expected = expected_zero_modes(x0, masses)
        if Nzero != Nzero_expected:
            print "WARNING: Expected %d modes with 0 frequency (translation + rotation) but got %d modes!" % (
                Nzero_expected, Nzero)

    Ndim = len(masses)
    Aq = np.zeros(hess.shape)  #, dtype=complex)
    Ap = np.zeros(hess.shape)  #, dtype=complex)

    # vibrational modes
    Msq = np.sqrt(np.outer(masses, masses))
    MsqInv = 1.0 / Msq
    Oi = np.sqrt(omega2[vib_modes])
    Li = modes[:, vib_modes]
    print "Wigner distribution for vibrational modes"
    Oii = np.diag(Oi)
    OiiInv = np.diag(1.0 / Oi)
    Aq = 2 * np.dot(Li, np.dot(Oii, Li.transpose())) * Msq
    Ap = 2 * np.dot(Li, np.dot(OiiInv, Li.transpose())) * MsqInv
    # constrain zero modes by delta-functions
    #   delta(Qi)*delta(Pi) ~ lim_(Oconstr->infty) exp(-Oconstr*(Qi^2 + Pi^2))
    print "delta distribution for zero modes"
    Oconstr = 1.0e6 * np.eye(len(zero_modes))  # very large number, e.g. 1.0e10
    Li = modes[:, zero_modes]
    Aq += np.dot(Li, np.dot(Oconstr, Li.transpose())) * Msq
    Ap += np.dot(Li, np.dot(
        Oconstr, Li.transpose())) * MsqInv * Msq.max() / MsqInv.max()

    Bq = np.dot(x0, Aq)
    Bp = np.zeros(x0.shape)

    Zo = np.zeros((Ndim, Ndim))
    Aw = np.bmat([[Aq, Zo], [Zo, Ap]])
    Aw = np.asarray(Aw)
    Bw = np.hstack([Bq, Bp]).transpose()

    return Aw, Bw
Example #40
def sce(mu, N, k0, mg, k_step, wd, selection, ww):
    if selection == 1:

        k1 = np.arange(-np.pi, np.pi, k_step)
        k2 = 0.
        k3 = 1. * 0.

        mu_matrix = np.empty(shape=(0, 0))
        for ii in range(N):
            mu_matrix = kronsum(mu_matrix, pm(0) * mu[ii])
        kk = np.empty(shape=(1, 0))

        for i, val in enumerate(k1):
            h1 = -(np.cos(k3) - 2. + np.cos(k1[i]) - np.cos(k0) -
                   mg) * pm(1) - np.sin(k3) * pm(3)
            h3 = -pm(2) / (2. * 1j) - pm(1) / 2.
            h2 = pm(2) / (2. * 1j) - pm(1) / 2.
            H = tridiag(h1, h3, h2, N)

            H = H - mu_matrix
            hd = np.zeros((2 * N, 2 * N)) * 1j
            hd[0:2, 2 * N - 2:2 * N] = h2 * np.exp(+1j * k2)
            hd[2 * N - 2:2 * N, 0:2] = h3 * np.exp(-1j * k2)
            H = H + hd
            # print(H)
            # print('la')
            # print(LA.eig(pm(0)))

            # diagonalize: the eigenvalues are the energies of the system for the given k_1
            w, U = LA.eigh(H)
            U = U[(w > -wd) & (w < wd), :]
            # print("la")
            # print(w)

            w = w[(w > -wd) & (w < wd)]
            if np.size(w) > 0:
                kk = np.append(kk, [[k1[i]]], 1)
                plt.plot(np.ones((np.size(w))) * k1[i],
                         w,
                         'ko',
                         markersize=0.8)
                plt.plot(k1[i], ww, 'ro', markersize=0.8)
                plt.plot(k1[i], -ww, 'ro', markersize=0.8)

    elif selection == 2:
        k2 = 0.
        k3 = 0.

        # mu=-3.*np.sin(np.pi*np.arange(0,N,1)/(N-1))
        # mu=mu+np.max(-mu)
        # mu=np.ones((N,))
        k1 = np.arange(-np.pi, np.pi, k_step)
        deltam = np.empty(shape=(0, 0))
        for ii in range(N):
            deltam = kronsum(deltam, pm(2) * 1j * 0.06)
        # build the off-diagonal pairing blocks of the Bogoliubov-de Gennes matrix
        delta_up = deltam
        delta_down = delta_up.getH()
        delta_up = np.hstack((np.zeros((2 * N, 2 * N)), delta_up))
        delta_down = np.hstack((delta_down, np.zeros((2 * N, 2 * N))))
        deltam = np.vstack((delta_up, delta_down))

        mu_matrix = np.empty(shape=(0, 0))
        for ii in range(N):
            mu_matrix = kronsum(mu_matrix, pm(0) * mu[ii])
        for i, val in enumerate(k1):
            h1 = -(np.cos(k3) - 2. + np.cos(k1[i]) - np.cos(k0) -
                   mg) * pm(1) - np.sin(k3) * pm(3)
            h3 = -pm(2) / (2. * 1j) - pm(1) / 2.
            h2 = pm(2) / (2. * 1j) - pm(1) / 2.
            H = tridiag(h1, h3, h2, N)

            H = H - mu_matrix
            hd = np.zeros((2 * N, 2 * N), dtype=complex)
            hd[0:2, 2 * N - 2:2 * N] = h2 * np.exp(+1j * k2)
            hd[2 * N - 2:2 * N, 0:2] = h3 * np.exp(-1j * k2)
            H = H + hd
            h1m = -(np.cos(-k3) - 2. + np.cos(-k1[i]) - np.cos(k0) -
                    mg) * pm(1) - np.sin(-k3) * pm(3)
            h3m = -pm(2) / (2. * 1j) - pm(1) / 2.
            h2m = pm(2) / (2. * 1j) - pm(1) / 2.
            Hm = tridiag(h1m, h3m, h2m, N)
            hdm = np.zeros((2 * N, 2 * N), dtype=complex)
            hdm[0:2, 2 * N - 2:2 * N] = h2m * np.exp(-1j * k2)
            hdm[2 * N - 2:2 * N, 0:2] = h3m * np.exp(+1j * k2)
            Hm = Hm - mu_matrix + hdm
            h_up = np.hstack(
                (H, np.zeros((2 * N, 2 * N)))
            )  # the following three lines create a block-diagonal matrix: one block is H, the other is -Hm^T
            h_down = np.hstack((np.zeros((2 * N, 2 * N)), -np.transpose(Hm)))
            h_bog = np.vstack((h_up, h_down))

            h_bog = deltam + h_bog
            w, U = LA.eigh(h_bog)
            wws = np.sqrt(ww**2 + 0.06**2)

            plt.plot(np.ones((np.size(w))) * k1[i] / np.pi,
                     w,
                     'ko',
                     markersize=0.3)
            plt.plot(k1[i], wws, 'ro', markersize=0.8)
            plt.plot(k1[i], -wws, 'ro', markersize=0.8)
            kk = 5

    return kk
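
# The snippet relies on helpers pm, kronsum and tridiag defined elsewhere. The
# definitions below are plausible reconstructions (assumptions, for orientation
# only) that match how the snippet uses them: pm returns Pauli matrices as
# np.matrix (the code calls .getH()), kronsum builds a direct sum, and tridiag
# builds a block-tridiagonal matrix from 2x2 blocks (the sub/superdiagonal
# ordering is guessed from the periodic-boundary terms above).
def pm(i):
    sigmas = [np.eye(2),
              np.array([[0., 1.], [1., 0.]]),    # sigma_x
              np.array([[0., -1j], [1j, 0.]]),   # sigma_y
              np.array([[1., 0.], [0., -1.]])]   # sigma_z
    return np.matrix(sigmas[i])

def kronsum(a, b):
    # direct sum: place b on the block diagonal after a
    out = np.zeros((a.shape[0] + b.shape[0], a.shape[1] + b.shape[1]), dtype=complex)
    out[:a.shape[0], :a.shape[1]] = a
    out[a.shape[0]:, a.shape[1]:] = b
    return np.matrix(out)

def tridiag(diag_block, upper, lower, N):
    # block-tridiagonal matrix of 2x2 blocks
    H = np.matrix(np.zeros((2 * N, 2 * N), dtype=complex))
    for n in range(N):
        H[2 * n:2 * n + 2, 2 * n:2 * n + 2] = diag_block
        if n + 1 < N:
            H[2 * n:2 * n + 2, 2 * n + 2:2 * n + 4] = upper
            H[2 * n + 2:2 * n + 4, 2 * n:2 * n + 2] = lower
    return H
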
Example #41
0
def network_deconvolution(mat, **kwargs):
    """Python implementation/translation of network deconvolution by MIT-KELLIS LAB.

    .. note::
       code author: gidonro [Github username](https://github.com/gidonro/Network-Deconvolution)

       LICENSE: MIT-KELLIS LAB

       AUTHORS:
       Algorithm was programmed by Soheil Feizi.
       Paper authors are S. Feizi, D. Marbach, M. Médard and M. Kellis
       Python implementation: Gideon Rosenthal

       For more details, see the following paper:
       Network Deconvolution as a General Method to Distinguish
       Direct Dependencies over Networks

       By: Soheil Feizi, Daniel Marbach,  Muriel Médard and Manolis Kellis
       Nature Biotechnology

    Args:
         mat (numpy.ndarray): matrix, if it is a square matrix, the program assumes
             it is a relevance matrix where mat(i,j) represents the similarity content
             between nodes i and j. Elements of matrix should be
             non-negative.
         beta (float): Scaling parameter, the program maps the largest absolute eigenvalue
             of the direct dependency matrix to beta. It should be
             between 0 and 1.
         alpha (float): fraction of edges of the observed dependency matrix to be kept in
             deconvolution process.
         control (int): if 0, displaying direct weights for observed
             interactions, if 1, displaying direct weights for both observed and
             non-observed interactions.

    Returns:
        numpy.ndarray: Output deconvolved matrix (direct dependency matrix). Its components
        represent direct edge weights of observed interactions.
        Choosing top direct interactions (a cut-off) depends on the application and
        is not implemented in this code.

     .. note::
        To apply ND on regulatory networks, follow steps explained in Supplementary notes
        1.4.1 and 2.1 and 2.3 of the paper.
        In this implementation, input matrices are made symmetric.
    """
    alpha = kwargs.get('alpha', 1)
    beta = kwargs.get('beta', 0.99)
    control = kwargs.get('control', 0)

    # ToDO : ASSERTS
    try:
        assert 0 < beta < 1
        assert 0 < alpha <= 1

    except AssertionError:
        raise ValueError("alpha must be in ]0, 1] and beta in ]0, 1[")

    #  Processing the input matrix, diagonal values are filtered
    np.fill_diagonal(mat, 0)

    # Thresholding the input matrix
    y = stat.mquantiles(mat[:], prob=[1 - alpha])
    th = mat >= y
    mat_th = mat * th

    # Making the matrix symmetric if it is not already
    mat_th = (mat_th + mat_th.T) / 2

    # Eigen decomposition
    Dv, U = LA.eigh(mat_th)
    D = np.diag(Dv)
    lam_n = np.abs(min(np.min(Dv), 0.0))
    lam_p = np.abs(max(np.max(Dv), 0.0))

    m1 = lam_p * (1 - beta) / beta
    m2 = lam_n * (1 + beta) / beta
    m = max(m1, m2)

    # network deconvolution
    for i in range(D.shape[0]):
        D[i, i] = (D[i, i]) / (m + D[i, i])

    mat_new1 = np.dot(U, np.dot(D, LA.inv(U)))

    # Displaying direct weights

    if control == 0:
        ind_edges = (mat_th > 0) * 1.0
        ind_nonedges = (mat_th == 0) * 1.0
        m1 = np.max(mat * ind_nonedges)
        m2 = np.min(mat_new1)
        mat_new2 = (mat_new1 + max(m1 - m2, 0)) * ind_edges + (mat * ind_nonedges)
    else:
        m2 = np.min(mat_new1)
        mat_new2 = (mat_new1 + max(-m2, 0))

    # linearly mapping the deconvolved matrix to be between 0 and 1
    m1 = np.min(mat_new2)
    m2 = np.max(mat_new2)
    mat_nd = (mat_new2 - m1) / (m2 - m1)

    return mat_nd
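
# A minimal usage sketch on toy data (the matrix below is illustrative, not from
# the paper): deconvolve a 3-node chain a-b-c, where the weak a-c edge is an
# indirect effect that ND should suppress relative to the direct edges.
obs = np.array([[0.0, 0.9, 0.5],
                [0.9, 0.0, 0.9],
                [0.5, 0.9, 0.0]])
direct = network_deconvolution(obs, beta=0.9, alpha=1)
print(direct)  # the (0, 2) entry should come out weaker than the (0, 1) entry
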
Example #42
0
def Heigenvectors(A):
    # eigh assumes A is Hermitian; transposing puts the eigenvectors in the rows
    w, v = linalg.eigh(A)
    return w, transpose(v)
Example #43
0
def feature_extraction(filename):
    #point_cloud = np.loadtxt(filename)
    orig_data = np.fromfile(filename, dtype=np.float32)
    point_cloud = orig_data.reshape((-1, 4))
    new_ind = np.random.choice(point_cloud.shape[0], DOWN_SAMPLING)
    new_ind.sort()
    point_cloud = point_cloud[new_ind]
    features = np.zeros((NUM_FEATURES))
    mod_cck = point_cloud.shape[0]

    # Histogram features
    features[0] = np.max(point_cloud[:, 3])
    features[1] = np.mean(point_cloud[:, 3])
    features[2] = np.var(point_cloud[:, 3])
    l = np.max(point_cloud[:, 0]) - np.min(point_cloud[:, 0])
    w = np.max(point_cloud[:, 1]) - np.min(point_cloud[:, 1])
    h = np.max(point_cloud[:, 2]) - np.min(point_cloud[:, 2])
    features[3] = l * w * h

    lalonde_features = np.zeros((DOWN_SAMPLING, 3))
    # Lalonde features are computed here
    point_cloud = point_cloud[:, :3]
    # For each point in a component obtain the 20 nearest neighbors within radius 0.5m
    # perform eigen decomposition and normalize eigenvalues
    # Calculate L1 = d1, L2 = d1-d2, L3 = d2-d3, where d1 > d2 > d3 are the eigenvalues
    tree = sklearn.neighbors.KDTree(point_cloud)
    for i in range(mod_cck):
        #print("Procssing point number {}".format(i))
        dist, ind = tree.query([point_cloud[i]], k=21)
        dist = dist[0, 1:]
        ind = ind[0, 1:]
        ind = ind[dist <= 0.5]

        if len(ind) > 0:

            cck = (point_cloud[ind, :]).transpose()
            cck[0, :] = cck[0, :] - np.mean(cck[0, :])
            cck[1, :] = cck[1, :] - np.mean(cck[1, :])
            cck[2, :] = cck[2, :] - np.mean(cck[2, :])
            cck_trans = (cck).transpose()
            cov_mat = np.matmul(cck, cck_trans) / len(ind)
            e_vals, e_vecs = LA.eigh(cov_mat)
            d_mat = np.array([e_vals[2], e_vals[1], e_vals[0]])
            dnorm = d_mat / np.sum(d_mat)
            l1 = dnorm[0]
            l2 = dnorm[0] - dnorm[1]
            l3 = dnorm[1] - dnorm[2]
            lalonde_features[i] = [l1, l2, l3]
        else:
            lalonde_features[i] = [0, 0, 0]

    # Calculate the normalized histogram with 4 bins for each Lalonde feature
    features[4] = sum(lalonde_features[:, 0] <= 0.25) / mod_cck
    features[5] = sum((lalonde_features[:, 0] > 0.25)
                      & (lalonde_features[:, 0] <= 0.5)) / mod_cck
    features[6] = sum((lalonde_features[:, 0] > 0.5)
                      & (lalonde_features[:, 0] <= 0.75)) / mod_cck
    features[7] = sum((lalonde_features[:, 0] > 0.75)
                      & (lalonde_features[:, 0] <= 1)) / mod_cck

    features[8] = sum(lalonde_features[:, 1] <= 0.25) / mod_cck
    features[9] = sum((lalonde_features[:, 1] > 0.25)
                      & (lalonde_features[:, 1] <= 0.5)) / mod_cck
    features[10] = sum((lalonde_features[:, 1] > 0.5)
                       & (lalonde_features[:, 1] <= 0.75)) / mod_cck
    features[11] = sum((lalonde_features[:, 1] > 0.75)
                       & (lalonde_features[:, 1] <= 1)) / mod_cck

    features[12] = sum(lalonde_features[:, 2] <= 0.25) / mod_cck
    features[13] = sum((lalonde_features[:, 2] > 0.25)
                       & (lalonde_features[:, 2] <= 0.5)) / mod_cck
    features[14] = sum((lalonde_features[:, 2] > 0.5)
                       & (lalonde_features[:, 2] <= 0.75)) / mod_cck
    features[15] = sum((lalonde_features[:, 2] > 0.75)
                       & (lalonde_features[:, 2] <= 1)) / mod_cck

    # Anguelov features are computed here
    dist = sklearn.metrics.pairwise.pairwise_distances(point_cloud[:, :2])
    print(np.shape(dist))
    print(np.shape(point_cloud[:, :2]))
    # For each point, obtain a right cylindrical region on top of it with radius 0.1 and height 2 m,
    # treating the point as lying on the lower axis of the cylinder.
    # Split the cylindrical region into 3 parts and calculate the fraction of points in each part;
    # these are the three Anguelov features A1, A2 and A3
    anguelov_features = np.zeros((DOWN_SAMPLING, 3))
    for pt in range(mod_cck):
        indexes_circ = np.where(dist[pt, :] < 0.1)
        z_values = point_cloud[indexes_circ[0], 2]
        height_diff = point_cloud[pt, 2] - z_values
        a1 = sum((height_diff >= 0)
                 & (height_diff <= 0.6667)) / len(height_diff)
        a2 = sum((height_diff > 0.6667)
                 & (height_diff <= 1.3334)) / len(height_diff)
        a3 = sum((height_diff > 1.3334)
                 & (height_diff <= 2)) / len(height_diff)
        anguelov_features[pt] = [a1, a2, a3]

    # Calculate the normalized histogram with 4 bins for each Anguelov feature
    features[16] = sum(anguelov_features[:, 0] <= 0.25) / mod_cck
    features[17] = sum((anguelov_features[:, 0] > 0.25)
                       & (anguelov_features[:, 0] <= 0.5)) / mod_cck
    features[18] = sum((anguelov_features[:, 0] > 0.5)
                       & (anguelov_features[:, 0] <= 0.75)) / mod_cck
    features[19] = sum((anguelov_features[:, 0] > 0.75)
                       & (anguelov_features[:, 0] <= 1)) / mod_cck

    features[20] = sum(anguelov_features[:, 1] <= 0.25) / mod_cck
    features[21] = sum((anguelov_features[:, 1] > 0.25)
                       & (anguelov_features[:, 1] <= 0.5)) / mod_cck
    features[22] = sum((anguelov_features[:, 1] > 0.5)
                       & (anguelov_features[:, 1] <= 0.75)) / mod_cck
    features[23] = sum((anguelov_features[:, 1] > 0.75)
                       & (anguelov_features[:, 1] <= 1)) / mod_cck

    features[24] = sum(anguelov_features[:, 2] <= 0.25) / mod_cck
    features[25] = sum((anguelov_features[:, 2] > 0.25)
                       & (anguelov_features[:, 2] <= 0.5)) / mod_cck
    features[26] = sum((anguelov_features[:, 2] > 0.5)
                       & (anguelov_features[:, 2] <= 0.75)) / mod_cck
    features[27] = sum((anguelov_features[:, 2] > 0.75)
                       & (anguelov_features[:, 2] <= 1)) / mod_cck

    return features
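
# Usage sketch; the module-level constants the snippet relies on and the input
# file name are assumptions:
DOWN_SAMPLING = 512  # number of points kept per cloud (assumed value)
NUM_FEATURES = 28    # 4 intensity/extent features + 12 Lalonde + 12 Anguelov bins
feats = feature_extraction("000000.bin")  # hypothetical KITTI-style velodyne file
print(feats.shape)  # -> (28,)
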
Example #44
0
def mmr_normalization(ilocal,iscale,XTrain,XTest,ipar):
## function to normalize the input and the output data
## !!!! the localization happens before normalization if both given !!! 
## input
##      ilocal centralization   
##                  =-1 no localization
##                  =0 mean
##                  =1 median
##                  =2 geometric median
##                  =3 shift by ipar
##                  =4 smallest enclosing ball
##                  =5 row mean row wise 
##      iscale
##                  =-1 no scaling
##                  =0 scale item wise by L2 norm
##                  =1 scale item wise by L1 norm
##                  =2 scale item wise by L_infty norm
##                  =3 scale items by stereographic projection relative to zero
##                  =4 scale variables by STD(standard deviation)
##                  =5 scale variables by MAD(median absolute deviation)
##                  =6 scale variables by absolute deviation
##                  =7 scale all variables by average STD 
##                  =8 scale all variables by maximum STD 
##                  =9 scale all variables by median MAD 
##                  =10 scale item wise by Minkowski norm, power given by ipar
##                  =11 \sum_i||u-x_i||/m where u=0
##                  =12 scale all variables by overall max
##                  =13 Mahalanobis scaling  
##      XTrain       Data matrix which will be normalized. It is assumed the
##                   rows are the sample vectors and the columns are variables 
##      XTest        Data matrix which will be normalized. It is assumed the
##                   rows are the sample vectors and the columns are
##                   variables.
##                   It inherits the center and the scale in the variable-wise
##                   case from the XTrain, otherwise it is normalized independently  
##      ipar         additional parameter   
##  output
##      XTrain       Data matrix which is the result of the normalization
##                   of input XTrain. It is assumed the rows are the sample
##                   vectors and the columns are variables  
##      XTest        Data matrix which is the result of the normalization
##                   of input XTest. It is assumed the rows are the sample
##                   vectors and the columns are variables.
##      opar         the radius in case of ixnorm=2.  
##  
  if XTest is None:
    XTest=array([])
    
  opar=0
  (mtrain,n)=XTrain.shape
  if len(XTest.shape)>=2:
    mtest=XTest.shape[0]
  elif len(XTest.shape)==1:
    mtest=XTest.shape[0]
    XTest=XTest.reshape((1,mtest))
  else:
    mtest=0
    XTest=array([])

  if ilocal==-1:
    pass
  elif ilocal==0:   ##  mean
    xcenter=mean(XTrain,axis=0)
  elif ilocal==1:   ##  median
    xcenter=median(XTrain,axis=0)
  elif ilocal==2:    ##  geometric median
    xcenter=mmr_geometricmedian(XTrain)[0]
  elif ilocal==3:    ##  shift by ipar
    xcenter=ipar
  elif ilocal==4:   ##  smallest enclosing ball
    xalpha=mmr_outerball(0,XTrain)
    xcenter=dot(XTrain.T,xalpha)
  elif ilocal==5:   ## row mean row wise
    xcenter=mean(XTrain,axis=1)

  if ilocal in (0,1,2,3,4):
    XTrain=XTrain-tile(xcenter,(mtrain,1))
    if mtest>0:
      XTest=XTest-tile(xcenter,(mtest,1))
  elif ilocal==5:
    XTrain=XTrain-outer(xcenter,ones(n))
    if mtest>0:
      xcenter=mean(XTest,axis=1)
      XTest=XTest-outer(xcenter,ones(n))

## itemwise normalizations
  if iscale==-1:
    pass
  elif iscale==0:     ## scale items by L2 norm
    xscale_tra=sqrt(np_sum(XTrain**2,axis=1))
    if mtest>0:
      xscale_tes=sqrt(np_sum(XTest**2,axis=1))
  elif iscale==1:     ## scale items by L1 norm
    xscale_tra=np_sum(abs(XTrain),axis=1)
    if mtest>0:
      xscale_tes=np_sum(abs(XTest),axis=1)
  elif iscale==2:     ## scale items by L_infty norm
    xscale_tra=np_max(abs(XTrain),axis=1)
    if mtest>0:
      xscale_tes=np_max(abs(XTest),axis=1)
  elif iscale==10:     ## scale items by Minkowski norm, power given by ipar
    xscale_tra=np_sum(abs(XTrain)**ipar,axis=1)**(1/ipar)
    if mtest>0:
      xscale_tes=np_sum(abs(XTest)**ipar,axis=1)**(1/ipar)

  if iscale in (0,1,2,10):    
    xscale_tra=xscale_tra+(xscale_tra==0)
    XTrain=XTrain/tile(xscale_tra.reshape(mtrain,1),(1,n))
    if mtest>0:
      xscale_tes=xscale_tes+(xscale_tes==0)
      XTest=XTest/tile(xscale_tes.reshape(mtest,1),(1,n))
          
  if iscale==3:   ## scale items by stereographic projection relative to zero
    xnorm2=np_sum(XTrain**2,axis=1)
    R=ipar
    xhom=ones(mtrain)/(xnorm2+R**2)
    xhom2=xnorm2-R**2
    XTrain=concatenate((2*R**2*XTrain*outer(xhom,ones(n)),R*xhom2*xhom), \
                       axis=1)
    if mtest>0:
      xnorm2=np_sum(XTest**2,axis=1)
      xhom=ones(mtest)/(xnorm2+R**2)
      xhom2=xnorm2-R**2
      XTest=concatenate((2*R**2*XTest*outer(xhom,ones(n)),R*xhom2*xhom), \
                        axis=1)

## variable wise normalization relative to zero
## test has to use of the training scale 

  if iscale==-1:
    pass
  elif iscale==4:     ## scale vars by std to zeros center
    xscale=std(XTrain,axis=0)
##    xscale=sqrt(mean(XTrain**2,axis=0)) 
  elif iscale==5:     ## scale vars by mad
    xscale=median(abs(XTrain),axis=0)
  elif iscale==6:     ## scale vars by absolut deviation
    xscale=mean(abs(XTrain),axis=0)

  if iscale in (4,5,6):
    xscale=xscale+(xscale==0)
    XTrain=XTrain/tile(xscale,(mtrain,1))
    if mtest>0:
      XTest=XTest/tile(xscale,(mtest,1))

  if iscale==-1:
    pass
  if iscale==7:     ## scale vars by average std to zero center
##    xscale=mean(std(XTrain,axis=0))
    xscale=mean(sqrt(mean(XTrain**2,axis=0)))
  elif iscale==8:     ## scale vars by max std to zero center
##    xscale=np_max(std(XTrain,axis=0))
    xscale=np_max(sqrt(mean(XTrain**2,axis=0)))
  elif iscale==9:     ## scale vars by median mad
    xscale=median(median(abs(XTrain),axis=0))
  elif iscale==11:    ## \sum_i||u-x_i||/m where u=0
    xscale=mean(sqrt(np_sum(XTrain**2,axis=1)))
  elif iscale==12:    ## scale all variables by overall max
    xscale=XTrain.max()

##  print(xscale)
  if iscale in (7,8,9,11,12):
    xscale=xscale+(xscale==0)
    XTrain=XTrain/xscale
    if mtest>0:
      XTest=XTest/xscale

  if iscale==13:     ## scale by Mahalanobis
    xsigma=dot(XTrain.T,XTrain) ## covariance
    [w,v]=linalg.eigh(xsigma)
    iw=where(w<=10**(-10))[0]
    w[iw]=0.0
    iw=where(w>0.0)[0]
    w_sqinv=zeros(XTrain.shape[1])
    w_sqinv[iw]=1/sqrt(w[iw])
    XTrain=dot(XTrain,v)*outer(ones(mtrain),w_sqinv)
    if mtest>0:
      XTest=dot(XTest,v)*outer(ones(mtest),w_sqinv)
    
  return(XTrain,XTest,opar)
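
## usage sketch with toy data (inputs assumed; the snippet relies on bare-name
## numpy imports, e.g. "from numpy import *"):
## center by the mean (ilocal=0) and scale each variable by its std (iscale=4)
Xtr = array([[1., 2.], [3., 6.], [5., 10.]])
Xte = array([[2., 4.]])
Xtr_n, Xte_n, opar = mmr_normalization(0, 4, Xtr, Xte, None)
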
Example #45
0
    def _solve_gs_preserve(self, A, f, mu, subsample_mapping, skip_gs=False):
        """
        Code notes from Daniil Kitchaev ([email protected]) - 2018-09-10

        This is a WORK IN PROGRESS based on Wenxuan's ground-state preservation fitting code.
        A, f, and mu are as in the other routines.
        subsample_mapping deals with the fact that weights change when fitting on a partial set (when figuring out mu)
        skip_gs gives the option of ignoring the constrained fitting part, which is helpful when figuring out mu

        In general, this code is really not production ready - the algorithm has serious numerical issues, and getting
        around them involves lots of fiddling with eigenvalue roundoffs, etc, as is commented below.

        There are also issues with the fact that constraints can be very difficult to satisfy, causing the solver to
        diverge (or just quit silently, giving absurd results) - the solution here appears to be to use MOSEK instead
        of cvxopt, and to iteratively remove constraints when they cause problems. Usually after cleaning up the data,
        everything can be fit without removing constraints.

        At the end of the day, this algorithm seems to only be useful for niche applications because enforcing ground
        state preservation causes a giant bias in the fit and makes the error in E-above-hull highly correlated with the
        value of E-above-hull. The result is that entropies come out completely wrong, and entropy is usually what you
        want out of a cluster expansion.

        So, use the code at your own risk. AFAIK, it works as described in Wenxuan's paper, with various additions from
        me for numerical stability. It has not been extensively tested though or used in real projects due to the bias
        issue I described above. I think that unless the bias problem is resolved, this fitting scheme will not be
        of much practical use.
        """
        if not subsample_mapping:
            assert A.shape[0] == self.feature_matrix.shape[0]
            subsample_mapping = {}
            for i in range(self.feature_matrix.shape[0]):
                subsample_mapping[i] = i

        from cvxopt import matrix
        from cvxopt import solvers
        from pymatgen.core.periodic_table import get_el_sp
        try:
            import mosek
        except ImportError:
            raise ValueError("GS preservation fitting is finicky and MOSEK solvers are typically required for numerical stability.")
        solvers.options['show_progress'] = False
        solvers.options['MOSEK'] = {mosek.dparam.check_convexity_rel_tol: 1e-6}

        ehull = list(self.e_above_hull_input)
        structure_index_at_hull = [i for (i,e) in enumerate(ehull) if e < 1e-5]

        reduce_composition_at_hull = [
            self.structures[i].composition.element_composition.reduced_composition.element_composition for
            i in structure_index_at_hull]

        all_corr_in = np.array(self.feature_matrix)
        all_engr_in = np.array(self.normalized_energies)

        # Some structures can be degenerate in correlation space, even if they are distinct in reality. We can't
        # constrain their energies since as far as the CE is concerned, same correlation = same structure
        duplicated_correlation_set = []
        for i in range(len(all_corr_in)):
            if i not in structure_index_at_hull:
                for j in structure_index_at_hull:
                    if np.max(np.abs(all_corr_in[i] - all_corr_in[j])) < 1e-6:
                        logging.info("Structure {} ({} - {}) has the same correlation as hull structure {} ({} {})".format(i,
                                                                    self.structures[i].composition.element_composition.reduced_formula,
                                                                    self.spacegroups[i],
                                                                    j,
                                                                    self.structures[j].composition.element_composition.reduced_formula,
                                                                    self.spacegroups[j]))
                        duplicated_correlation_set.append(i)

        all_engr_in.shape = (len(all_engr_in), 1)
        f.shape = (f.shape[0], 1)

        # Adjust weights if subsample changed whats included and whats not
        weights_tmp = []
        for i in range(A.shape[0]):
            weights_tmp.append(self.weights[subsample_mapping[i]])

        subsample_mapping_inv = {}
        for i, j in subsample_mapping.items():
            subsample_mapping_inv[j] = i
        for i in duplicated_correlation_set:
            if i in subsample_mapping_inv.keys():
                weights_tmp[subsample_mapping_inv[i]] = 0


        weight_vec = np.array(weights_tmp)

        weight_matrix = np.diag(weight_vec.transpose())

        N_corr = A.shape[1]

        # Deal with roundoff error making P not positive semidefinite by using the SVD of A
        # At = USV*
        # At A = U S St Ut -> any negatives in S get squared
        # Unfortunately, this is usually not enough, so the next step is to explicitly add something small (1e-10)
        # to all eigenvalues so that eigenvalues close to zero are instead very slightly positive.
        # Otherwise, random numerical error makes the matrix not positive semidefinite, and the convex optimization
        # gets confused
        Aw = weight_matrix.dot(A)
        u, s, v = la.svd(Aw.transpose())
        Ss = np.pad(np.diag(s), ((0, u.shape[0] - len(s)),(0,0)), mode='constant', constant_values=0)
        P_corr_part = 2 * u.dot((Ss.dot(Ss.transpose()))).dot(u.transpose())
        P = np.lib.pad(P_corr_part, ((0, N_corr), (0, N_corr)), mode='constant', constant_values=0)
        P = 0.5 * (P + P.transpose())
        ev, Q = la.eigh(P)
        Qi = la.inv(Q)
        P = Q.dot(np.diag(np.abs(ev)+1e-10)).dot(Qi)

        q_corr_part = -2 * ((weight_matrix.dot(A)).transpose()).dot(f)
        q_z_part = np.ones((N_corr, 1)) / mu
        q = np.concatenate((q_corr_part, q_z_part), axis=0)

        G_1 = np.concatenate((np.identity(N_corr), -np.identity(N_corr)), axis=1)
        G_2 = np.concatenate((-np.identity(N_corr), -np.identity(N_corr)), axis=1)
        G_3 = np.concatenate((G_1, G_2), axis=0)
        h_3 = np.zeros((2 * N_corr, 1))

        # formulation is min 1/2 x'Px+ q'x s.t.: Gx<=h, Ax=b

        # P = 2 * A^T A
        # q = -2 * E^T A = q^T -> q = -2 * A^T E

        # See Wenxuan npjCompMat paper for derivation. All of the above mess is implementing this formula, plus dealing
        # with numerical issues with zero eigenvalues getting rounded off to something slightly negative

        init_vals = matrix(np.linalg.lstsq(self.feature_matrix, self.normalized_energies)[0])

        input_entries = []
        for s, e in zip(self.structures, self.energies):
            input_entries.append(PDEntry(s.composition.element_composition, e))
        max_e = max(input_entries, key=lambda e: e.energy_per_atom).energy_per_atom + 1000
        for el in self.ce.structure.composition.keys():
            input_entries.append(PDEntry(Composition({el: 1}).element_composition, max_e))
        pd_input = PhaseDiagram(input_entries)

        constraint_strings = []

        # Uncomment to save various matrices for debugging purposes
        #np.save("A.npy", A)
        #np.save("f.npy", f)
        #np.save("w.npy", weight_vec)
        #np.save("P.npy", P)
        #np.save("q.npy", q)
        #np.save("G_noC.npy", G_3)
        #np.save("h_noC.npy", h_3)

        # The next part deals with adding constraints based on on-hull/off-hull compositions
        # Once again, there are numerical errors that arise when some structures are very close in correlation space
        # or in energy, such that the solver runs into either numerical issues or something else. The solution seems
        # to be to add constraints in batches, and try the increasingly constrained fit every once in a while.
        # When the fitting fails, roll back to find the problematic constraint and remove it. Usually there isn't more
        # than one or two bad constraints, and looking at them by hand is enough to figure out why they are causing
        # problems.
        BATCH_SIZE = int(np.sqrt(len(all_corr_in)))
        tot_constraints = 0
        removed_constraints = 0
        if not skip_gs:
            for i in range(len(all_corr_in)):
                if i not in structure_index_at_hull and i not in duplicated_correlation_set:

                    reduced_comp = self.structures[i].composition.element_composition.reduced_composition.element_composition
                    if reduced_comp in reduce_composition_at_hull:  ## in hull composition

                        hull_idx = reduce_composition_at_hull.index(reduced_comp)
                        global_index = structure_index_at_hull[hull_idx]

                        G_3_new_line = np.concatenate((all_corr_in[global_index] - all_corr_in[i], np.zeros((N_corr))))

                        G_3_new_line.shape = (1, 2 * N_corr)
                        G_3 = np.concatenate((G_3, G_3_new_line), axis=0)
                        small_error = np.array(-1e-3) # TODO: This tolerance is actually quite big, but it can be reduced as needed
                        small_error.shape = (1, 1)
                        h_3 = np.concatenate((h_3, small_error), axis=0)
                        tot_constraints += 1
                        string = "{}|Added constraint from {}({} - {}) structure at hull comp".format(h_3.shape[0], reduced_comp, self.spacegroups[i], i)
                        print(string)
                        constraint_strings.append(string)

                    else:  # out of hull composition

                        comp_now = self.structures[i].composition.element_composition.reduced_composition.element_composition
                        decomposition_now = pd_input.get_decomposition(comp_now)
                        new_vector = -1.0 * all_corr_in[i]
                        for decompo_keys, decompo_values in decomposition_now.items():
                            reduced_decompo_keys = decompo_keys.composition.element_composition.reduced_composition.element_composition
                            index_1 = reduce_composition_at_hull.index(reduced_decompo_keys)
                            vertex_index_global = structure_index_at_hull[index_1]
                            new_vector = new_vector + decompo_values * all_corr_in[vertex_index_global]

                        G_3_new_line = np.concatenate((new_vector, np.zeros(N_corr)))

                        G_3_new_line.shape = (1, 2 * N_corr)
                        G_3 = np.concatenate((G_3, G_3_new_line), axis=0)

                        small_error = np.array(-1e-3)
                        small_error.shape = (1, 1)
                        h_3 = np.concatenate((h_3, small_error), axis=0)
                        tot_constraints += 1
                        string = "{}|Added constraint from {}({}) structure not at hull comp".format(h_3.shape[0], reduced_comp, i)
                        print(string)
                        constraint_strings.append(string)

                elif i in structure_index_at_hull:
                    if self.structures[i].composition.element_composition.is_element:
                        continue

                    entries_new = []
                    for j in structure_index_at_hull:
                        if not j == i:
                            entries_new.append(
                                PDEntry(self.structures[j].composition.element_composition, self.energies[j]))

                    for el in self.ce.structure.composition.keys():
                        entries_new.append(PDEntry(Composition({el: 1}).element_composition,
                                                   max(self.normalized_energies) + 1000))

                    pd_new = PhaseDiagram(entries_new)

                    comp_now = self.structures[i].composition.element_composition.reduced_composition.element_composition
                    decomposition_now = pd_new.get_decomposition(comp_now)

                    new_vector = all_corr_in[i]

                    abandon = False
                    print("Constraining gs of {}({})".format(self.structures[i].composition, self.structures[i].composition))
                    for decompo_keys, decompo_values in decomposition_now.items():
                        reduced_decompo_keys = decompo_keys.composition.element_composition.reduced_composition.element_composition
                        if not reduced_decompo_keys in reduce_composition_at_hull:
                            abandon = True
                            break

                        index = reduce_composition_at_hull.index(reduced_decompo_keys)
                        vertex_index_global = structure_index_at_hull[index]
                        new_vector = new_vector - decompo_values * all_corr_in[vertex_index_global]
                    if abandon:
                        continue

                    G_3_new_line = np.concatenate((new_vector, np.zeros(N_corr)))

                    G_3_new_line.shape = (1, 2 * N_corr)
                    G_3 = np.concatenate((G_3, G_3_new_line), axis=0)
                    small_error = np.array(-1e-3) # TODO: Same tolerance as above
                    small_error.shape = (1, 1)
                    h_3 = np.concatenate((h_3, small_error), axis=0)
                    tot_constraints += 1
                    string = "{}|Added constraint from {}({}) structure on hull, decomp".format(h_3.shape[0], comp_now, i)
                    print(string)
                    constraint_strings.append(string)

                if i % BATCH_SIZE == 0 or i == len(all_corr_in)-1:
                    valid = False
                    const_remove = 0
                    G_t = deepcopy(G_3)
                    h_t = deepcopy(h_3)
                    # Remove constraints until fit works
                    while not valid:
                        sol = solvers.qp(matrix(P), matrix(q), matrix(G_3), matrix(h_3), initvals=init_vals, solver='mosek')
                        if sol['status'] == 'optimal':
                            valid = True
                        else:
                            const_remove += 1
                            G_3 = G_t[:-1 * (const_remove),:]
                            h_3 = h_t[:-1 * (const_remove)]
                            removed_constraints += 1

                    if const_remove > 0:
                        constraint_strings.append("{}|Removed".format(G_t.shape[0] - const_remove + 1))

                    # Add constraints back in one by one and remove if they cause problems
                    for num_new in range(1, const_remove):
                        G_new_line = G_t[-1 * (const_remove - num_new),:]
                        h_new_line = h_t[-1 * (const_remove - num_new)]
                        G_new_line.shape = (1, 2 * N_corr)
                        h_new_line.shape = (1,1)
                        G_3 = np.concatenate((G_3, G_new_line), axis=0)
                        h_3 = np.concatenate((h_3, h_new_line), axis=0)
                        sol = solvers.qp(matrix(P), matrix(q), matrix(G_3), matrix(h_3), initvals=init_vals, solver='mosek')
                        removed_constraints -= 1
                        if sol['status'] != 'optimal':
                            G_3 = G_3[:-1, :]
                            h_3 = h_3[:-1]
                            removed_constraints += 1
                            constraint_strings.append("{}|Removed".format(G_t.shape[0] - const_remove + num_new + 1))
            # Uncomment for iteratively saving matrices
            #np.save("G.npy", G_3)
            #np.save("h.npy", h_3)



        # Uncomment for debugging
        #np.save("G.npy", G_3)
        #np.save("h.npy", h_3)

        sol = solvers.qp(matrix(P), matrix(q), matrix(G_3), matrix(h_3), initvals=init_vals, solver='mosek')
        print("Final status: {}".format(sol['status']))
        print("Mu: {}".format(mu))
        print("Constrants: {}/{}".format(tot_constraints - removed_constraints, tot_constraints))
        ecis = np.array(sol['x'])[:N_corr, 0]

        # Uncomment for some debugging info
        #print(ecis)
        #for string in constraint_strings:
        #    print(string)
        return ecis
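
# A standalone toy of the same QP formulation used above (illustration only,
# not the class method): min 1/2 x'Px + q'x  s.t.  Gx <= h. Here we minimize
# (x0 - 1)^2 + (x1 - 2)^2 subject to x0 + x1 <= 2; the optimum is about (0.5, 1.5).
import numpy as np
from cvxopt import matrix, solvers

P = matrix(2.0 * np.eye(2))
q = matrix(np.array([-2.0, -4.0]))
G = matrix(np.array([[1.0, 1.0]]))
h = matrix(np.array([2.0]))
solvers.options['show_progress'] = False
sol = solvers.qp(P, q, G, h)
print(sol['status'], np.array(sol['x']).ravel())
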
Example #46
0
def _ladder_operators(basis, operator, sparsification=False, tol=1e-12, return_operators=True):
    # Alternative version of _ladder_operators().
    
    basisA = basis

    # Liouvillian matrix between basisA and basisB.
    (L_A_B, basisB) = liouvillian_matrix(basisA, -operator, return_extended_basis=True)

    # The smallest basis that includes only operators from basisA and basisB.
    smallest_basisAB_opstrings = [os for os in basisA if os in basisB]
    smallest_basisAB           = Basis(smallest_basisAB_opstrings)

    if len(smallest_basisAB) == 0:
        if return_operators:
            evecs_ladder_ops = []
        else:
            evecs_ladder_ops = np.zeros((len(basisA), 0), dtype=complex)
            
        return (evecs_ladder_ops, np.zeros(0, dtype=complex))
    
    # Find the relevant indices of operator strings in different bases.
    inds_os_AB_in_A    = []
    inds_os_AB_in_B    = []
    inds_os_notAB_in_A = []
    inds_os_notAB_in_B = []
    ind_os_B  = 0
    for os in basisB:
        if os in basisA:
            inds_os_AB_in_A.append(basisA.index(os))
            inds_os_AB_in_B.append(ind_os_B)
        else:
            inds_os_notAB_in_B.append(ind_os_B)
        ind_os_B += 1
    ind_os_A = 0
    for os in basisA:
        if os not in basisB:
            inds_os_notAB_in_A.append(ind_os_A)
        ind_os_A += 1
        
    # The full Liouvillian matrix.
    L = L_A_B #.toarray()

    # The original Liouvillian matrix projected into the smallest
    # basis made of operators in both basisA and basisB.
    # L_AB: A \intersect B -> A \intersect B
    L_AB = L[inds_os_AB_in_B, :]
    L_AB = L_AB[:, inds_os_AB_in_A].toarray()

    print('basisA     = \n{}'.format(basisA))
    print('basisB     = \n{}'.format(basisB))
    print('smallest_basisAB = \n{}'.format(smallest_basisAB))
    
    if len(inds_os_notAB_in_B) > 0:
        # The Liouvillian matrix whose output is projected out of basisAB.
        # L_notAB: A \intersect B -> B/(A \intersect B) = B/A
        L_notAB = L[inds_os_notAB_in_B, :]
        L_notAB = L_notAB[:, inds_os_AB_in_A].toarray()

        # Perform SVD on the L_notAB to determine the right null vectors.
        # These are the vectors we care about: they stay in basisAB after
        # applying the Liouvillian matrix.
        (left_svecs, svals, right_svecsH) = nla.svd(L_notAB)

        inds_zero_svals = np.where(np.abs(svals) < tol)[0]
        print('inds_zero_svals = {}'.format(inds_zero_svals))
        valid_vecs      = np.conj(np.transpose(right_svecsH))[:, inds_zero_svals]
        
        print('svals      = {}'.format(svals))
        #print('valid_vecs = {}'.format(valid_vecs))

        # The L_A matrix projected onto the valid vectors that stay in basisA.
        projected_L = np.dot(np.conj(np.transpose(valid_vecs)), np.dot(L_AB, valid_vecs))
    else:
        projected_L = L_AB

    print('projected_L = {}'.format(projected_L))
    check_hermitian = np.allclose(np.conj(np.transpose(projected_L))-projected_L, np.zeros(projected_L.shape))
    print('projected_L is Hermitian: {}'.format(check_hermitian))
        
    # Perform eigendecomposition on the projected L matrix.
    (evals, evecs) = nla.eigh(0.5*(projected_L + np.conj(np.transpose(projected_L))))

    print('evals = {}'.format(evals))
    #print('evecs = {}'.format(evecs))

    # Find all of the positive eigenvalues that
    # come paired with a negative eigenvalue partner.
    inds_pos           = np.where(np.logical_and(np.imag(evals) < tol, np.real(evals) > tol))[0]
    inds_pos_partnered = []
    for ind_pos_ev in inds_pos:
        inds_neg_ev = np.where(np.abs(evals[ind_pos_ev] + evals) < tol)[0]
        if len(inds_neg_ev) > 0: 
            inds_pos_partnered.append(ind_pos_ev)

    print('inds_pos           = {}'.format(inds_pos))
    print('inds_pos_partnered = {}'.format(inds_pos_partnered))
            
    ladder_evals = np.real(evals[inds_pos_partnered])
    if len(inds_os_notAB_in_B) > 0:
        ladder_evecsAB = np.dot(valid_vecs, evecs[:, inds_pos_partnered])
    else:
        ladder_evecsAB = evecs[:, inds_pos_partnered]
        
    ladder_evecs                     = np.zeros((len(basisA), len(ladder_evals)), dtype=complex) 
    ladder_evecs[inds_os_AB_in_A, :] = ladder_evecsAB

    # Post-process the eigenvalues and eigenvectors.
    inds_sort = np.argsort(ladder_evals)

    evals_ladder_ops = ladder_evals[inds_sort]
    evecs_ladder_ops = ladder_evecs[:, inds_sort]

    num_ladder_ops = len(evals_ladder_ops)
    if num_ladder_ops == 0:
        if return_operators:
            evecs_ladder_ops = []
            
        return (evecs_ladder_ops, evals_ladder_ops)
    
    """
    # Perform SVD on the Liouvillian matrix.
    (left_svecsB, svals, right_svecsAH) = nla.svd(L)
    right_svecsA = np.conj(np.transpose(right_svecsAH))

    # Identify the degenerate positive singular values subspaces.
    degenerate_subspace_inds = []
    degenerate_subspace_sval = []
    visited_inds = set()
    for ind_sv in range(len(svals)):
        subspace_inds = []
        inds_deg = np.where(np.logical_and(np.abs(svals[ind_sv]-svals) < tol, np.abs(svals) > tol))[0]
        for ind_deg in inds_deg:
            if ind_deg not in visited_inds:
                subspace_inds.append(ind_deg)
                visited_inds.add(ind_deg)
        if len(subspace_inds) > 0:
            degenerate_subspace_inds.append(subspace_inds)
            degenerate_subspace_sval.append(svals[ind_sv])

    

    projector_B_to_A = np.zeros((len(basisA), len(basisB)), dtype=complex)
    
    for ind_os_b in range(len(basisB)):
        os_b = basisB.op_strings[ind_os_b]
        if os_b in basisA:
            ind_os_a                             = basisA.index(os_b)
            projector_B_to_A[ind_os_a, ind_os_b] = 1.0

    projected_L = np.dot(projector_B_to_A, L)

    # For each degenerate subspace, project onto the right
    # singular vectors with singular value s and see if
    # the eigenvalues of the projected matrix are +/- s.
    # If they are, then the projection preserved the
    # singular vectors as eigenvectors of the projected
    # matrix.
    num_ladder_ops   = 0
    evecs_ladder_ops = []
    evals_ladder_ops = []
    for (subspace_inds, subspace_sval) in zip(degenerate_subspace_inds, degenerate_subspace_sval):
        if len(subspace_inds) > 1:    
            right_vecs = right_svecsA[:, subspace_inds]
            
            subspace_L = np.dot(np.conj(np.transpose(right_vecs)), np.dot(projected_L, right_vecs))

            (evals_sub, evecs_sub) = nla.eig(subspace_L)

            inds_eval_matches_sval  = np.where(np.abs(evals_sub - subspace_sval) < tol)[0]
            inds_eval_matches_msval = np.where(np.abs(evals_sub + subspace_sval) < tol)[0]

            assert(len(inds_eval_matches_sval) == len(inds_eval_matches_msval))

            if len(inds_eval_matches_sval) > 0:
                evals_ladder_ops.append(evals_sub[inds_eval_matches_sval])
                evecs_ladder_ops.append(evecs_sub[inds_eval_matches_sval])

                num_ladder_ops += len(inds_eval_matches_sval)
                
    if num_ladder_ops == 0:
        if return_operators:
            evecs_ladder_ops = []
        else:
            evecs_ladder_ops = np.zeros((len(basisA), 0), dtype=complex)
            
        return (evecs_ladder_ops, np.zeros(0, dtype=complex))
    
    evals_ladder_ops = np.concatenate(tuple(evals_ladder_ops))
    evecs_ladder_ops = np.hstack(tuple(evecs_ladder_ops))
    """
    
    # Go through each degenerate subspace and sparsify the basis of that subspace.
    if sparsification:
        inds_sort = np.argsort(np.real(evals_ladder_ops))
        evals_ladder_ops = evals_ladder_ops[inds_sort]
        evecs_ladder_ops = evecs_ladder_ops[:, inds_sort]

        ind_ev = 0
        while ind_ev < len(evals_ladder_ops):
            inds_jump = np.where(np.abs(evals_ladder_ops[ind_ev:] - evals_ladder_ops[ind_ev]) > 1e-10)[0]

            if len(inds_jump) == 0:
                # no jump found: the remaining eigenvalues are all degenerate with this one
                ind_jump = len(evals_ladder_ops) - ind_ev
            else:
                ind_jump = inds_jump[0]

            inds_degenerate_eval = np.arange(ind_ev, ind_ev+ind_jump)
            if ind_jump > 1:
                evecs_ladder_ops[:,inds_degenerate_eval] = sparsify(evecs_ladder_ops[:,inds_degenerate_eval])

                ind_ev += ind_jump
            else:
                ind_ev += 1
        
    if not return_operators:
        return (evecs_ladder_ops, evals_ladder_ops)
    else:
        ladder_ops = []
        if isinstance(basis, Basis):
            for ind_ev in range(num_ladder_ops):
                ladder_op = Operator(evecs_ladder_ops[:, ind_ev], basisA.op_strings)
                ladder_ops.append(ladder_op)

            #print('Ladder operators:')
            #for op in ladder_ops:
            #    print(op)
        elif isinstance(basis, list) and isinstance(basis[0], Operator):
            raise NotImplementedError('A basis made of a list of Operators is not supported yet.')
        else:
            raise ValueError('Invalid basis of type: {}'.format(type(basis)))

        return (ladder_ops, evals_ladder_ops)
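
# The +/- pairing logic above (keep positive eigenvalues whose negatives also
# occur) can be illustrated standalone; a minimal numpy sketch with toy
# eigenvalues, mirroring the inds_pos_partnered construction:
import numpy as np

tol = 1e-12
evals = np.array([-2.0, -1.0, 0.0, 1.0, 2.0, 3.0])  # 3.0 has no -3.0 partner
inds_pos = np.where(evals > tol)[0]
inds_pos_partnered = [i for i in inds_pos
                      if np.any(np.abs(evals[i] + evals) < tol)]
print(evals[inds_pos_partnered])  # -> [1. 2.]
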
Example #47
0
from math import *
from pylab import plot, show
from numpy import *
from numpy.linalg import eigh
A = array([[1, 2], [2, 4]])
print(A)
print(eigh(A))

E = []

hbar = 6.6 * (10**-34) / (2 * pi)
m_e = 9.1 * (10**-31)
L = 5 * (10**-10)
e = 1.6 * (10**-19)
for i in range(1, 11):
    E.append(i * i * pi * pi * (hbar**2) / (2 * m_e * (L**2)))
print(E)


def V_x(x):
    return (10 * e * x / L)


def Hphi(x, n, m):
    total = ((hbar**2) * (1 / (2 * m_e)) * ((n * pi / L)**2) *
             (sin(n * pi * x / L)))
    total += sin(n * pi * x / L) * V_x(x)
    total *= (2 / L) * sin(m * pi * x / L)
    return total
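
# The snippet defines the integrand Hphi but never assembles the Hamiltonian.
# A possible completion (an assumption, not part of the original) builds the
# matrix elements H_mn by midpoint integration over [0, L] and diagonalizes
# with eigh:
Npts = 1000
xs = [(k + 0.5) * L / Npts for k in range(Npts)]
size = 10
H = zeros((size, size))
for m in range(1, size + 1):
    for n in range(1, size + 1):
        H[m - 1, n - 1] = sum([Hphi(x, n, m) for x in xs]) * (L / Npts)
w, v = eigh(H)
print(w / e)  # perturbed energy levels in eV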

Example #48
0
print("Degree Matrix: \n", Degree)

# In[293]:

print("4(d)")
Lmatrix = Degree - Adjacency

print("Laplace Matrix: \n", Lmatrix, "\n")

# In[294]:

print("4(d)")
#Eigen values and Eigenvectors

from numpy import linalg as LA
evals, evecs = LA.eigh(Lmatrix)

print("Eigenvalues of Laplace Matrix = \n", evals, "\n")
print("Eigenvectors of Laplace Matrix = \n", evecs, "\n")

# In[295]:

print("4(d)")
# Series plot of the smallest ten eigenvalues to determine the number of clusters

plt.figure(figsize=(8, 6))
plt.scatter(np.arange(0, 10, 1), evals[0:10])
plt.xlabel('Sequence')
plt.ylabel('Eigenvalue')
plt.grid()
plt.show()
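
# A possible next step (assumed, not in the original): pick k from the gap in
# the eigenvalue plot, embed each node with the first k eigenvectors, and
# cluster with k-means.
from sklearn.cluster import KMeans

k = 3  # assumed choice, read off from the scree plot above
embedding = evecs[:, 0:k]  # row i embeds node i
labels = KMeans(n_clusters=k, n_init=10).fit_predict(embedding)
print("Cluster assignments:\n", labels)
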
Example #49
0
    def solve(self, regparam):
        """Re-trains KronRLS for the given regparam
               
        Parameters
        ----------
        regparam : float
            regularization parameter, regparam > 0

        Notes
        -----    
                
        Computational complexity of re-training:
        
        m = n_samples1, n = n_samples2, d = n_features1, e  = n_features2
        
        O(ed^2 + de^2) Linear version (assumption: d < m, e < n)
        
        O(m^3 + n^3) Kernel version
        """
        self.regparam = regparam
        if self.kernelmode:
            K1, K2 = self.K1, self.K2
            #assert self.Y.shape == (self.K1.shape[0], self.K2.shape[0]), 'Y.shape!=(K1.shape[0],K2.shape[0]). Y.shape=='+str(Y.shape)+', K1.shape=='+str(self.K1.shape)+', K2.shape=='+str(self.K2.shape)
            if not self.trained:
                self.trained = True
                evals1, V = la.eigh(K1)
                evals1 = np.mat(evals1).T
                V = np.mat(V)
                self.evals1 = evals1
                self.V = V

                evals2, U = la.eigh(K2)
                evals2 = np.mat(evals2).T
                U = np.mat(U)
                self.evals2 = evals2
                self.U = U
                self.VTYU = V.T * self.Y * U

            newevals = 1. / (self.evals1 * self.evals2.T + regparam)

            self.A = np.multiply(self.VTYU, newevals)
            self.A = self.V * self.A * self.U.T
            self.A = np.asarray(self.A)
            label_row_inds, label_col_inds = np.unravel_index(
                np.arange(K1.shape[0] * K2.shape[0]),
                (K1.shape[0], K2.shape[0]))
            label_row_inds = np.array(label_row_inds, dtype=np.int32)
            label_col_inds = np.array(label_col_inds, dtype=np.int32)
            self.predictor = KernelPairwisePredictor(self.A.ravel(),
                                                     label_row_inds,
                                                     label_col_inds)
        else:
            X1, X2 = self.X1, self.X2
            Y = self.Y.reshape((X1.shape[0], X2.shape[0]), order='F')
            if not self.trained:
                self.trained = True
                V, svals1, rsvecs1 = linalg.svd_economy_sized(X1)
                svals1 = np.mat(svals1)
                self.svals1 = svals1.T
                self.evals1 = np.multiply(self.svals1, self.svals1)
                self.V = V
                self.rsvecs1 = np.mat(rsvecs1)

                if X1.shape == X2.shape and (X1 == X2).all():
                    svals2, U, rsvecs2 = svals1, V, rsvecs1
                else:
                    U, svals2, rsvecs2 = linalg.svd_economy_sized(X2)
                    svals2 = np.mat(svals2)
                self.svals2 = svals2.T
                self.evals2 = np.multiply(self.svals2, self.svals2)
                self.U = U
                self.rsvecs2 = np.mat(rsvecs2)

                self.VTYU = V.T * Y * U

            kronsvals = self.svals1 * self.svals2.T

            newevals = np.divide(kronsvals,
                                 np.multiply(kronsvals, kronsvals) + regparam)
            self.W = np.multiply(self.VTYU, newevals)
            self.W = self.rsvecs1.T * self.W * self.rsvecs2
            self.predictor = LinearPairwisePredictor(
                np.array(self.W).ravel(order='F'))
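
# The eigendecomposition shortcut above rests on the Kronecker identity
# (K2 (x) K1) vec(A) = vec(K1 A K2^T) for column-major vec. A standalone
# numeric check of the trick on small random symmetric matrices (illustration only):
import numpy as np

rng = np.random.default_rng(0)
K1 = rng.standard_normal((3, 3)); K1 = K1 + K1.T
K2 = rng.standard_normal((4, 4)); K2 = K2 + K2.T
Y = rng.standard_normal((3, 4))
lam = 1.0

# direct solve of (K2 (x) K1 + lam*I) a = vec(Y)
a_direct = np.linalg.solve(np.kron(K2, K1) + lam * np.eye(12), Y.ravel(order='F'))

# eigendecomposition shortcut, as in the snippet
e1, V = np.linalg.eigh(K1)
e2, U = np.linalg.eigh(K2)
A = V @ ((V.T @ Y @ U) / (np.outer(e1, e2) + lam)) @ U.T
assert np.allclose(a_direct, A.ravel(order='F'))
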
Example #50
0
def sorted_eig(m):
    evals, evects = eigh(m)
    sort_order = evals.argsort()[::-1]
    evals = evals[sort_order]
    evects = evects[:, sort_order]
    return evals, evects
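
# usage sketch: eigenpairs come back in descending order, so the dominant
# eigenpair sits at index 0
import numpy as np
from numpy.linalg import eigh

m = np.array([[2.0, 1.0], [1.0, 2.0]])
evals, evects = sorted_eig(m)
print(evals)         # -> [3. 1.]
print(evects[:, 0])  # eigenvector of the largest eigenvalue
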
Example #51
0
def plot_graph(graph, label=None, cache=False, max_id=None):
    val_map = [
        'cyan', 'red', 'blue', 'magenta', 'gray', 'purple', 'orange', 'yellow',
        'green', 'black', 'pink'
    ]

    if label is not None:
        if len(np.shape(label)) > 1:
            values = [val_map[_] for _ in np.where(label == 1)[1].tolist()]
        else:
            values = [val_map[int(_)] for _ in label]
    else:
        values = None
    # if graph.name == 'Zachary\'s Karate Club':
    #     vals = {'Mr. Hi': 0, 'Officer': 1}
    #     values = [vals[__['club']] for _, __ in graph._node.items()]

    if cache:
        pos = pk.load(open('./pos.pk', 'rb'))
    else:
        pos = nx.fruchterman_reingold_layout(graph, k=0.1, iterations=50)
        pk.dump(pos, open('./pos.pk', 'wb'))
    # pos = nx.circular_layout(graph,scale=1)
    # pos = nx.random_layout(graph)
    # pos = nx.shell_layout(graph)
    # pos = nx.spectral_layout(graph)

    if nx.bipartite.is_bipartite(graph):
        l, r = nx.bipartite.sets(graph)
        pos = {}
        pos.update((node, (1, index)) for index, node in enumerate(l))
        pos.update((node, (2, index)) for index, node in enumerate(r))

    print("\nPlotting a graph...")

    # plot graph
    plt.axis('off')
    plt.figure(1, figsize=(10, 10))
    gs = gridspec.GridSpec(2, 1, height_ratios=[1, 3])

    ax0 = plt.subplot(gs[0])
    # plot eigenvalues
    norm_lap = nx.normalized_laplacian_matrix(graph)
    eigval, eigvec = LA.eigh(norm_lap.A)
    ax0.plot(eigval, 'ro')

    ax1 = plt.subplot(gs[1])
    # nx.draw(graph, pos=pos, node_color=values, node_size=15, width=0.1)
    nx.draw(graph,
            pos,
            node_color=label,
            node_size=20,
            width=0.1,
            cmap=plt.cm.rainbow,
            with_labels=False)
    # color bar
    sm = plt.cm.ScalarMappable(cmap=plt.cm.rainbow,
                               norm=plt.Normalize(vmin=np.min(label),
                                                  vmax=np.max(label)))
    sm._A = []
    divider = make_axes_locatable(ax1)
    cax = divider.append_axes("bottom", size="5%")
    cbar = plt.colorbar(sm,
                        cax=cax,
                        ticks=[-1, -.5, -.25, -.1, 0, .1, .25, .5, 1],
                        orientation='horizontal')
    cbar.ax.tick_params(labelsize=8)
    cbar.ax.set_xlabel('node text in graph is index=val', rotation=360)

    plt.title('original networks w/ eigenvalues')
    plt.show()

    # visualize eigen vectors
    # for _ in range(3):  # len(graph._node)):
    extra_ids = max_id.flatten().tolist() if max_id is not None else []
    for _ in list(range(3)) + extra_ids:  # len(graph._node)):
        plt.figure(1, figsize=(7, 8))
        fig, ax = plt.subplots(2, 1, num=1)

        gs = gridspec.GridSpec(2, 1, height_ratios=[1, 3])

        # plot eigen vector values
        ax0 = plt.subplot(gs[0])
        cur_eigv = eigvec[:, _]
        print("{}:{:.4f}".format(_, eigval[_]))
        print(["{}:{:.4f}".format(k, v) for k, v in enumerate(cur_eigv)])
        ax0.plot(range(cur_eigv.shape[0]), cur_eigv, 'b--')
        for i, txt in enumerate(cur_eigv):
            ax0.annotate(i, (range(cur_eigv.shape[0])[i], cur_eigv[i]))

        # ax0.set_ylabel('scale')
        ax0.set_xlabel('eigenvector of i={}'.format(_))

        # plot eigen vector as labels on graph
        ax1 = plt.subplot(gs[1])
        nx.draw(graph,
                pos,
                node_color=cur_eigv,
                node_size=20,
                width=0.1,
                cmap=plt.cm.rainbow,
                with_labels=False)

        # label draw
        labels = {
            k: "{}={:.3f}".format(k, v)
            for k, v in enumerate(cur_eigv.tolist())
        }
        pos_higher = {}

        for k, v in pos.items():
            pos_higher[k] = (v[0],
                             v[1] + (1 if random.random() < 0.5 else -1) *
                             random.uniform(0.02, 0.03))
        # nx.draw_networkx_labels(graph, pos_higher, labels, font_size=6)

        # color bar
        sm = plt.cm.ScalarMappable(cmap=plt.cm.rainbow,
                                   norm=plt.Normalize(vmin=np.min(cur_eigv),
                                                      vmax=np.max(cur_eigv)))
        sm._A = []
        divider = make_axes_locatable(ax1)
        cax = divider.append_axes("bottom", size="5%")
        cbar = plt.colorbar(sm,
                            cax=cax,
                            ticks=[-1, -.5, -.25, -.1, 0, .1, .25, .5, 1],
                            orientation='horizontal')
        cbar.ax.tick_params(labelsize=8)
        cbar.ax.set_xlabel('node text in graph is index=val', rotation=360)
        plt.show()
Example #52
0
ay = 2

for i in range(0, len(N)):

    Nx = N[i] #incrementing size of lattice
    Ny = N[i] #incrementing size of lattice
    coor = shps.square(Nx, Ny) #creating coordinate array
    NN = nbrs.NN_Arr(coor) #nearest neighbor array
    NNb = nbrs.Bound_Arr(coor) #boundary array

    start = time.time() #Time start for numpy

    H_dense = dpop.H0(coor, ax, ay, NN) #creating dense hamiltonian
    eigs, vecs = LA.eigh(H_dense) #diagonalizing

    end = time.time() #Time end for numpy
    t_dense[i] = end-start #append time taken to diagonalize

    start = time.time() #Time start for scipy

    H_sparse = spop.H0(coor, ax, ay, NN) #creating sparse hamiltonian
    num = int(H_sparse.shape[0]/2) #Number of eigenvalues and eigenvectors you want
    sigma = 0 #This is the eigenvalue we search around
    which = 'LM' #Largest magnitude eigenvalues
    spLA.eigsh(H_sparse, k = num, sigma = sigma, which = which) #diagonalizing

    end = time.time() #time end for scipy
    t_sparse[i] = end-start #append time to sparse time array
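
# A possible follow-up (assumed, not part of the original): plot the two timing
# curves against lattice size to compare dense vs. sparse diagonalization.
import matplotlib.pyplot as plt

plt.plot(N, t_dense, 'o-', label='numpy eigh (dense)')
plt.plot(N, t_sparse, 's-', label='scipy eigsh (sparse)')
plt.xlabel('lattice size N')
plt.ylabel('diagonalization time [s]')
plt.legend()
plt.show()
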
Example #53
0
    def write_op2(self,
                  op2,
                  op2_ascii,
                  itable,
                  date,
                  is_mag_phase=False,
                  endian='>'):
        #if self.nnodes != 9:
        #return
        import inspect
        frame = inspect.currentframe()
        call_frame = inspect.getouterframes(frame, 2)
        op2_ascii.write('%s.write_op2: %s\n' %
                        (self.__class__.__name__, call_frame[1][3]))

        if itable == -1:
            self._write_table_header(op2, op2_ascii, date)
            itable = -3

        #if isinstance(self.nonlinear_factor, float):
        #op2_format = '%sif' % (7 * self.ntimes)
        #raise NotImplementedError()
        #else:
        #op2_format = 'i21f'
        #s = Struct(op2_format)
        nnodes_expected = self.nnodes

        eids2 = self.element_node[:, 0]
        nodes = self.element_node[:, 1]

        eids3 = self.element_cid[:, 0]
        cids3 = self.element_cid[:, 1]

        # table 4 info
        #ntimes = self.data.shape[0]
        nnodes = self.data.shape[1]
        nelements = len(eids2)

        # 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
        #ntotal = ((nnodes * 21) + 1) + (nelements * 4)
        ntotal = 4 + 21 * nnodes_expected

        #print('shape = %s' % str(self.data.shape))
        assert nnodes > 1, nnodes
        assert self.ntimes == 1, self.ntimes

        device_code = self.device_code
        op2_ascii.write('  ntimes = %s\n' % self.ntimes)

        #fmt = '%2i %6f'
        #print('ntotal=%s' % (ntotal))
        #assert ntotal == 193, ntotal

        struct1 = Struct(b(endian + 'ii4si'))
        struct2 = Struct(b(endian + 'i20f'))

        cen = b'GRID'
        for itime in range(self.ntimes):
            self._write_table_3(op2, op2_ascii, itable, itime)

            # record 4
            header = [4, -4, 4, 4, 1, 4, 4, 0, 4, 4, ntotal, 4, 4 * ntotal]
            op2.write(pack('%ii' % len(header), *header))
            op2_ascii.write('r4 [4, 0, 4]\n')
            op2_ascii.write('r4 [4, %s, 4]\n' % (itable - 1))
            op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))

            oxx = self.data[itime, :, 0]
            oyy = self.data[itime, :, 1]
            ozz = self.data[itime, :, 2]
            txy = self.data[itime, :, 3]
            tyz = self.data[itime, :, 4]
            txz = self.data[itime, :, 5]
            o1 = self.data[itime, :, 6]
            o2 = self.data[itime, :, 7]
            o3 = self.data[itime, :, 8]
            ovm = self.data[itime, :, 9]
            p = (o1 + o2 + o3) / -3.

            #print('eids3', eids3)
            cnnodes = nnodes_expected + 1
            for i, deid, node_id, doxx, doyy, dozz, dtxy, dtyz, dtxz, do1, do2, do3, dp, dovm in zip(
                    count(), eids2, nodes, oxx, oyy, ozz, txy, tyz, txz, o1,
                    o2, o3, p, ovm):
                #print('  eid =', deid, node_id)

                j = where(eids3 == deid)[0]
                assert len(j) > 0, j
                cid = cids3[j][0]
                A = [[doxx, dtxy, dtxz], [dtxy, doyy, dtyz],
                     [dtxz, dtyz, dozz]]
                # the stress tensor is real-symmetric (hence Hermitian), so eigh
                # applies; its eigenvalues are the principal stresses and the
                # eigenvector columns the principal directions
                (Lambda, v) = eigh(A)

                #node_id, oxxi, txyi, o1i, v[0, 1], v[0, 2], v[0, 0], pi, ovmi,
                #'', oyyi, tyzi, o2i, v[1, 1], v[1, 2], v[1, 0],
                #'', ozzi, txzi, o3i, v[2, 1], v[2, 2], v[2, 0]
                #(grid_device, sxx, sxy, s1, a1, a2, a3, pressure, svm,
                #syy, syz, s2, b1, b2, b3,
                #szz, sxz, s3, c1, c2, c3)

                if i % cnnodes == 0:
                    data = [
                        deid * 10 + self.device_code, cid, cen, nnodes_expected
                    ]
                    op2_ascii.write('  eid=%s cid=%s cen=%s nnodes = %s\n' %
                                    tuple(data))
                    op2.write(struct1.pack(*data)
                              #pack(b'2i 4s i', *data)
                              )
                #else:
                op2_ascii.write('    nid=%i\n' % node_id)

                data = [
                    node_id,
                    doxx,
                    dtxy,
                    do1,
                    v[0, 1],
                    v[0, 2],
                    v[0, 0],
                    dp,
                    dovm,
                    doyy,
                    dtyz,
                    do2,
                    v[1, 1],
                    v[1, 2],
                    v[1, 0],
                    dozz,
                    dtxz,
                    do3,
                    v[2, 1],
                    v[2, 2],
                    v[2, 0],
                ]
                op2_ascii.write(
                    '      oxx, txy, o1, v01, v02, v00, p, ovm = %s\n' %
                    data[1:8])
                op2_ascii.write(
                    '      oyy, tyz, o2, v11, v12, v10         = %s\n' %
                    data[8:14])
                op2_ascii.write(
                    '      ozz, txz, o3, v21, v22, v20         = %s\n' %
                    data[14:])
                op2.write(struct2.pack(*data))

            itable -= 2
            header = [
                4 * ntotal,
            ]
            op2.write(pack('i', *header))
            op2_ascii.write('footer = %s' % header)
        header = [
            4,
            itable,
            4,
            4,
            1,
            4,
            4,
            0,
            4,
        ]
        op2.write(pack('%ii' % len(header), *header))
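
# The eigh-based principal-stress computation from write_op2, in isolation
# and with made-up stress components (purely illustrative numbers).
import numpy as np
from numpy.linalg import eigh

oxx, oyy, ozz, txy, tyz, txz = 50., -20., 10., 30., 5., -15.
A = np.array([[oxx, txy, txz],
              [txy, oyy, tyz],
              [txz, tyz, ozz]])
Lambda, v = eigh(A)          # eigenvalues ascending; columns of v are directions
o3, o2, o1 = Lambda          # (min, mid, max) principal stresses
print('principal stresses:', o1, o2, o3)
print('pressure p =', -(o1 + o2 + o3) / 3.)
print('von Mises =', np.sqrt(((o1 - o2)**2 + (o2 - o3)**2 + (o3 - o1)**2) / 2.))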
Example #54
    def sortedEigenValuesAndVectors(sigma):
        # numpy's eigh already returns eigenvalues in ascending order; the
        # argsort just makes the ordering explicit
        eigenValues, eigenVectors = LA.eigh(sigma)
        sorted_indices = np.argsort(eigenValues)  # indices that would sort the array
        return eigenValues[sorted_indices], eigenVectors[:, sorted_indices]
Example #55
chan_3d1_3s1 = potential.load(2, 3, 'EM420new', '12010', 50, 'np')
chan_3d1 = potential.load(2, 3, 'EM420new', '12210', 50, 'np')

# Compute reference Hamiltonian
hamiltonian_ref = _get_coupled_channel_hamiltonian(
    [chan_3s1, chan_3s1_3d1, chan_3d1_3s1, chan_3d1])

# Create coupled channel potential from channels
c_potential = potential.CoupledPotential(
    [chan_3s1, chan_3s1_3d1, chan_3d1_3s1, chan_3d1])

# Compute alternate Hamiltonian
hamiltonian_coupled = c_potential.with_weights() + c_potential.kinetic_energy()

# Compute bound state eigenvalues
ev_ref = np.amin(eigh(hamiltonian_ref)[0])
ev = np.amin(eigh(hamiltonian_coupled)[0])

# Print unevolved results
print('Unevolved')
print('E_ref = E_srg = {} MeV'.format(hbarc**2 / (2 * red_mass) * ev_ref))
print('E_alt = {} MeV\n'.format(hbarc**2 / (2 * red_mass) * ev))

# Set up SRG evolution
v_mask = np.zeros((len(c_potential.nodes), len(c_potential.nodes)), dtype=int)
k_mask = np.ones((len(c_potential.nodes), len(c_potential.nodes)), dtype=int)

srg_obj = srg.SRG(c_potential, v_mask, k_mask)
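
# A minimal sketch of the same np.amin(eigh(H)[0]) ground-state extraction,
# with a discretized 1-D harmonic oscillator standing in for the nuclear
# Hamiltonian (grid parameters and hbar = m = omega = 1 are assumptions).
import numpy as np
from numpy.linalg import eigh

n, dx = 400, 0.05
x = (np.arange(n) - n / 2) * dx
T = (np.diag(np.full(n, 2.0)) - np.diag(np.ones(n - 1), 1)
     - np.diag(np.ones(n - 1), -1)) / (2 * dx**2)   # three-point kinetic term
H = T + np.diag(0.5 * x**2)
print('E0 =', np.amin(eigh(H)[0]))                  # close to the exact 0.5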
Example #56
    def write_f06(self,
                  f06,
                  header=None,
                  page_stamp='PAGE %s',
                  page_num=1,
                  is_mag_phase=False,
                  is_sort1=True):
        if header is None:
            header = []
        nnodes, msg_temp = _get_f06_header_nnodes(self, is_mag_phase)

        # write the f06
        ntimes = self.data.shape[0]

        eids2 = self.element_node[:, 0]
        nodes = self.element_node[:, 1]

        eids3 = self.element_cid[:, 0]
        cids3 = self.element_cid[:, 1]

        for itime in range(ntimes):
            dt = self._times[itime]
            header = _eigenvalue_header(self, header, itime, ntimes, dt)
            f06.write(''.join(header + msg_temp))

            #print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
            oxx = self.data[itime, :, 0]
            oyy = self.data[itime, :, 1]
            ozz = self.data[itime, :, 2]
            txy = self.data[itime, :, 3]
            tyz = self.data[itime, :, 4]
            txz = self.data[itime, :, 5]
            o1 = self.data[itime, :, 6]
            o2 = self.data[itime, :, 7]
            o3 = self.data[itime, :, 8]
            ovm = self.data[itime, :, 9]
            p = (o1 + o2 + o3) / -3.

            cnnodes = nnodes + 1
            for i, deid, node_id, doxx, doyy, dozz, dtxy, dtyz, dtxz, do1, do2, do3, dp, dovm in zip(
                    count(), eids2, nodes, oxx, oyy, ozz, txy, tyz, txz, o1,
                    o2, o3, p, ovm):

                j = where(eids3 == deid)[0]
                cid = cids3[j][0]
                A = [[doxx, dtxy, dtxz], [dtxy, doyy, dtyz],
                     [dtxz, dtyz, dozz]]
                # the stress tensor is real-symmetric (hence Hermitian), so eigh
                # applies; its eigenvalues are the principal stresses and the
                # eigenvector columns the principal directions
                (Lambda, v) = eigh(A)

                # o1-max
                # o2-mid
                # o3-min
                assert do1 >= do2 >= do3, 'o1 >= o2 >= o3; eid=%s o1=%e o2=%e o3=%e' % (
                    deid, do1, do2, do3)
                [oxxi, oyyi, ozzi, txyi, tyzi, txzi, o1i, o2i, o3i, pi,
                 ovmi] = write_floats_13e([
                     doxx, doyy, dozz, dtxy, dtyz, dtxz, do1, do2, do3, dp,
                     dovm
                 ])

                if i % cnnodes == 0:
                    f06.write('0  %8s    %8iGRID CS  %i GP\n' %
                              (deid, cid, nnodes))
                    f06.write(
                        '0              %8s  X  %-13s  XY  %-13s   A  %-13s  LX%5.2f%5.2f%5.2f  %-13s   %s\n'
                        '               %8s  Y  %-13s  YZ  %-13s   B  %-13s  LY%5.2f%5.2f%5.2f\n'
                        '               %8s  Z  %-13s  ZX  %-13s   C  %-13s  LZ%5.2f%5.2f%5.2f\n'
                        % ('CENTER', oxxi, txyi, o1i, v[0, 1], v[0, 2],
                           v[0, 0], pi, ovmi, '', oyyi, tyzi, o2i, v[1, 1],
                           v[1, 2], v[1, 0], '', ozzi, txzi, o3i, v[2, 1],
                           v[2, 2], v[2, 0]))
                else:
                    f06.write(
                        '0              %8s  X  %-13s  XY  %-13s   A  %-13s  LX%5.2f%5.2f%5.2f  %-13s   %s\n'
                        '               %8s  Y  %-13s  YZ  %-13s   B  %-13s  LY%5.2f%5.2f%5.2f\n'
                        '               %8s  Z  %-13s  ZX  %-13s   C  %-13s  LZ%5.2f%5.2f%5.2f\n'
                        % (node_id, oxxi, txyi, o1i, v[0, 1], v[0, 2], v[0, 0],
                           pi, ovmi, '', oyyi, tyzi, o2i, v[1, 1], v[1, 2],
                           v[1, 0], '', ozzi, txzi, o3i, v[2, 1], v[2, 2],
                           v[2, 0]))
            f06.write(page_stamp % page_num)
            page_num += 1
        return page_num - 1
Example #57
def steady_state(lindblad, *, sparse=None, method="ed", rho0=None, **kwargs):
    r"""Computes the numerically exact steady-state of a lindblad master equation.
    The computation is performed either through the exact diagonalization of the
    hermitian :math:`L^\dagger L` matrix, or by means of an iterative solver (BiCGStab)
    targeting the solution of the non-hermitian system :math:`L\rho = 0`
    and :math:`\mathrm{Tr}[\rho] = 1`.

    Note that for systems with 7 or more sites it is usually computationally
    impossible to build the full Lindblad operator, and therefore only `iterative`
    will work.

    Note that for Hilbert spaces with dimensions above 40k, tol
    should be set to a lower value if the steady state has non-trivial correlations.

    Args:
        lindblad: The Lindbladian encoding the master equation.
        sparse: Whether to use sparse matrices (default: True)
        method: 'ed' (exact diagonalization) or 'iterative' (iterative BiCGStab)
        rho0: starting density matrix for the iterative solver (default: None)
        kwargs...: additional kwargs passed to bicgstab

    For full docs please consult SciPy documentation at
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.bicgstab.html

    Keyword Args:
        maxiter: maximum number of iterations for the iterative solver (default: None)
        tol: The precision for the calculation (default: 1e-05)
        callback: User-supplied function to call after each iteration. It is called as callback(xk),
                  where xk is the current solution vector

    Returns:
        The steady-state density matrix.
    """

    if sparse is None:
        sparse = True

    M = lindblad.hilbert.physical.n_states

    if method == "ed":
        if not sparse:
            from numpy.linalg import eigh
            from warnings import warn

            warn(
                """For reasons unknown to me, using dense diagonalisation on this
                matrix results in very low precision of the resulting steady-state
                since the update to numpy 1.9.
                We suggest using sparse=True; however, if you wish not to, you have
                been warned.
                Your digits are your responsibility now.""")

            lind_mat = lindblad.to_dense()

            ldagl = lind_mat.H * lind_mat
            w, v = eigh(ldagl)

        else:
            from scipy.sparse.linalg import eigsh

            lind_mat = lindblad.to_sparse()
            ldagl = lind_mat.H * lind_mat

            w, v = eigsh(ldagl, which="SM", k=2)

        print("Minimum eigenvalue is: ", w[0])
        rho = v[:, 0].reshape((M, M))
        rho = rho / rho.trace()

    elif method == "iterative":
        # An extra row is appended at the bottom, making the vector M^2 + 1
        # entries long; it holds the trace of the density matrix and is needed
        # to enforce the trace-1 condition.
        L = lindblad.to_linear_operator(sparse=sparse, append_trace=True)

        # Initial density matrix ( + trace condition)
        Lrho_start = np.zeros((M**2 + 1), dtype=L.dtype)
        if rho0 is None:
            Lrho_start[0] = 1.0
            Lrho_start[-1] = 1.0
        else:
            Lrho_start[:-1] = rho0.reshape(-1)
            Lrho_start[-1] = rho0.trace()

        # Target residual (everything 0 and trace 1)
        Lrho_target = np.zeros((M**2 + 1), dtype=L.dtype)
        Lrho_target[-1] = 1.0

        # Iterative solver
        print("Starting iterative solver...")
        res, info = bicgstab(L, Lrho_target, x0=Lrho_start, **kwargs)

        rho = res[:-1].reshape((M, M))
        if info == 0:
            print("Converged trace is ", rho.trace())
        elif info > 0:
            print("Failed to converge after ", info, " ( trace is ",
                  rho.trace(), " )")
        elif info < 0:
            print("An error occured: ", info)

    else:
        raise ValueError("method must be 'ed' or 'iterative'")

    return rho
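
# A standalone sketch of the 'ed' strategy above: build the vectorized
# Lindbladian of single-qubit amplitude damping by hand, then recover the
# steady state as the kernel eigenvector of L^dag L. None of the names or
# values below come from the function itself.
import numpy as np

gamma = 1.0                                   # assumed decay rate
c = np.array([[0., 1.], [0., 0.]])            # lowering operator
H = np.zeros((2, 2))                          # trivial Hamiltonian
I = np.eye(2)
cdc = c.conj().T @ c

# column-stacking vectorization: vec(A @ rho @ B) = kron(B.T, A) @ vec(rho)
L = (-1j * (np.kron(I, H) - np.kron(H.T, I))
     + gamma * (np.kron(c.conj(), c)
                - 0.5 * np.kron(I, cdc)
                - 0.5 * np.kron(cdc.T, I)))

ldagl = L.conj().T @ L                        # hermitian and PSD; kernel = steady state
w, v = np.linalg.eigh(ldagl)
rho = v[:, 0].reshape((2, 2), order='F')      # un-vectorize (column-major)
rho = rho / np.trace(rho)
print(np.round(rho, 6))                       # expect [[1, 0], [0, 0]]: decay to |0>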
Example #58
 def layoutDisplay(self, display):
     nodes = []
     edges = []
     for visibles in display.visibles.itervalues():
         for visible in visibles:
             if visible.isPath():
                 edges.append(visible)
             elif visible.parent is None:
                 nodes.append(visible)
     n=len(nodes)
     if n > 1:
         # Build the adjacency matrix
         A = zeros((n, n))
         for edge in edges:
             (pathStart, pathEnd) = edge.pathEndPoints()
             n1 = nodes.index(pathStart.rootVisible())
             n2 = nodes.index(pathEnd.rootVisible())
             if self.weightingFunction is None:
                 weight = 1.0
             else:
                 weight = self.weightingFunction(edge)
             if edge.flowTo() is None or edge.flowTo():
                 A[n1, n2] = A[n1, n2] + weight
             if edge.flowFrom() is None or edge.flowFrom():
                 A[n2, n1] = A[n2, n1] + weight
         #print A.tolist()
         
         # This is equivalent to the MATLAB code from <http://mit.edu/lrv/www/elegans/>:
         #   c=full((A+A')/2);
         #   d=diag(sum(c));
         #   l=d-c;
         #   b=sum(c.*sign(full(A-A')),2);
         #   z=pinv(l)*b;
         #   q=d^(-1/2)*l*d^(-1/2);
         #   [vx,lambda]=eig(q);
         #   x=d^(-1/2)*vx(:,2);
         #   y=d^(-1/2)*vx(:,3);
         
         A_prime = A.T
         c = (A + A_prime) / 2.0
         d = diag(c.sum(0))
         l = mat(d - c)
         #print c, d
         if display.viewDimensions == 2:
             z = zeros((n, 1))
         else:
             b = (c * sign(A - A_prime)).sum(1).reshape(1, n)
             z = inner(pinv(l), b)
         d2 = mat(d**-0.5)
         d2[isinf(d2)] = 0
         q = d2 * l * d2
         eVec = eigh(q)[1]
         x = d2 * mat(eVec[:,1])
         y = d2 * mat(eVec[:,2])
         xMin, xMax = x.min(), x.max()
         xOff = (xMax + xMin) / 2.0
         xSize = xMax - xMin if self.autoScale else 1.0
         yMin, yMax = y.min(), y.max()
         yOff = (yMax + yMin) / 2.0
         ySize = yMax - yMin if self.autoScale else 1.0
         zMin, zMax = z.min(), z.max()
         zOff = (zMax + zMin) / 2.0
         zSize = zMax - zMin if self.autoScale and zMax != zMin else 1.0
         for i in range(n):
             nodes[i].setPosition((0.0 if xSize == 0 else (x[i,0] - xOff) / xSize * self.scaling[0], \
                                   0.0 if ySize == 0 else (y[i,0] - yOff) / ySize * self.scaling[1], \
                                   0.0 if zSize == 0 else (z[i,0] - zOff) / zSize * self.scaling[2]))
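
# The spectral-layout core above, restated as a standalone NumPy sketch on a
# made-up four-node graph: eigenvectors 2 and 3 of the symmetrically
# normalized Laplacian, rescaled by d^(-1/2), give the x/y coordinates.
import numpy as np
from numpy.linalg import eigh

A = np.array([[0., 1., 1., 0.],
              [1., 0., 1., 0.],
              [1., 1., 0., 1.],
              [0., 0., 1., 0.]])       # undirected adjacency matrix
c = (A + A.T) / 2.0
d = np.diag(c.sum(0))
l = d - c                              # combinatorial Laplacian
d2 = np.diag(np.diag(d) ** -0.5)       # d^(-1/2); assumes no isolated nodes
q = d2 @ l @ d2
vecs = eigh(q)[1]                      # columns sorted by ascending eigenvalue
x = d2 @ vecs[:, 1]
y = d2 @ vecs[:, 2]
print(np.column_stack((x, y)))         # one (x, y) position per node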
Example #59
G = nx.grid_graph([5, 5])
n = 25
AM = nx.adjacency_matrix(G)
NLM = (nx.normalized_laplacian_matrix(G)).todense()
LM = (nx.laplacian_matrix(G)).todense()
'''
Labels = [G.nodes[i]['club'] != 'Mr. Hi' for i in G.nodes()]


plt.figure()
plt.title("Data Labels")
nx.draw(G, node_color=Labels )
plt.show()
'''
#scipy.linalg.eigh(NLM, eigvals=(0, 1))  # replace with this partial solve for speed
NLMva, NLMve = LA.eigh(NLM)

LMva, LMve = LA.eigh(LM)

Fv = LMve[:, 1]  # Fiedler vector: eigenvector of the second-smallest eigenvalue
xFv = [Fv.item(x) for x in range(n)]
NFv = NLMve[:, 1]  # the normalized-Laplacian counterpart
xNFv = [NFv.item(x) for x in range(n)]

plt.figure()
plt.title("Laplacian Eigenvalues")
nx.draw(G, node_color=xFv)
plt.show()

plt.figure()
plt.title("Normalized Laplacian Eigenvalues")
Example #60
    print("""\n Loading results from step 1 \n """)
    beta_file_name = output_dir + "beta.mat"
    mat = loadmat(beta_file_name)
    b_0 = mat['beta']
    sigma_file_name = output_dir + "sigma.mat"
    mat = loadmat(sigma_file_name)
    sigma_eta = mat['sigma_eta']
    omega_eps = mat['omega_eps']
    res_y = y_data * 0
    for j in range(m):
        res_y[:, :, j] = y_data[:, :, j] - np.dot(x_data, b_0[:, :, j])
    inv_s = np.zeros(shape=(n_v, m, m))
    for l in range(n_v):
        if m > 1:
            inv_s2 = inv(sigma_eta[l, :, :] + omega_eps[l, :, :])
            w, v = eigh(np.squeeze(inv_s2))
            w = np.real(w)
            w[w < 0] = 0  # clip negative eigenvalues caused by numerical noise
            w_diag = np.diag(w ** (1 / 2))
            inv_s_tp = np.dot(np.dot(v, w_diag), v.T)  # (sigma_eta + omega_eps)^(-1/2)
            inv_s[l, :, :] = np.real(inv_s_tp)
        else:
            inv_s[l, :, :] = (sigma_eta[l, :, :]+omega_eps[l, :, :])**(-0.5)
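
    # Aside: the symmetric inverse square root used in the loop above, applied
    # to a toy SPD matrix (purely illustrative; not part of the pipeline).
    s_demo = np.array([[2.0, 0.5], [0.5, 1.0]])
    w_demo, v_demo = eigh(inv(s_demo))
    w_demo[w_demo < 0] = 0                      # same clipping guard as above
    s_inv_half = np.dot(np.dot(v_demo, np.diag(w_demo ** 0.5)), v_demo.T)
    assert np.allclose(np.dot(s_inv_half, s_inv_half), inv(s_demo))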

    """++++++++++++++++++++++++++++++++++++"""
    print("""\n Threshold on the difference of images to get initial disease regions and effects\n""")
    y1 = y_data[np.nonzero(z == 1)[0], :, :]
    x1 = x_data[np.nonzero(z == 1)[0], :]
    n1 = int(sum(z))
    res_y1 = res_y[np.nonzero(z == 1)[0], :, :]
    # threshold = np.percentile(np.squeeze(res_y1[:, :, 0]), 5, axis=1)