Example #1
    def turn(self):
        adp = self.adp['cart_int']
        adp = matrix([[float(adp[0]), float(adp[3]), float(adp[4])],
                      [float(adp[3]), float(adp[1]), float(adp[5])],
                      [float(adp[4]), float(adp[5]), float(adp[2])]])
        w, v = eig(adp)

        keep = w.tolist().index(min(w))
        vectors = [array((w[i] * v[:, i]).flatten().tolist()[0]) for i in range(3)]
        # print(vectors)

        value = 0
        for i in range(3):
            if not i == keep:
                value += w[i]
        value *= 0.5
        for i in range(3):
            if not i == keep:
                w[i] = value
        v = [array((w[i] * v[:, i]).flatten().tolist()[0]) for i in range(3)]
        # print vectors
        adp = matrix([[v[0][0], v[1][0], v[2][0]],
                      [v[0][1], v[1][1], v[2][1]],
                      [v[0][2], v[1][2], v[2][2]]])
        adp = (adp + adp.T) / 2
        # print adp

        w, v = eig(adp)

        keep = w.tolist().index(min(w))
        vectors = [array((w[i] * v[:, i]).flatten().tolist()[0]) for i in range(3)]
Example #2
import numpy as np
from numpy import linalg as LA
from scipy import linalg as sLA  # for block_diag

# RamanLatHamiltonian is defined elsewhere in the source project
def propagateRLHamiltonian(t, k, omega, delta, epsilon, U, n):
    t = np.array(t)

    Energy1, V1 = LA.eig(RamanLatHamiltonian(0.0, 0.0, 0.0, 0.0, U, n))
    sort=np.argsort(Energy1)
    V1sorted=V1[:,sort]
    psi0=V1sorted[:,0]
   # psi0[np.divide(3*n,2)]=1.0+0.0*1j
    H = RamanLatHamiltonian(k, omega, delta ,epsilon,U,n)
    Energy, V = LA.eig(H)

    V = V + 1j*0.0
    Vinv = np.conjugate(np.transpose(V))

    # np.outer(t, Energy).flatten() creates a matrix for all t
    # (note: this reuses the name U, shadowing the interaction parameter)
    U = np.diag(np.exp(-1j*np.outer(t, Energy).flatten()))

    a = np.dot(Vinv, psi0)
    # This repeats a so that the shape is consistent with U
    aa = np.outer(np.ones(t.size),a).flatten()
                      
    # Have to add the transpose to make shapes match 
    b = np.dot(U, aa)                                     
    # Same block diagonal trick for eigenvector matrix
    VV = sLA.block_diag(*([V]*t.size))                          
    psi = np.dot(VV, b)
    
    pops=np.absolute(psi)**2.0                     
    # Slice out the three lattice sites around the central site n//2
    latPops=np.sum(pops.reshape(t.size,n,3)[:,n//2-1:n//2+2,:],axis=2).flatten()
    #populations in the -2k_L, 0, and +2k_L lattice sites, summed over spin sites,in time step blocks
    spinPops=np.sum(pops.reshape(t.size,n,3),axis=1).flatten() 
    #populations in each spin state, summed over lattice sites, in time step blocks 
    return spinPops
Example #3
    def _predict(self,k1,k2,y,gamma):
        la,Qa = LA.eig(k1)
        lb,Qb = LA.eig(k2)

        la = la.flatten().reshape((-1, 1))   # column vector of k1's eigenvalues
        lb = lb.flatten().reshape((1, -1))   # row vector of k2's eigenvalues

        # http://stackoverflow.com/questions/17035767/kronecker-product-in-python-and-matlab
        l = sparse.kron( lb, la ).toarray()  # grid of eigenvalue products
        inverse = l / (l+gamma)

        m1 = Qa.transpose().dot(y).dot(Qb)   # uses Qa.T as Qa^-1, valid for symmetric kernels
        m2 = m1 * inverse

        ypred = Qa.dot(m2).dot( Qb.transpose() )
        ypred = ypred.real

        return ypred
Example #4
    def compute_score(self):
        self.scores = []
    
        # We now have a dictionary;
        # start with a 'row' of all zeroes, one slot per user
        adjacency = [0] * len(self.user_dict)
        # Adjacency Matrix
        A = np.zeros( shape=(len(self.user_dict), len(self.user_dict)) )
        # keep track of A's rows
        outer_count = 0
        for mentioning_user in self.user_dict:
            inner_count = 0
            for mentioned_user in self.user_dict:
                if( mentioned_user in self.user_dict[mentioning_user]['mentioned'] ):
                    adjacency[inner_count] = 1
                else:
                    adjacency[inner_count] = 0
                inner_count += 1
            # print adjacency
            A[outer_count] = adjacency
            outer_count += 1

        self.scores = [np.dot(A, np.transpose(A)), np.dot(np.transpose(A), A)]
        print("Hub:")
        w, v = LA.eig(np.dot(A, np.transpose(A)))
        i = np.real_if_close(w).argmax()
        principal = v[:, i]
        print(list(self.user_dict)[principal.argmax()])
        print("Authority:")
        w, v = LA.eig(np.dot(np.transpose(A), A))
        i = np.real_if_close(w).argmax()
        principal = v[:, i]
        print(list(self.user_dict)[principal.argmax()])
Example #5
    def get_left_channels(self, energy, nchan=1):
        self.initialize()
        g_s_ii = self.greenfunction.retarded(energy)
        lambda_l_ii = self.selfenergies[0].get_lambda(energy)
        lambda_r_ii = self.selfenergies[1].get_lambda(energy)

        if self.greenfunction.S is not None:
            s_mm = self.greenfunction.S
            s_s_i, s_s_ii = linalg.eig(s_mm)
            s_s_i = np.abs(s_s_i)
            s_s_sqrt_i = np.sqrt(s_s_i)  # sqrt of eigenvalues
            s_s_sqrt_ii = np.dot(s_s_ii * s_s_sqrt_i, dagger(s_s_ii))
            s_s_isqrt_ii = np.dot(s_s_ii / s_s_sqrt_i, dagger(s_s_ii))

        lambdab_r_ii = np.dot(np.dot(s_s_isqrt_ii, lambda_r_ii), s_s_isqrt_ii)
        a_l_ii = np.dot(np.dot(g_s_ii, lambda_l_ii), dagger(g_s_ii))
        ab_l_ii = np.dot(np.dot(s_s_sqrt_ii, a_l_ii), s_s_sqrt_ii)
        lambda_i, u_ii = linalg.eig(ab_l_ii)
        ut_ii = np.sqrt(lambda_i / (2.0 * np.pi)) * u_ii
        m_ii = 2 * np.pi * np.dot(np.dot(dagger(ut_ii), lambdab_r_ii), ut_ii)
        T_i, c_in = linalg.eig(m_ii)
        T_i = np.abs(T_i)

        channels = np.argsort(-T_i)[:nchan]
        c_in = np.take(c_in, channels, axis=1)
        T_n = np.take(T_i, channels)
        v_in = np.dot(np.dot(s_s_isqrt_ii, ut_ii), c_in)

        return T_n, v_in
Example #6
    def fit(self):
        from numpy.linalg import eig
        import scipy as sp
        import scipy.stats as stats

        n_samps, n_feats = self.shape()

        # Eigenvalues of the cross-correlation and covariance matrices
        # (note: eig returns eigenvalues in no guaranteed order)
        lcorr = eig(np.corrcoef(self._data.T))[0][::-1]
        lcov  = eig(np.cov(self._data.T))[0][::-1]

        ems = []
        for i in range(len(self._far)):
            n_ems = 0
            pf    = self._far[i]
            for j in range(n_feats):
                sigma_sqr = (2*lcov[j]/n_samps) + (2*lcorr[j]/n_samps) + (2/n_samps) * lcov[j] * lcorr[j]
                sigma = np.sqrt(sigma_sqr)

                print(sigma)
                # stats.norm.ppf not valid with sigma
                # using the module of the complex number : abs(sigma)
                tau = -stats.norm.ppf(pf, 0, abs(sigma))
                if (lcorr[j]-lcov[j]) > tau: 
                    n_ems += 1

            ems.append(n_ems)

        self.vd_  = ems

        return self.vd_
Example #7
import numpy as np
from numpy import linalg

def CholDecomp(amatrix):
    # Routine from "An iterative algorithm to produce
    # a positive definite correlation matrix from an
    # approximate correlation matrix" Iman and
    # Davenport, 1982 (Sandia report SAND81-1376)
    EigVal, EigVec = linalg.eig(amatrix)
    epsilon = np.array(EigVal <= 0, dtype="i") * 0.001
    if np.sum(epsilon) > 0:
        for i in range(10):
            EigVal = EigVal + epsilon
            EigValMat = np.diag(EigVal)
            amatrix = np.dot(EigVec, EigValMat)
            amatrix = np.dot(amatrix, np.transpose(EigVec))
            EigVal, EigVec = linalg.eig(amatrix)
            epsilon = np.array(EigVal <= 0, dtype="i") * 0.001
            if np.sum(epsilon) == 0:
                break
    decompmatrix = linalg.cholesky(amatrix)
    # need to scale decompmatrix so that diagonals equal 1
    # simply setting them to one is preferred--see Method A in
    # Iman and Davenport
    step = amatrix.shape[0] + 1  # stride that walks the flattened diagonal
    decompmatrix.flat[::step] = 1.0
    return decompmatrix
Example #8
 def fit(self, x):
     self.matrix = x
     x = np.cov(x)
     ev, self.eg = eig(x)        # eigenvalues and eigenvectors in one call
     self.ev = []
     for i in range(ev.shape[0]):
         self.ev.append([ev[i], i])
     self.ev.sort(reverse=True)  # sort in place, descending by eigenvalue
Example #9
def linear_algebra():
    """ Use the `numpy.linalg` library to do Linear Algebra 
        For a reference on math, see 'Linear Algebra explained in four pages'
        http://minireference.com/static/tutorials/linear_algebra_in_4_pages.pdf
    """

    ### Setup two vectors
    x = np.array([1, 2, 3, 4])
    y = np.array([5, 6, 7, 8])

    ### Vector Operations include addition, subtraction, scaling, norm (length),
    # dot product, and cross product
    print(np.vdot(x, y))  # Dot product of two vectors


    ### Setup two arrays / matrices
    a = np.array([[1, 2],
                  [3, 9]])
    b = np.array([[2, 4],
                  [5, 6]])


    ### Dot Product of two arrays
    print(np.dot(a, b))


    ### Solving system of equations (i.e. 2 different equations with x and y)
    print(LA.solve(a, b))


    ### Inverse of a matrix undoes the effects of the matrix
    # The matrix multiplied by the inverse matrix returns the
    # 'identity matrix' (ones on the diagonal and zeroes everywhere else);
    # identity matrix is useful for getting rid of the matrix in some equation
    print(LA.inv(a))  # return inverse of the matrix
    print("\n")


    ### Determinant of a matrix is a special way to combine the entries of a
    # matrix that serves to check if matrix is invertible (!=0) or not (=0)
    print(LA.det(a))  # returns the determinant of the array
    print("\n")  # e.g. 3, means that it is invertible


    ### Eigenvectors is a special set of input vectors for which the action of
    # the matrix is described as simple 'scaling'.  When a matrix is multiplied
    # by one of its eigenvectors, the output is the same eigenvector multiplied
    # by a constant (that constant is the 'eigenvalue' of the matrix)
    print(LA.eigvals(a))  # compute the eigenvalues of a general matrix
    print("\n")
    print(LA.eigvalsh(a))  # compute the eigenvalues of a Hermitian or real symmetric matrix
    print("\n")
    print(LA.eig(a))  # return the eigenvalues and eigenvectors of a square matrix
    print("\n")
    print(LA.eigh(a))  # return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix
    print("\n")
Example #10
def plotEqqFqqA(streams, Q_t, alpha, p = 0):
    """
     p = plot e_qq and f_qq (YES/NO)
     flag = record all data for cov_mat and eigenvalues (YES/NO)
    """
    # N = number of timesteps + 1 for initial Q_0
    N = len(Q_t) 

    # Calculate F_qq (deviation from orthogonality)
    f_qq = zeros((N,1))                                                                
    index = 0
    for q_t_i in Q_t:       
        X = dot(q_t_i.T , q_t_i) 
        FQQ = X - eye(X.shape[0])  
        f_qq[index, 0] = 10 * log10(trace(dot(FQQ.T, FQQ)))
        index += 1

    # Calculate E_qq (deviation from eigenvector subspace)
    e_qq = zeros((N-1,1))
    g_qq = zeros((N-1,1))
    cov_mat = zeros((streams.shape[1],streams.shape[1]))    
    for i in range(streams.shape[0]):
        
        data = streams[i,:]
        data = data.reshape(data.shape[0],1) # store as column vector 
        cov_mat = alpha * cov_mat + dot(data , data.T)
        W , V = eig(cov_mat)
                    
        # sort eigenvectors according to descending eigenvalue
        eig_idx = W.argsort() # Get sort index
        eig_idx = eig_idx[::-1] # Reverse order (default is ascending)
        
        # V_r = highest r eigenvectors according to their eigenvalue.
        V_r = V[:, eig_idx[:Q_t[i+1].shape[1]]]
        # V_r columns are now the highest eigenvectors, in descending
        # order of eigenvalue
        
        Y = dot(V_r , V_r.T) - dot(Q_t[i+1] , Q_t[i+1].T)  
        e_qq[i, 0] = 10 * log10(trace(dot(Y.T , Y)))
        
        # Calculate angle between projection matrices
        A = dot(dot(dot(V_r.T , Q_t[i+1]) , Q_t[i+1].T) , V_r) 
        eigVal , eigVec = eig(A)
        angle = arccos(sqrt(max(eigVal)))        
        g_qq[i,0] = angle
        
    if p != 0:    
        figure()
        plot(f_qq)
        title('Deviation from orthonormality')    
        figure()
        plot(e_qq)
        title('Deviation of true tracked subspace') 
    
    return e_qq, f_qq, g_qq     
Example #11
def Godunov_linear_solv(A , q_l , q_r , mode):

	dim = np.size(q_l)
	#Distinguish between 1dim case and system.
	#Case of a system: 
	if(dim > 1):
		eigenvalue , eigenvector = LA.eig(A)
		r = eigenvector
		eigenvalue , l = LA.eig(A.T)
		wavespeed_Godunov = eigenvalue * t_step / x_step	#Vector!
		wavespeed_LF = t_step / (x_step*2)					#Scalar!
		
		U = np.empty((x.size,t.size))
		#Sets the Q_i up for the first time step with the initial data. 
		#Q[j,i] is a matrix and contains the values for component i at x[j] at each time step
		q = initial_values( q_l , q_r , eigenvector , x )
		#iterating over time
		for j in range(np.size(t)) :
			#Values for the animation are saved in U
			U[:,j] = q[:,2]									#Change here to animate other components

			#Godunov
			if( mode == 1):
				q = update_Godunov(wavespeed_Godunov, q , x , l , r)
			
			#Lax-Friedrich		
			if( mode == 2):
				q = update_LF(wavespeed_LF, q , x , l , r , A)
				
			#Lax-Wendroff
			if( mode == 3):
				q = update_LW(wavespeed_LF, q , x , l , r , A)
	
	#1dim case with the wavespeed A, that gets passed instead of a matrix:	
	
	else:
		U = np.empty((x.size,t.size))
		q = np.zeros( np.size(x) )
		for i in range(np.size(q)) :
			q[i] = initial_values_1dim( x[i] )

		for j in range(np.size(t)) :
			U[:,j] = q
			qtemp = q
			if ( A > 0):
				for i in range(np.size(q)):
					if( i == 0 ):				qtemp[i] = q[i] - ((A * t_step / x_step) * (q[i] - q[np.size(q) - 1]))
					else:						qtemp[i] = q[i] - ((A * t_step / x_step) * (q[i] - q[i-1]))
			else:
				for i in range(np.size(q)):
					if( i == np.size(q) - 1):	qtemp[i] = q[i] - ((A * t_step / x_step) * (q[0] - q[i]))
					else:						qtemp[i] = q[i] - ((A * t_step / x_step) * (q[i+1] - q[i]))
			q = qtemp
	
	return U
Example #12
def sorted_eigenvalues_vectors(matrix, hermitian=False):
    # i-th column(!) of v is eigenvector to i-th eigenvalue in w
    if hermitian:
        w,V = la.eigh(matrix)
    else:
        w,V = la.eig(matrix)
    order = w.argsort()
    w = w[order]
    V = V[:,order]
    return w,V
Example #13
def f(X):
    M = inv(X + .000001*np.eye(X.shape[0]))  # regularized inverse
    #return np.trace(M.dot(X))
    w, v = eig(M.dot(X))
    w_M, _ = eig(M)
    w_X, _ = eig(X)
    w.sort()
    w_M.sort()
    w_X.sort()

    print(w[-5] - w_X[-5] * w_M[4])
    return w.max()
Example #14
def get_bond_fc_with_sem(crds, fcmatrix, nat1, nat2, scalef, bondavg):

    crd1 = crds[3*nat1-3:3*nat1]
    crd2 = crds[3*nat2-3:3*nat2]
    disbohr = calc_bond(crd1, crd2) #unit is bohr
    dis = disbohr * B_TO_A #Convert bohr to angstrom

    vec12 = array(crd2) - array(crd1) #vec12 is vec2 - vec1
    vec12 = [i/(disbohr) for i in vec12]
    vec12 = array(vec12)
 
    #bond force constant matrix, size 3 * 3
    bfcmatrix = array([[float(0) for x in range(3)] for x in range(3)])
   
    #1. First way to choose the matrix-----------------
    for i in range(0, 3):
      for j in range(0, 3):
        bfcmatrix[i][j] = -fcmatrix[3*(nat1-1)+i][3*(nat2-1)+j]
    eigval, eigvector = eig(bfcmatrix)
    fc = 0.0
    for i in range(0, 3):
      ev = eigvector[:,i]
      fc = fc + eigval[i] * abs(dot(ev, vec12))
    fcfinal1 = fc * HB2_TO_KCAL_MOL_A2 * 0.5

    if bondavg == 1:
      #2. Second way to choose the matrix-----------------
      for i in range(0, 3):
        for j in range(0, 3):
          bfcmatrix[i][j] = -fcmatrix[3*(nat2-1)+i][3*(nat1-1)+j]
      eigval, eigvector = eig(bfcmatrix)
      fc = 0.0
      for i in range(0, 3):
        ev = eigvector[:,i]
        fc = fc + eigval[i] * abs(dot(ev, vec12))
      fcfinal2 = fc * HB2_TO_KCAL_MOL_A2 * 0.5

      #Hatree/(Bohr^2) to kcal/(mol*angstrom^2)
      #Times 0.5 factor since AMBER use k(r-r0)^2 but not 1/2*k*(r-r0)^2

      fcfinal = average([fcfinal1, fcfinal2])
      stdv = std([fcfinal1, fcfinal2])
      fcfinal = fcfinal * scalef * scalef
      stdv = stdv * scalef * scalef
      return dis, fcfinal, stdv

    elif bondavg == 0:

      fcfinal = fcfinal1 * scalef * scalef
      return dis, fcfinal
Example #15
def _parallelAnalysis(ff, n):

	""" Select the number of components for PCA using parallel analysis.
	
	Parameters
	----------
	ff : array_like
		Flat field data as numpy array. Each flat field is a single row 
		of this matrix, different rows are different observations.

	n : int
		Number of repetitions for parallel analysis.

	Return value
	------------
	V : array_like
		Eigen values.

	numPC : int
		Number of components for PCA.

	"""
	# Disable a warning:
	simplefilter("ignore", ComplexWarning)
	stdEFF = std(ff, axis=1, ddof=1)

	kpTrk = zeros((ff.shape[1], n), dtype=float32)
	stdMat = tile(stdEFF,(ff.shape[1], 1)).T

	for i in range(0, n):
		
		sample = stdMat * (randn(ff.shape[0], ff.shape[1])).astype(float32)		
		D, V = eig(cov(sample, rowvar=False))
		kpTrk[:,i] = sort(D).astype(float32)

	mean_ff_EFF = mean(ff,axis=1)
	
	F = ff - tile(mean_ff_EFF, (ff.shape[1], 1)).T
	D, V = eig(cov(F, rowvar=False))

	# Sort eigenvalues from smallest to largest:
	idx = D.argsort()   
	D = D[idx]
	V = V[:,idx]

	sel = zeros(ff.shape[1], dtype=float32)	
	sel[D > (mean(kpTrk, axis=1) + 2*std(kpTrk, axis=1, ddof=1))] = 1
	numPC = sum(sel).astype(int_)

	return (V, numPC)
Example #16
def create(l,k,p): # this function builds the quasienergies of the Floquet operator; the quasienergies
    x,y=LA.eig(R(l,J_y)) # are split into a positive-parity block and a negative-parity block; at the end it
    x_sort=np.sort(x) # returns a vector with the NNS distribution of the quasienergies for both blocks;
    y_sort=y[:,x.argsort()] # if only the quasienergies are wanted, return dist_eig instead
    new_flo=chabas(flo(l,k,p),y_sort)
    pos_new_flo=pos_block(l,new_flo)
    neg_new_flo=neg_block(l,new_flo)
    eig_vals_pos,eig_vecs_pos=LA.eig(pos_new_flo)
    eig_vals_neg,eig_vecs_neg=LA.eig(neg_new_flo)
    r_pos,eig_ene_pos1=cart2pol(np.real(eig_vals_pos),np.imag(eig_vals_pos))
    r_neg,eig_ene_neg1=cart2pol(np.real(eig_vals_neg),np.imag(eig_vals_neg))
    eig_ene_pos=np.sort(eig_ene_pos1)
    eig_ene_neg=np.sort(eig_ene_neg1)
    eig_ene=np.append(eig_ene_pos,eig_ene_neg)
    return eig_ene
Example #17
def eigs_sorted(Q):
    """
    Calculate eigenvalues and spectral matrices of a matrix Q. 
    Return eigenvalues in ascending order 

    Parameters
    ----------
    Q : array_like, shape (k, k)

    Returns
    -------
    eigvals : ndarray, shape (k,)
        Eigenvalues of Q.
    A : ndarray, shape (k, k, k)
        Spectral matrices of Q.
    """

    eigvals, M = nplin.eig(Q)
    N = nplin.inv(M)
    k = N.shape[0]
    A = np.zeros((k, k, k))
    for i in range(k):
        A[i] = np.dot(M[:, i].reshape(k, 1), N[i].reshape(1, k))
    sorted_indices = eigvals.real.argsort()
    eigvals = eigvals[sorted_indices]
    A = A[sorted_indices, : , : ]
    return eigvals, A
Example #18
def test_jordbloc():
    """Simple test of jordbloc. Do we recover the correct eigenvalues?

    Exact equality is reasonable here because a Jordan block is upper
    triangular, so the eigenvalues are read straight off the diagonal.
    """
    evalue = 5.911  # arbitrary value
    a = rogues.jordbloc(10, evalue)
    w, v = nl.eig(a)
    b = evalue * np.ones(10)
    npt.assert_array_equal(w, b)
Example #19
 def calcPCA(self, data):
     data -= np.mean(data, axis=0)
     # data = data / np.std(data, axis=0)
     c = np.cov(data, rowvar=0)
     values, vectors = la.eig(c)
     order = np.argsort(values)[::-1]  # eigenvalue indices, descending
     featureVector = vectors[:, order]
     return (np.matrix(featureVector) * np.matrix(data.T)).T
Example #20
def myPCA(X,num_comp):
    nd = np.shape(X)
    n = nd[0]
    d = nd[1]
    # Check that the number of principal components is less than the dimension of X
    if (num_comp > d):
        print('Error: the number of principal components is greater than the dimension of X')
    elif (num_comp <= 0):
        print('Error: the number of principal components is less than or equal to 0')
    else:
        A = np.asmatrix(X)
        # Subtract the mean of the data from each column
        for i in range(d):
            A[0:,i] = np.asarray(A[0:,i]) - np.mean(A[0:,i])
        
        B = A.transpose()
        # Compute the (unnormalized) covariance matrix
        M = B*A
        # Diagonalize: w holds the eigenvalues and v the matrix of corresponding eigenvectors
        w, v = LA.eig(M)
        # Define the matrix for the dimensionality reduction
        T = v[:,0:num_comp]
        # Transform the data
        T_X = T.transpose()*B
        return {'eigen_val':w, 'eigen_vec':v, 'T':T, 'TX':(T_X.transpose())}
Example #21
def jpca(X, npc=12, symm=-1,verbose=False):
    """
    Find jPCA vector pairs
    Input:
    ------
        - X -- array of shape (Nsamples x Kfeatures)
        - npc -- number of principal components to use [12]
        - symm -- search for a skew-symmetric solution if symm=-1 or
          for a symmetric solution if symm=1
    Output:
    -------
        - jPCs : array of jPCA vector pairs sorted from highest frequency 
                 rotations (oscillations) to lower frequency oscillations 
        - Vh : PC vectors 
        - s : PC singular values
        - Xc: data center, which was subtracted prior to PCA

    """
    U, Vh, s, Xc = pca(X, npc)
    dU = np.diff(U, axis=0)
    Mskew = skew_symm_solve(dU, U[:-1], symm=symm,verbose=verbose)
    evals, evecs = eig(Mskew)
    jPCs = [make_eigv_real(evecs[:,p]) for p in 
            by_chunks(range(len(evals)))]
    return array(jPCs), Vh, s, Xc
Example #22
    def construct_rankarray(self):
        """Constructs the rank array"""
        from numpy import ones, argmax
        from numpy import linalg
        # M = (1 - m)*A + m*S
        # A = (1 - alpha)*B + alpha*C
        # B = normalised gain matrix
        # C = normalised intrinsic value matrix
        # S = gets rid of sub stochasticity for rows of all 0
        self.a_matrix = (1 - self.alpha)*self.g_matrix + \
            self.alpha*self.intrinsicvalue_array
        self.s_matrix = (1.0/self.size)*ones((self.size, self.size))
        mval = 0.15
        # Basic PageRank algorithm
        self.m_matrix = (1 - mval)*self.a_matrix + mval*self.s_matrix
        # Calculate eigenvalues, eigenvectors as usual
        [eigval, eigvec] = linalg.eig(self.m_matrix)

        maxeigindex = argmax(eigval)
        # Store value for downstream checking
        self.maxeig = eigval[maxeigindex].real
        # Cuts array into the eigenvector corresponding to the eigenvalue above
        self.rank_array = eigvec[:, maxeigindex]
        # This is the 1-dimensional array composed of rankings (normalised)
        self.rank_array = (1/sum(self.rank_array))*self.rank_array
        # Remove the useless imaginary +0j
        self.rank_array = self.rank_array.real
Example #23
def matrix_exponential(A):
    """Calculate the exact matrix exponential using the eigenvalue decomposition approach.

    @param A:   The square matrix to calculate the matrix exponential of.
    @type A:    numpy rank-2 array
    @return:    The matrix exponential.  This will have the same dimensionality as the A matrix.
    @rtype:     numpy rank-2 array
    """

    # Is the original matrix real?
    complex_flag = is_complex(A[0, 0])

    # The eigenvalue decomposition.
    W, V = eig(A)

    # Calculate the exact exponential.
    eA = dot(dot(V, diag(exp(W))), inv(V))

    # Return the complex matrix.
    if complex_flag:
        return array(eA)

    # Return only the real part.
    else:
        return array(eA.real)
Example #24
def matrix_mf():
	author_csv = authors()#return the authors dictionary
	total_authors = len(author_csv)
	paper_id = papers()
	total_papers = len(paper_id)

	author_matrix = zeros((total_authors,total_authors))
	author_paper = zeros((total_authors,total_papers))

	author_paper = _init_paper_author(author_paper,author_csv,paper_id)
	author_matrix = _init_author_matrix(author_matrix,author_paper,author_csv)
		
	evals,evec = linalg.eig(author_matrix)#return the eigenvalues and eigenvectors
	print('catching the eigenvalues and eigenvectors')

	b = sort(evals)[::-1]#sorted in descending order

	power = sum(b)
	sub_power = 0
	index = 0
	for i in range(0,len(b)):
			sub_power += b[i]
			index += 1
			if sub_power / power > 0.8:
				break

	idx = evals.argsort()
	evec = evec[:,idx]
	evec = fliplr(evec)[:,0:index]
	
	print('mapping the author_matrix to a new vector space')
	new_user_matrix = dot(evec.T,author_matrix.T)

	new_user_matrix.dump('user_feature_matrix.plk')
Example #25
def princomp(A,numpc=0):
    print('Computing PC')
    nd = 2
    shp = numpy.shape(A)
    if numpy.ndim(A) == 3:
        at = []
        nd = 3
        for i in range(len(A)):
            at.append(A[i].flatten())
        A = numpy.array(at)
        at = None
        del at
    # computing eigenvalues and eigenvectors of covariance matrix
    M = (A-mean(A.T,axis=1)).T # subtract the mean (along columns)
    [latent,coeff] = linalg.eig(cov(M))

    A = None
    del A
    
    p = size(coeff,axis=1)
    idx = argsort(latent) # sorting the eigenvalues
    idx = idx[::-1]       # in descending order
    # sorting eigenvectors according to the sorted eigenvalues
    coeff = coeff[:,idx]
    latent = latent[idx] # sorting eigenvalues
    if numpc < p and numpc >= 0:
        coeff = coeff[:,range(numpc)] # cutting some PCs
        score = dot(coeff.T,M) # projection of the data in the new space
    latent = None
    
    if nd == 3:
        coeff = coeff.flatten('a').reshape((numpc,shp[1],shp[2]))
    return coeff#,score,latent
Example #26
def myPCA(A):
	# function [W,LL,m]=mypca(A)
	# computes PCA of matrix A
	# A: D by N data matrix. Each column is a random vector
	# W: D by K matrix whose columns are the principal components in decreasing order
	# LL: eigenvalues
	# m: mean of columns of A
	# Note: "lambda" is a Python reserved word
	# compute mean, and subtract mean from every column
	[r,c] = A.shape
	m = np.mean(A,1)
	mmat = np.tile(m, (1,c))
	print "MMAT SHAPE ",mmat.shape
	A = A - mmat
	B = np.dot(np.transpose(A), A)
	[d,v] = linalg.eig(B)
	# d holds the eigenvalues, v the eigenvectors (eig does not sort them)
	# compute eigenvectors of scatter matrix
	W = np.dot(A,v)
	Wnorm = ComputeNorm(W)

	W1 = np.tile(Wnorm, (r, 1))
	W2 = W / W1
	LL = d[0:-1]
	W = W2[:,0:-1]      #omit last column, which is the nullspace
	return W, LL, m
Example #27
   def calculateCovarEigenVectors(self):
     """ calculate eigenvalues and eigenvectors of co-variance matrix """

     try:
       import numpy.linalg as la
     except ImportError:
       return False
     if self.metrics_covar is None:
       return False
     else:
       if len(self.metrics_covar)== 0:
         return False
       else:
         num_iter = len(self.metrics_covar)
         # just process the final record
         covar_list = self.metrics_covar[num_iter-1]
         num_covar_matrices = len(covar_list)
         self.eigenvectors = []
         for i in range(num_covar_matrices):
           covar = covar_list[i]
           shape = covar.shape
           if covar.min() == 0.0 and covar.max() == 0.0:
             self.eigenvectors.append(None)
           elif shape[0] != shape[1]:
             self.eigenvectors.append(None)
           else:
             # gets both eigenvalues and eigenvectors
             self.eigenvectors.append(la.eig(covar))
             # for moment, just get eigenvalues
#            self.eigenvectors.append(la.eigvals(covar))
         return True
Example #28
def spec_meth(shift_g, shift_q):
    q_mean = shift_q  # constant part of the creation rate of species m
    g_mean = (
        shift_g
    )  # special in this example: creation rate of both species are regulated by species n with the same functional dependency
    delta_q = delta_matrix(
        q_n0, nmax, q_mean
    )  # diagonal matrix with entries (q_mean - q(n)) on the n^th diagonal element
    delta_g = delta_matrix(g_n0, nmax, g_mean)
    dual_base_mat_n = dual_mat(g_mean, nmax, jmax)  # contains elements <j|n> TODO: not normalized!
    base_mat_n = mat(g_mean, nmax, jmax)  # contains elements <n|j> - normalized :-)
    base_mat_m = mat(q_mean, mmax, kmax)  # contains elements <m|k> - normalized :-)
    new_delta_q = np.dot(np.dot(dual_base_mat_n.T, delta_q), base_mat_n)  # delta_q in new base {|j>}
    new_delta_g = np.dot(np.dot(dual_base_mat_n.T, delta_g), base_mat_n)
    sub = subdiag(jmax, jmax)

    # gen will contain the coefficients of the generating function in the |j,m> base
    gen = np.zeros((jmax, kmax))

    # first generating function: vector over indices j with k=0
    gen[:, 0] = np.dot(dual_base_mat_n.T, p_vec)
    allcond = []
    for k in np.arange(1, kmax):
        diag = np.zeros((jmax, jmax))
        for j in np.arange(jmax):
            diag[j, j] = j + rho * k
        gen[:, k] = la.solve(-rho * (diag + np.dot(sub, new_delta_g)), np.dot(new_delta_q, gen[:, k - 1]))
        eigval = la.eig(-rho * (diag + np.dot(sub, new_delta_g)))
        cond = float(np.max(np.abs(eigval[0]))) / np.min(np.abs(eigval[0]))
        allcond.append(cond)
    p_mat = np.dot(
        base_mat_n, np.dot(gen, base_mat_m.T)
    )  # matrix containing the full distribution, i.e. for row n and column m it contains p(n,m)
    return (p_mat, allcond)
Example #29
    def fitToData(self, data):
        '''
        param data: numpy array where [:,0] is x and [:,1] is y
        '''
        x = data[:, 0][:, np.newaxis]
        y = data[:, 1][:, np.newaxis]
        D = np.hstack((x*x, x*y, y*y, x, y, np.ones_like(x)))
        S = np.dot(D.T, D)
        C = np.zeros([6, 6])
        C[0, 2] = C[2, 0] = 2; C[1, 1] = -1
        E, V = eig(np.dot(inv(S), C))
        n = np.argmax(np.abs(E))
        self.parameters = V[:, n]

        axes = self.ellipse_axis_length()
        self.a = axes[0]
        self.b = axes[1]
        self.angle = self.ellipse_angle_of_rotation()

        if not self.a or not self.b or self.parameters is None or np.iscomplexobj(self.parameters) or \
           math.isnan(self.a) or math.isnan(self.b) or math.isnan(self.ellipse_center()[0]) or \
           np.iscomplex(self.ellipse_center()[0]) or np.iscomplex(self.a) or np.iscomplex(self.b) or \
           np.iscomplexobj(self.angle):
            self.a = 0
            self.b = 0
            self.parameters = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
            self.angle = 0
            self.error = True
Example #30
    def eval(cls, A):
        from .customized_commands import MatrixAsVector
        from numpy import array
        from numpy.linalg import eig
        from sympy.core import sympify
        
        # convert A to a numpy array of type float64
        try:
            A_array = array(A.tolist(), dtype='float64')
        except (TypeError, AttributeError):
            raise ValueError("Argument to eigenvects_tuple must be a matrix of numerical entries")

        [evals,evects] = eig(A_array)

        eigtuplelist = []
        for i in range(evals.size):
            eigtuplelist.append([sympify(evals[i]),
                                 sympify(evects[:,i].tolist())])


        eigtuplelist.sort(key=lambda w: customized_sort_key(w[0]))

        eiglist=[]
        for t in eigtuplelist:
            eiglist.append(MatrixAsVector(t[1]))
        return TupleNoParen(*eiglist)
Example #31
def process_image(imgname, imgprefix):
    p = EMData(imgname, 0)
    print("Preprocessing")
    # This will (hopefully) isolate the mini-circle
    ##p.process_inplace("normalize.edgemean")
    ##p.write_image("000.mrc",0)
    ##p.process_inplace("mask.dust3d",{"voxels":6000,"threshold":3})
    ##p.process_inplace("threshold.belowtozero",{"minval":3})
    ##p.process_inplace("filter.lowpass.gauss",{"cutoff_abs":0.1})
    ##p.process_inplace("normalize.edgemean")
    ##p.process_inplace("mask.dust3d",{"voxels":30000,"threshold":3})
    ##p.process_inplace("threshold.belowtozero",{"minval":3})
    ##p.process_inplace("threshold.binary",{"value":p["mean"]+p["sigma"]*5.0})
    ##p.process_inplace("filter.lowpass.gauss",{"cutoff_abs":0.1})
    ##p.process_inplace("normalize.unitsum")
    ##p.mult(10000.0)
    ##p.write_image("aaa.mrc",0)
    ##p=EMData("aaa.mrc",0)
    ##p.process_inplace("filter.lowpass.gauss",{"cutoff_abs":0.05})
    ###p.process_inplace("filter.lowpass.gauss",{"cutoff_abs":0.1})
    ##p.process_inplace("normalize.unitsum")
    ##p.mult(10000.0)
    ##p.mult(30.0)
    ##p.write_image("bbb.mrc",0)

    p.process_inplace("normalize.edgemean")
    p.process_inplace("filter.lowpass.gauss", {"cutoff_abs": 0.1})
    p.process_inplace("normalize.edgemean")
    p.process_inplace("mask.dust3d", {"voxels": 4000, "threshold": 2})
    p.process_inplace("mask.dust3d", {"voxels": 2000, "threshold": 3})
    p.write_image("aaa.mrc", 0)
    p.process_inplace("mask.auto3d", {
        "nshells": 2,
        "nshellsgauss": 3,
        "radius": 35,
        "threshold": 1.6
    })
    p.process_inplace("threshold.belowtozero", {"minval": 0})
    p.process_inplace("threshold.binary",
                      {"value": p["mean"] + p["sigma"] * 5.0})
    p.process_inplace("filter.lowpass.gauss", {"cutoff_abs": 0.1})
    p.process_inplace("normalize.unitsum")
    p.mult(10000.0)
    # Align the circle for setting initial points
    print("Inertia matrix and alignment")
    # compute the resulting inertia matrix
    an = Analyzers.get("inertiamatrix", {"verbose": 0})
    an.insert_image(p)
    mxi = an.analyze()
    mx = EMNumPy.em2numpy(mxi[0])

    # Compute the eigenvalues/vectors
    eigvv = LA.eig(
        mx)  # a 3-vector with eigenvalues and a 3x3 with the vectors
    if min(eigvv[0]) == 0:
        print("error on", imgname)
        sys.exit(1)

    eig = [(old_div(1.0, eigvv[0][i]), eigvv[1][:, i])
           for i in range(3)]  # extract for sorting
    eig = sorted(eig)  # now eig is sorted in order from major to minor axes
    #T=array([eig[0][1],eig[1][1],eig[2][1]])            # reassemble sorted matrix

    T = Transform((float(i)
                   for i in (eig[0][1][0], eig[0][1][1], eig[0][1][2], 0,
                             eig[1][1][0], eig[1][1][1], eig[1][1][2], 0,
                             eig[2][1][0], eig[2][1][1], eig[2][1][2], 0)))

    m = EMData(p)

    print("Initializing start points...")
    # Calculate the length of three axis of the circle
    SX = m.get_xsize()
    SY = m.get_ysize()
    SZ = m.get_zsize()
    xsta = SX
    xend = 0
    ysta = SY
    yend = 0
    zsta = SZ
    zend = 0
    for i in range(SX):
        for j in range(SY):
            for k in range(SZ):
                if (m.get_value_at(i, j, k) > 0.1):
                    v = T.transform(i, j, k)
                    x = v[0]
                    y = v[1]
                    z = v[2]
                    if (x < xsta): xsta = x
                    if (x > xend): xend = x
                    if (y < ysta): ysta = y
                    if (y > yend): yend = y
                    if (z < zsta): zsta = z
                    if (z > zend): zend = z
    print(xsta, xend, ysta, yend, zsta, zend)
    #print SX,SY,SZ
    #print (xsta+xend)/2-SX/2,(ysta+yend)/2-SY/2,(zsta+zend)/2-SZ/2
    iT = T.inverse()
    bestres = 10000  # Best result
    for ttt in range(
            10
    ):  # Run 10 times with random start points for best result (and ambiguous score? # todo #)
        numofbp = 21  # Maximum number of points (+1 for showing circle in chimera)
        nowbp = 5  # Start number
        stepsz = 500  # Interval of print status & check stablization
        plen = old_div(
            (nowbp * math.sin(old_div(math.pi, nowbp))), (math.pi)
        )  # Penalty factor of length for representing a circle using polygon

        #totlen=2*336*3.3*plen # total length
        totlen = 336 * 3.3 * plen  # total length

        #m=EMData(sys.argv[1],0)
        pa = PointArray()
        pa.set_number_points(nowbp)
        # set initial parameters
        pa.sim_set_pot_parms(old_div(totlen, nowbp), .01, .0,
                             35.9 * pi / 180.0, 0.0, 8000.0, m,
                             old_div(totlen, nowbp) * .6, 800)
        startrange = 40  # Range of random start positions
        startphase = random.uniform(0,
                                    2.0 * pi / nowbp)  # Random starting phase
        #xsft=random.uniform(-startrange,startrange)
        #ysft=random.uniform(-startrange,startrange)
        #zsft=random.uniform(-startrange,startrange)

        # Initializing points on one plane, and then transform to the plane of the circle
        for i in range(nowbp):
            ang = 2.0 * pi * i / nowbp + startphase
            #	pa.set_vector_at(i,Vec3f(0,sin(ang)*100,cos(ang)*100),1.0)
            vx = 0 + random.uniform(-startrange, startrange)
            vy = sin(ang) * (yend - ysta) * 2.3 + random.uniform(
                -startrange, startrange)  #- ((ysta+yend)*2-SY*2)
            vz = cos(ang) * (zend - zsta) * 2.3 + random.uniform(
                -startrange, startrange)  #- ((zsta+zend)*2-SZ*2)
            pa.set_vector_at(i, Vec3f(vx, vy, vz), 1.0)
        #pa.save_to_pdb("uuu.pdb")
        pa.transform(T)
        pa.save_to_pdb("ttt.pdb")

        now_potential = pa.sim_potential()
        isstable = 0  # When the result is stable for 3*stepsz iterations, add points or stop
        skipping = 0  # When stop, skip the next iterations
        bestpintrail = 100000  # Best potential score in this trail

        for i in range(200000):
            if skipping == 0:  # run one simulation step
                pa.sim_minstep_seq(.1)
                if random.uniform(0, 1.0) > .9996 and nowbp > 10:
                    print("swapping................")
                    sn = random.randint(0, 9)
                    s = [sn, sn + 9]
                    for ii in range(s[0], old_div((s[0] + s[1]), 2)):
                        jj = s[1] + s[0] - ii
                        tmp = pa.get_vector_at(ii)
                        pa.set_vector_at(ii, pa.get_vector_at(jj), 1.0)
                        pa.set_vector_at(jj, tmp, 1.0)
                    pa.save_to_pdb("swap.pdb")

            if isstable > 5:
                if nowbp * 2 < numofbp:  # Add points when result is stable and number of current points is lower than the total number
                    print("adding points....")
                    pa.sim_add_point_double(
                    )  # Put one additional point on each edge
                    nowbp = nowbp * 2
                    print(nowbp)
                    plen = old_div((nowbp * math.sin(old_div(math.pi, nowbp))),
                                   (math.pi))  # Recalculate the length penalty
                    totlen = 336 * 3.3 * plen
                    #totlen=2*336*3.3*plen
                    pa.sim_set_pot_parms(old_div(totlen, nowbp), .5, 100,
                                         35.9 * pi / 180.0, 0.0, 800.0, m,
                                         old_div(totlen, nowbp) * .6, 10000)
                    #pa.sim_set_pot_parms(totlen/nowbp, 1, 150, 35.9*pi/180.0, 0.0, 8000.0,m,totlen/nowbp*.9,10000)
                    isstable = 0
                else:
                    skipping = 1

            #if i==500: print "aaa"
            if i % stepsz == 0:

                print(old_div(i, stepsz))
                pa.sim_printstat()

                old_potential = now_potential
                now_potential = pa.sim_potential()
                if (abs(old_div(
                    (now_potential - old_potential), old_potential)) < .005):
                    isstable += 1
                    #print "aaa"
                else:
                    isstable = 0

                #print now_potential,bestpintrail
                if (now_potential < bestpintrail and nowbp * 2 >= numofbp):
                    bestpintrail = now_potential
                    bestpa = pa.copy()

                ## output pdb file for md movies
                #fname="%d.pdb"%(i/stepsz)
                #pa.save_to_pdb(fname)

                #pdbfile = open(fname, "r")
                #lines = pdbfile.readlines()
                #pdbfile.close()

                #panum=pa.get_number_points()
                #if panum<numofbp:
                #outfile=open(fname,"a")
                #for k in range(panum,numofbp):
                #ln=lines[0]
                #s=str(k)+' '
                #if (len(s)<3): s=' '+s
                #ln=ln.replace(' 0 ',s)
                #outfile.write(ln)
                #outfile.close()

        if bestpintrail < bestres:
            bestres = bestpintrail
            fname = imgprefix + "_result.pdb"
            bestpa.save_to_pdb(fname)

            pdbfile = open(fname, "r")
            lines = pdbfile.readlines()
            pdbfile.close()

            panum = bestpa.get_number_points()
            if panum < numofbp:
                outfile = open(fname, "a")
                for k in range(panum, numofbp):
                    ln = lines[0]
                    s = str(k) + ' '
                    if (len(s) < 3): s = ' ' + s
                    ln = ln.replace(' 0 ', s)
                    outfile.write(ln)
                outfile.close()

            bestpa.sim_add_point_double()
            bestpa.sim_add_point_double()
            bestpa.sim_add_point_double()
            img = bestpa.pdb2mrc_by_summation(SX, 4.52, 10., -1)
            img.write_image("img.mrc", 0)

        #

    print(bestres)

    #compute the resulting inertia matrix
    img = EMData("img.mrc")

    img.process_inplace("mask.addshells.gauss", {"val1": 10, "val2": 30})
    img.write_image("maskimg.mrc", 0)
    finalimg = EMData(imgname, 0)
    #finalimg=EMData("0143r_ori.mrc",0)
    #finalimg.process_inplace("xform.flip",{"axis":"y"})
    finalimg.process_inplace("mask.fromfile", {
        "filename": "maskimg.mrc",
        "ismaskset": 0
    })
    finalimg.process_inplace("filter.lowpass.gauss", {"cutoff_abs": 0.1})

    finalimg.process_inplace("normalize.edgemean")
    finalimg.process_inplace("mask.dust3d", {"voxels": 4000, "threshold": 4})
    finalimg.process_inplace("threshold.belowtozero", {"minval": 0})
    finalimg.write_image(imgprefix + "_finalimg.mrc", 0)
    an = Analyzers.get("inertiamatrix", {"verbose": 0})
    an.insert_image(finalimg)
    mxi = an.analyze()
    mx = EMNumPy.em2numpy(mxi[0])

    # Compute the eigenvalues/vectors
    eigvv = LA.eig(
        mx)  # a 3-vector with eigenvalues and a 3x3 with the vectors
    eig = [(old_div(1.0, eigvv[0][i]), eigvv[1][:, i])
           for i in range(3)]  # extract for sorting
    eig = sorted(eig)  # now eig is sorted in order from major to minor axes
    #T=array([eig[0][1],eig[1][1],eig[2][1]])            # reassemble sorted matrix

    T = Transform((float(i)
                   for i in (eig[0][1][0], eig[0][1][1], eig[0][1][2], 0,
                             eig[1][1][0], eig[1][1][1], eig[1][1][2], 0,
                             eig[2][1][0], eig[2][1][1], eig[2][1][2], 0)))

    finalimg.transform(T)

    #finalimg.write_image("trans_finalimg.mrc",0)
    # now the shape is aligned to Z/Y/X so the greatest axial extent should be along Z
    an = Analyzers.get("shape", {"verbose": 0})
    an.insert_image(finalimg)
    shp = an.analyze()[0]

    # Z/Y - should always be >1, Y/X, Z/X
    #out.write("%1.3g\t%1.3g\t%1.3g\t# %s\n"%(shp[2]/shp[1],shp[1]/shp[0],shp[2]/shp[0],pf.split("/")[-1]))
    shape = sorted([abs(shp[0]), abs(shp[1]), abs(shp[2])])
    print(shape)

    print("%1.3g\t%1.3g\t%1.3g\t#" %
          (old_div(shape[2], shape[1]), old_div(
              shape[1], shape[0]), old_div(shape[2], shape[0])))
    return [
        old_div(shape[2], shape[1]),
        old_div(shape[1], shape[0]),
        old_div(shape[2], shape[0])
    ]
Example #32
def LIM(xDat, lag):
    import numpy as np
    from numpy import linalg as LA

    # Take transpose of input data matrix
    xDat_T = np.transpose(xDat)

    # ------------------------------------------------------------------
    # STEP 1: Compute the lagged and contemporaneous covariance matrices
    sizes = np.shape(
        xDat
    )  #Get size of matrix to determine how many data points and how many time records to consider
    nDat = sizes[0]
    nT = sizes[1]

    #Get the value of the data (xDat) at the specified lag to use in computing the lagged covariance matrix
    xLagged = np.full([nDat, nT - lag],
                      np.nan)  #Initialize matrix full of NaNs
    for iT in range(nT - lag):  #Get the value of the data at the specified lag
        xLagged[:, iT] = xDat[:, iT + lag]

    # Initialize matrices full of NaNs
    c0 = np.full([nDat, nDat], np.nan)  #Initialize matrix full of NaNs
    cT = np.full([nDat, nDat], np.nan)  #Initialize matrix full of NaNs

    # Compute covariance matrices for each data point
    for iR in range(nDat):
        for iC in range(nDat):
            # Contemporaneous covariance matrix:
            c0[iR, iC] = np.nansum(xDat[iR, :] * xDat_T[:, iC]) / np.nansum(
                np.isfinite(xDat[iR, :] * xDat_T[:, iC]))
            # Lagged covariance matrix:
            cT[iR,
               iC] = np.nansum(xLagged[iR, :] * xDat_T[:-lag, iC]) / np.nansum(
                   np.isfinite((xLagged[iR, :] * xDat_T[:-lag, iC])))

    # --------------------------------------------------------------------
    # STEP 2: Compute the Green function, defining its eigen values and vectors

    G = cT.dot(
        LA.inv(c0)
    )  #The Green function is defined as the product between covariance matrices

    # Define the modes (u) and eigen-values (g) of G
    g, u = LA.eig(G)

    iSort = g.argsort()[::-1]  #Sort the eigen values and vectors in order
    g = g[iSort]
    u = u[:, iSort]

    # Define the adjoints (v) based on the transpose of G
    eigVal_T, v = LA.eig(np.transpose(G))
    iSortT = eigVal_T.argsort()[::-1]
    eigVal_T = eigVal_T[iSortT]
    v = v[:, iSortT]

    # But modes should ultimately be sorted by decreasing decay time (i.e., decreasing values of 1/beta.real)

    # Compute Beta
    b_tau = np.log(g)
    b_alpha = b_tau / lag

    # Sort data by decreasing decay time
    sortVal = -1 / b_alpha.real  #Decay time

    iSort2 = sortVal.argsort()[::-1]  #Sorted indices
    u = u[:, iSort2]
    v = v[:, iSort2]
    g = g[iSort2]
    b_alpha = b_alpha[iSort2]

    # Make diagonal array of Beta (values should be negative)
    beta = np.zeros((nDat, nDat), complex)
    np.fill_diagonal(beta, b_alpha)

    #Need to normalize u so that u_transpose*v = identity matrix, and u*v_transpose = identity matrix as well
    normFactors = np.dot(np.transpose(u), v)
    normU = np.dot(u, LA.inv(normFactors))

    # --------------------------------------------------------------------
    # STEP 3: Compute L and Q matrices

    # Compute L matrix as normU * beta * v_transpose
    L = np.dot(normU, np.dot(beta, np.transpose(v)))

    # Compute Q matrix
    Q_negative = np.dot(L, c0) + np.dot(c0, np.transpose(L))
    Q = -Q_negative

    # Also define the periods and decay times
    periods = (2 * np.pi) / b_alpha.imag
    decayT = -1 / b_alpha.real

    # --------------------------------------------------------------------
    # RETURN statement
    return (b_alpha, L, Q, G, c0, cT, normU, v, g, periods, decayT)
Example #33
import numpy as np
import numpy.linalg as linalg
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
import seaborn as sns
from sklearn.decomposition import PCA

data = np.loadtxt('pca_toydata.txt')
X_std = StandardScaler().fit_transform(data)
#center the data
#mean = np.mean(data, axis=0)
mean = np.mean(X_std, axis=0)
data_scaled = data - mean

dataset_mat = np.cov(X_std.T)
eigenValues, eigenVectors = linalg.eig(dataset_mat)
idx = eigenValues.argsort()[::-1]
e_Values = eigenValues[idx]
e_Vectors = eigenVectors[:, idx]

#variance vs pc index
var = (e_Values / sum(e_Values))
var_percent = var * 100

labels = ['PC' + str(x) for x in range(1, len(var) + 1)]
plt.bar(x=range(1, len(var)+1), height = var,\
        tick_label=labels)
#the first principal component accounts for 93.24% of the variance
#the second principal component accounts for 6.7% of the variance
plt.title('bar-plot of variance vs PC index')
plt.xlabel('number of components')
Example #34
def scrubcov(_cov):
    w, v = LA.eig(_cov)
    for n, el in enumerate(w):
        if el < 0:
            w[n] = 0
    return np.dot(v, np.diag(np.sqrt(w)))
Example #35
for i in range(len(KL)):
    KL[i,i] = 0.5
    KY[i,i] = 0.5

KY_inv = inv(KY)
VL_inv = inv(VL)

print('Matrices inverted')

M_aux1 = dot(VL_inv,KL)
M_aux2 = dot(VY,M_aux1)
Mat = dot(KY_inv,M_aux2)

print('Matrix generated')

eVal,eVec = eig(-Mat)

for i in range(len(KL)):
    KL[i,i] = 0.

eVal2,eVec2 = eig(-KL)

Lambda = 2*real(eVal2)
E1_lap = 1*(1+Lambda)/(Lambda-1)
E1_yuk = 1/real(eVal)
w_lap = sqrt(1/(1-E1_lap))
w_yuk = sqrt(1/(1-E1_yuk))
print('Yukawa-Laplace difference')
print(abs(w_lap-w_yuk)[0:50]/abs(w_lap)[0:50])

P = zeros((len(M),3))
Example #36
    def ppca(self,Y_mat,d=20,dia=False):

        import numpy as np
        from numpy import shape, isnan, nanmean, average, zeros, log, cov
        from numpy import matmul as mm
        from numpy.matlib import repmat
        from numpy.random import normal
        from numpy.linalg import inv, det, eig
        from numpy import identity as eye
        from numpy import trace as tr
        from scipy.linalg import orth
        """
           Implements probabilistic PCA for data with missing values,
           using a factorizing distribution over hidden states and hidden observations.
           Args:
               Y:   (N by D ) input numpy ndarray of data vectors
               d:   (  int  ) dimension of latent space
               dia: (boolean) if True: print objective each step
           Returns:
               ss: ( float ) isotropic variance outside subspace
               C:  (D by d ) C*C' + I*ss is covariance model, C has scaled principal directions as cols
               M:  (D by 1 ) data mean
               X:  (N by d ) expected states
               Ye: (N by D ) expected complete observations (differs from Y if data is missing)
               Based on MATLAB code from J.J. VerBeek, 2006. http://lear.inrialpes.fr/~verbeek
        """
        Y = Y_mat.copy()
        N, D = shape(Y)  # N observations in D dimensions (i.e. D is number of features, N is samples)
        threshold = 1E-4  # minimal relative change in objective function to continue
        hidden = isnan(Y)
        missing = hidden.sum()

        if (missing > 0):
            M = nanmean(Y, axis=0)
        else:
            M = average(Y, axis=0)

        Ye = Y - repmat(M, N, 1)

        if (missing > 0):
            Ye[hidden] = 0

        # initialize
        C = normal(loc=0.0, scale=1.0, size=(D, d))
        CtC = mm(C.T, C)
        X = mm(mm(Ye, C), inv(CtC))
        recon = mm(X, C.T)
        recon[hidden] = 0
        ss = np.sum((recon - Ye) ** 2) / (N * D - missing)

        count = 1
        old = np.inf

        # EM Iterations
        while (count):
            Sx = inv(eye(d) + CtC / ss)  # E-step, covariances
            ss_old = ss
            if (missing > 0):
                proj = mm(X, C.T)
                Ye[hidden] = proj[hidden]

            X = mm(mm(Ye, C), Sx / ss)  # E-step: expected values

            SumXtX = mm(X.T, X)  # M-step
            C = mm(mm(mm(Ye.T, X), (SumXtX + N * Sx).T), inv(mm((SumXtX + N * Sx), (SumXtX + N * Sx).T)))
            CtC = mm(C.T, C)
            ss = (np.sum((mm(X, C.T) - Ye) ** 2) + N * np.sum(CtC * Sx) + missing * ss_old) / (N * D)
            # guard the Sx determinant against overflow in high dimensions by
            # factoring out min(Sx) before calling det
            Sx_det = np.min(Sx).astype(np.float64) ** shape(Sx)[0] * det(Sx / np.min(Sx))
            objective = N * D + N * (D * log(ss) + tr(Sx) - log(Sx_det)) + tr(SumXtX) - missing * log(ss_old)

            rel_ch = np.abs(1 - objective / old)
            old = objective

            count = count + 1
            if (rel_ch < threshold and count > 5):
                count = 0
            if (dia == True):
                print('Objective: %.2f, Relative Change %.5f' % (objective, rel_ch))

        C = orth(C)
        covM = cov(mm(Ye, C).T)
        vals, vecs = eig(covM)
        ordr = np.argsort(vals)[::-1]
        vals = vals[ordr]
        vecs = vecs[:, ordr]

        C = mm(C, vecs)
        X = mm(Ye, C)

        # add data mean to expected complete data
        Ye = Ye + repmat(M, N, 1)

        # return C, ss, M, X, Ye
        return Ye
Example #37
 def get_u(self):
     A = np.dot(np.diag(self.p/np.sum(self.S*self.p,axis=1)),np.transpose(self.S))
     return la.eig(A)
Example #38
File: q1.py Project: syedmeisam/UCF
import numpy as np
from numpy.linalg import eig

def mutually_unbiased(vec1, vec2):
    # (opening reconstructed; the original snippet began mid-function)
    shape1 = np.shape(vec1)
    shape2 = np.shape(vec2)
    # bases of different dimension cannot be mutually unbiased
    if shape1 != shape2:
        return False

    # get D
    dimension = shape1[0]
    # for each basis vector in vector1
    for i in range(0,dimension):
        # for each basis vector in vector2
        for j in range(0, dimension):
            e1 = vec1[i]
            e2 = vec2[j]
            s = np.square(np.absolute(np.matmul(e1, np.transpose(e2))))
            # rounded since the values were not exact 0.5 ~ 0.499999
            if np.round(s.item(0, 0), decimals=4) != (1/dimension):
                #print(s.item(0, 0))
                return False
    return True


pauliX = np.mat('[0 1;1 0]')
pauliY = np.mat('[0 -1j;1j 0]')
pauliZ = np.mat('[1 0;0 -1]')

valsX, vecsX = eig(pauliX)
valsY, vecsY = eig(pauliY)
valsZ, vecsZ = eig(pauliZ)


print(mutually_unbiased(vecsX, vecsY))
print(mutually_unbiased(vecsX, vecsZ))
print(mutually_unbiased(vecsY, vecsZ))
Example #39
    def compute_G_decomp(self,
                         mu_ccp_only=False,
                         mu_only=False,
                         plot_only=False):

        # compute delta_{c,c'}
        mu_ccp = self.compute_delta_c_cp()

        C = len(mu_ccp)

        mu_ccp_flat = []
        for c in range(C):
            for c_ in range(C):
                mu_ccp_flat.append(mu_ccp[c][c_])

        if mu_ccp_only:
            return {'mu_ccp': mu_ccp}

        # compute delta_c
        print("Computing delta_c")
        mu = []
        for c in range(C):
            s = self.my_zero()
            for c_ in range(C):
                if c != c_:
                    s = self.my_sum(s, mu_ccp[c][c_])
            avg = self.my_div_const(s, C - 1)
            mu.append(avg)

        if mu_only:
            return {'mu': mu}

        # compute distances between {delta_c}_c and {delta_{c,c'}}_{c,c'}
        # (a total of C+C**2 elements)
        # these distances will later be passed to t-SNE
        print("Computing distances for t-SNE plot")
        V = []
        labels = []
        for c in range(C):
            V.append(mu[c])
            labels.append([c])
        for c in range(C):
            for c_ in range(C):
                V.append(mu_ccp[c][c_])
                labels.append([c, c_])

        N = C + C**2
        dist = np.zeros([N, N])
        for c in range(N):
            print('Iteration: [{}/{}]'.format(c + 1, N))
            for c_ in range(N):
                dist[c, c_] = self.my_norm(self.my_sub(V[c], V[c_]))**2

        if plot_only:
            return {'dist': dist, 'labels': labels}

        # delta_{c,c}
        mu_cc = []
        for c in range(C):
            mu_cc.append(mu_ccp[c][c])

        # compute G0
        print("Computing G0")
        mu_cc_T_mu_cc = np.zeros([C, C])
        for c in range(C):
            for c_ in range(C):
                mu_cc_T_mu_cc[c, c_] = self.my_inner(mu_cc[c], mu_cc[c_]) / C
        G0_eigval, _ = LA.eig(mu_cc_T_mu_cc)
        G0_eigval = sorted(G0_eigval, reverse=True)

        # compute G1
        print("Computing G1")
        muTmu = np.zeros([C, C])
        for c in range(C):
            for c_ in range(C):
                muTmu[c, c_] = self.my_inner(mu[c], mu[c_]) * (C - 1) / C
        G1_eigval, _ = LA.eig(muTmu)
        G1_eigval = sorted(G1_eigval, reverse=True)

        # compute G1+2
        print("Computing G1+2")
        mu_ccp_T_mu_ccp = np.zeros([C**2, C**2])
        for c in range(C**2):
            for c_ in range(C**2):
                mu_ccp_T_mu_ccp[c, c_] = self.my_inner(mu_ccp_flat[c],
                                                       mu_ccp_flat[c_]) / C
        G12_eigval, _ = LA.eig(mu_ccp_T_mu_ccp)
        G12_eigval = sorted(G12_eigval, reverse=True)

        # compute G_2
        print("Computing G2")
        nu = []
        for c in range(C):
            nu.append([])
            for c_ in range(C):
                nu[c].append(self.my_sub(mu_ccp[c][c_], mu[c]))

        nu_flat = []
        for c in range(C):
            for c_ in range(C):
                if c != c_:
                    nu_flat.append(nu[c][c_])

        gram_nu_flat = np.zeros([C * (C - 1), C * (C - 1)])
        for c in range(C * (C - 1)):
            for c_ in range(C * (C - 1)):
                gram_nu_flat[c,
                             c_] = self.my_inner(nu_flat[c], nu_flat[c_]) / C
        G2_eigval, _ = LA.eig(gram_nu_flat)
        G2_eigval = sorted(G2_eigval, reverse=True)

        # density is 1/(number of eigenvalues)
        G0_eigval_density = np.ones(len(G0_eigval)) * 1 / len(G0_eigval)
        G1_eigval_density = np.ones(len(G1_eigval)) * 1 / len(G1_eigval)
        G12_eigval_density = np.ones(len(G12_eigval)) * 1 / len(G12_eigval)
        G2_eigval_density = np.ones(len(G2_eigval)) * 1 / len(G2_eigval)

        res = {
            'mu_ccp': mu_ccp,
            'mu_ccp_flat': mu_ccp_flat,
            'mu': mu,
            'nu': nu,
            'nu_flat': nu_flat,
            'G0_eigval': G0_eigval,
            'G0_eigval_density': G0_eigval_density,
            'G1_eigval': G1_eigval,
            'G1_eigval_density': G1_eigval_density,
            'G2_eigval': G2_eigval,
            'G2_eigval_density': G2_eigval_density,
            'G12_eigval': G12_eigval,
            'G12_eigval_density': G12_eigval_density,
            'dist': dist,
            'labels': labels,
        }

        return res
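# Added sketch: the recurring pattern above -- a Gram matrix of pairwise inner
# products followed by its eigenvalue spectrum -- in plain NumPy (illustrative
# names; not part of the class above).
import numpy as np
from numpy import linalg as LA

def gram_spectrum(vectors):
    """Eigenvalues of G[i, j] = <v_i, v_j>, sorted in descending order."""
    n = len(vectors)
    gram = np.zeros([n, n])
    for i in range(n):
        for j in range(n):
            gram[i, j] = np.inner(vectors[i], vectors[j])
    eigval, _ = LA.eig(gram)
    return sorted(eigval.real, reverse=True)

vecs = [np.random.randn(5) for _ in range(4)]
spectrum = gram_spectrum(vecs)
density = np.ones(len(spectrum)) / len(spectrum)  # uniform density, as above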
Example #40
0
#Coefficients

a = 8
b = 10 / 2
c = -3
d = -2
e = 4

#hyperbola parameters: conic x^T V x + 2 u^T x + f = 0
V = np.array(([a, b], [b, c]))
u = 0.5 * np.array(([d, e]))
f = -2
Vinv = LA.inv(V)

#Eigenvalues and eigenvectors
D_vec, P = LA.eig(V)
D = np.diag(D_vec)
#print(D_vec)

#Angle between asymptotes
#theta1 = np.arccos((np.sqrt(D_vec[0])-np.sqrt(-D_vec[1]))/(np.sqrt(D_vec[0])+np.sqrt(-D_vec[1])))
theta = (np.pi) / 2
Q = np.array(([np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]))

#print(theta,Q)

sig1 = np.sqrt(np.absolute(D_vec))
ineg = np.array(([1, 0], [0, -1]))
sig2 = ineg @ sig1
#print(sig1,sig2)
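# Added check (sketch): the center of the conic x^T V x + 2 u^T x + f = 0 is
# -V^{-1} u, and since V is symmetric, P is orthogonal and V = P D P^T.
center = -Vinv @ u
print("center:", center)
print("V reconstructed:", np.allclose(P @ D @ P.T, V))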
Example #41
0
def coint_johansen(x, p, k):

    #    % error checking on inputs
    #    if (nargin ~= 3)
    #     error('Wrong # of inputs to johansen')
    #    end
    nobs, m = x.shape

    #why this?  f is detrend transformed series, p is detrend data
    if (p > -1):
        f = 0
    else:
        f = p

    x = detrend(x, p)
    dx = tdiff(x, 1, axis=0)
    #dx    = trimr(dx,1,0)
    z = mlag(dx, k)  #[k-1:]
    #    print z.shape
    z = trimr(z, k, 0)
    z = detrend(z, f)
    #    print dx.shape
    dx = trimr(dx, k, 0)

    dx = detrend(dx, f)
    #r0t   = dx - z*(zdx)
    r0t = resid(dx, z)  #diff on lagged diffs
    #lx = trimr(lag(x,k),k,0)
    lx = lag(x, k)
    lx = trimr(lx, 1, 0)
    dx = detrend(lx, f)
    #    print 'rkt', dx.shape, z.shape
    #rkt   = dx - z*(zdx)
    rkt = resid(dx, z)  #level on lagged diffs
    skk = np.dot(rkt.T, rkt) / rows(rkt)
    sk0 = np.dot(rkt.T, r0t) / rows(rkt)
    s00 = np.dot(r0t.T, r0t) / rows(r0t)
    sig = np.dot(sk0, np.dot(inv(s00), (sk0.T)))
    tmp = inv(skk)
    #du, au = eig(np.dot(tmp, sig))
    au, du = eig(np.dot(tmp, sig))  #au is eval, du is evec
    #orig = np.dot(tmp, sig)

    #% Normalize the eigen vectors such that (du'skk*du) = I
    temp = inv(chol(np.dot(du.T, np.dot(skk, du))))
    dt = np.dot(du, temp)

    #JP: the next part can be done much more easily

    #%      NOTE: At this point, the eigenvectors are aligned by column. To
    #%            physically move the column elements using the MATLAB sort,
    #%            take the transpose to put the eigenvectors across the row

    #dt = transpose(dt)

    #% sort eigenvalues and vectors

    #au, auind = np.sort(diag(au))
    auind = np.argsort(au)
    #a = flipud(au)
    aind = flipud(auind)
    a = au[aind]
    #d = dt[aind,:]
    d = dt[:, aind]

    #%NOTE: The eigenvectors have been sorted by row based on auind and moved to array "d".
    #%      Put the eigenvectors back in column format after the sort by taking the
    #%      transpose of "d". Since the eigenvectors have been physically moved, there is
    #%      no need for aind at all. To preserve existing programming, aind is reset back to
    #%      1, 2, 3, ....

    #d  =  transpose(d)
    #test = np.dot(transpose(d), np.dot(skk, d))

    #%EXPLANATION:  The MATLAB sort function sorts from low to high. The flip realigns
    #%auind to go from the largest to the smallest eigenvalue (now aind). The original procedure
    #%physically moved the rows of dt (to d) based on the alignment in aind and then used
    #%aind as a column index to address the eigenvectors from high to low. This is a double
    #%sort. If you wanted to extract the eigenvector corresponding to the largest eigenvalue by,
    #%using aind as a reference, you would get the correct eigenvector, but with sorted
    #%coefficients and, therefore, any follow-on calculation would seem to be in error.
    #%If alternative programming methods are used to evaluate the eigenvalues, e.g. Frame method
    #%followed by a root extraction on the characteristic equation, then the roots can be
    #%quickly sorted. One by one, the corresponding eigenvectors can be generated. The resultant
    #%array can be operated on using the Cholesky transformation, which enables a unit
    #%diagonalization of skk. But nowhere along the way are the coefficients within the
    #%eigenvector array ever changed. The final value of the "beta" array using either method
    #%should be the same.

    #% Compute the trace and max eigenvalue statistics */
    lr1 = zeros(m)
    lr2 = zeros(m)
    cvm = zeros((m, 3))
    cvt = zeros((m, 3))
    iota = ones(m)
    t, junk = rkt.shape
    for i in range(0, m):
        tmp = trimr(log(iota - a), i, 0)
        lr1[i] = -t * np.sum(tmp, 0)  #columnsum ?
        #tmp = np.log(1-a)
        #lr1[i] = -t * np.sum(tmp[i:])
        lr2[i] = -t * log(1 - a[i])
        cvm[i, :] = c_sja(m - i, p)
        cvt[i, :] = c_sjt(m - i, p)
        aind[i] = i
    #end

    result = Holder()
    #% set up results structure
    #estimation results, residuals
    result.rkt = rkt
    result.r0t = r0t
    result.eig = a
    result.evec = d  #transposed compared to matlab ?
    result.lr1 = lr1
    result.lr2 = lr2
    result.cvt = cvt
    result.cvm = cvm
    result.ind = aind
    result.meth = 'johansen'

    print('--------------------------------------------------')
    print('--> Trace Statistics')
    print('variable statistic Crit-90% Crit-95%  Crit-99%')
    for i in range(len(result.lr1)):
        print('r =', i, 't', result.lr1[i], result.cvt[i, 0], result.cvt[i, 1],
              result.cvt[i, 2])
    print('--------------------------------------------------')
    print('--> Eigen Statistics')
    print('variable statistic Crit-90% Crit-95%  Crit-99%')
    for i in range(len(result.lr2)):
        print('r =', i, 't', result.lr2[i], result.cvm[i, 0], result.cvm[i, 1],
              result.cvm[i, 2])
    print('--------------------------------------------------')
    print('eigenvectors:\n', result.evec)
    print('--------------------------------------------------')
    print('eigenvalues:\n', result.eig)
    print('--------------------------------------------------')

    return result
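# Added usage sketch (assumes the helper functions used above -- detrend,
# tdiff, mlag, trimr, resid, c_sja, c_sjt, Holder -- are in scope). Two
# random walks sharing one stochastic trend are cointegrated, so the trace
# statistic should reject r = 0:
import numpy as np
np.random.seed(0)
trend = np.cumsum(np.random.randn(500))
x = np.column_stack([trend + np.random.randn(500),
                     trend + np.random.randn(500)])
result = coint_johansen(x, p=0, k=2)  # p=0: constant term; k=2 lags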
Example #42
0
T_0 = np.matrix('1 .3 ; 0 .7')
T_1 = np.matrix('.2 0 ; .8 1')

I = np.identity(n)
Ipi = np.identity(c)

#Look at this. It's interpretable
Api = Hplus * np.transpose(H) * A_0 * H

print "Api:\n", Api, "\n"

#should be equal and column stochastic
Ppi_1 = alpha * np.transpose(B_0) * H * LA.inv(Ipi - (1 - alpha) * Api) * Hplus
P_1 = np.transpose(B_0) * alpha * LA.inv(I - (1 - alpha) * A_0) * H * Hplus

print "Ppi_1 == P_1:", np.allclose(Ppi_1, P_1)

print "Ppi_1 col stoch:", np.allclose(np.sum(Ppi_1, axis=0), np.matrix('1 1'))

w, e_0 = LA.eig(P_1[0, 0] * T_0 + P_1[1, 0] * T_1)
domeigv = np.where(abs(w - 1) < 0.000001)[0][0]
L_10 = e_0[:, domeigv] / np.sum(e_0[:, domeigv])
print "L_10: Gen 1 lang distro in community 0"
print L_10

w, e_1 = LA.eig(P_1[0, 1] * T_0 + P_1[1, 1] * T_1)
domeigv = np.where(abs(w - 1) < 0.000001)[0][0]
L_11 = e_1[:, domeigv] / np.sum(e_1[:, domeigv])
print "L_10: Gen 1 lang distro in community 1"
print L_11
Example #43
0
 def get_pi_discrete(self):
     self.get_PI_discrete()
     return la.eig(np.transpose(self.PI))
Example #44
0
import numpy as np
import matplotlib.pyplot as plt
from numpy import linalg as LA

X = np.random.multivariate_normal(np.array([0, 0]), np.array([[1, 0], [0, 1]]),
                                  500).T
X = (X.T - np.mean(X, axis=1)).T

C = (1 / X.shape[1]) * np.matmul(X, X.T)  # normalize by the sample count (columns), not the feature count
eigenValues, eigenVectors = LA.eig(C)

Y = X / np.sqrt(X.shape[1])  # same normalization, so C = Y @ Y.T
U, s, Vt = LA.svd(Y)
pc = U @ np.diag(s)
pc = pc[:, ::-1]

explained_variance = np.var(pc, axis=0)
explained_variance.cumsum()

plt.scatter([0, 1], explained_variance)
plt.scatter([0, 1], explained_variance.cumsum())
plt.scatter(X[0, :], X[1, :])
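# Added consistency check: with C = Y @ Y.T, the eigenvalues of C equal the
# squared singular values of Y.
print(np.allclose(np.sort(eigenValues), np.sort(s**2)))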
Example #45
0
 def get_pi(self):
     self.get_PI()
     return la.eig(np.transpose(self.PI))
Example #46
0
def pdopt_pixel(tm, om, numph=60, reg=0.0):
    """Phase diversity coherence optimization for a single pixel.
    
    Same functionality as the pdopt function above, but for a single pixel
    only.  This is the function called when plotting a coherence region.
    
    Arguments:
        tm (array): The polarimetric covariance (T) matrix of the data,
            with dimensions: [num_pol, num_pol].  Note that in the
            HDF5 file, covariance matrix elements below the diagonal are
            zero-valued, in order to save disk space.  The (j,i) elements
            should therefore calculated from the complex conjugate of the
            (i,j) elements using the kapok.lib.makehermitian() function before
            the matrix is passed to this function.  Note: This should be the
            average matrix of the two tracks forming the baseline, assuming
            polarimetric stationarity.
        om (array): The polarimetric interferometric (Omega) matrix of the
            data, with dimensions [num_pol, num_pol].
        numph (int): The number of phase shifts to calculate coherences for.
            The higher the number, the smaller the spacing of the coherences
            around the coherence region perimeter.  The smaller the number,
            the faster the computation time.  Default: 60.
        reg (float): Regularization factor.  The tm matrix is added to
            the matrix reg*Tr(tm)*I, where Tr(tm) is the trace of tm, and I
            is the identity matrix.  Similarly, the omega matrix is added to
            the matrix reg*Tr(om)*I.  This regularization reduces the spread
            of the coherence region for pixels where the backscatter is
            highly polarization dependent.
    
    Returns:
        gammamax (complex): the optimized coherence with the max eigenvalue.
        gammamin (complex): the optimized coherence with the min eigenvalue.
        gammaregion (array): Every coherence from the solved eigenvalue
            problems.  These coherences will lie around the edge of the
            coherence region.
    
    """
    cohdiff = 0
    gammaregion = np.empty((numph * 2 + 1), dtype='complex')

    # Matrix regularization:
    if reg > 0:
        tm = tm + reg * np.trace(tm) * np.eye(3)
        om = om + reg * np.trace(om) * np.eye(3)

    for Ph in range(0, numph):  # loop through rotation angles
        Pr = Ph * np.pi / numph  # phase shift to be applied

        # Apply phase shift to omega matrix:
        z12 = om.copy() * np.exp(1j * Pr)
        z12 = 0.5 * (z12 + np.transpose(np.conj(z12)))

        # Solve the eigenvalue problem:
        nu, w = linalg.eig(np.dot(linalg.inv(tm), z12))

        wH = np.transpose(np.conj(w))

        Tmp = np.dot(om, w)
        Tmp12 = np.dot(wH, Tmp)

        Tmp = np.dot(tm, w)
        Tmp11 = np.dot(wH, Tmp)

        l = np.argmin(nu)
        gmin = Tmp12[l, l] / np.abs(Tmp11[l, l])  # min eigenvalue coherence

        l = np.argmax(nu)
        gmax = Tmp12[l, l] / np.abs(Tmp11[l, l])  # max eigenvalue coherence

        gammaregion[Ph] = gmin
        gammaregion[Ph + numph] = gmax

        if (np.abs(gmax - gmin) > cohdiff):
            cohdiff = np.abs(gmax - gmin)
            gammamax = gmax
            gammamin = gmin

    gammaregion[-1] = gammaregion[
        0]  # copy the first coherence to the end of the array, for a continuous coherence region plot

    return gammamax, gammamin, gammaregion
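# Added usage sketch with synthetic 3x3 stand-ins (not real PolInSAR data):
# tm must be Hermitian positive definite; om carries polarization-dependent
# interferometric phase, so the coherence region is non-degenerate.
rng_ = np.random.default_rng(0)
Z = rng_.normal(size=(3, 3)) + 1j * rng_.normal(size=(3, 3))
tm_toy = Z @ Z.conj().T
om_toy = np.diag(np.exp(1j * np.array([0.1, 0.5, 1.0]))) @ tm_toy
gmax_, gmin_, region_ = pdopt_pixel(tm_toy, om_toy)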
Example #47
0
print ('vect cross =', my_vect1 % my_vect2)
print ('vect dot   =', my_vect1 ^ my_vect2)

# Test quaternions
my_quat = chrono.ChQuaternionD(1,2,3,4)
my_qconjugate = ~my_quat
print ('quat. conjugate  =', my_qconjugate)
print ('quat. dot product=', my_qconjugate ^ my_quat)
print ('quat. product=',     my_qconjugate % my_quat)

# Test matrices and NumPy interoperability
mlist = [[1,2,3,4], [5,6,7,8], [9,10,11,12], [13,14,15,16]]
ma = chrono.ChMatrixDynamicD() 
ma.SetMatr(mlist)   # Create a Matrix from a list. Size is adjusted automatically.
npmat = np.asarray(ma.GetMatr()) # Create a 2D npy array from the list extracted from ChMatrixDynamic
w, v = LA.eig(npmat)  # get eigenvalues and eigenvectors using numpy
mb = chrono.ChMatrixDynamicD(4,4)
prod = v * npmat   # you can perform linear algebra operations with numpy and then feed results into a ChMatrixDynamicD using SetMatr 
mb.SetMatr(v.tolist())    # create a ChMatrixDynamicD from the numpy eigenvectors
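# Added check: the eigendecomposition reconstructs the matrix,
# npmat ~= v @ diag(w) @ inv(v) (npmat is diagonalizable despite being singular).
print(np.allclose(v @ np.diag(w) @ LA.inv(v), npmat))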
mr = chrono.ChMatrix33D()
mr.SetMatr([[1,2,3], [4,5,6], [7,8,9]])
print(mr * my_vect1)


# Test frames -
#  create a frame representing a translation and a rotation
#  of 20 degrees on X axis
my_frame = chrono.ChFrameD(my_vect2, chrono.Q_from_AngAxis(20*chrono.CH_C_DEG_TO_RAD, chrono.ChVectorD(1,0,0)))
my_vect5 = my_vect1 >> my_frame

# Print the class hierarchy of a chrono class
Example #48
0
def dynmodes(n=6, lat0=5., plot=False, model='Fratantoni_etal1995'):
    """
	Computes the discrete eigenmodes (dynamical modes)
	for a quasi-geostrophic ocean with n isopycnal layers.
	Rigid lids are placed at the surface and the bottom.

	Inputs:
	-------
	n:    Number of layers.
	lat0: Reference latitude.
	H:    List of rest depths of each layer.
    S:    List of potential density values for each layer.
	"""
    omega = 2 * np.pi / 86400.  # [rad s^{-1}]
    f0 = 2 * omega * np.sin(lat0 * np.pi / 180)  # [s^{-1}]
    f02 = f0**2  # [s^{-2}]
    g = 9.81  # [m s^{-2}]
    rho0 = 1027.  # [kg m^{-3}]

    if model == 'Fratantoni_etal1995':
        H = np.array([80., 170., 175., 250., 325., 3000.])
        S = np.array([24.97, 26.30, 26.83, 27.12, 27.32, 27.77])
        tit = 'Six-layer model for the NBC from Fratantoni et al. (1995)'
        figname = 'vmodes_fratantoni_etal1995'
    elif model == 'Bub_Brown1996':
        H = np.array([150., 440., 240., 445., 225., 2500.])
        S = np.array([24.13, 26.97, 27.28, 27.48, 27.74, 27.87])
        tit = 'Six-layer model for the NBC from Bub and Brown (1996)'
        figname = 'vmodes_bub_brown1996'

    # Normalized density jumps.
    E = (S[1:] - S[:-1]) / rho0
    # Rigid lids at the surface and the bottom,
    # meaning infinite density jumps.
    E = np.hstack((np.inf, E, np.inf))

    # Building the tridiagonal matrix.
    A = np.zeros((n, n))
    for i in range(n):
        A[i, i] = -f02 / (E[i + 1] * g * H[i]) - f02 / (E[i] * g * H[i]
                                                        )  # The main diagonal.
        if i > 0:
            A[i, i - 1] = f02 / (E[i] * g * H[i])
        if i < (n - 1):
            A[i, i + 1] = f02 / (E[i + 1] * g * H[i])
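    # Added note: an equivalent vectorized assembly of the same tridiagonal
    # matrix (identical up to floating point; E has inf at both ends, so the
    # rigid-lid terms vanish):
    #   main = -f02 / (E[1:] * g * H) - f02 / (E[:-1] * g * H)
    #   sub = f02 / (E[1:-1] * g * H[1:])
    #   sup = f02 / (E[1:-1] * g * H[:-1])
    #   A = np.diag(main) + np.diag(sub, -1) + np.diag(sup, 1)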

    # get eigenvalues and convert them
    # to internal deformation radii
    lam, v = eig(A)
    lam = np.abs(lam)

    # Baroclinic def. radii in km:
    uno = np.ones((lam.size, lam.size))
    Rd = 1e-3 * uno / np.sqrt(lam)
    Rd = np.unique(Rd)
    Rd = np.flipud(Rd)

    np.disp("Deformation radii [km]:")
    [np.disp(int(r)) for r in Rd]

    # orthonormalize eigenvectors, i.e.,
    # find the dynamical mode vertical structure functions.
    F = np.zeros((n, n))

    for i in range(n):
        mi = v[:, i]  # The vertical structure of the i-th vertical mode.
        fac = np.sqrt(np.sum(H * mi * mi) / np.sum(H))
        F[:, i] = 1 / fac * mi

    F = -F
    F[:, 0] = np.abs(F[:, 1])
    F = np.fliplr(F)

    Fi = np.vstack((F[0, :], F))
    Fi = np.flipud(Fi)
    for i in range(n - 1):
        Fi[i, :] = F[i + 1, :]
    Fi = np.flipud(Fi)

    # Plot the vertical modes.
    if plot:
        plt.close('all')
        kw = dict(fontsize=15, fontweight='black')
        fig, ax = plt.subplots()
        # (ax.hold was removed from matplotlib; overplotting is now the default)
        Hc = np.sum(H)
        z = np.flipud(np.linspace(-Hc, 0, 1000))
        Hp = np.append(0, H)
        Hp = -np.cumsum(Hp)

        # build modes for plotting purposes
        Fp = np.zeros((z.size, n))
        fo = 0

        for i in range(n):
            f1 = near(z, Hp[i])[0][0]
            for j in range(fo, f1):
                Fp[j, :] = F[i, :]
                fo = f1

        for i in range(n):
            l = 'Mode %s' % str(i)
            ax.plot(Fp[:, i], z, label=l)
        xl, xr = ax.set_xlim(-5, 5)
        ax.hlines(Hp, xl, xr, linestyle='dashed')
        ax.set_title(tit, **kw)
        ax.set_xlabel('Eigenfunction [dimensionless]', **kw)
        ax.set_ylabel('Depth [m]', **kw)
        try:
            rstyle(ax)
        except:
            pass
        ax.legend(loc='lower left', fontsize=20, fancybox=True, shadow=True)
        fmt = 'png'
        fig.savefig(figname + '.' + fmt, format=fmt, bbox_inches='tight')
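# Added usage sketch (assumes eig and the plotting helpers used above are in
# scope): print the deformation radii for the Bub & Brown (1996) model
# without plotting:
#   dynmodes(n=6, lat0=5., plot=False, model='Bub_Brown1996')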
Example #49
0
    def robot_localization(self):

        xr = 40
        yr = 40
        orir = np.array([[0]])
        global ct
        ct = 0

        while (1):

            dm = self.calc_dist(11, xr, yr, orir)

            #print(self.act_state);

            ys = self.act_state[2] + 0.0
            xs = self.act_state[0] + 0.0

            self.predition_step()

            # #node of drone to Subscribe IMU data
            # self.subsIMU = rospy.Subscriber('/imu/data_raw',Imu,self.sub_pub_calRot)
            # #and to to Subscribe camera data
            # self.image_sub = rospy.Subscriber('/camera/depth/image',Image,self.save_image)
            # rate = rospy.Rate(10)

            # rospy.spin()
            self.line_z = np.concatenate((dm, orir))

            points = self.observation_model(len(self.line_z))

            if (no_update == 0):
                if (self.matching_step(points).all() <= self.gama):
                    self.update_step()

            if (xr < 60):
                xr += 1
            elif (xr == 60 and orir < np.pi / 4):
                orir = orir + np.array([[np.pi / 10]])

            time.sleep(.1)

            ct += 1
            if (ct == 42):
                print(ct)

            plt.ion()
            fig = plt.figure(1)
            pl.figure(1)
            ax = fig.add_subplot(111)
            cov_plot = np.array([[self.act_cov[0, 0], self.act_cov[0, 2]],
                                 [self.act_cov[2, 0], self.act_cov[2, 2]]])

            endymin = 50 - ys + (10 * math.sin(self.act_state[4] - 0.5061))
            endxmin = xs - 50 + (10 * math.cos(self.act_state[4] - 0.5061))
            endymax = 50 - ys + (10 * math.sin(self.act_state[4] + 0.5061))
            endxmax = xs - 50 + (10 * math.cos(self.act_state[4] + 0.5061))

            endyrmin = 50 - yr + (10 * math.sin(orir - 0.5061))
            endyrmax = 50 - yr + (10 * math.sin(orir + 0.5061))
            endxrmin = xr - 50 + (10 * math.cos(orir - 0.5061))
            endxrmax = xr - 50 + (10 * math.cos(orir + 0.5061))

            line1, = ax.plot(xr - 50, 50 - yr, 'ro')
            line1, = ax.plot(xs - 50, 50 - ys, 'go')

            s = -2 * math.log(1 - 0.95)

            w, v = LA.eig(cov_plot * s)
            order = w.argsort()[::-1]

            w_ = w[order]
            v_ = v[:, order]

            angle = np.degrees(np.arctan2(*v_[:, 0][::-1]))
            #angle = np.degrees(np.arctan2(v[1, 0], v[0,0]))
            #print cov_plot

            pos = [xs - 50, 50 - ys]

            # use the sorted eigenvalues so width/height match the angle above
            width = 2 * np.sqrt(w_[0])
            height = 2 * np.sqrt(w_[1])
            ells = Ellipse(xy=pos,
                           width=width,
                           height=height,
                           angle=angle,
                           color='black')
            ells.set_facecolor('none')

            #t = np.linspace(0, 2*math.pi, 100)
            #ells = Ellipse([xs-50, 50-ys], 2*np.sqrt(w[0]), 2*np.sqrt(w[1]), angle * 180 / np.pi)
            ax.add_artist(ells)

            #plt.plot( -50+xs+((2*np.sqrt(w[0])))*np.cos(t) , 50-ys+((2*np.sqrt(w[1])))*np.sin(t))
            plt.grid(color='lightgray', linestyle='--')

            #pl.plot(xs,y)
            plt.axis([-50, 50, -50, 50])
            #line1.set_ydata(np.sin(0.5 * x + phase))

            ax.plot([xr - 50, endxrmin], [50 - yr, endyrmin], 'r')
            ax.plot([xr - 50, endxrmax], [50 - yr, endyrmax], 'r')
            ax.plot([xs - 50, endxmin], [50 - ys, endymin], 'g')
            ax.plot([xs - 50, endxmax], [50 - ys, endymax], 'g')
            ax.plot([-20, 20], [-20, -20], 'b')
            ax.plot([-20, 20], [20, 20], 'b')
            ax.plot([-20, -20], [-20, 20], 'b')
            ax.plot([20, 20], [-20, 20], 'b')

            fig.canvas.draw()
            #plt.axis([0, 5, 0, 5])
            #a = anim.FuncAnimation(fig, update, frames=10, repeat=False)
            #plt.show()
            plt.gcf().clear()
Example #50
0
import numpy as np
from numpy import linalg as LA

x0 = np.array([1000, 1000, 1000]).T

A = np.array([[0, 0, .33], [.18, 0, 0], [0, .71, .94]])

w, v = LA.eig(A)

print("特征值:", w)
print("特征向量:", v)
# c = LA.inv(v).dot(x0)
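# Added sketch: for this nonnegative projection-style matrix, the long-run
# growth factor is the dominant eigenvalue and the matching eigenvector
# (normalized) gives the stable stage distribution.
dom = np.argmax(np.abs(w))
print("growth factor:", w[dom].real)
print("stable distribution:", np.abs(v[:, dom]) / np.abs(v[:, dom]).sum())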
Example #51
0
import numpy as np
from numpy import linalg as LA
import matplotlib.pyplot as plt
# import tensorflow as tf
import tensorflow.compat.v1 as tf
from printdescribe import print2

tf.disable_v2_behavior()
plt.style.use("ggplot")
plt.rcParams["figure.figsize"] = 10, 8
plt.rcParams["axes.facecolor"] = "0.92"
np.random.seed(42)

# Create square matrix
A = np.arange(3, 19, dtype=float).reshape(-1, 4)
# Ai = np.arange(3, 19).reshape(-1, 4)
# perform eigendecomposition
val, vec = LA.eig(A)

# print the result
print2(val, vec)

# checking the result
# Au = lamda * u
Avec = A @ vec[:, 0]
lambdavec = val[0] * vec[:, 0]

# print the results
print2(Avec, lambdavec)

# Sorting the eigenvalues in ascending order
# first find the ascending indexes using argsort()
indx = np.argsort(val)
Example #52
0
import numpy as np
from numpy import linalg as rkv  # assumed alias, inferred from rkv.eig()/rkv.eigh() below
# A, B and the complex matrix com are assumed to be defined earlier in the source

#Arithmetic operations using functions
print("\nADDITION of A,B matrices:\n", np.add(A, B))
print("\nSUBTRACTION of A,B matrices:\n", np.subtract(A, B))
print("\nMULTIPLICATION of A,B matrices:\n", np.matmul(A, B))
print("\nDIVISION of A,B matrices\n", np.divide(B, A))
# Rank of a matrix
print("Rank of A:", np.linalg.matrix_rank(A))
# Trace of matrix A
print("\nTrace of A:", np.trace(A))
# Determinant of a matrix
print("\nDeterminant of A:", np.linalg.det(A))
# Inverse of matrix A
print("\nInverse of A:\n", np.linalg.inv(A))
print("\nMatrix A raised to power 3:\n", np.linalg.matrix_power(A, 3))
#calculation of eign values and vectors using eig() function
a, b = rkv.eig(A)
print("\nEigen values of matrix A are:\n", a)
print("\nEigen vectors of matrix A are:\n", b)
#calculation of eign values and vectors using eigh() function
c, d = rkv.eigh(com)
print("\nEigen values of complex matrix are:\n", c)
print("\nEigen vectors of complex matrix are:\n", d)
#Creating complex numbers (treated as vectors by dot/vdot)
vector1 = 2 + 3j
vector2 = 4 + 5j
#dot product of vector1 and vector2
product = np.dot(vector1, vector2)
print("\nDot Product :\n", product)
#vdot conjugates its first argument before taking the dot product
vproduct = np.vdot(vector1, vector2)
print("\nComplex conjugate Dot Product :\n ", vproduct)
Example #53
0
import numpy as np
from numpy import linalg as geek

# Creating an array using diag
# function
a = np.diag((1, 2, 3))

print("Array is :", a)

# calculating eigenvalues and eigenvectors
# using the eig() function
c, d = geek.eig(a)

print("Eigen value is :", c)
print("Eigen value is :", d)
Example #54
0
    gmm_h = GaussianMixture(n_components=k).fit(x30D)
    labels = gmm_h.predict(x30D)
    getEmpProbTable(3, k, z30D, labels, prob_z)

    gauss_mean = gmm_h.means_
    #clust_vect_corr = (Comp_mean / nla.norm(Comp_mean, axis=1).reshape((Comp_mean.shape[0], 1))) \
    #.dot((gauss_mean / nla.norm(gauss_mean, axis=1).reshape((gauss_mean.shape[0], 1))).T)
    #with printoptions(precision=1, suppress=True):
    #  print("Correlation between GMM component mean and data model mean:\n", clust_vect_corr)

    gauss_cov = np.empty((k, d))
    W_ALL = []
    for j in range(k):
        gauss_cov[j, :] = np.diag(gmm_h.covariances_[j, :, :])
        COVS = gmm_h.covariances_[j, :, :]
        W, V = LA.eig(COVS)
        W_ALL.append(W)
        print("Eigen Values ")
        print(W)
    plt.figure()
    plt.imshow(W_ALL)
    plt.title("Eigen Value k={:2d}".format(k))

plt.show()
#clust_vect_corr = (Comp_var / nla.norm(Comp_var, axis=1).reshape((Comp_var.shape[0], 1))) \
#.dot((gauss_cov / nla.norm(gauss_cov, axis=1).reshape((gauss_cov.shape[0], 1))).T)
#with printoptions(precision=1, suppress=True):
# print("Correlation between GMM component covariance and data model covariance:\n", clust_vect_corr)
#print("Component covariance matrix (diagonal, transposed): \n", Comp_var.T)
#print("GMM component covariance matrix (Most match, re-ordered, transposed): \n",
# gauss_cov[np.argmax(clust_vect_corr, axis=1), :].T)
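# Added note: GMM covariance matrices are symmetric positive semi-definite,
# so numpy.linalg.eigh is the more appropriate choice here -- it is faster
# and guarantees real eigenvalues in ascending order, e.g.:
#   W = LA.eigvalsh(gmm_h.covariances_[j, :, :])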
Example #55
0
def RAM(q=0.5, f=f0, rs=0.0, loss=0.0):
    ts = sqrt(1 - absolute(rs)**2)
    w = 2.0 * pi * f
    k = w / vf  #-1.0j*loss
    p11 = p22 = rs * exp(-1.0j * k * p)  #Morgan 8.1
    p12 = ts * exp(-1.0j * k * p)  #Morgan 8.1

    g = 1.0 / p * arccos(
        cos(k * p) / ts + 0.0j)  #adding 0.0j so arccos does not return nan

    sinNgp = sin(N * g * p)
    sinNm1gp = sin((N - 1) * g * p)
    singp = sin(g * p)
    P11 = p11 * sinNgp / (sinNgp - p12 * sinNm1gp)
    P12 = P21 = p12 * singp / (sinNgp - p12 * sinNm1gp)
    P22 = p22 * sinNgp / (sinNgp - p12 * sinNm1gp)

    In = -1.0j * rhob(k) * sqrt(w * W * Gs / 2.0) * (
        (cnm1 + bn) * exp(-0.5j * k * p) + (cn * bnm1) * exp(0.5j * k * p))

    cnm1 = 0

    P32 = P31
    P13 = -P31 / 2.0
    P23 = -P32 / 2.0
    Ga = absolute(P13)**2 + absolute(P23)**2
    Ba = hilbert
    C = sqrt(2) * W * epsinf * Np

    P33 = Ga + 1.0j * Ba + 1.0j * w * C

    A = 1.0 / ts * matrix([[exp(-1.0j * k * p), rs], [-rs, exp(1j * k * p)]])
    AN = A**(2 * N)

    ei, vec = eig(A)
    AN = dot(dot(vec, diag(ei)**(2 * N)), inv(vec))
    print(AN)
    print(A ** N)

    #p11 = r*exp(-1j*k*p)
    #p12 = t*exp(-1j*k*p)
    P11 = zeros(len(k), complex)
    P12 = zeros(len(k), complex)
    P13 = zeros(len(k), complex)
    P21 = zeros(len(k), complex)
    P22 = zeros(len(k), complex)
    P23 = zeros(len(k), complex)
    for i, K in enumerate(k):
        #RAM = array([[p12[i]-p11[i]**2/p12[i],p11[i]/p12[i]],[-p11[i]/p12[i], 1/p12[i]]])
        A = 1.0 / t * array([[exp(-1j * K * p), r], [-r, exp(1j * K * p)]])
        ei, vec = linalg.eig(A)
        AN = dot(dot(vec, diag(ei)**(2 * N)), linalg.inv(vec))
        P11[i] = -AN[1, 0] / AN[1, 1]
        P21[i] = AN[0, 0] + P11[i] * AN[0, 1]
        P12[i] = 1.0 / AN[1, 1]
        P22[i] = AN[0, 1] / AN[1, 1]
        cb0 = array([[1], [P11[i]]])
        D = -1j * rho_fs * sqrt(K * v * W * Gamma / 2.0)
        B = array([(1.0 - r / t + 1.0 / t) * exp(-1j * K * p / 2.0),
                   (1.0 + r / t + 1.0 / t) * exp(1j * K * p / 2.0)])
        Atot = dot(linalg.inv(eye(2) - dot(A, A)), eye(2) - AN)
        """
        Here we have I_{n+1} = D * [B1 B2] * [c_n;b_n] = D*B*A**n*[c_0;b_0]
        Summing over even n gives inv(I-A**2)*(I-A**(2*N))
        I_0 is slightly different: letting c_-1=b_-1=0, I_0 becomes D*[exp(-1j...),exp(1j...)]*[c_0;b_0]
        """
        I = D * dot(B, dot(Atot, cb0))
        P13[i] = -I[0] / 2.0
        cb0 = array([[0], [1.0 / AN[1, 1]]])
        I = D * dot(B, dot(Atot, cb0))
        P23[i] = -I[0] / 2.0
    return P11, P12, P13, P21, P22, P23
Example #56
0
import numpy as np
from numpy.linalg import eig

if __name__ == "__main__":
    A1 = np.array([[4, -2], [1, 1]])

    eigenvalues1, eigenvectors1 = eig(A1)
    print(eigenvalues1, eigenvectors1)

    A2 = np.array([[0, 1], [1, 0]])
    eigenvalues2, eigenvectors2 = eig(A2)
    print(eigenvalues2, eigenvectors2)
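    # Added note: A2 is symmetric, so numpy.linalg.eigh also applies; it
    # returns real eigenvalues in ascending order, here (-1, 1).
    from numpy.linalg import eigh
    vals2, vecs2 = eigh(A2)
    print(vals2)  # [-1.  1.]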


Example #57
0
import math
import numpy as np
from numpy import corrcoef
from numpy.linalg import eig

if __name__ == '__main__':
    x = [1, 2, 3, 4, 4.1]
    y = [1, 2, 3, 4, 5.5]
    xer = [0.1, 0.1, 0.1, 0.1, 0.1]
    yer = [0.1, 0.1, 0.1, 0.1, 0.1]

    ox = 0.1
    oy = 0.1

    pxy = corrcoef(x, y)[0][1]

    covar = ox * oy * pxy
    covmat = [[ox * ox, covar], [covar, oy * oy]]
    w, _v = eig(covmat)

    if ox > oy:
        a = (max(w))**0.5
        b = (min(w))**0.5
    else:
        a = (min(w))**0.5
        b = (max(w))**0.5

    dx = 1
    dy = 1
    height = 1
    width = 1
    aspectratio = (dy / height) / (dx / width)
    # atan2 avoids a ZeroDivisionError when ox == oy (as it is here)
    rotation = math.degrees(0.5 * math.atan2(2 * covar / aspectratio,
                                             ox**2 - oy**2))
Example #58
0
    def render(self,
               mode='empty',
               record=False,
               traj_num=0,
               batch_outputs=None):
        if not hasattr(self, 'traj'):
            raise ValueError(
                'Must do a env.reset() first before calling env.render()')

        num_agents = self.env_core.nb_agents
        if type(self.env_core.agents) == list:
            agent_pos = [
                self.env_core.agents[i].state for i in range(num_agents)
            ]
        else:
            agent_pos = self.env_core.agents.state

        num_targets = self.env_core.nb_targets
        if type(self.env_core.targets) == list:
            target_true_pos = [
                self.env_core.targets[i].state[:2] for i in range(num_targets)
            ]
            target_b_state = [
                self.env_core.belief_targets[i].state
                for i in range(num_targets)
            ]  # state[3:5]
            target_cov = [
                self.env_core.belief_targets[i].cov for i in range(num_targets)
            ]
        else:
            target_true_pos = self.env_core.targets.state[:, :2]
            target_b_state = self.env_core.belief_targets.state[:, :
                                                                2]  # state[3:5]
            target_cov = self.env_core.belief_targets.cov

        if self.n_frames % self.skip == 0:
            self.fig.clf()
            ax = self.fig.subplots()
            im = None
            if mode == 'empty':
                im = ax.imshow(self.map,
                               cmap='gray_r',
                               origin='lower',
                               extent=[
                                   self.mapmin[0], self.mapmax[0],
                                   self.mapmin[1], self.mapmax[1]
                               ])

            # Perimeter defense
            perimeter = plt.Circle(self.origin,
                                   self.perimeter_radius,
                                   color='r',
                                   fill=False)
            ax.add_artist(perimeter)

            # Plot agents and targets
            for ii in range(num_agents):
                #agents positions
                ax.plot(agent_pos[ii][0],
                        agent_pos[ii][1],
                        marker=(3, 0, agent_pos[ii][2] / np.pi * 180 - 90),
                        markersize=10,
                        linestyle='None',
                        markerfacecolor='b',
                        markeredgecolor='b')
                ax.plot(self.traj[ii][0], self.traj[ii][1], 'b.', markersize=1)
                #agents sensor indicators short range
                sensor_arc = patches.Arc((agent_pos[ii][0], agent_pos[ii][1]),
                                         METADATA['sensor_r'] * 2,
                                         METADATA['sensor_r'] * 2,
                                         angle=agent_pos[ii][2] / np.pi * 180,
                                         theta1=-METADATA['fov'] / 2,
                                         theta2=METADATA['fov'] / 2,
                                         facecolor='gray')
                ax.add_patch(sensor_arc)
                ax.plot([
                    agent_pos[ii][0], agent_pos[ii][0] + METADATA['sensor_r'] *
                    np.cos(agent_pos[ii][2] +
                           0.5 * METADATA['fov'] / 180.0 * np.pi)
                ], [
                    agent_pos[ii][1], agent_pos[ii][1] + METADATA['sensor_r'] *
                    np.sin(agent_pos[ii][2] +
                           0.5 * METADATA['fov'] / 180.0 * np.pi)
                ],
                        'k',
                        linewidth=0.5)
                ax.plot([
                    agent_pos[ii][0], agent_pos[ii][0] + METADATA['sensor_r'] *
                    np.cos(agent_pos[ii][2] -
                           0.5 * METADATA['fov'] / 180.0 * np.pi)
                ], [
                    agent_pos[ii][1], agent_pos[ii][1] + METADATA['sensor_r'] *
                    np.sin(agent_pos[ii][2] -
                           0.5 * METADATA['fov'] / 180.0 * np.pi)
                ],
                        'k',
                        linewidth=0.5)
                #agents sensor indicators long range
                sensor_arc = patches.Arc((agent_pos[ii][0], agent_pos[ii][1]),
                                         METADATA['sensor_r_long'] * 2,
                                         METADATA['sensor_r_long'] * 2,
                                         angle=agent_pos[ii][2] / np.pi * 180,
                                         theta1=-METADATA['fov_long'] / 2,
                                         theta2=METADATA['fov_long'] / 2,
                                         facecolor='gray')
                ax.add_patch(sensor_arc)
                ax.plot([
                    agent_pos[ii][0],
                    agent_pos[ii][0] + METADATA['sensor_r_long'] *
                    np.cos(agent_pos[ii][2] +
                           0.5 * METADATA['fov_long'] / 180.0 * np.pi)
                ], [
                    agent_pos[ii][1],
                    agent_pos[ii][1] + METADATA['sensor_r_long'] *
                    np.sin(agent_pos[ii][2] +
                           0.5 * METADATA['fov_long'] / 180.0 * np.pi)
                ],
                        'k',
                        linewidth=0.5)
                ax.plot([
                    agent_pos[ii][0],
                    agent_pos[ii][0] + METADATA['sensor_r_long'] *
                    np.cos(agent_pos[ii][2] -
                           0.5 * METADATA['fov_long'] / 180.0 * np.pi)
                ], [
                    agent_pos[ii][1],
                    agent_pos[ii][1] + METADATA['sensor_r_long'] *
                    np.sin(agent_pos[ii][2] -
                           0.5 * METADATA['fov_long'] / 180.0 * np.pi)
                ],
                        'k',
                        linewidth=0.5)
                self.traj[ii][0].append(agent_pos[ii][0])
                self.traj[ii][1].append(agent_pos[ii][1])

            for jj in range(num_targets):
                # ax.plot(self.traj_y[jj][0], self.traj_y[jj][1], 'r.', markersize=1)
                ax.plot(target_true_pos[jj][0],
                        target_true_pos[jj][1],
                        marker='o',
                        markersize=5,
                        linestyle='None',
                        markerfacecolor='r',
                        markeredgecolor='r')
                # Belief on target
                ax.plot(target_b_state[jj][0],
                        target_b_state[jj][1],
                        marker='o',
                        markersize=10,
                        linewidth=5,
                        markerfacecolor='none',
                        markeredgecolor='g')
                eig_val, eig_vec = LA.eig(target_cov[jj][:2, :2])
                belief_target = patches.Ellipse(
                    (target_b_state[jj][0], target_b_state[jj][1]),
                    2 * np.sqrt(eig_val[0]) * self.c_cf,
                    2 * np.sqrt(eig_val[1]) * self.c_cf,
                    # eigenvectors are the columns of eig_vec; use column 0
                    angle=180 / np.pi *
                    np.arctan2(eig_vec[1][0], eig_vec[0][0]),
                    fill=True,
                    zorder=2,
                    facecolor='g',
                    alpha=0.5)
                ax.add_patch(belief_target)
                self.traj_y[jj][0].append(target_true_pos[jj][0])
                self.traj_y[jj][1].append(target_true_pos[jj][1])

            ax.set_xlim((self.mapmin[0], self.mapmax[0]))
            ax.set_ylim((self.mapmin[1], self.mapmax[1]))
            ax.set_aspect('equal', 'box')
            ax.grid()
            ax.set_title(' '.join(
                [mode.upper(), ': Trajectory',
                 str(traj_num)]))

            if not record:
                plt.draw()
                plt.pause(0.0005)
        self.n_frames += 1
Example #59
0
def pdopt(tm, om, numph=30, step=50, reg=0.0, returnall=False):
    """Phase diversity coherence optimization.
    
    Solves an eigenvalue problem in order to find the complex coherences with
    maximum separation (|a - b|) in the complex plane.  Of these two
    coherences, one should in theory represent the coherence with the
    smallest ground contribution present in the data (the 'high' coherence).
    The other then represents the coherence with the largest ground
    contribution present in the data (the 'low' coherence).
    
    Arguments:
        tm (array): The polarimetric covariance (T) matrix of the data,
            with dimensions: [az, rng, num_pol, num_pol].  Note that in the
            HDF5 file, covariance matrix elements below the diagonal are
            zero-valued, in order to save disk space.  The (j,i) elements
            should therefore be calculated from the complex conjugate of the
            (i,j) elements using the kapok.lib.makehermitian() function before
            the matrix is passed to this function.  Note: This should be the
            average matrix of the two tracks forming the baseline, assuming
            polarimetric stationarity.
        om (array): The polarimetric interferometric (Omega) matrix of the
            data, with dimensions [az, rng, num_pol, num_pol].
        numph (int): The number of phase shifts to calculate coherences for.
            The higher the number, the smaller the spacing of the coherences
            around the coherence region perimeter.  The smaller the number,
            the faster the computation time.  Default: 30.
        step (int): Block size (in pixels) used for linalg.eig.  Higher values
            will use more memory but can run a little faster.
            Default: 50.
        reg (float): Regularization factor.  The tm matrix is added to
            the matrix reg*Tr(tm)*I, where Tr(tm) is the trace of tm, and I
            is the identity matrix.  Similarly, the omega matrix is added to
            the matrix reg*Tr(om)*I.  This regularization reduces the spread
            of the coherence region for pixels where the backscatter is
            highly polarization dependent.
        returnall (bool): True/False flag.  Set to true to return the
            weight vectors for the optimized coherences, as well as the
            pair of minor axis coherences (optimized coherence pair with
            minimum separation in the complex plane).  Default: False.
          
    Returns:
        gammamax (array): The optimized coherence with the max eigenvalue.
        gammamin (array): The optimized coherence with the min eigenvalue.
        gammaminormax (array): Of the coherences with the minimum separation
            in the complex plane (e.g., along the minor axis of an elliptical
            coherence region), this will be the one with the max eigenvalue.
            Only returned if returnall == True.
        gammaminormin (array): Of the coherences with the minimum separation
            in the complex plane (e.g., along the minor axis of an elliptical
            coherence region), this will be the one with the min eigenvalue.
            Only returned if returnall == True.
        wmax (array): The weight vector for the max eigenvalue coherence, if
            returnall == True.
        wmin (array): The weight vector for the min eigenvalue coherence, if
            returnall == True.
    
    """
    dim = np.shape(tm)

    # Matrix regularization:
    if reg > 0:
        regmat = np.zeros(dim, dtype='complex64')
        regmat[:, :] = np.eye(dim[2])
        regmat = regmat * reg * np.trace(tm, axis1=2,
                                         axis2=3)[:, :, np.newaxis, np.newaxis]
        tm = tm + regmat

        regmat = np.zeros(dim, dtype='complex64')
        regmat[:, :] = np.eye(dim[2])
        regmat = regmat * reg * np.trace(om, axis1=2,
                                         axis2=3)[:, :, np.newaxis, np.newaxis]
        om = om + regmat
        del regmat

    # Arrays to store coherence separation, and the two complex coherence values.
    cohsize = (dim[0], dim[1])  # number of az, rng pixels
    cohdiff = np.zeros(cohsize, dtype='float32')
    gammamax = np.zeros(cohsize, dtype='complex64')
    gammamin = np.zeros(cohsize, dtype='complex64')

    # Arrays to store minor axis coherences.
    mincohdiff = np.ones(cohsize, dtype='float32') * 99
    gammaminormax = np.zeros(cohsize, dtype='complex64')
    gammaminormin = np.zeros(cohsize, dtype='complex64')

    # Arrays to store polarimetric weighting vectors for the optimized coherences.
    weightsize = (dim[0], dim[1], dim[3])
    wmax = np.zeros(weightsize, dtype='complex64')
    wmin = np.zeros(weightsize, dtype='complex64')

    # Main Loop
    for Ph in np.arange(0, numph):  # loop through rotation angles
        Pr = Ph * np.pi / numph  # phase shift to be applied

        print('kapok.cohopt.pdopt | Current Progress: ' +
              str(np.round(Pr / np.pi * 100, decimals=2)) + '%. (' +
              time.ctime() + ')     ',
              end='\r')

        for az in range(0, dim[0], step):
            azend = az + step
            if azend > dim[0]:
                azend = dim[0]

            for rng in range(0, dim[1], step):
                rngend = rng + step
                if rngend > dim[1]:
                    rngend = dim[1]

                omblock = om[az:azend, rng:rngend]
                tmblock = tm[az:azend, rng:rngend]
                z12 = omblock.copy()

                # Apply phase shift to omega matrix:
                z12 = z12 * np.exp(1j * Pr)
                z12 = 0.5 * (z12 + np.rollaxis(np.conj(z12), 3, start=2))

                # Check if any pixels have singular covariance matrices.
                # If so, set those matrices to the identity, to keep an
                # exception from being thrown by linalg.inv().
                det = linalg.det(tmblock)
                ind = (det == 0)
                if np.any(ind):
                    tmblock[ind] = np.eye(dim[3])

                # Solve the eigenvalue problem:
                nu, w = linalg.eig(
                    np.einsum('...ij,...jk->...ik', linalg.inv(tmblock), z12))

                wH = np.rollaxis(np.conj(w), 3, start=2)

                Tmp = np.einsum('...ij,...jk->...ik', omblock, w)
                Tmp12 = np.einsum('...ij,...jk->...ik', wH, Tmp)

                Tmp = np.einsum('...ij,...jk->...ik', tmblock, w)
                Tmp11 = np.einsum('...ij,...jk->...ik', wH, Tmp)

                azind = np.tile(np.arange(0, w.shape[0]), (w.shape[1], 1)).T
                rngind = np.tile(np.arange(0, w.shape[1]), (w.shape[0], 1))

                lmin = np.argmin(nu, axis=2)
                gmin = Tmp12[azind, rngind, lmin, lmin] / np.abs(
                    Tmp11[azind, rngind, lmin, lmin])

                lmax = np.argmax(nu, axis=2)
                gmax = Tmp12[azind, rngind, lmax, lmax] / np.abs(
                    Tmp11[azind, rngind, lmax, lmax])

                ind = (np.abs(gmax - gmin) > cohdiff[az:azend, rng:rngend])

                # If we've found the coherences with the best separation
                # so far, save them.
                if np.any(ind):
                    (azupdate, rngupdate) = np.where(ind)

                    cohdiff[az + azupdate,
                            rng + rngupdate] = np.abs(gmax - gmin)[azupdate,
                                                                   rngupdate]
                    gammamax[az + azupdate, rng + rngupdate] = gmax[azupdate,
                                                                    rngupdate]
                    gammamin[az + azupdate, rng + rngupdate] = gmin[azupdate,
                                                                    rngupdate]

                    if returnall:
                        wmax[az + azupdate, rng + rngupdate, :] = np.squeeze(
                            w[azupdate, rngupdate, :, lmax[azupdate,
                                                           rngupdate]])
                        wmin[az + azupdate, rng + rngupdate, :] = np.squeeze(
                            w[azupdate, rngupdate, :, lmin[azupdate,
                                                           rngupdate]])

                # If returnall is True, also check if this coherence pair
                # has the smallest separation found so far.
                if returnall:
                    ind = (np.abs(gmax - gmin) < mincohdiff[az:azend,
                                                            rng:rngend])

                    if np.any(ind):
                        (azupdate, rngupdate) = np.where(ind)

                        mincohdiff[az + azupdate, rng +
                                   rngupdate] = np.abs(gmax - gmin)[azupdate,
                                                                    rngupdate]
                        gammaminormax[az + azupdate,
                                      rng + rngupdate] = gmax[azupdate,
                                                              rngupdate]
                        gammaminormin[az + azupdate,
                                      rng + rngupdate] = gmin[azupdate,
                                                              rngupdate]

    print('kapok.cohopt.pdopt | Optimization complete. (' + time.ctime() +
          ')          ')
    if returnall:
        return gammamax, gammamin, gammaminormax, gammaminormin, wmax, wmin
    else:
        return gammamax, gammamin
Example #60
0
 def numeric(self, values):
     if not (values[0].T == values[0]).all():
         raise Exception("lambda_min called on a non-symmetric matrix.")
     w, v = LA.eig(values[0])
     return min(w)
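     # Added note: since the input is symmetric, numpy.linalg.eigvalsh is the
     # more robust choice here -- it returns real eigenvalues in ascending
     # order, so the minimum is simply LA.eigvalsh(values[0])[0].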