def tridiag_eigs():
    # Most of this code is just constructing tridiagonal
    # matrices and calling functions they have already written.
    m = 1000
    k = 100
    A = np.zeros((m, m))
    a = rand(m)
    b = rand(m - 1)
    np.fill_diagonal(A, a)
    np.fill_diagonal(A[1:], b)
    np.fill_diagonal(A[:, 1:], b)
    Amul = lambda u: tri_mul(a, b, u)
    alpha, beta = lanczos(rand(m), Amul, k)
    H = np.zeros((alpha.size, alpha.size))
    np.fill_diagonal(H, alpha)
    np.fill_diagonal(H[1:], beta)
    np.fill_diagonal(H[:, 1:], beta)
    H_eigs = eig(H, right=False)
    H_eigs.sort()
    H_eigs = H_eigs[::-1]
    print(H_eigs[:10])
    A = np.zeros((m, m))
    np.fill_diagonal(A, a)
    np.fill_diagonal(A[1:], b)
    np.fill_diagonal(A[:, 1:], b)
    A_eigs = eig(A, right=False)
    A_eigs.sort()
    A_eigs = A_eigs[::-1]
    print(A_eigs[:10])
def algorithm3(self, e0, M):
    # Compute global maximum expansion rate
    Vtest = linspace(0, 0.5, 1000)
    muplot = []
    for Vi in Vtest:
        J = self.Jac(0, [Vi, 0])
        muplot.append(0.5 * max(real(eig(J + J.T)[0])))
    index = argmax(muplot)
    mustar = muplot[index]
    Vstar = Vtest[index]

    self.d1 = [e0]
    self.d2 = [e0]
    self.theta = [0]
    c = []
    for i in range(len(self.T) - 1):
        # compute maximal expansion rate c_i in a neighbourhood of V[i]
        # using global vector field bound M
        if abs(self.V[i] - Vstar) <= self.d1[i] + M * (self.T[i+1] - self.T[i]):
            c.append(mustar)
        elif self.V[i] + self.d1[i] + M * (self.T[i+1] - self.T[i]) < Vstar:
            J = self.Jac(0, [self.V[i] + self.d1[i] + M * (self.T[i+1] - self.T[i]), 0])
            c.append(0.5 * max(real(eig(J + J.T)[0])))
        else:
            J = self.Jac(0, [self.V[i] - self.d1[i] - M * (self.T[i+1] - self.T[i]), 0])
            c.append(0.5 * max(real(eig(J + J.T)[0])))
        # compute diameter of ball based on bound on expansion rate
        # in neighbourhood of current state
        self.d1.append(exp(c[i] * (self.T[i+1] - self.T[i])) * self.d1[i] + self.tolerance)
        self.d2.append(exp(c[i] * (self.T[i+1] - self.T[i])) * self.d2[i] + self.tolerance)
        self.theta.append(0)
def plot_ritz(A, n, iters):
    '''Plot the relative error of the Ritz values of `A`.'''
    Amul = A.dot
    b = np.random.rand(A.shape[0])
    Q = np.empty((len(b), iters + 1), dtype=np.complex128)
    H = np.zeros((iters + 1, iters), dtype=np.complex128)
    Q[:, 0] = b / la.norm(b)
    eigvals = np.sort(abs(la.eig(A)[0]))[::-1]
    eigvals = eigvals[:n]
    rel_err = np.zeros((iters, n))
    for j in range(iters):
        Q[:, j + 1] = Amul(Q[:, j])
        for i in range(j + 1):
            # np.vdot conjugates its first argument itself
            H[i, j] = np.vdot(Q[:, i], Q[:, j + 1])
            Q[:, j + 1] = Q[:, j + 1] - H[i, j] * Q[:, i]
        H[j + 1, j] = la.norm(Q[:, j + 1])
        Q[:, j + 1] = Q[:, j + 1] / H[j + 1, j]
        if j < n:
            rit = np.zeros(n, dtype=np.complex128)
            rit[:j + 1] = np.sort(la.eig(H[:j + 1, :j + 1])[0])[::-1]
            rel_err[j, :] = abs(eigvals - rit) / abs(eigvals)
        else:
            rit = np.sort(la.eig(H[:j + 1, :j + 1])[0])[::-1]
            rit = rit[:n]
            rel_err[j, :] = abs(eigvals - rit) / abs(eigvals)
    for i in range(n):
        plt.semilogy(rel_err[:, i])
    plt.show()
def SlowDownFactor(temporalnet):
    """Returns a factor S that indicates how much slower (S>1) or faster (S<1)
    a diffusion process in the temporal network evolves on a second-order model
    compared to a first-order model. This value captures the effect of order
    correlations on dynamical processes.
    """
    g2 = temporalnet.iGraphSecondOrder().components(mode="STRONG").giant()
    g2n = temporalnet.iGraphSecondOrderNull().components(mode="STRONG").giant()

    A2 = np.matrix(list(g2.get_adjacency()))
    T2 = np.zeros(shape=(len(g2.vs), len(g2.vs)))
    D2 = np.diag(g2.strength(mode='out', weights=g2.es["weight"]))
    for i in range(len(g2.vs)):
        for j in range(len(g2.vs)):
            T2[i, j] = A2[i, j] / D2[i, i]

    A2n = np.matrix(list(g2n.get_adjacency()))
    T2n = np.zeros(shape=(len(g2n.vs), len(g2n.vs)))
    D2n = np.diag(g2n.strength(mode='out', weights=g2n.es["weight"]))
    for i in range(len(g2n.vs)):
        for j in range(len(g2n.vs)):
            T2n[i, j] = A2n[i, j] / D2n[i, i]

    w2, v2 = spl.eig(T2, left=True, right=False)
    w2n, v2n = spl.eig(T2n, left=True, right=False)

    # eig does not order its output, so sort by magnitude and take the
    # second-largest eigenvalue of each transition matrix
    w2_sorted = np.sort(np.abs(w2))[::-1]
    w2n_sorted = np.sort(np.abs(w2n))[::-1]
    return np.log(w2n_sorted[1]) / np.log(w2_sorted[1])
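# A minimal, self-contained sketch of why the second-largest eigenvalue is
# the right quantity here (toy 3-state chain, not part of the code above):
# for a row-stochastic matrix T, deviations from the stationary distribution
# decay like |lambda_2|**t, so log|lambda_2| sets the diffusion speed that
# SlowDownFactor compares between the two models.
import numpy as np
import scipy.linalg as spl

T = np.array([[0.8, 0.1, 0.1],
              [0.1, 0.8, 0.1],
              [0.1, 0.1, 0.8]])
w = np.sort(np.abs(spl.eig(T, right=False)))[::-1]
print(w[0], w[1])  # 1.0 and 0.7: mixing time scales like 1/log(1/0.7)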
def eig_linearised(Z, modes):
    """Solves a linearised approximation to the eigenvalue problem from
    the impedance calculated at some fixed frequency.

    The equation :math:`L = -s^2 S` is solved for `s`

    Parameters
    ----------
    Z : EfieImpedanceMatrixLoopStar
        The impedance matrix calculated in a loop-star basis
    modes : ndarray (int)
        A list or array of the mode numbers required

    Returns
    -------
    s_mode : ndarray, complex
        The complex poles `s` corresponding to the modes' eigenfrequencies
    j_mode : ndarray, complex
        Columns of this matrix contain the corresponding modal currents
    """
    modes = np.asarray(modes)

    L = Z.matrices['L']
    S = Z.matrices['S']

    try:
        # Try to find the loop and star parts of the matrix (all relevant
        # matrices and vectors follow the same decomposition)
        loop, star = loop_star_indices(L)
    except AttributeError:
        loop = [[], []]
        star = [slice(None), slice(None)]

    if len(loop[0]) > 0 and len(loop[1]) > 0:
        L_conv = la.solve(L[loop[0], loop[1]],
                          L[loop[0], star[1]])
        L_red = (L[star[0], star[1]] -
                 np.dot(L[star[0], loop[1]], L_conv))

        # find eigenvalues, and star part of eigenvectors
        w, v_s = la.eig(S[star[0], star[1]], -L_red)

        vr = np.empty((L.shape[0], len(w)), np.complex128)
        vr[star[1]] = v_s
        vr[loop[1]] = -np.dot(L_conv, v_s)
    else:
        # Matrix does not have loop-star decomposition, so use the whole thing
        # TODO: implement some filtering to eliminate null-space solutions?
        w, vr = la.eig(S, -L)

    w_freq = np.sqrt(w)
    # make sure real part is negative
    w_freq = np.where(w_freq.real > 0, -w_freq, w_freq)

    w_selected = np.ma.masked_array(w_freq, abs(w_freq.real) > abs(w_freq.imag))
    which_modes = np.argsort(abs(w_selected.imag))[modes]

    return w_freq[which_modes], vr[:, which_modes]
def coupled_modes(self, w, ignore_damping=False, **kwargs):
    M, B, C = self.linearised_matrices(w, **kwargs)
    if ignore_damping:
        wn, vn = linalg.eig(C, M)
        order = np.argsort(abs(wn))
        wn = np.sqrt(abs(wn[order]))
        vn = vn[:, order]
    else:
        AA = r_[c_[zeros_like(C), C], c_[C, B]]
        BB = r_[c_[C, zeros_like(C)], c_[zeros_like(C), -M]]
        wn, vn = linalg.eig(AA, BB)
        order = np.argsort(abs(wn))
        wn = abs(wn[order])
        # Mode shapes are the first half of the rows; the second
        # half of the rows should be the same multiplied by the eigenvalues.
        vn = vn[:M.shape[0], order]

        # We expect all the modes to be complex conjugates; return
        # every other one.
        # First: make sure all have the same sign
        norm_vn = vn / vn[np.argmax(abs(vn), axis=0), range(vn.shape[1])]
        assert (np.allclose(wn[::2], wn[1::2], rtol=1e-4) and
                np.allclose(norm_vn[:, ::2], norm_vn[:, 1::2].conj(),
                            atol=1e-2)), "Expect conjugate modes"
        wn = wn[::2]
        vn = norm_vn[:, ::2]
    return wn, vn
def spatialFilter(Ra, Rb):
    R = Ra + Rb
    E, U = la.eig(R)

    # CSP requires the eigenvalues E and eigenvectors U to be sorted in
    # descending order
    order = np.argsort(E)[::-1]  # argsort gives ascending order; flip it
    E = E[order]
    U = U[:, order]

    # Find the whitening transformation matrix
    P = np.dot(np.sqrt(la.inv(np.diag(E))), np.transpose(U))

    # The mean covariance matrices may now be transformed
    Sa = np.dot(P, np.dot(Ra, np.transpose(P)))
    Sb = np.dot(P, np.dot(Rb, np.transpose(P)))

    # Find and sort the generalized eigenvalues and eigenvectors
    E1, U1 = la.eig(Sa, Sb)
    order1 = np.argsort(E1)[::-1]
    E1 = E1[order1]
    U1 = U1[:, order1]

    # The projection matrix (the spatial filter) may now be obtained
    SFa = np.dot(np.transpose(U1), P)
    return SFa.astype(np.float32)
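# Sanity sketch for the whitening step used in spatialFilter above
# (hypothetical symmetric positive-definite R built from random data):
# with P = diag(E)^(-1/2) U^T, the transformed matrix P R P^T should be
# (numerically) the identity.
import numpy as np
import scipy.linalg as la

rng = np.random.default_rng(0)
X = rng.standard_normal((4, 100))
R = X @ X.T / 100.0                      # a random SPD "covariance"
E, U = la.eig(R)
P = np.dot(np.sqrt(la.inv(np.diag(E.real))), U.T)
print(np.allclose(P @ R @ P.T, np.eye(4)))   # True, up to round-off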
def FLQTEB(engine, app):
    nmatrix = len(engine.generators['h'].table)
    if app.path is not None:
        result = zeros((app.path.rank, nmatrix + 1))
        key = list(app.path.mesh.keys())[0]
        if len(app.path.mesh[key].shape) == 1:
            result[:, 0] = app.path.mesh[key]
        else:
            result[:, 0] = array(range(app.path.rank[key]))
        for i, parameter in enumerate(list(app.path.mesh[key])):
            result[i, 1:] = phase(eig(engine.evolution(t=app.ts.mesh['t'], **{key: parameter}))[0]) / app.ts.volume['t']
    else:
        result = zeros((2, nmatrix + 1))
        result[:, 0] = array(range(2))
        result[0, 1:] = angle(eig(engine.evolution(t=app.ts.mesh['t']))[0]) / app.ts.volume['t']
        result[1, 1:] = result[0, 1:]
    if app.save_data:
        savetxt(engine.dout + '/' + engine.name.full + '_EB.dat', result)
    if app.plot:
        plt.title(engine.name.full + '_EB')
        plt.plot(result[:, 0], result[:, 1:])
        if app.show:
            plt.show()
        else:
            plt.savefig(engine.dout + '/' + engine.name.full + '_EB.png')
def test_streamline_tensors():
    # Small streamline
    streamline = [[1, 2, 3], [4, 5, 3], [5, 6, 3]]
    # Non-default eigenvalues:
    evals = [0.0012, 0.0006, 0.0004]
    streamline_tensors = life.streamline_tensors(streamline, evals=evals)
    npt.assert_array_almost_equal(streamline_tensors[0],
                                  np.array([[0.0009, 0.0003, 0.],
                                            [0.0003, 0.0009, 0.],
                                            [0., 0., 0.0004]]))

    # Get the eigenvalues/eigenvectors:
    eigvals, eigvecs = la.eig(streamline_tensors[0])
    eigvecs = eigvecs[np.argsort(eigvals)[::-1]]
    eigvals = eigvals[np.argsort(eigvals)[::-1]]

    npt.assert_array_almost_equal(eigvals,
                                  np.array([0.0012, 0.0006, 0.0004]))

    npt.assert_array_almost_equal(eigvecs[0],
                                  np.array([0.70710678, -0.70710678, 0.]))

    # Another small streamline
    streamline = [[1, 0, 0], [2, 0, 0], [3, 0, 0]]
    streamline_tensors = life.streamline_tensors(streamline, evals=evals)

    for t in streamline_tensors:
        eigvals, eigvecs = la.eig(t)
        eigvecs = eigvecs[np.argsort(eigvals)[::-1]]
        # This one has no rotations - all tensors are simply the canonical:
        npt.assert_almost_equal(np.rad2deg(np.arccos(
            np.dot(eigvecs[0], [1, 0, 0]))), 0)
        npt.assert_almost_equal(np.rad2deg(np.arccos(
            np.dot(eigvecs[1], [0, 1, 0]))), 0)
        npt.assert_almost_equal(np.rad2deg(np.arccos(
            np.dot(eigvecs[2], [0, 0, 1]))), 0)
def regular_svd_by_pca(K, k=0):
    K_size = K.shape
    if K_size[0] < K_size[1]:
        K_squared = np.dot(K, K.T)
        tsp, tUp = la.eig(K_squared)
    else:
        K_squared = np.dot(K.T, K)
        tsp, tVp = la.eig(K_squared)
    # As la.eig returns complex numbers, use their absolute values.
    tsp = abs(tsp)
    tsp = np.sqrt(tsp)
    n_pos_sigs = sum(tsp > 0)
    tSp = np.diag(1.0 / tsp[0:n_pos_sigs])
    if K_size[0] < K_size[1]:
        tVp = np.dot(K.T, tUp)
        tVp[:, 0:n_pos_sigs] = np.dot(tVp[:, 0:n_pos_sigs], tSp)
    else:
        tUp = np.dot(K, tVp)
        tUp[:, 0:n_pos_sigs] = np.dot(tUp[:, 0:n_pos_sigs], tSp)
    if 0 < k < min(K_size):
        tUp = tUp[:, 0:k]
        tVp = tVp[:, 0:k]
        tsp = tsp[0:k]
    return tUp, tsp, tVp
def FindMaximumQAQ(A, vertices, tetra):
    lambdas = []
    Q = np.zeros((4, 4))
    for i in range(4):
        Q[:, i] = vertices[tetra[i], :]
    print("Q", Q)
    # Full problem:
    A_ = Q.T.dot(A).dot(Q)
    B_ = Q.T.dot(Q)
    e, V = eig(A_, B_)
    alpha = np.real(V[:, np.argmax(e)])
    if np.all(alpha >= 0.) or np.all(alpha <= 0.):
        lambdas.append(np.max(np.real(e)))
    # Only three qs:
    for comb in combinations(range(4), 3):
        A__ = np.array([[A_[i, j] for j in comb] for i in comb])
        B__ = np.array([[B_[i, j] for j in comb] for i in comb])
        e, V = eig(A__, B__)
        alpha = np.real(V[:, np.argmax(e)])
        if np.all(alpha >= 0.) or np.all(alpha <= 0.):
            lambdas.append(np.max(np.real(e)))
    # Only two qs:
    for comb in combinations(range(4), 2):
        A__ = np.array([[A_[i, j] for j in comb] for i in comb])
        B__ = np.array([[B_[i, j] for j in comb] for i in comb])
        e, V = eig(A__, B__)
        alpha = np.real(V[:, np.argmax(e)])
        if np.all(alpha >= 0.) or np.all(alpha <= 0.):
            lambdas.append(np.max(np.real(e)))
    # Only one q:
    for i in range(4):
        lambdas.append((Q[:, i]).T.dot(A).dot(Q[:, i]))
    print(lambdas)
    return np.max(np.array(lambdas))
def pca(data, base_num=1):
    N, dim = data.shape

    data_m = data.mean(0)
    data_new = data - data_m

    # number of samples > number of dimensions
    if N > dim:
        # covariance matrix of the data matrix
        cov_mat = sp.dot(data_new.T, data_new) / float(N)
        # compute eigenvalues and eigenvectors
        l, vm = linalg.eig(cov_mat)
        # keep the eigenvectors with the largest eigenvalues
        axis = vm[:, l.argsort()[-min(base_num, dim):][::-1]].T
    # number of dimensions > number of samples
    else:
        base_num = min(base_num, N)
        cov_mat = sp.dot(data_new, data_new.T) / float(N)
        l, v = linalg.eig(cov_mat)

        # sort eigenvalues and eigenvectors in descending order
        idx = l.argsort()[::-1]
        l = l[idx]
        v = v[:, idx]

        # map the sample-space eigenvectors back to feature space
        vm = sp.dot(data_new.T, v[:, :base_num])

        # compute the (principal component) basis
        axis = sp.zeros([base_num, dim], dtype=sp.float64)
        for ii in range(base_num):
            if l[ii] <= 0:
                break
            axis[ii] = vm[:, ii] / linalg.norm(vm[:, ii])

    return axis
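# Sketch of the duality trick used in the N < dim branch above: if v is an
# eigenvector of X X^T (an N x N problem), then X^T v is an (unnormalised)
# eigenvector of X^T X with the same eigenvalue, since
# (X^T X)(X^T v) = X^T (X X^T) v = l * X^T v.  Toy check:
import numpy as np
from scipy import linalg

rng = np.random.default_rng(1)
X = rng.standard_normal((3, 8))          # N=3 samples, dim=8 features
l, v = linalg.eig(X @ X.T)               # small N x N eigenproblem
u = X.T @ v[:, 0].real                   # lift to feature space
print(np.allclose((X.T @ X) @ u, l[0].real * u))  # True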
def test_create(self):
    basis_set = SphericalGTOSet()
    xyz = (0.0, 0.0, 0.0)
    r0 = 10.0
    for n in range(-10, 10):
        z = 2.0**n
        basis_set.add_one_basis(0, 0, xyz, z)
        for L in [0]:
            basis_set.add_basis(L, (0.0, 0.0, +r0), z)
            basis_set.add_basis(L, (0.0, 0.0, -r0), z)
            basis_set.add_basis(L, (0.0, +r0, 0.0), z)
            basis_set.add_basis(L, (0.0, -r0, 0.0), z)
            basis_set.add_basis(L, (+r0, 0.0, 0.0), z)
            basis_set.add_basis(L, (-r0, 0.0, 0.0), z)
    smat = basis_set.s_mat()
    for e in sorted(abs(la.eig(smat)[0]))[0:10]:
        print(e)
    print("-----")
    hmat = basis_set.t_mat() + basis_set.v_mat(1.0, xyz)
    zmat = basis_set.xyz_mat((0, 0, 1))
    for e in sorted(la.eig(hmat, smat)[0].real)[0:10]:
        print(e)
def eig(A, B):
    """
    To ensure Matlab compatibility, we need to swap
    matrices A and B around!
    """
    (XX1, XX2) = LIN.eig(A, B)
    (XX3, XX4) = LIN.eig(B, A)
    return (mat(XX4), mat(XX1))
def __init__(self, matrix, error, overlap=None, overlap_err=None):
    if overlap_err is None:
        self.func = lambda mat: lin.eig(mat, overlap)[0]
        self.resample = lambda: gaussian_matrix_resample(matrix, error)
    else:
        self.func = lambda mats: lin.eig(mats[0], mats[1])[0]
        self.resample = lambda: (gaussian_matrix_resample(matrix, error),
                                 gaussian_matrix_resample(overlap, overlap_err))
def compute_method(self, parametro=None):
    '''eigenvalues and eigenvectors'''
    # first, the ERA method
    # self.__eraresOut.A, self.__eraresOut.B, self.__eraresOut.C = mr.compute_ERA_model(np.array(self._signalOut), 2)
    # second, eigenvalues and eigenvectors
    self.__eraresOut.lambdaValues, self.__eraresOut.lambdaVector = linalg.eig(self.__eraresOut.A)

    # self.__eraresRef.A, self.__eraresRef.B, self.__eraresRef.C = mr.compute_ERA_model(np.array(self._signalRef), 2)
    # second, eigenvalues and eigenvectors
    self.__eraresRef.lambdaValues, self.__eraresRef.lambdaVector = linalg.eig(self.__eraresRef.A)
def designCSP(dataA, dataB, nb):
    # return v, a, d
    n_channels = dataA.shape[0]
    q = dataA.shape[1]

    cA = np.zeros([dataA.shape[0], n_channels, n_channels])
    cB = np.zeros([dataB.shape[0], n_channels, n_channels])

    # Compute the covariance matrix of each epoch of the same class (A and B)
    for i in range(dataA.shape[0]):
        # cA[i,...] = np.cov(dataA[i,:,:])
        c = np.dot(dataA[i, :, :], dataA[i, :, :].transpose())
        cA[i, ...] = c / (np.trace(c) * q)
        # cA[i,...] = c
    # compute the mean of the covariance matrices of each epoch
    cA_mean = cA.mean(0)

    for i in range(dataB.shape[0]):
        # cB[i,...] = np.cov(dataB[i,:,:])
        c = np.dot(dataB[i, :, :], dataB[i, :, :].transpose())
        cB[i, ...] = c / (np.trace(c) * q)
        # cB[i,...] = c
    # compute the mean of the covariance matrices of each epoch
    cB_mean = cB.mean(0)

    lamb, v = lg.eig(cA_mean + cB_mean)  # eigenvalue and eigenvector decomposition
    lamb = lamb.real  # keep only the real part of the eigenvalues

    # argsort returns the indices that sort lamb in ascending order;
    # reverse them so index holds the positions in descending order
    index = np.argsort(lamb)
    index = index[::-1]
    lamb = lamb[index]  # sort the eigenvalues in descending order
    # the same goes for the eigenvectors along axis 1
    v = v.take(index, axis=1)

    # whitening matrix computation
    Q = np.dot(np.diag(1 / np.sqrt(lamb)), v.transpose())

    # eig decomposition of the whitened cov matrix
    D, V = lg.eig(np.dot(Q, np.dot(cA_mean, Q.transpose())))

    W_full = np.dot(V.transpose(), Q)
    # select only the neighbours defined in nb: the first nb eigenvectors
    W = W_full[:nb, :]
    W = np.vstack((W, W_full[-nb:, :]))  # and the last nb eigenvectors
    return W
def mysqrtm(m):
    m = 0.5 * (m.H + m)
    ls, vs = la.eigh(m)
    vs = np.matrix(vs)
    try:
        ls = [math.sqrt(max(l.real, 0)) for l in ls]
    except ValueError:
        print(m.H - m)
        print(la.eig(m))
        raise
    return vs * np.diag(ls) * vs.H
def printEigen(A, F):
    print('Pole Locations:')
    (w_A, v_A) = la.eig(A)
    (w_F, v_F) = la.eig(F)
    for i in range(len(w_A)):
        print('w_A = ', w_A[i])
        print('v_A = \n', v_A[:, i])
    for i in range(len(w_F)):
        print('w_F = ', w_F[i])
        print('v_F = \n', v_F[:, i])
    return
def main():
    print(power_iteration(Q, A))
    w, pi = ln.eig(A, left=True, right=False)
    print(w)
    print(pi)

    print(power_iteration(Q, B))
    w, pi = ln.eig(B, left=True, right=False)
    print(w)
    print(pi)
def test_aligned_mem():
    """Check linalg works with non-aligned memory"""
    # Allocate 804 bytes of memory (allocated on boundary)
    a = arange(804, dtype=np.uint8)

    # Create an array with boundary offset 4
    z = np.frombuffer(a.data, offset=4, count=100, dtype=float)
    z.shape = 10, 10

    eig(z, overwrite_a=True)
    eig(z.T, overwrite_a=True)
def eigen(A, B=None):
    """
    This function is used to sort eigenvalues and eigenvectors,
    e.g. for a given system linalg.eig will return eigenvalues as:
    (array([ 0. +89.4j,  0. -89.4j,  0. +89.4j,  0. -89.4j,
             0.+983.2j,  0.-983.2j,  0. +40.7j,  0. -40.7j])
    This function will sort these eigenvalues as:
    (array([ 0. +40.7j,  0. +89.4j,  0. +89.4j,  0.+983.2j,
             0. -40.7j,  0. -89.4j,  0. -89.4j,  0.-983.2j])
    The corresponding eigenvectors will follow the same order.

    Parameters
    ----------
    A: array
        A complex or real matrix whose eigenvalues and eigenvectors
        will be computed.
    B: array, optional
        Right-hand side matrix in a generalized eigenvalue problem.
        Default is None; the identity matrix is assumed.

    Returns
    ----------
    evalues: array
        Sorted eigenvalues
    evectors: array
        Sorted eigenvectors

    Examples:
    >>> L = sp.array([[2, -1, 0],
    ...               [-4, 8, -4],
    ...               [0, -4, 4]])
    >>> lam, P = eigen(L)
    >>> lam
    array([  0.56258062+0.j,   2.63206172+0.j,  10.80535766+0.j])
    """
    if B is None:
        evalues, evectors = la.eig(A)
    else:
        evalues, evectors = la.eig(A, B)

    if all(eigs == 0 for eigs in evalues.imag):
        if all(eigs > 0 for eigs in evalues.real):
            idxp = evalues.real.argsort()  # positive in increasing order
            idxn = sp.array([], dtype=int)
        else:
            idxp = evalues.real.argsort()[int(len(evalues)/2):]  # positive in increasing order
            idxn = evalues.real.argsort()[int(len(evalues)/2) - 1::-1]  # negative in decreasing order
    else:
        idxp = evalues.imag.argsort()[int(len(evalues)/2):]  # positive in increasing order
        idxn = evalues.imag.argsort()[int(len(evalues)/2) - 1::-1]  # negative in decreasing order

    idx = sp.hstack([idxp, idxn])

    return evalues[idx], evectors[:, idx]
def test_aligned_mem_complex():
    """Check that complex objects don't need to be completely aligned"""
    # Allocate 1608 bytes of memory (allocated on boundary)
    a = zeros(1608, dtype=np.uint8)

    # Create an array with boundary offset 8
    z = np.frombuffer(a.data, offset=8, count=100, dtype=complex)
    z.shape = 10, 10

    eig(z, overwrite_a=True)
    # This does not need special handling
    eig(z.T, overwrite_a=True)
def _poles_and_tangential_directions(rom):
    """Compute the poles and tangential directions of a reduced order model."""
    if isinstance(rom.E, IdentityOperator):
        poles, Y, X = spla.eig(to_matrix(rom.A, format='dense'),
                               left=True, right=True)
    else:
        poles, Y, X = spla.eig(to_matrix(rom.A, format='dense'),
                               to_matrix(rom.E, format='dense'),
                               left=True, right=True)
    Y = rom.B.range.make_array(Y.conj().T)
    X = rom.C.source.make_array(X.T)
    b = rom.B.apply_adjoint(Y)
    c = rom.C.apply(X)
    return poles, b, c
def transitionMatrix(cg, minstrength=0.1):
    A = gk.CG2adj(cg)
    edges = scipy.where(A == 1)
    A[edges] = randweights(edges[0].shape[0], c=minstrength)
    l = linalg.eig(A)[0]
    c = 0
    pbar = ProgressBar(widgets=['Searching for weights: ', Percentage(), ' '],
                       maxval=10000).start()
    # keep resampling edge weights until the spectral radius drops below 1
    while max(l * scipy.conj(l)) > 1:
        A[edges] = randweights(edges[0].shape[0], c=c)
        c += 1
        l = linalg.eig(A)[0]
        pbar.update(c)
    pbar.finish()
    return A
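# Quick sketch of the stability criterion the loop above enforces: weights
# are resampled until the spectral radius max|lambda_i| of A drops below 1,
# which is exactly the condition for A**k -> 0 (a stable transition matrix).
# Toy check:
import numpy as np
from scipy import linalg

A = np.array([[0.5, 0.2],
              [0.1, 0.4]])
rho = max(abs(linalg.eig(A)[0]))
print(rho < 1)                                         # True (rho = 0.6)
print(np.allclose(np.linalg.matrix_power(A, 200), 0))  # True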
def ps_contour_plot(A, m=20, epsilon_vals=None):
    '''Plots the pseudospectrum of the matrix A as a contour plot.
    Also plots the eigenvalues.

    Parameters:
        A : square, 2D ndarray
            The matrix whose pseudospectrum is to be plotted
        m : int
            accuracy
        epsilon_vals : list of floats
            If k is in epsilon_vals, then the epsilon-pseudospectrum
            is plotted for epsilon=10**-k.
            If epsilon_vals=None, the defaults of plt.contour() are used
            instead of any specified values.
    '''
    n = A.shape[0]
    T = la.schur(A)[0]
    eigsA = np.diagonal(T)
    xvals, yvals = ps_grid(eigsA, m)
    sigmin = np.zeros((m, m))
    for k in range(m):
        for j in range(m):
            T1 = (xvals[k] + 1j * yvals[j]) * np.eye(n) - T
            T2 = T1.T.conjugate()
            sigold = 0
            qold = np.zeros((n, 1))
            beta = 0
            H = np.zeros((n, n))
            q = np.random.normal(size=(n, 1)) + 1j * np.random.normal(size=(n, 1))
            q = q / la.norm(q, ord=2)
            for p in range(n - 1):
                b1 = la.solve(T2, q)
                b2 = la.solve(T1, b1)
                v = b2 - beta * qold
                alpha = np.real(np.vdot(q, v))
                v = v - alpha * q
                beta = la.norm(v)
                qold = q
                q = v / beta
                H[p + 1, p] = beta
                H[p, p + 1] = beta
                H[p, p] = alpha
                sig = np.abs(np.max(la.eig(H[:p + 1, :p + 1])[0]))
                if np.abs(sigold / sig - 1) < .001:
                    break
                sigold = sig
            sigmin[j, k] = np.sqrt(sig)
    plt.contour(xvals, yvals, np.log10(sigmin), levels=epsilon_vals)
    plt.scatter(eigsA.real, eigsA.imag)
    plt.show()
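# Cross-check sketch for the inner iteration above (assumption: n small
# enough that a dense SVD is affordable): the quantity sigmin approximates
# the smallest singular value of (z*I - A) at each grid point z, which can
# be computed directly with svdvals.
import numpy as np
import scipy.linalg as la

A = np.array([[1.0, 1.0],
              [0.0, 1.0]])
z = 1.1 + 0.1j
sig_min = la.svdvals(z * np.eye(2) - A)[-1]
print(sig_min)  # z lies in the epsilon-pseudospectrum for any epsilon > sig_min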
def test_approximate_spectral_radius(self):
    cases = []
    cases.append(matrix([[-4 - 4.0j]]))
    cases.append(matrix([[-4 + 8.2j]]))
    cases.append(matrix([[2.0 - 2.9j, 0], [0, 1.5]]))
    cases.append(matrix([[-2.0 - 2.4j, 0], [0, 1.21]]))
    cases.append(matrix([[100 + 1.0j, 0, 0], [0, 101 - 1.0j, 0], [0, 0, 99 + 9.9j]]))

    for i in range(1, 6):
        cases.append(matrix(rand(i, i) + 1.0j * rand(i, i)))

    # method should be almost exact for small matrices
    for A in cases:
        Asp = csr_matrix(A)
        [E, V] = linalg.eig(A)
        E = abs(E)
        largest_eig = (E == E.max()).nonzero()[0]
        expected_eig = E[largest_eig]
        expected_vec = V[:, largest_eig]

        assert_almost_equal(approximate_spectral_radius(A), expected_eig)
        assert_almost_equal(approximate_spectral_radius(Asp), expected_eig)
        vec = approximate_spectral_radius(A, return_vector=True)[1]
        rayleigh = abs(dot(ravel(A * vec), ravel(vec)) /
                       dot(ravel(vec), ravel(vec)))
        assert_almost_equal(rayleigh, expected_eig, decimal=4)
        vec = approximate_spectral_radius(Asp, return_vector=True)[1]
        rayleigh = abs(dot(ravel(Asp * vec), ravel(vec)) /
                       dot(ravel(vec), ravel(vec)))
        assert_almost_equal(rayleigh, expected_eig, decimal=4)

        AA = mat(A).H * mat(A)
        AAsp = csr_matrix(AA)
        [E, V] = linalg.eig(AA)
        E = abs(E)
        largest_eig = (E == E.max()).nonzero()[0]
        expected_eig = E[largest_eig]
        expected_vec = V[:, largest_eig]

        assert_almost_equal(approximate_spectral_radius(AA), expected_eig)
        assert_almost_equal(approximate_spectral_radius(AAsp), expected_eig)
        vec = approximate_spectral_radius(AA, return_vector=True)[1]
        rayleigh = abs(dot(ravel(AA * vec), ravel(vec)) /
                       dot(ravel(vec), ravel(vec)))
        assert_almost_equal(rayleigh, expected_eig, decimal=4)
        vec = approximate_spectral_radius(AAsp, return_vector=True)[1]
        rayleigh = abs(dot(ravel(AAsp * vec), ravel(vec)) /
                       dot(ravel(vec), ravel(vec)))
        assert_almost_equal(rayleigh, expected_eig, decimal=4)
def process(self, X, V, C):
    BifPoint.process(self, X, V, C)

    J_coords = C.sysfunc.jac(X, C.coords)
    eigs, VL, VR = linalg.eig(J_coords, left=1, right=1)

    # Check for nonreal multipliers
    found = False
    for i in range(len(eigs)):
        for j in range(i + 1, len(eigs)):
            if abs(imag(eigs[i])) > 1e-10 and \
               abs(imag(eigs[j])) > 1e-10 and \
               abs(eigs[i] * eigs[j] - 1) < 1e-5:
                found = True

    if not found:
        del self.found[-1]
        return False

    self.found[-1].eigs = eigs
    self.info(C, -1)
    return True
def get_eq_from_eig(m):
    '''Get the equilibrium frequencies from the matrix. The eq freqs are
    the left eigenvector corresponding to the eigenvalue of 0.
    Code here is largely taken from Bloom. See
    https://github.com/jbloom/phyloExpCM/blob/master/src/submatrix.py,
    specifically the function StationaryStates.
    '''
    (w, v) = linalg.eig(m, left=True, right=False)
    max_i = 0
    max_w = w[max_i]
    for i in range(1, len(w)):
        if w[i] > max_w:
            max_w = w[i]
            max_i = i
    assert abs(max_w) < ZERO, "Maximum eigenvalue is not close to zero."
    max_v = v[:, max_i]
    max_v /= np.sum(max_v)
    eq_freqs = max_v.real  # these are the stationary frequencies

    # SOME SANITY CHECKS
    assert np.allclose(np.zeros(61), np.dot(eq_freqs, m))  # should hold since the eigenvalue is zero

    pi_inv = np.diag(1.0 / eq_freqs)
    s = np.dot(m, pi_inv)
    assert np.allclose(m, np.dot(s, np.diag(eq_freqs)), atol=ZERO, rtol=1e-5), \
        "exchangeability and equilibrium do not recover the matrix"

    # And for some impressive overkill, double check that pi_i*q_ij = pi_j*q_ji
    for i in range(61):
        pi_i = eq_freqs[i]
        for j in range(61):
            pi_j = eq_freqs[j]
            forward = pi_i * m[i][j]
            backward = pi_j * m[j][i]
            assert abs(forward - backward) < ZERO, "Detailed balance violated."

    return eq_freqs
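# Minimal sketch of the left-eigenvector idea used above, on a hypothetical
# 2-state rate matrix Q (rows sum to zero): the stationary distribution pi
# satisfies pi Q = 0, i.e. pi is the left eigenvector for the eigenvalue 0.
import numpy as np
from scipy import linalg

Q = np.array([[-0.3, 0.3],
              [0.1, -0.1]])
w, v = linalg.eig(Q, left=True, right=False)
pi = np.real(v[:, np.argmin(np.abs(w))])
pi /= pi.sum()
print(pi)             # [0.25, 0.75]
print(np.dot(pi, Q))  # ~[0, 0]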
def propagator_steadystate(U):
    """Find the steady state for successive applications of the propagator
    :math:`U`.

    Parameters
    ----------
    U : qobj
        Operator representing the propagator.

    Returns
    -------
    a : qobj
        Instance representing the steady-state density matrix.
    """
    evals, evecs = la.eig(U.full())

    ev_min, ev_idx = _get_min_and_index(abs(evals - 1.0))
    evecs = evecs.T
    rho = Qobj(vec2mat(evecs[ev_idx]), dims=U.dims[0])
    rho = rho * (1.0 / rho.tr())
    rho = 0.5 * (rho + rho.dag())  # make sure rho is hermitian
    return rho
def Checkvalid(Object, Order, alpha, inorout, mur, sig):
    Object = Object[:-4] + ".vol"

    # Set Ordercheck to be of low order to speed up computation.
    Ordercheck = 1
    # Accuracy is increased by increasing noutput, but at greater cost
    noutput = 20

    # Set up the Solver Parameters
    Solver, epsi, Maxsteps, Tolerance = SolverParameters()

    # Loading the object file
    ngmesh = ngmeshing.Mesh(dim=3)
    ngmesh.Load("VolFiles/" + Object)

    # Creating the mesh and defining the element types
    mesh = Mesh("VolFiles/" + Object)
    mesh.Curve(5)  # This can be used to refine the mesh

    # Set materials
    mu_coef = [mur[mat] for mat in mesh.GetMaterials()]
    mu = CoefficientFunction(mu_coef)
    inout_coef = [inorout[mat] for mat in mesh.GetMaterials()]
    inout = CoefficientFunction(inout_coef)
    sigma_coef = [sig[mat] for mat in mesh.GetMaterials()]
    sigma = CoefficientFunction(sigma_coef)

    # Scalars
    Mu0 = 4 * np.pi * 10**(-7)

    # Setup the finite element space
    dom_nrs_metal = [0 if mat == "air" else 1 for mat in mesh.GetMaterials()]
    femfull = H1(mesh, order=Ordercheck, dirichlet="default|outside")
    freedofs = femfull.FreeDofs()
    ndof = femfull.ndof

    Output = np.zeros([ndof, noutput], dtype=float)
    Averg = np.zeros([1, 3], dtype=float)

    # we want to create a list of coordinates where we would like to apply BCs
    coords = np.zeros([noutput, 3], dtype=float)
    npp = 0
    for el in mesh.Elements(BND):
        if el.mat == "default":
            Averg[0, :] = 0
            # determine the average coordinate
            for v in el.vertices:
                Averg[0, :] += mesh[v].point[:]
            Averg = Averg / 3
            if npp < noutput:
                coords[npp, :] = Averg[0, :]
                npp += 1

    print(" solving problems", end='\r')
    sval = (Integrate(inout, mesh))**(1 / 3)
    for i in range(noutput):
        sol = GridFunction(femfull)
        sol.Set(exp(-((x - coords[i, 0])**2 + (y - coords[i, 1])**2 +
                      (z - coords[i, 2])**2) / sval**2),
                definedon=mesh.Boundaries("default"))

        u = femfull.TrialFunction()
        v = femfull.TestFunction()

        # the bilinear form
        a = BilinearForm(femfull, symmetric=True, condense=True)
        a += 1 / alpha**2 * grad(u) * grad(v) * dx
        a += u * v * dx

        # the right hand side
        f = LinearForm(femfull)
        f += 0 * v * dx

        if Solver == "bddc":
            c = Preconditioner(a, "bddc")  # Apply the bddc preconditioner
        a.Assemble()
        f.Assemble()
        if Solver == "local":
            c = Preconditioner(a, "local")  # Apply the local preconditioner
        c.Update()

        # Solve the problem
        f.vec.data += a.harmonic_extension_trans * f.vec
        res = f.vec.CreateVector()
        res.data = f.vec - a.mat * sol.vec
        inverse = CGSolver(a.mat, c.mat, precision=Tolerance, maxsteps=Maxsteps)
        sol.vec.data += inverse * res
        sol.vec.data += a.inner_solve * f.vec
        sol.vec.data += a.harmonic_extension * sol.vec

        Output[:, i] = sol.vec.FV().NumPy()
    print(" problems solved        ")

    Mc = np.zeros([noutput, noutput], dtype=float)
    M0 = np.zeros([noutput, noutput], dtype=float)
    print(" computing matrices", end='\r')
    # create numpy arrays by passing solutions back to NGSolve
    Soli = GridFunction(femfull)
    Solj = GridFunction(femfull)
    for i in range(noutput):
        Soli.Set(exp(-((x - coords[i, 0])**2 + (y - coords[i, 1])**2 +
                       (z - coords[i, 2])**2) / sval**2),
                 definedon=mesh.Boundaries("default"))
        Soli.vec.FV().NumPy()[:] = Output[:, i]
        for j in range(i, noutput):
            Solj.Set(exp(-((x - coords[j, 0])**2 + (y - coords[j, 1])**2 +
                           (z - coords[j, 2])**2) / sval**2),
                     definedon=mesh.Boundaries("default"))
            Solj.vec.FV().NumPy()[:] = Output[:, j]
            Mc[i, j] = Integrate(inout * (InnerProduct(grad(Soli), grad(Solj)) / alpha**2 +
                                          InnerProduct(Soli, Solj)), mesh)
            Mc[j, i] = Mc[i, j]
            M0[i, j] = Integrate((1 - inout) * (InnerProduct(grad(Soli), grad(Solj)) / alpha**2 +
                                                InnerProduct(Soli, Solj)), mesh)
            M0[j, i] = M0[i, j]
    print(" matrices computed        ")

    # solve the eigenvalue problem
    print(" solving eigenvalue problem", end='\r')
    out = slin.eig(Mc + M0, Mc, left=False, right=False)
    print(" eigenvalue problem solved ")

    # compute constants
    etasq = np.max(out.real)
    C = 1  # It is not clear what this value is.
    C1 = C * (1 + np.sqrt(etasq))**2
    C2 = 2 * etasq
    epsilon = 8.854 * 10**-12
    sigmamin = Integrate(inout * sigma, mesh) / Integrate(inout, mesh)
    mumax = Integrate(inout * mu * Mu0, mesh) / Integrate(inout, mesh)
    volume = Integrate(inout, mesh)
    D = (volume * alpha**3)**(1 / 3)

    cond1 = np.sqrt(1 / epsilon / mumax / D**2 / C1)
    cond2 = 1 / epsilon * sigmamin / C2
    cond = min(cond1, cond2)

    print(" maximum recommended frequency is ", str(round(cond / 100)))

    return cond / 100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 18 17:14:57 2017

@author: justinwu
"""
import numpy as np
from scipy import linalg as la

A = np.array([[1, 5, 2],
              [2, 4, 1],
              [3, 6, 2]])
lna, v = la.eig(A)
l1, l2, l3 = lna
print(l1, l2, l3)
print(v)
print(v[:, 0])
print(v[:, 1])
print(v[:, 2])
print('--------------')
v1 = np.array(v[:, 0]).T
print(v1)
# residual of the first eigenpair ('la' is the imported linalg name)
print(la.norm(A.dot(v1) - l1 * v1))
n = 36

# Legendre Gauss-Lobatto nodes
x, vn = zelegl(n)

# Derivative matrix for Legendre at Gauss-Lobatto nodes
D = dmlegl(n, x, vn, n)

D2 = np.dot(D, D)
D2 = D2[1:n, 1:n]

# Get eigenvalues and right eigenvectors using scipy.linalg.eig
# Notice: lam, V come in the reverse order to Matlab, where it's [V,Lam]=eig(D2)
# Also notice the difference: in Matlab we get a diagonal matrix Lam,
# here a numpy array lam!
lam, V = eig(D2)

ii = np.argsort(-lam)  # sort eigenvalues
lam = lam[ii]
V = V[:, ii]

fig = plt.figure()
#fig, axes = plt.subplots(nrows=6, ncols=1)
#fig.tight_layout()

eigs = np.linspace(5, 30, 6)  # plot 6 eigenmodes
for j in eigs:
    lv = np.shape(V)[0] + 2
    u = np.zeros(lv)
    u[1:lv - 1] = V[:, int(j)]
    ax1 = fig.add_subplot(6, 1, int(j) // 5)  # subplot index must be an int
    ax1.plot(x, u, 'bo')

plt.subplots_adjust(hspace=0.9)
def L(M, k):  # signature inferred from the calls L(EN, 0) / L(ES, 1) below
    A = np.zeros((len(M), len(M)))  # initialisation assumed; not in the original fragment
    H = knn(M, k)
    for u, vec in H.items():
        for v in vec:
            d = ker(M[u], M[v])
            A[u, v] = d
    A = A + A.T
    D = np.diag([sum(v) for v in A])
    #print A
    #return A
    return D - A

L_EN = L(EN, 0)
L_ES = L(ES, 1)

evals1, enpvecs = eig(L_EN)
evals2, eshvecs = eig(L_ES)

New_EN = []
New_ES = []

for e, n in seeded:
    print(e, n, evals1[en[e]].real, n, evals2[es[n]])
    b_en = enpvecs[en[e]]
    b_es = eshvecs[es[n]]
    New_EN.append(b_en)
    New_ES.append(b_es)

EN2 = np.array(New_EN).T
ES2 = np.array(New_ES).T
print(EN2.shape, ES2.shape)
iArr = slg.inv(arr)
print('iArr:', iArr)

# Singular value decomposition
arr = np.arange(9).reshape((3, 3)) + np.diag([1, 0, 1])
u, sigma, v = slg.svd(arr)
print(u, sigma, v)
# sig3 = np.mat([[sigma[0], 0, 0], [0, sigma[1], 0], [0, 0, sigma[2]]])
# reconstruct the original matrix
# newArr = u*sig3*v
# print('arr:', arr)
# print('newArr', newArr)
sig3 = np.diag(sigma)
# reconstruct the original matrix
newArr = u.dot(sig3).dot(v)
print('arr:', arr)
print('newArr', newArr)

# QR decomposition
q, r = slg.qr(arr)
print('q:', q)
print('r:', r)

# solve a linear system
b = np.array([6, 14])
arr = np.array([[1, 2], [3, 4]])
print('Solve:', slg.solve(arr, b))

# eigenvalues and eigenvectors
# arr = np.array([[1, 0, 0], [0, 2, 0], [0, 0, 3]])
print('eig:', slg.eig(arr))
def olf_bulb_10(Nmitral, H_in, W_in, P_odor_in, dam):
    # Nmitral = 10  # number of mitral cells
    Ngranule = np.copy(Nmitral)  # number of granule cells, pg. 383 of Li/Hop
    Ndim = Nmitral + Ngranule  # total number of cells
    t_inh = 25  # time when inhalation starts
    t_exh = 205  # time when exhalation starts
    finalt = 395  # end time of the cycle
    # y = zeros(ndim,1)

    Sx = 1.43  # Sx, Sx2, Sy, Sy2 are parameters for the activation functions
    Sx2 = 0.143
    Sy = 2.86  # given in Li/Hopfield pg 382, slightly different in her thesis
    Sy2 = 0.286
    th = 1  # threshold for the activation function

    tau_exh = 33.3333  # exhale time constant, pg. 382 of Li/Hop
    exh_rate = 1 / tau_exh

    alpha = .15  # decay rate for the neurons
    # Li/Hop have it as 1/7 or .142 on pg 383

    P_odor0 = np.zeros(Nmitral)  # odor pattern, no odor

    H0 = H_in  # weight matrix: to mitral from granule
    W0 = W_in  # weights: to granule from mitral

    Ib = np.ones((Nmitral, 1)) * .243  # initial external input to mitral cells
    Ic = np.ones((Ngranule, 1)) * .1  # initial input to granule cells,
    # these values are given on pg 382 of Li/Hop

    signalflag = 1  # 0 for linear output, 1 for activation function

    noise = np.zeros((Ndim, 1))  # noise in inputs
    noiselevel = .00143
    noisewidth = 7  # noise correlation time, given as 9 on pg 383 of Li/Hop,
    # but 7 in the thesis

    lastnoise = np.zeros((Ndim, 1))  # initial time of last noise pulse

    # *************************************************************************
    # CALCULATE FIXED POINTS

    # Calculating equilibrium value with no input
    rest0 = np.zeros((Ndim, 1))

    restequi = fsolve(lambda x: equi(x, Ndim, Nmitral, Sx, Sx2, Sy, Sy2, th,
                                     alpha, t_inh, H0, W0, P_odor0, Ib, Ic,
                                     dam), rest0)  # about 20 ms to run this

    np.random.seed(seed=23)
    # init0 = restequi + np.random.rand(Ndim)*.00143  # initial conditions
    # plus some noise, for no odor input
    init0 = restequi + np.random.rand(Ndim) * .00143  # initial conditions
    # plus some noise, for no odor input
    np.random.seed()

    # Now calculate equilibrium value with odor input
    lastnoise = lastnoise + t_inh - noisewidth  # initialize lastnoise value,
    # which is used to give the noise some kind of temporal correlation

    # find eigenvalues of A to see if input produces oscillating signal

    xequi = fsolve(lambda x: equi(x, Ndim, Nmitral, Sx, Sx2, Sy, Sy2, th,
                                  alpha, t_inh, H0, W0, P_odor_in, Ib, Ic,
                                  dam), rest0)
    # equilibrium values with some input, about 20 ms to run

    # *************************************************************************
    # CALCULATE A AND DETERMINE EXISTENCE OF OSCILLATIONS

    diffgy = celldiff(xequi[Nmitral:], Sy, Sy2, th)
    diffgx = celldiff(xequi[0:Nmitral], Sx, Sx2, th)

    H1 = np.dot(H0, diffgy)
    W1 = np.dot(W0, diffgx)  # intermediate step in constructing A

    A = np.dot(H1, W1)  # construct A

    dA, vA = lin.eig(A)  # find eigenvalues of A, about 20 ms to run this

    diff = (1j) * (dA)**.5 - alpha  # criterion for a growing oscillation
    negsum = -(1j) * (dA)**.5 - alpha  # same

    diff_re = np.real(diff)  # take the real part
    negsum_re = np.real(negsum)

    # do an argmax to return the eigenvalue that will cause the fastest
    # growing oscillations, then do a spectrograph to track the growth of
    # the associated freq through time

    indices = np.where(diff_re > 0)  # indices where the criterion is met
    indices2 = np.where(negsum_re > 0)
    # eigenvalues that could lead to growing oscillations

    # candidates = np.append(np.real((dA[indices])**.5), np.real((dA[indices2])**.5))
    largest = np.argmax(diff_re)
    check = np.size(indices)
    check2 = np.size(indices2)

    if check == 0 and check2 == 0:
        # print("No Odor Recognized")
        dominant_freq = 0
    else:
        dominant_freq = np.real((dA[largest])**.5) / (2 * np.pi)
        # frequency of the dominant mode, divided by 2pi to get cycles/ms
        # print("Odor detected. Eigenvalues:", dA[indices], dA[indices2],
        #       "\nEigenvectors:", vA[indices], vA[indices2],
        #       "\nDominant Frequency:", dominant_freq)

    # *************************************************************************
    # SOLVE DIFFERENTIAL EQUATIONS TO GET INPUT AND OUTPUTS AS FN'S OF t

    # differential equation to solve
    teval = np.r_[0:finalt]

    # solve the differential equation
    sol = solve_ivp(lambda t, y: diffeq(t, y, Nmitral, Ngranule, Ndim,
                                        lastnoise, noise, noisewidth,
                                        noiselevel, t_inh, t_exh, exh_rate,
                                        alpha, Sy, Sy2, Sx, Sx2, th, H0, W0,
                                        P_odor_in, Ic, Ib, dam),
                    [0, 395], init0, t_eval=teval, method='RK45')
    t = sol.t
    y = sol.y
    y = np.transpose(y)
    yout = np.copy(y)

    # convert signal into output signal given by the activation fn
    if signalflag == 1:
        for i in np.arange(np.size(t)):
            yout[i, :Nmitral] = cellout(y[i, :Nmitral], Sx, Sx2, th)
            yout[i, Nmitral:] = cellout(y[i, Nmitral:], Sy, Sy2, th)

    # solve diffeq for P_odor = 0
    # first, reinitialize lastnoise & noise
    noise = np.zeros((Ndim, 1))
    lastnoise = np.zeros((Ndim, 1))
    lastnoise = lastnoise + t_inh - noisewidth

    sol0 = solve_ivp(lambda t, y: diffeq(t, y, Nmitral, Ngranule, Ndim,
                                         lastnoise, noise, noisewidth,
                                         noiselevel, t_inh, t_exh, exh_rate,
                                         alpha, Sy, Sy2, Sx, Sx2, th, H0, W0,
                                         P_odor0, Ic, Ib, dam),
                     [0, 395], init0, t_eval=teval, method='RK45')

    y0 = sol0.y
    y0 = np.transpose(y0)
    y0out = np.copy(y0)

    # convert signal into output signal given by the activation fn
    if signalflag == 1:
        for i in np.arange(np.size(t)):
            y0out[i, :Nmitral] = cellout(y0[i, :Nmitral], Sx, Sx2, th)
            y0out[i, Nmitral:] = cellout(y0[i, Nmitral:], Sy, Sy2, th)

    # *************************************************************************
    # SIGNAL PROCESSING

    # Filtering the signal - O_mean: lowpass filtered signal, under 20 Hz
    # S_h: highpass filtered signal, over 20 Hz

    fs = 1 / (.001 * (t[1] - t[0]))  # sampling freq, converting from ms to sec

    f_c = 15 / fs  # cutoff freq at 15 Hz, written as a ratio of f_c to the sample freq

    flter = np.sinc(2 * f_c * (t - (finalt - 1) / 2)) * np.blackman(finalt)
    # creating the windowed sinc
    # filter, centered at the middle of the time data

    flter = flter / np.sum(flter)  # normalize

    hpflter = -np.copy(flter)
    hpflter[int((finalt - 1) / 2)] += 1  # convert the LP filter into a HP filter

    Sh = np.zeros(np.shape(yout))
    Sl = np.copy(Sh)
    Sl0 = np.copy(Sh)
    Sbp = np.copy(Sh)

    for i in np.arange(Ndim):
        Sh[:, i] = np.convolve(yout[:, i], hpflter, mode='same')
        Sl[:, i] = np.convolve(yout[:, i], flter, mode='same')
        Sl0[:, i] = np.convolve(y0out[:, i], flter, mode='same')

    # find the oscillation period Tosc (Tosc must be greater than 5 ms
    # to exclude noise)
    Tosc0 = np.zeros(np.size(np.arange(5, 50)))
    for i in np.arange(5, 50):
        Sh_shifted = np.roll(Sh, i, axis=0)
        Tosc0[i - 5] = np.sum(np.diagonal(
            np.dot(np.transpose(Sh[:, :Nmitral]), Sh_shifted[:, :Nmitral])))
        # That is, take the time-correlation matrix, take the diagonal to
        # get the autocorrelations, and find the max
    Tosc = np.argmax(Tosc0)
    Tosc = Tosc + 5

    f_c2 = 1000 * (1.3 / Tosc) / fs  # filter out components with frequencies
    # higher than this, to get rid of noise effects in the cross-correlation
    # (times 1000 to get the units right)

    flter2 = np.sinc(2 * f_c2 * (t - (finalt - 1) / 2)) * np.blackman(finalt)
    flter2 = flter2 / np.sum(flter2)

    for i in np.arange(Ndim):
        Sbp[:, i] = np.convolve(Sh[:, i], flter2, mode='same')

    # CALCULATE THE DISTANCE MEASURES

    # calculate phase via cross-correlation with each cell
    phase = np.zeros(Nmitral)

    for i in np.arange(1, Nmitral):
        crosscor = signal.correlate(Sbp[:, 0], Sbp[:, i])
        tdiff = np.argmax(crosscor) - (finalt - 1)
        phase[i] = tdiff / Tosc * 2 * np.pi

    # Problem with the method below is that it will only give values from 0 to pi
    # for i in np.arange(1, Nmitral):
    #     phase[i] = np.arccos(np.dot(Sbp[:, 0], Sbp[:, i]) /
    #                          (lin.norm(Sbp[:, 0]) * lin.norm(Sbp[:, i])))

    OsciAmp = np.zeros(Nmitral)
    Oosci = np.copy(OsciAmp) * 0j
    Omean = np.zeros(Nmitral)

    for i in np.arange(Nmitral):
        OsciAmp[i] = np.sqrt(np.sum(Sh[125:250, i]**2) / np.size(Sh[125:250, i]))
        Oosci[i] = OsciAmp[i] * np.exp(1j * phase[i])
        Omean[i] = np.average(Sl[:, i] - Sl0[:, i])

    Omean = np.maximum(Omean, 0)

    Ooscibar = np.sqrt(np.dot(Oosci, np.conjugate(Oosci))) / Nmitral
    # can't just square b/c it's complex
    Omeanbar = np.sqrt(np.dot(Omean, Omean)) / Nmitral

    maxlam = np.max(np.abs(np.imag(np.sqrt(dA))))

    return yout, y0out, Sh, t, OsciAmp, Omean, Oosci, Omeanbar, Ooscibar, \
        dominant_freq, maxlam
def pagerank_weighted_scipy(graph):
    matrix = build_matrix(graph)
    vals, vecs = eig(matrix.todense(), left=True, right=False)
    return process_results(graph, vecs)
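# Sketch of the idea behind pagerank_weighted_scipy on a toy 3-page web
# (hypothetical data): the PageRank vector is the left eigenvector of the
# row-stochastic link matrix for the eigenvalue 1, normalised to sum to one.
import numpy as np
from scipy.linalg import eig

M = np.array([[0.0, 0.5, 0.5],
              [0.5, 0.0, 0.5],
              [1.0, 0.0, 0.0]])
vals, vecs = eig(M, left=True, right=False)
i = np.argmin(np.abs(vals - 1.0))
rank = np.real(vecs[:, i])
rank /= rank.sum()
print(rank)  # stationary visit frequencies of a random surfer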
def rsw_rect(grid):
    H = 5e2         # Fluid depth
    beta = 2e-11    # beta parameter
    f0 = 1e-4       # Mean Coriolis parameter
    g = 9.81        # gravity
    Lx = np.sqrt(2) * 1e5   # Zonal width
    Ly = 1e5                # Meridional width

    Nx = grid[0]
    Ny = grid[1]

    # # Using Finite Difference
    # [Dx,Dx2,x] = fd2(Nx); [Dy,Dy2,y] = fd2(Ny)
    # x = Lx/2*x; y = Ly/2*y
    # Dx = 2/Lx*Dx; Dy = 2/Ly*Dy
    # Dx2 = (2/Lx)**2*Dx2; Dy2 = (2/Ly)**2*Dy2

    # Using cheb
    # x derivative
    Dx, x = cheb(Nx)
    x = Lx / 2 * x
    Dx = 2 / Lx * Dx

    # y derivative
    Dy, y = cheb(Ny)
    y = Ly / 2 * y
    Dy = 2 / Ly * Dy

    # Define differentiation matrices using the Kronecker product
    F = np.kron(np.diag(np.ravel(f0 + beta * y)), np.eye(Nx + 1))
    Z = np.zeros([(Nx + 1) * (Ny + 1), (Nx + 1) * (Ny + 1)])
    DX = np.kron(np.eye(Ny + 1), Dx)
    DY = np.kron(Dy, np.eye(Nx + 1))

    # Sx and Sy are used to select which rows/columns need to be
    # deleted for the boundary conditions.
    Sy = np.ones((Nx + 1) * (Ny + 1), dtype=bool)
    Sy[0:Nx + 1] = 0
    Sy[Ny * (Nx + 1):] = 0

    Sx = np.ones((Nx + 1) * (Ny + 1), dtype=bool)
    Sx[0:((Nx + 1) * (Ny + 1)):(Nx + 1)] = 0
    Sx[Nx:((Nx + 1) * (Ny + 1)):(Nx + 1)] = 0

    # Define matrices
    Zx = Z[Sx, :]
    Fx = F[Sx, :]
    Zxx = Zx[:, Sx]
    Fxy = Fx[:, Sy]
    Fy = F[Sy, :]
    Zy = Z[Sy, :]
    Fyx = Fy[:, Sx]
    Zyy = Zy[:, Sy]

    A0 = np.hstack([Zxx, Fxy, -g * DX[Sx, :]])
    A1 = np.hstack([-Fyx, Zyy, -g * DY[Sy, :]])
    A2 = np.hstack([-H * DX[:, Sx], -H * DY[:, Sy], Z])

    # size = (Nx-1)*(Ny+1) + (Nx+1)*(Ny-1) + (Nx+1)*(Ny+1)
    A = np.vstack([A0, A1, A2])
    # B = np.eye(A.shape[0])

    # Using eig
    eigVals, eigVecs = spalg.eig(1j * A)
    ind = (np.real(eigVals)).argsort()  # get indices in ascending order
    eigVecs = eigVecs[:, ind]
    eigVals = eigVals[ind]
    omega = eigVals

    # # Using eigs
    # evals_all, evecs_all = eigs(1j*A, 80, which='SR', maxiter=500)
    # print(evals_all[0:5])
    # omega = eigVals

    evals = len(eigVals)  # how many eigenvalues we have

    # for i in range(evals):
    #     plt.plot(np.arange(0, evals_all.shape[0]), evals_all[:].real, 'o')
    #     plt.title("Plot of Real Part of Eigenvalues Using eigs")
    #     plt.show()
    # for i in range(evals):
    #     plt.plot(np.arange(0, eigVals.shape[0]), eigVals[:].real, 'o')
    #     plt.title("Plot of Real Part of Eigenvalues Using eig")
    #     plt.show()

    print("First 5 eigenvalues:")
    posReal = eigVals[eigVals.real > 1e-10]
    print(posReal[0:5])

    omega = eigVals.real

    fieldNames = ["u_x", "u_y", "eta"]

    nsol = eigVecs.shape[1]
    fields = np.empty(3, dtype='object')
    fields = [np.reshape(eigVecs[0:(Nx - 1) * (Ny + 1), :], [Ny + 1, Nx - 1, nsol]),
              np.reshape(eigVecs[(Nx - 1) * (Ny + 1):2 * Nx * Ny - 2, :], [Ny - 1, Nx + 1, nsol]),
              np.reshape(eigVecs[2 * Nx * Ny - 2:, :], [Nx + 1, Ny + 1, nsol])]

    om = omega.real
    om[om <= f0] = np.inf
    ii = (abs(om.real)).argmin(0)

    for i in range(ii - 2, ii + 9):
        uf = fields[0]
        u = np.squeeze(uf[:, :, i])
        vf = fields[1]
        v = np.squeeze(vf[:, :, i])
        hf = fields[2]
        h = np.squeeze(hf[:, :, i])

        v = np.vstack([np.zeros([1, Nx + 1]), v, np.zeros([1, Nx + 1])])
        u = np.hstack([np.zeros([Ny + 1, 1]), u, np.zeros([Ny + 1, 1])])

        X, Y = np.meshgrid(x, y)

        plt.subplot(3, 2, 1)
        plt.contourf(X / 1e3, Y / 1e3, (u.real).conj().transpose(), 20)
        plt.colorbar()
        plt.subplot(3, 2, 2)
        plt.contourf(X / 1e3, Y / 1e3, (u.imag).conj().transpose(), 20)
        plt.colorbar()
        plt.subplot(3, 2, 3)
        plt.contourf(X / 1e3, Y / 1e3, (v.real).conj().transpose(), 20)
        plt.colorbar()
        plt.subplot(3, 2, 4)
        plt.contourf(X / 1e3, Y / 1e3, (v.imag).conj().transpose(), 20)
        plt.colorbar()
        plt.subplot(3, 2, 5)
        plt.contourf(X / 1e3, Y / 1e3, (h.real).conj().transpose(), 20)
        plt.colorbar()
        plt.subplot(3, 2, 6)
        plt.contourf(X / 1e3, Y / 1e3, (h.imag).conj().transpose(), 20)
        plt.colorbar()
        plt.show()
def CalAbsorb(self):
    Energyp = []
    Epsreal = []
    Absorpv = []
    Epsimag = []
    Reflexd = []
    Energyp, Epsreal, Epsimag = self.GetEpsilon()
    for i in range(len(Epsimag)):
        Energy = float(Energyp[i])
        ImagXX = float(Epsimag[i][1])
        ImagYY = float(Epsimag[i][2])
        ImagZZ = float(Epsimag[i][3])
        RealXX = float(Epsreal[i][1])
        RealYY = float(Epsreal[i][2])
        RealZZ = float(Epsreal[i][3])
        ImagXY = float(Epsimag[i][4])
        ImagYZ = float(Epsimag[i][5])
        ImagZX = float(Epsimag[i][6])
        RealXY = float(Epsreal[i][4])
        RealYZ = float(Epsreal[i][5])
        RealZX = float(Epsreal[i][6])
        Cxx = complex(RealXX, ImagXX)
        Cyy = complex(RealYY, ImagYY)
        Czz = complex(RealZZ, ImagZZ)
        Cxy = complex(RealXY, ImagXY)
        Cyz = complex(RealYZ, ImagYZ)
        Czx = complex(RealZX, ImagZX)
        # build the dielectric tensor and diagonalise it
        C_eps = mat([[Cxx, Cxy, conj(Czx)],
                     [conj(Cxy), Cyy, Cyz],
                     [Czx, conj(Cyz), Czz]])
        eps_eig, eps_v = linalg.eig(C_eps)
        hv = Energy
        alpha_a1 = hv * 71618.96076 * sqrt(abs(eps_eig[0]) - real(eps_eig[0]))
        alpha_a2 = hv * 71618.96076 * sqrt(abs(eps_eig[1]) - real(eps_eig[1]))
        alpha_a3 = hv * 71618.96076 * sqrt(abs(eps_eig[2]) - real(eps_eig[2]))
        alpha_av = (alpha_a1 + alpha_a2 + alpha_a3) / 3
        Absorpv.append(alpha_av)
        n1 = sqrt(0.5 * (abs(eps_eig[0]) + real(eps_eig[0])))
        n2 = sqrt(0.5 * (abs(eps_eig[1]) + real(eps_eig[1])))
        n3 = sqrt(0.5 * (abs(eps_eig[2]) + real(eps_eig[2])))
        n_av = (n1 + n2 + n3) / 3.0
        Reflexd.append(n_av)
    return Energyp, Absorpv, Reflexd
def matrix_eigenvalues(a):
    return la.eig(a)
trans_m = np.delete(trans_m, row, axis=1)
# re-index the bin lists after deleting `row` (avoid mutating CBINS while
# iterating over it)
CBINS = [cbin - 1 if cbin > row else cbin for cbin in CBINS if cbin != row]
if INIT_BINS[0] > row:
    INIT_BINS[0] -= 1
if TARGET_BINS[0] > row:
    TARGET_BINS[0] -= 1

print(CBINS)
print(trans_m)
K = np.nan_to_num(trans_m)
print(K)

eigvals, eigvecs = LA.eig(K.T)
unity = (np.abs(np.real(eigvals) - 1)).argmin()
print(eigvals, eigvecs)
# eigenvectors are the columns of eigvecs
eq_pop = np.abs(np.real(eigvecs)[:, unity])
eq_pop /= eq_pop.sum()

# probability of starting in init bin A.
distr_prob = np.random.rand(len(INIT_BINS))

paths = []
# t_bins: all bins which are not target bins.
t_bins = list(x for x in range(0, NBINS) if x not in TARGET_BINS)
# lower_bound = mfpt - error
# lower_bound = 121.8
lower_bound = 116
def numpy_eigen():
    A = np.array([[1, 3, 5],
                  [3, 5, 3],
                  [5, 3, 9]])
    evals, evecs = la.eig(A)
    # A is symmetric, so the specialised real-symmetric solver applies too
    return la.eigvalsh(A)
r+=t+" NO es normal "+str(ks)+" \n" elif ks>0.01 : r+=t+" es normal\n" print("-------------------------------\n") if not "no " in r: print("¡Todo parece ser normal!\n") else: print(r) print("--------------------------------\n") #------------------- Cálculo de Matriz de componentes principales -----------------------------# import scipy.linalg as la from math import log CV = Trait_matrix.cov() vals,vects = la.eig(CV) perct = [] #Porcentaje de varianza explicada for i in vals: perct.append((i*100)/vals.sum()) p=len(traits) #número de variames N=Trait_matrix[[traits[0]]].size #Tamaño muestral def eme (m,lg=False): #la sumatoria de los valores propios hasta m s=0 if lg: #si es la suma de los logaritmos for i in range(m,len(vals)+1): s+=log(vals[i-1].real,10) else: for i in range(m,len(vals)+1): s+=vals[i-1].real return s x_2 = 0 gl = 0
# File: Hello.py
import math
import scipy.linalg as la
import numpy as np
import matplotlib.pyplot as plt

A = [[1, 4], [-4, 1]]
eigvals, eigvecs = la.eig(A)

new = []
old = []
norm1 = [0, 0]
norm1dup = []
norm2 = [0, 0]
norm2dup = []
norminf = [0, 0]
norminfdup = []

x, y = -1.001, 0
for k in range(2000):
    x += 0.001
    y = 1
    old.append([x, y])
    old.append([x, -y])
    a, a_inv = np.matmul(A, [x, y]), np.matmul(A, [x, -y])
    new.append(a)
    new.append(a_inv)
    if abs(a[0]) + abs(a[1]) > abs(norm1[0]) + abs(norm1[1]):
        norm1 = a
# -------------------------------------------------------------------------- #
# Main
# -------------------------------------------------------------------------- #

# == Compute the optimal rule == #
optimal_lq = qe.lqcontrol.LQ(Q, R, A, B, C, beta)
Po, Fo, do = optimal_lq.stationary_values()

# == Compute a robust rule given theta == #
baseline_robust = qe.robustlq.RBLQ(Q, R, A, B, C, beta, theta)
Fb, Kb, Pb = baseline_robust.robust_rule()

# == Check the positive definiteness of the worst-case covariance matrix == #
# == to ensure that theta exceeds the breakdown point                    == #
test_matrix = np.identity(Pb.shape[0]) - np.dot(C.T, Pb.dot(C)) / theta
eigenvals, eigenvecs = eig(test_matrix)
assert (eigenvals >= 0).all(), 'theta below breakdown point.'

emax = 1.6e6

optimal_best_case = value_and_entropy(emax, Fo, 'best')
robust_best_case = value_and_entropy(emax, Fb, 'best')
optimal_worst_case = value_and_entropy(emax, Fo, 'worst')
robust_worst_case = value_and_entropy(emax, Fb, 'worst')

fig, ax = plt.subplots()
ax.set_xlim(0, emax)
ax.set_ylabel("Value")
ax.set_xlabel("Entropy")
ax.grid()
#import matplotlib.pyplot as plt

Nset = [100, 200, 400, 800]
nset = [2, 5, 10, 20]
m = 2
p = 8
MC = 100
tim_res = np.zeros((4, 4))
nidx = 0
for n in nset:
    Nidx = 0
    for N in Nset:
        A = np.random.randn(n, n)
        lam = linalg.eig(A)[0]
        rho = np.max(np.abs(lam))
        ## Here we create a random stable DT system
        A = A / rho / 1.01
        B = np.random.randn(n, m)
        C = np.random.randn(p, n)
        D = np.random.randn(p, m)
        fset = np.arange(0, N, dtype='float') / N
        u = np.random.randn(N, m)
        y = fsid.lsim((A, B, C, D), u, dtype='float')
        yf = np.fft.fft(y, axis=0)
        uf = np.fft.fft(u, axis=0)
        fset = np.arange(0, N, dtype='float') / N
        wexp = np.exp(1j * 2 * np.pi * fset)
        W = np.zeros((N, p, p))
def process(self):
    after = self.after
    before = self.before
    (rows, cols, bands) = after.shape
    after = np.transpose(np.reshape(after, (rows * cols, bands)), (1, 0))
    before = np.transpose(np.reshape(before, (rows * cols, bands)), (1, 0))
    after_mean = np.mean(after, axis=1)
    after_var = np.std(after, axis=1)
    before_mean = np.mean(before, axis=1)
    before_var = np.std(before, axis=1)
    # for i in range(bands):
    #     #test = after[:, i] - after_mean[i]
    #     after[i,:] = (after[i,:]-after_mean[i])/after_var[i]
    #     before[i,:] = (before[i,:]-before_mean[i])/before_var[i]
    cov_aa_mari = np.cov(after)
    cov_aa_mat_i = np.linalg.inv(cov_aa_mari)
    con_cov = np.cov(after, before)
    cov_xx = con_cov[0:bands, 0:bands]
    cov_xy = con_cov[0:bands, bands:]
    cov_yx = con_cov[bands:, 0:bands]
    cov_yy = con_cov[bands:, bands:]
    # yy_cov = np.cov(before)
    A = inv(cov_xx) @ cov_xy @ inv(cov_yy) @ cov_yx
    B = inv(cov_yy) @ cov_yx @ inv(cov_xx) @ cov_xy
    # B has the same eigenvalues as A, but different eigenvectors

    # eigenvalues (av) and eigenvectors (ad) of A
    [av, ad] = eig(A)
    # sort the eigenvalues in ascending order (the opposite of CCA)
    swap_av_index = np.argsort(av)
    swap_av = av[swap_av_index]
    swap_ad = ad[:, swap_av_index]  # eigenvectors are the columns of ad
    # enforce the constraint a^T cov_xx a = I (condition one)
    ma = inv(sqrtm(swap_ad.T @ cov_xx @ swap_ad))
    swap_ad = swap_ad @ ma
    # corresponding values for b
    [bv, bd] = eig(B)
    swap_bv = bv[swap_av_index]
    swap_bd = bd[:, swap_av_index]
    # enforce the constraint b^T cov_yy b = I (condition two)
    mb = inv(sqrtm(swap_bd.T @ cov_yy @ swap_bd))
    swap_bd = swap_bd @ mb
    # ab = np.linalg.inv(cov_yy) @ cov_yx @ swap_ad
    # bb = np.linalg.inv()
    MAD = swap_ad.T @ after - (swap_bd.T @ before)

    [i, j] = MAD.shape
    var_mad = np.zeros(i)
    for k in range(i):
        var_mad[k] = np.var(MAD[k])
    var_mad = np.transpose(np.matlib.repmat(var_mad, j, 1), (1, 0))
    res = MAD * MAD / var_mad
    T = res.sum(axis=0)
    # T = np.zeros(j)
    # for row in range(j):
    #     sum = 0.
    #     for col in range(i):
    #         sum = np.sum(np.square(MAD[col,:] / np.var(MAD[col])))
    #     T[i] = sum

    # K-means clustering
    re = np.reshape(T, (j, 1))
    kmeans = KMeans(n_clusters=2, random_state=0).fit(re)
    img = np.reshape(kmeans.labels_, (rows, cols,))
    center = kmeans.cluster_centers_
    pyplot.imshow(np.uint8(img))
    pyplot.show()
    # scipy.misc.imsave('c.jpg', img)
    print(center)
JAC[M - 1, :] = concatenate((BBOT, zeros(2 * M)))

# V
JAC[2 * M - 2, :] = concatenate((zeros(M), BTOP, zeros(M)))
JAC[2 * M - 1, :] = concatenate((zeros(M), BBOT, zeros(M)))

B = zeros((3 * M, 3 * M), dtype='complex')
B[:2 * M, :2 * M] = eye(2 * M, 2 * M)

### BCs on RHS matrix
B[M - 2, :] = 0
B[M - 1, :] = 0
B[2 * M - 2, :] = 0
B[2 * M - 1, :] = 0

eigenvalues = linalg.eig(JAC, B, left=False, right=False)
eigenvalues = eigenvalues[~isnan(eigenvalues * conj(eigenvalues))]
eigenvalues = eigenvalues[~isinf(eigenvalues * conj(eigenvalues))]

eigarr = zeros((len(eigenvalues), 2))
eigarr[:, 0] = real(eigenvalues)
eigarr[:, 1] = imag(eigenvalues)
#savetxt("eigs.txt", eigarr)

actualDecayRate = amax(eigarr[:, 0])
while actualDecayRate > 100:
    eigarr[argmax(eigarr[:, 0]), 0] = -10.0
    actualDecayRate = amax(eigarr[:, 0])

evals.append([kx, actualDecayRate])
I_eps_zz = I_eps_zz * dE / sqrt(2.0 * pi) / sigma
I_eps_xy = I_eps_xy * dE / sqrt(2.0 * pi) / sigma
I_eps_yz = I_eps_yz * dE / sqrt(2.0 * pi) / sigma
I_eps_zx = I_eps_zx * dE / sqrt(2.0 * pi) / sigma
Cxx = complex(R_eps_xx, I_eps_xx)
Cyy = complex(R_eps_yy, I_eps_yy)
Czz = complex(R_eps_zz, I_eps_zz)
Cxy = complex(R_eps_xy, I_eps_xy)
Cyz = complex(R_eps_yz, I_eps_yz)
Czx = complex(R_eps_zx, I_eps_zx)
C_eps = mat([[Cxx, Cxy, conj(Czx)],
             [conj(Cxy), Cyy, Cyz],
             [Czx, conj(Cyz), Czz]])
eps_eig, eps_v = linalg.eig(C_eps)
# print(hv, imag(eps_eig[0]), imag(eps_eig[1]), imag(eps_eig[2]))
alpha_a1 = hv * 71618.96076 * sqrt(abs(eps_eig[0]) - real(eps_eig[0]))
alpha_a2 = hv * 71618.96076 * sqrt(abs(eps_eig[1]) - real(eps_eig[1]))
alpha_a3 = hv * 71618.96076 * sqrt(abs(eps_eig[2]) - real(eps_eig[2]))
alpha_av = (alpha_a1 + alpha_a2 + alpha_a3) / 3
n1 = sqrt(0.5 * (abs(eps_eig[0]) + real(eps_eig[0])))
n2 = sqrt(0.5 * (abs(eps_eig[1]) + real(eps_eig[1])))
n3 = sqrt(0.5 * (abs(eps_eig[2]) + real(eps_eig[2])))
n_av = (n1 + n2 + n3) / 3.0
f.write("%9.4f %13.6E %13.6E %13.6E %13.6E %9.4f\n" %
        (hv, alpha_a1, alpha_a2, alpha_a3, alpha_av, n_av**2))
alpha.append([hv, alpha_av, n_av**2])
def lda_train2(data=None, label=None):
    """
    Perform Linear Discriminant Analysis on input data.

    Input:
        data (array): input data array of shape (number samples x number features).
        label (array): input data label array of shape (number of samples x 1).
                       Must start at 0 (e.g., label = [0,0,0,1,2] for 5 samples).

    Output:
        success (boolean): indicates whether training was successful (True) or not (False).

    Configurable fields:{"name": "dimreduction.lda.train", "config": {"": ""}, "inputs": ["data", "label"], "outputs": ["success"]}

    See Also:

    Notes:

    Example:

    References:
        .. [1] ...
        .. [2] ...
        .. [3] ...
    """
    # Check inputs
    if data is None:
        raise TypeError("Please provide input data.")
    if label is None:
        raise TypeError("Please provide input data label.")
    if 0 not in label:
        raise TypeError("Label must start at 0 (e.g., label = [0,0,0,1,2] for 5 samples).")
    # success = False
    try:
        # Compute mean of each set (mi)
        m = []
        for c in set(label):
            m.append(scipy.mean(data[label == c], axis=0))
        m = scipy.array(m)
        # Compute Scatter Matrix of each set (Si)
        S = []
        for c in set(label):
            S.append(scipy.cov(scipy.array(data[label == c]).T))
        # Compute Within-class Scatter Matrix (SW)
        SW = 0
        for s in S:
            SW += s
        # Compute Total Mean (mt)
        mt = scipy.mean(data, axis=0)
        # Compute Total Scatter Matrix (ST)
        ST = 0
        for xi in data:
            aux = scipy.matrix(xi - mt)
            ST += aux.T * aux
        # Compute Between-class Scatter Matrix (SB)
        SB = 0
        for c in set(label):
            aux = scipy.matrix(m[c, :] - mt)
            SB += (label == c).sum() * aux.T * aux
        # Solve (Sb - li*Sw)Wi = 0 for the eigenvectors wi
        eigenvalues, v = linalg.eig(SB, SW)
        # Get real part and sort eigenvalues
        real_sorted_eigenvalues = []
        for i in range(len(eigenvalues)):
            real_sorted_eigenvalues.append([scipy.real(eigenvalues[i]), i])
        real_sorted_eigenvalues.sort()
        # Get the (nclasses-1) main eigenvectors,
        # making sure the eigenvalue is not NaN
        nclasses = len(set(label)) - 1
        # nclasses = 5
        eigenvectors = []
        for i in range(-1, -len(real_sorted_eigenvalues) - 1, -1):
            if not scipy.isnan(real_sorted_eigenvalues[i][0]):
                # eigenvectors are the columns of v
                eigenvectors.append(v[:, real_sorted_eigenvalues[i][1]])
            if len(eigenvectors) == nclasses:
                break
        # Update variables
        # self.eigen_values = real_sorted_eigenvalues
        # self.eigen_vectors = eigenvectors
        # self.transform_matrix = scipy.matrix(eigenvectors)
        transform_matrix = scipy.matrix(eigenvectors)
        # success = True
        # self.is_trained = True
    except Exception as e:
        print(e)
        print(traceback.format_exc())
    # return success
    return transform_matrix
def my_LDA(X, Y):
    """
    Train a LDA classifier from the training set
    X: training data
    Y: class labels of training data
    """
    classLabels = np.unique(Y)  # different class labels on the dataset
    classNum = len(classLabels)
    datanum, dim = X.shape  # dimensions of the dataset
    totalMean = np.mean(X, 0)  # total mean of the data

    # ====================== YOUR CODE HERE ======================
    # Instructions: Implement the LDA technique, following the
    # steps given in the pseudocode on the assignment.
    # The function should return the projection matrix W,
    # the centroid vector for each class projected to the new
    # space defined by W and the projected data X_lda.
    # =============================================================

    # partition class labels per label - list of arrays per label
    partition = [np.where(Y == label)[0] for label in classLabels]

    # find mean value per class (per attribute) - list of arrays per label
    classMean = [(np.mean(X[idx], 0), len(idx)) for idx in partition]

    # Compute the within-class scatter matrix
    Sw = np.zeros((dim, dim))
    for idx in partition:
        # covariance matrix weighted by the number of instances per class
        Sw += np.cov(X[idx], rowvar=0) * len(idx)

    # Compute the between-class scatter matrix
    Sb = np.zeros((dim, dim))
    for mu, class_size in classMean:
        mu = mu.reshape(dim, 1)  # make column vector
        Sb += class_size * np.dot((mu - totalMean), np.transpose((mu - totalMean)))

    # Solve the eigenvalue problem for discriminant directions that maximize
    # class separability while simultaneously minimizing the variance within
    # each class.
    # The exception code can be ignored for the example dataset
    try:
        S = np.dot(linalg.inv(Sw), Sb)
        eigval, eigvec = linalg.eig(S)
    except:  # singular matrix
        print("Singular matrix")
        eigval, eigvec = linalg.eig(Sb, Sw + Sb)

    idx = eigval.argsort()[::-1]  # sort eigenvalues
    eigvec = eigvec[:, idx]  # sort eigenvectors according to eigenvalues
    W = np.real(eigvec[:, :classNum - 1])
    # eigenvectors corresponding to the k-1 largest eigenvalues

    # Project data onto the new LDA space
    X_lda = np.real(np.dot(X, np.real(W)))

    # project the mean vectors of each class onto the LDA space
    projected_centroid = [np.dot(mu, np.real(W)) for mu, class_size in classMean]

    return W, projected_centroid, X_lda
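# Sketch of the generalized eigenproblem that underlies the LDA directions
# above (hypothetical 2x2 scatter matrices): linalg.eig(Sb, Sw) returns
# pairs (w, v) with Sb v = w Sw v; the leading v maximises the Rayleigh
# quotient v^T Sb v / v^T Sw v, i.e. between-class over within-class scatter.
import numpy as np
from scipy import linalg

Sb = np.array([[2.0, 0.0],
               [0.0, 0.5]])
Sw = np.array([[1.0, 0.2],
               [0.2, 1.0]])
w, V = linalg.eig(Sb, Sw)
i = np.argmax(w.real)
v = V[:, i]
print(np.allclose(Sb @ v, w[i] * (Sw @ v)))  # True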
slice_height, slice_width = det_pixels.shape
# The '300' is the width/height of the image
if slice_height > 300 / np.sqrt(2.) or slice_width > 300 / np.sqrt(2.):
    # boolean mask of the pixels belonging to detection i + 1
    det_pixels = (det_pixels == i + 1)
    x = np.arange(slice_width)
    y = np.arange(slice_height)
    flux = np.sum(det_pixels)
    # flux-weighted centroid
    x_cen = np.sum(x[np.newaxis, :] * det_pixels) / flux
    y_cen = np.sum(y[:, np.newaxis] * det_pixels) / flux
    # second image moments about the centroid
    c_xx = np.sum(((x - x_cen)**2)[np.newaxis, :] * det_pixels) / flux
    c_yy = np.sum(((y - y_cen)**2)[:, np.newaxis] * det_pixels) / flux
    c_xy = np.sum((x - x_cen)[np.newaxis, :] * (y - y_cen)[:, np.newaxis]
                  * det_pixels) / flux
    C = np.array([[c_xx, c_xy], [c_xy, c_yy]])
    w, v = sl.eig(C)
    # major and minor axes of the equivalent ellipse
    a, b = np.max(2.0 * np.sqrt(np.real(w))), np.min(2.0 * np.sqrt(np.real(w)))
    # The final check whether we have a track: long and thin
    if a > (300 / (2. * np.sqrt(2.))) and b / a < 0.1:
        track_pixels[slc] = det_pixels

# make a plot from the original image and track pixels if track(s) were found:
if np.sum(track_pixels) > 0:
    print('Track(s) detected! Creating plot %s' % (out_plot))
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))
    ax1.imshow(data, origin='lower', cmap='gray_r', vmin=min_val, vmax=max_val)
    ax1.set_xlabel('x [pix]')
    ax1.set_ylabel('y [pix]')
    ax1.set_title('original data')
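# Side sketch (assumption, not from the original): the 2x2 moment matrix C is
# symmetric, so np.linalg.eigvalsh returns real, ascending eigenvalues
# directly, avoiding the complex cast that sl.eig requires.
import numpy as np
C = np.array([[4.0, 1.5], [1.5, 1.0]])
w = np.linalg.eigvalsh(C)   # ascending, real
b, a = 2.0 * np.sqrt(w)     # minor and major ellipse axes
print(a, b, b / a)          # elongation ratio b/a drives the track test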
import datetime
starttime = datetime.datetime.now()

import numpy as np
import scipy.linalg as la

A = np.array([[17.125, -8, 0, -8],
              [-8, 16.2815, -8, 0],
              [0, -8, 16, -8],
              [-8, 0, -8, 16.28125]])
print(la.eig(A)[0])

endtime = datetime.datetime.now()
print("total time used", endtime - starttime)
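# Aside (assumption, not in the original): the matrix above is symmetric, so
# scipy.linalg.eigh is better suited -- it exploits symmetry and returns real,
# ascending eigenvalues instead of the complex array la.eig produces.
import numpy as np
import scipy.linalg as la
A = np.array([[17.125, -8, 0, -8], [-8, 16.2815, -8, 0],
              [0, -8, 16, -8], [-8, 0, -8, 16.28125]])
print(la.eigh(A, eigvals_only=True))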
def ex17(exclude=sc.array([3]), plotfilename='ex17.png',
         nburn=5000, nsamples=200000,
         parsigma=[1, m.pi / 200., .1], bovyprintargs={}):
    """ex17: solve exercise 17 by MCMC
    Input:
       exclude        - ID numbers to exclude from the analysis
       plotfilename   - filename for the output plot
       nburn          - number of burn-in samples
       nsamples       - number of samples to take after burn-in
       parsigma       - proposal distribution width (Gaussian)
    Output:
       plot
    History:
       2010-05-07 - Written - Bovy (NYU)
    """
    # Read the data
    data = read_data('data_allerr.dat', allerr=True)
    ndata = len(data)
    nsample = ndata - len(exclude)
    # First find the chi-squared solution, which we will use as an
    # initial guess
    # Put the data in the appropriate arrays and matrices
    Y = sc.zeros(nsample)
    X = sc.zeros(nsample)
    A = sc.ones((nsample, 2))
    C = sc.zeros((nsample, nsample))
    Z = sc.zeros((nsample, 2))
    yerr = sc.zeros(nsample)
    ycovar = sc.zeros((2, nsample, 2))  # makes the sc.dot easier
    jj = 0
    for ii in range(ndata):
        if sc.any(exclude == data[ii][0]):
            pass
        else:
            Y[jj] = data[ii][1][1]
            X[jj] = data[ii][1][0]
            Z[jj, 0] = X[jj]
            Z[jj, 1] = Y[jj]
            A[jj, 1] = data[ii][1][0]
            C[jj, jj] = data[ii][2]**2.
            yerr[jj] = data[ii][2]
            ycovar[0, jj, 0] = data[ii][3]**2.
            ycovar[1, jj, 1] = data[ii][2]**2.
            ycovar[0, jj, 1] = data[ii][4] * m.sqrt(
                ycovar[0, jj, 0] * ycovar[1, jj, 1])
            ycovar[1, jj, 0] = ycovar[0, jj, 1]
            jj = jj + 1
    # Now compute the best fit and the uncertainties:
    # bestfit = (A^T C^-1 A)^-1 A^T C^-1 Y
    bestfit = sc.dot(linalg.inv(C), Y.T)
    bestfit = sc.dot(A.T, bestfit)
    bestfitvar = sc.dot(linalg.inv(C), A)
    bestfitvar = sc.dot(A.T, bestfitvar)
    bestfitvar = linalg.inv(bestfitvar)
    bestfit = sc.dot(bestfitvar, bestfit)
    # Now sample
    inittheta = m.acos(1. / m.sqrt(1. + bestfit[1]**2.))
    if bestfit[1] < 0.:
        inittheta = m.pi - inittheta
    initialguess = sc.array(
        [bestfit[0] * m.cos(inittheta), inittheta, sc.log(1.)])  # (b cos(theta), theta, logV)
    # With this initial guess start off the sampling procedure
    initialX = objective(initialguess, Z, ycovar)
    currentX = initialX
    bestX = initialX
    bestfit = initialguess
    currentguess = initialguess
    naccept = 0
    samples = []
    samples.append(currentguess)
    for jj in range(nburn + nsamples):
        # Draw a sample from the Gaussian proposal distribution
        newsample = sc.zeros(3)
        newsample[0] = currentguess[0] + stats.norm.rvs() * parsigma[0]
        newsample[1] = currentguess[1] + stats.norm.rvs() * parsigma[1]
        newsample[2] = currentguess[2] + stats.norm.rvs() * parsigma[2]
        # Calculate the objective function for the new sample
        newX = objective(newsample, Z, ycovar)
        # Metropolis accept/reject step: reject with the appropriate probability
        u = stats.uniform.rvs()
        try:
            test = m.exp(newX - currentX)
        except OverflowError:
            test = 2.
        if u < test:
            # Accept
            currentX = newX
            currentguess = newsample
            naccept = naccept + 1
        if currentX > bestX:
            bestfit = currentguess
            bestX = currentX
        samples.append(currentguess)
    acceptance = float(naccept) / (nburn + nsamples)
    if acceptance < .5 or acceptance > .8:
        print "Acceptance ratio was " + str(acceptance)
    samples = sc.array(samples).T[:, nburn:-1]
    print "Best-fit, overall"
    print bestfit, sc.mean(samples[2, :]), sc.median(samples[2, :])
    histmb, edges = sc.histogramdd(samples.T[:, 0:2],
                                   bins=round(sc.sqrt(nsamples) / 2.))
    indxi = sc.argmax(sc.amax(histmb, axis=1))
    indxj = sc.argmax(sc.amax(histmb, axis=0))
    print "Best-fit, marginalized"
    print edges[0][indxi - 1], edges[1][indxj - 1]
    print edges[0][indxi], edges[1][indxj]
    print edges[0][indxi + 1], edges[1][indxj + 1]
    t = edges[1][indxj]
    bcost = edges[0][indxi]
    mf = m.sqrt(1. / m.cos(t)**2. - 1.)
    b = bcost / m.cos(t)
    print b, mf
    # Plot
    plot.bovy_print(**bovyprintargs)
    hist, bins, patches = plot.bovy_hist(sc.exp(samples.T[:, 2] / 2.),
                                         edgecolor='k',
                                         bins=round(sc.sqrt(nsamples) / 2.),
                                         xlabel=r'$\sqrt{V}$',
                                         normed=True,
                                         histtype='step')
    cumhist = sc.cumsum(hist) / sc.sum(hist) / (bins[1] - bins[0])
    ninefive = 0.
    ninenine = 0.
    foundfive = False
    foundnine = False
    for ii in range(len(cumhist)):
        if cumhist[ii] * (bins[1] - bins[0]) > 0.95 and not foundfive:
            ninefive = bins[ii]
            foundfive = True
        if cumhist[ii] * (bins[1] - bins[0]) > 0.99 and not foundnine:
            ninenine = bins[ii]
            foundnine = True
    print ninefive, ninenine
    axvline(ninefive, color='0.5', lw=2.)
    axvline(ninenine, color='0.5', lw=2.)
    plot.bovy_end_print(plotfilename)
    return
    # NOTE: everything below this early return is unreachable in this version;
    # it is kept from an earlier variant that also plotted the fit.
    # Plot result
    plot.bovy_print()
    xrange = [0, 300]  # shadows the xrange builtin; only used as plot limits here
    yrange = [0, 700]
    plot.bovy_plot(sc.array(xrange), mf * sc.array(xrange) + b, 'k--',
                   xrange=xrange, yrange=yrange,
                   xlabel=r'$x$', ylabel=r'$y$', zorder=2)
    for ii in range(10):
        # Random sample; cast to int so it can be used as an index
        ransample = int(sc.floor(stats.uniform.rvs() * nsamples))
        ransample = samples.T[ransample, 0:2]
        mf = m.sqrt(1. / m.cos(ransample[1])**2. - 1.)
        b = ransample[0] / m.cos(ransample[1])
        bestb = b
        bestm = mf
        plot.bovy_plot(sc.array(xrange), bestm * sc.array(xrange) + bestb,
                       overplot=True, color='0.75', zorder=0)
    # Add labels
    nsamples = samples.shape[1]
    for ii in range(nsample):
        Pb = 0.
        for jj in range(nsamples):
            Pb += Pbad(samples[:, jj], Z[ii, :], ycovar[:, ii, :])
        Pb /= nsamples
        text(Z[ii, 0] + 5, Z[ii, 1] + 5, '%.1f' % Pb, color='0.5', zorder=3)
    # Plot the data, straight from plot_data.py
    data = read_data('data_allerr.dat', True)
    ndata = len(data)
    # Create the error ellipses and the data points
    id = sc.zeros(nsample)
    x = sc.zeros(nsample)
    y = sc.zeros(nsample)
    ellipses = []
    ymin, ymax = 0, 0
    xmin, xmax = 0, 0
    jj = 0
    for ii in range(ndata):
        if sc.any(exclude == data[ii][0]):
            continue
        id[jj] = data[ii][0]
        x[jj] = data[ii][1][0]
        y[jj] = data[ii][1][1]
        # Calculate the eigenvalues and the rotation angle
        ycovar = sc.zeros((2, 2))
        ycovar[0, 0] = data[ii][3]**2.
        ycovar[1, 1] = data[ii][2]**2.
        ycovar[0, 1] = data[ii][4] * m.sqrt(ycovar[0, 0] * ycovar[1, 1])
        ycovar[1, 0] = ycovar[0, 1]
        eigs = linalg.eig(ycovar)
        angle = m.atan(-eigs[1][0, 1] / eigs[1][1, 1]) / m.pi * 180.
        thisellipse = Ellipse(sc.array([x[jj], y[jj]]),
                              2 * m.sqrt(eigs[0][0]),
                              2 * m.sqrt(eigs[0][1]), angle)
        ellipses.append(thisellipse)
        if (x[jj] + m.sqrt(ycovar[0, 0])) > xmax:
            xmax = (x[jj] + m.sqrt(ycovar[0, 0]))
        if (x[jj] - m.sqrt(ycovar[0, 0])) < xmin:
            xmin = (x[jj] - m.sqrt(ycovar[0, 0]))
        if (y[jj] + m.sqrt(ycovar[1, 1])) > ymax:
            ymax = (y[jj] + m.sqrt(ycovar[1, 1]))
        if (y[jj] - m.sqrt(ycovar[1, 1])) < ymin:
            ymin = (y[jj] - m.sqrt(ycovar[1, 1]))
        jj = jj + 1
    # Add the error ellipses
    ax = gca()
    for e in ellipses:
        ax.add_artist(e)
        e.set_facecolor('none')
    ax.plot(x, y, color='k', marker='o', linestyle='None')
    plot.bovy_end_print(plotfilename)
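# Minimal Metropolis sketch (illustrative only, not from the original source):
# the same accept/reject rule as in ex17 above, for a 1-D standard normal target.
import numpy as np
def log_target(x):
    return -0.5 * x * x
x, chain = 0.0, []
for _ in range(10000):
    prop = x + np.random.normal(scale=1.0)   # Gaussian proposal
    if np.log(np.random.uniform()) < log_target(prop) - log_target(x):
        x = prop                             # accept; otherwise keep x
    chain.append(x)
print(np.mean(chain), np.std(chain))         # should be close to 0 and 1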
def dmd(A, rank=None, dt=1, modes='exact',
        return_amplitudes=False, return_vandermonde=False, order=True):
    """Dynamic Mode Decomposition.

    Dynamic Mode Decomposition (DMD) is a data processing algorithm that
    decomposes a matrix `A` in space and time. The matrix `A` is decomposed
    as `A = F * B * V`, where the columns of `F` contain the dynamic modes.
    The modes are ordered corresponding to the amplitudes stored in the
    diagonal matrix `B`. `V` is a Vandermonde matrix describing the temporal
    evolution.

    Parameters
    ----------
    A : array_like, shape `(m, n)`.
        Input array.

    rank : int
        If `rank < (n-1)`, low-rank Dynamic Mode Decomposition is computed.

    dt : scalar or array_like, optional (default: 1)
        Factor specifying the time difference between the observations.

    modes : str `{'standard', 'exact', 'exact_scaled'}`
        - 'standard' : uses the standard definition to compute the dynamic
          modes, `F = U * W`.
        - 'exact' : computes the exact dynamic modes, `F = Y * V * (S**-1) * W`.
        - 'exact_scaled' : computes the exact dynamic modes,
          `F = (1/l) * Y * V * (S**-1) * W`.

    return_amplitudes : bool `{True, False}`
        True: return amplitudes in addition to dynamic modes.

    return_vandermonde : bool `{True, False}`
        True: return Vandermonde matrix in addition to dynamic modes and
        amplitudes.

    order : bool `{True, False}`
        True: return modes sorted.

    Returns
    -------
    F : array_like
        Matrix containing the dynamic modes of shape `(m, n-1)` or `(m, rank)`.

    b : array_like, if `return_amplitudes=True`
        1-D array containing the amplitudes of length `min(n-1, rank)`.

    V : array_like, if `return_vandermonde=True`
        Vandermonde matrix of shape `(n-1, n-1)` or `(rank, n-1)`.

    omega : array_like
        Time-scaled eigenvalues: `ln(l)/dt`.

    References
    ----------
    J. H. Tu, et al. "On Dynamic Mode Decomposition: Theory and Applications"
    (2013). (available at `arXiv <http://arxiv.org/abs/1312.0041>`_).

    N. B. Erichson and C. Donovan. "Randomized Low-Rank Dynamic Mode
    Decomposition for Motion Detection" (2015). Under Review.
""" # converts A to array, raise ValueError if A has inf or nan A = np.asarray_chkfinite(A) m, n = A.shape if modes not in _VALID_MODES: raise ValueError('modes must be one of %s, not %s' % (' '.join(_VALID_MODES), modes)) if A.dtype not in _VALID_DTYPES: raise ValueError('A.dtype must be one of %s, not %s' % (' '.join(_VALID_DTYPES), A.dtype)) if rank is not None and (rank < 1 or rank > n): raise ValueError('rank must be > 1 and less than n') #Split data into lef and right snapshot sequence X = A[:, :(n - 1)] #pointer Y = A[:, 1:n] #pointer #Singular Value Decomposition U, s, Vh = linalg.svd(X, compute_uv=True, full_matrices=False, overwrite_a=False, check_finite=True) if rank is not None: U = U[:, :rank] s = s[:rank] Vh = Vh[:rank, :] #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #Solve the LS problem to find estimate for M using the pseudo-inverse #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #real: M = U.T * Y * Vt.T * S**-1 #complex: M = U.H * Y * Vt.H * S**-1 #Let G = Y * Vt.H * S**-1, hence M = M * G G = np.dot(Y, conjugate_transpose(Vh)) / s M = np.dot(conjugate_transpose(U), G) #Eigen Decomposition l, W = linalg.eig(M, right=True, overwrite_a=True) omega = np.log(l) / dt if order: # return ordered result sort_idx = np.argsort(np.abs(omega)) W = W[:, sort_idx] l = l[sort_idx] omega = omega[sort_idx] #Compute DMD Modes if modes == 'standard': F = np.dot(U, W) else: F = np.dot(G, W) if modes == 'exact_scaled': F /= l result = [F] if return_amplitudes: #Compute amplitueds b using least-squares: Fb=x1 b = _get_amplitudes(F, A) result.append(b) if return_vandermonde: #Compute Vandermonde matrix V = np.fliplr(np.vander(l, N=n)) result.append(V) result.append(omega) return result
def __init__(self, x, y, z, dim, file, percentage_max_n=0.1, typ="", motion=""):
    """
    :param x: list, x coordinates
    :param y: list, y coordinates
    :param z: list, z coordinates
    :param dim: int, dimension
    :param file: str, path to trajectory
    :param percentage_max_n: float, percentage of the trajectory length used for msd generation
    :param typ: str, type of diffusion, i.e. sub, super, rand
    :param motion: str, mode of diffusion, e.g. normal, directed
    """
    CharacteristicFour.__init__(self, x, y, z, dim, file, percentage_max_n, typ, motion)

    self.velocity_autocorrelation, self.velocity_autocorrelation_names = \
        self.get_velocity_autocorrelation([1, 2])
    self.ksstat = self.get_ksstat()
    self.dagostino_stats, self.dagostino_stats_names = self.get_dagostino_stats()
    self.mv_features, self.mv_features_names = self.moving_window(windows=[10, 20])
    self.eM, self.eL, self.eJ = self.get_exponents()
    self.maximum_ts = self.get_maximum_test_statistic()
    self.radius_gyration_tensor = self.get_tensor()
    self.trappedness = self.get_trappedness()
    self.eigenvalues, self.eigenvectors = LA.eig(self.radius_gyration_tensor)
    self.asymmetry = self.get_asymmetry()
    self.diff_kurtosis = self.get_kurtosis_corrected()
    self.efficiency = self.get_efficiency()
    self.max_std_x, self.max_std_y = self.max_min_std()
    self.max_std_change_x, self.max_std_change_y = self.max_std_change()
    self.dma, self.dma_names = self.get_dma([1, 2])

    self.values = [self.file, self.type, self.motion, self.D, self.alpha,
                   self.alpha_n_1, self.alpha_n_2, self.alpha_n_3,
                   self.fractal_dimension, self.mean_gaussianity,
                   self.mean_squared_displacement_ratio, self.straightness,
                   self.p_variation, self.max_excursion_normalised,
                   self.ksstat, self.eM, self.eL, self.eJ, self.maximum_ts,
                   self.trappedness, self.asymmetry, self.diff_kurtosis,
                   self.efficiency, self.max_std_x, self.max_std_y,
                   self.max_std_change_x, self.max_std_change_y] \
        + list(self.velocity_autocorrelation) \
        + list(self.p_variations) + self.dagostino_stats \
        + self.mv_features + list(self.dma)
    self.columns = ["file", "Alpha", "motion", "D", "alpha",
                    "alpha_n_1", "alpha_n_2", "alpha_n_3",
                    "fractal_dimension", "mean_gaussianity",
                    "mean_squared_displacement_ratio", "straightness",
                    "p-variation", "max_excursion_normalised", "ksstat_chi2",
                    "M", "L", "J", "max_ts", "trappedness", "asymmetry",
                    "diff_kurtosis", "efficiency", "max_std_x", "max_std_y",
                    "max_std_change_x", "max_std_change_y"] \
        + self.velocity_autocorrelation_names \
        + self.p_variation_names + self.dagostino_stats_names \
        + self.mv_features_names + list(self.dma_names)
    self.data = pd.DataFrame([self.values], columns=self.columns)
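# Illustrative aside (assumption, not from the original class): the radius-of-
# gyration tensor of a 2-D trajectory and its eigenvalues, the quantities from
# which asymmetry-type features like the ones above are typically derived.
import numpy as np
xy = np.cumsum(np.random.randn(100, 2), axis=0)   # toy random walk
centered = xy - xy.mean(axis=0)
T = centered.T @ centered / len(xy)               # gyration tensor, 2x2
eigenvalues, eigenvectors = np.linalg.eig(T)
print(np.sort(eigenvalues))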
def compute_cca(views, k=300, eps=1e-12):
    """
    views: list of views, each N x v_i_emb, where N is the number of
           observations and v_i_emb is the embedding dimensionality of
           that view
    k:     integer, dimensionality of the joint projection space
    eps:   float added to the diagonals of matrices A and B for stability
    """
    m = views[0].size(0)
    t = views[0].type()
    o = [v.size(1) for v in views]
    o_sum = sum(o)  # renamed from `os` to avoid shadowing the os module
    A = torch.zeros(o_sum, o_sum).type(t)
    B = torch.zeros(o_sum, o_sum).type(t)

    print('doing generalised eigendecomposition...')
    row_i = 0
    for i, V_i in enumerate(views):
        V_i = V_i.t()
        o_i = V_i.size(0)
        mu_i = V_i.mean(dim=1, keepdim=True)
        # mean center view i
        V_i_bar = V_i - mu_i.expand_as(V_i)  # o_i x N
        col_i = 0
        for j, V_j in enumerate(views):
            V_j = V_j.t()
            o_j = V_j.size(0)
            if i > j:
                col_i += o_j
                continue
            mu_j = V_j.mean(dim=1, keepdim=True)
            # mean center view j
            V_j_bar = V_j - mu_j.expand_as(V_j)  # o_j x N
            # cross-covariance block between views i and j
            C_ij = (1.0 / (m - 1)) * torch.mm(V_i_bar, V_j_bar.t())  # o_i x o_j
            A[row_i:row_i + o_i, col_i:col_i + o_j] = C_ij
            A[col_i:col_i + o_j, row_i:row_i + o_i] = C_ij.t()
            if i == j:
                B[row_i:row_i + o_i, col_i:col_i + o_j] = C_ij.clone()
            col_i += o_j
        row_i += o_i

    # regularize the diagonals for numerical stability
    torch.diagonal(A).add_(eps)
    torch.diagonal(B).add_(eps)

    # solve the generalized eigenproblem A v = l B v with scipy
    A = A.cpu().numpy()
    B = B.cpu().numpy()
    l, v = la.eig(A, B)
    idx = l.argsort()[-k:][::-1]  # indices of the k largest eigenvalues
    l = l[idx]  # eigenvalues
    v = v[:, idx]  # eigenvectors
    l = torch.from_numpy(l.real)
    v = torch.from_numpy(v.real)

    # extract the per-view projection matrices
    proj_matrices = [
        v[sum(o[:i]):sum(o[:i]) + views[i].size(1)].type(t)
        for i in range(len(views))
    ]
    return l.type(t), proj_matrices
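# Hedged usage sketch (not from the original source): two random views of the
# same 100 observations, projected to a k=3 shared space with compute_cca.
import torch
views = [torch.randn(100, 8), torch.randn(100, 5)]
l, (W1, W2) = compute_cca(views, k=3)
Z1 = views[0] @ W1   # 100 x 3 projection of view 1
Z2 = views[1] @ W2   # 100 x 3 projection of view 2
print(Z1.shape, Z2.shape)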
def compute_eigvals(self):
    """Compute and store the eigenvalues of each Jacobian in self.J."""
    self.eigvals = []
    for J in self.J:
        w, _ = eig(J)
        self.eigvals.append(w)
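# Aside (assumption, not in the original): when only the eigenvalues are
# needed, scipy.linalg.eigvals skips computing the eigenvectors entirely.
import numpy as np
from scipy.linalg import eigvals
J = np.array([[0.0, 1.0], [-2.0, -0.5]])
print(eigvals(J))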
ans = integral(this_func, 0, 1)
print("integral:", ans)

# scipy integrate
value, error = integrate.quad(this_func, 0, 1)
print("scipy integrate value", value)
print("scipy integrate error", error)

# scipy linear algebra
A = np.random.rand(3, 3)
b = np.random.rand(3)

x = linalg.solve(A, b)  # solve A x = b
print(x)

eigen = linalg.eig(A)  # eigenvalues and eigenvectors
print(eigen)

det = linalg.det(A)  # determinant
print(det)

U, s, Vh = linalg.svd(A)  # singular value decomposition

# statistics in scipy
y = stats.norm.cdf(0.311)  # normal CDF
print(y)

# data fitting
def func(x, a, b, c):
    return a * np.exp(-b * x) + c
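# Hedged continuation of the data-fitting example above (assumption:
# scipy.optimize.curve_fit is the intended tool for the model func).
from scipy import optimize
xdata = np.linspace(0, 4, 50)
ydata = func(xdata, 2.5, 1.3, 0.5) + 0.05 * np.random.randn(50)
popt, pcov = optimize.curve_fit(func, xdata, ydata)
print(popt)  # fitted (a, b, c), should be close to (2.5, 1.3, 0.5)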
    Xt.append(xt)
    Yt.append(y)

# labelled source
Xs = np.row_stack(Xs)
Xs = Xs / Xs.std(0)  # scale
Ys = np.concatenate(Ys)

# unlabelled target
Xt = np.row_stack(Xt)
Xt = Xt / Xt.std(0)  # scale
Yt = np.concatenate(Yt)

# PCA
Zd = 2  # dimensionality of the subspace

# weights: eigenvectors of the pooled covariance (train on even indices).
# NOTE: eig does not sort its eigenpairs, so w[:, :Zd] is not guaranteed to
# hold the top-variance directions.
_, w = eig(np.cov(np.row_stack([Xs[0:-1:2], Xt[0:-1:2]]).T))

# project
pcaZs = Xs @ w[:, :Zd]
pcaZt = Xt @ w[:, :Zd]

# -- plot
# train (even idxs)
plt.scatter(pcaZs[0:-1:2, 0], pcaZs[0:-1:2, 1], s=4,
            c=np.take(clr, Ys[0:-1:2] - 1), label='$D_s$')
plt.scatter(pcaZt[0:-1:2, 0], pcaZt[0:-1:2, 1], s=8,
            ec=np.take(clr, Yt[0:-1:2] - 1),
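# Side note (assumption, not in the original): a sorted variant of the PCA
# projection step above, taking the eigenvectors of the largest eigenvalues.
import numpy as np
X = np.random.randn(200, 5)
vals, vecs = np.linalg.eig(np.cov(X.T))
order = np.argsort(vals.real)[::-1]   # descending variance
W = vecs[:, order[:2]].real           # top-2 principal directions
Z = X @ W
print(Z.shape)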