def test_diag(self):
    a = (100 * get_mat(5)).astype('f')
    b = a.copy()
    for k in range(5):
        for l in range(max((k - 1, 0)), 5):
            b[l, k] = 0
    assert_equal(triu(a, k=2), b)

    b = a.copy()
    for k in range(5):
        for l in range(k + 3, 5):
            b[l, k] = 0
    assert_equal(triu(a, k=-2), b)
def iterative():
    N, h, neighs, atoms, nc, nv, vl = pre.run2()
    del atoms, nc, vl
    lp = pro.laplace(N, h, neighs)
    del neighs
    A = lp.tolil()
    del lp
    B = np.zeros(N)
    for i in range(N):
        A[0, i] = 0.0
        B[i] = -4 * np.pi * nv[i]
    del nv
    A[0, 0] = 1.0
    A = A.toarray()
    B[0] = 0.0
    Au = la.triu(A)
    Al = la.tril(A)
    Ad = np.diag(np.diag(A))
    ## for i in range(N):
    ##     for j in range(N):
    ##         if A[i][j] != Au[i][j] + Al[i][j] - Ad[i][j]:
    ##             print(i, j)
    x = np.zeros(N)
    T = 1000
    RJ = np.dot(la.inv(Ad), A) + np.identity(N)
    RGS = -np.dot(la.inv(Al - Ad), Au)
    val, vec = la.eig(RGS)
    maxx = 0.0
    for i in range(len(val)):
        if abs(val[i]) > maxx:
            maxx = abs(val[i])
    print(maxx)
def test_basic(self):
    a = (100 * get_mat(5)).astype('l')
    b = a.copy()
    for k in range(5):
        for l in range(k + 1, 5):
            b[l, k] = 0
    assert_equal(triu(a), b)
def make_edges(n):
    A = la.triu(np.random.randint(1, 50, (n, n)) * (np.random.rand(n, n) > .5))
    S = []
    for index, x in np.ndenumerate(A):
        if x != 0:
            S.append((str(index[0]), str(index[1]), x))
    return S
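# Usage sketch for make_edges, assuming `np` is numpy and `la` is
# scipy.linalg, as the body implies. (Note: scipy.linalg.triu was
# deprecated in SciPy 1.11 and removed in 1.13; numpy.triu is a drop-in
# replacement.) Each tuple is (node_u, node_v, weight) for a randomly
# weighted upper-triangular graph.
import numpy as np
from scipy import linalg as la

np.random.seed(0)   # make the random graph reproducible
for u, v, w in make_edges(4):
    print(u, v, w)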
def Problem1(n):
    """Use linalg.toeplitz() and linalg.triu() to generate matrices of arbitrary size"""
    from scipy.linalg import triu
    ut = triu([[0] * i + [x for x in range(1, (n + 1) - i)] for i in range(n)])
    toep = la.toeplitz([1.0 / (i + 1) for i in range(n)])
    return ut, toep
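# Usage sketch for Problem1, assuming `la` aliases scipy.linalg at module
# level as the body implies. For n = 3, ut is upper triangular with each
# row counting up from 1, and toep is the 3x3 Toeplitz matrix of 1/(i+1).
ut, toep = Problem1(3)
print(ut)
# [[1 2 3]
#  [0 1 2]
#  [0 0 1]]
print(toep)
# [[1.         0.5        0.33333333]
#  [0.5        1.         0.5       ]
#  [0.33333333 0.5        1.        ]]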
def yty2(): invT = S #log("old invT = " + str(invT)) if j == p_eff - 1: invT[:p_eff, :p_eff] = triu(X2[:p_eff, :m].dot(np.conj(X2)[:p_eff, :m].T)) #log("invT = " + str(invT)) for jj in range(p_eff): invT[jj,jj] = (invT[jj,jj] - 1.)/2. #log("invT = {}".format(invT)) return invT
def Problem1(n): """Use linalg.toeplitz() and linalg.triu() to generate matrices of arbitrary size""" from scipy.linalg import triu ut = triu([[0]*i+[x for x in range(1,(n+1)-i)] for i in range(n)]) toep = la.toeplitz([1.0/(i+1) for i in range(n)]) return ut, toep
def yty2(): invT = S #log("old invT = " + str(invT)) if j == p_eff - 1: invT[:p_eff, :p_eff] = triu(X2[:p_eff, :m].dot( np.conj(X2)[:p_eff, :m].T)) #log("invT = " + str(invT)) for jj in range(p_eff): invT[jj, jj] = (invT[jj, jj] - 1.) / 2. #log("invT = {}".format(invT)) return invT
def test_get_chol_decomp(self):
    """
    Return L,
    """
    n = 10
    for sparsity in [False, True]:
        #
        # Cycle through sparsity
        #
        # Generate random SPD matrix
        A = test_matrix(n, sparsity)
        K = SPDMatrix(A)

        # Compute the Cholesky decomposition
        K.chol_decomp()

        # Check that the decomposition reproduces the matrix
        if K.chol_type() == 'full':
            # Get Cholesky factor
            L, D, P, D0 = K.get_chol_decomp()
            if not np.allclose(D, D0):
                # Indefinite matrix - change to modified matrix
                A = L.dot(D.dot(L.T))

            # Check reconstruction
            self.assertTrue(np.allclose(L.dot(D.dot(L.T)), A))

            # Check that P*L is lower triangular with ones on diagonal
            self.assertTrue(np.allclose(1, np.diagonal(P.dot(L))))
            self.assertTrue(np.allclose(0, linalg.triu(P.dot(L), 1)))
        elif K.chol_type() == 'sparse':
            # Get Cholesky factor
            L = K.get_chol_decomp()
            P = L.P()
            LL = L.L()

            # Build permutation matrix
            I = sp.diags([1], 0, shape=(n, n), format='csc')
            PP = I[P, :]

            # Compute P'L
            LL = PP.T.dot(LL)

            # Check reconstruction LL' = PAP'
            self.assertTrue(np.allclose(LL.dot(LL.T).toarray(), A.toarray()))
def LU(A):
    (m, n) = A.shape
    U = deepcopy(A)
    P = np.eye(m)
    L = np.eye(m)
    for i in range(m):
        # find the pivot row
        max_j = i + np.argmax(np.abs(U[i:, i]))
        # make the pivot (row-swap) matrix
        Pi = np.eye(m)
        Pi[i, i] = 0
        Pi[max_j, max_j] = 0
        Pi[i, max_j] = 1
        Pi[max_j, i] = 1
        # do the pivot
        U = Pi @ U
        # accumulate the pivot into P
        P = P @ Pi
        # apply the pivot to L
        L = Pi @ L @ Pi
        # make the eliminator Li
        Li = np.eye(m)
        Li[(i + 1):m, i] = -U[(i + 1):m, i] / U[i, i]
        # make the inverse of the eliminator
        Linv = np.eye(m)
        Linv[(i + 1):m, i] = U[(i + 1):m, i] / U[i, i]
        # do the elimination (we already did the pivot)
        U = Li @ U
        # accumulate the inverse-eliminator into L
        L = L @ Linv
    L = linalg.tril(L)
    U = linalg.triu(U)
    return (P, L, U)
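# Quick sanity check for LU above - a sketch assuming the factors follow
# the A = P @ L @ U convention (the same one scipy.linalg.lu uses), and
# that numpy as np, copy.deepcopy, and scipy.linalg are imported in the
# module where LU is defined, as its body implies.
import numpy as np

A = np.random.rand(5, 5)
P, L, U = LU(A)
assert np.allclose(P @ L @ U, A)     # reconstruction
assert np.allclose(L, np.tril(L))    # L is lower triangular
assert np.allclose(U, np.triu(U))    # U is upper triangular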
def spd(A):
    """
    spd(A) -> True if A is a symmetric positive definite matrix

    Constructs an upper triangular matrix from A and then checks that each
    diagonal element is greater than zero (this comes from the test that a
    matrix is SPD iff all leading principal minors are positive, and that
    these determinants carry through to the diagonalization).

    An alternative test is checking eigs(A) > 0, but this is much faster
    as it doesn't involve root-finding.
    """
    # test symmetry first, as it's easier
    if not symmetric(A):
        return False
    t = linalg.triu(A)
    return (np.diag(t) > 0).all()
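# A quick cross-check of spd() against a Cholesky-based test - a sketch
# assuming the `symmetric` helper, `np`, and `linalg` are in scope where
# spd is defined, as its body implies. Note that a positive diagonal is
# necessary but not sufficient for positive definiteness, so the two
# tests can disagree on adversarial inputs.
import numpy as np
from scipy import linalg

def spd_chol(A):
    """SPD test via Cholesky: succeeds iff A is symmetric positive definite."""
    if not np.allclose(A, A.T):
        return False
    try:
        linalg.cholesky(A)
        return True
    except linalg.LinAlgError:
        return False

B = np.random.rand(4, 4)
A = B @ B.T + 4 * np.eye(4)   # well-conditioned SPD matrix
assert spd(A) and spd_chol(A)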
def kruskal(A):
    size = A.shape
    minSpanTree = sp.zeros(size)
    nodesTree = sp.arange(size[0])
    D = sp.ones(size) * sp.arange(size[0])
    C = la.triu(A)
    # rows of Q: edge weight, row index, column index
    Q = sp.concatenate([[C.flatten()], [D.T.flatten()], [D.flatten()]])
    W = Q[:, sp.nonzero(C.flatten())[0]]
    edges = W[:, W[0, :].argsort()]
    i = 0
    j = 0
    # while np.sum(nodesTree != nodesTree[0]) > 0:
    while j < (size[0] - 1):
        now = edges[:, i]
        i = i + 1
        # indices come out of the float edge array, so cast before indexing
        u, v = int(now[1]), int(now[2])
        if nodesTree[u] != nodesTree[v]:
            minSpanTree[u, v] = now[0]
            nodesTree[nodesTree == nodesTree[v]] = nodesTree[u]
            j = j + 1
    return minSpanTree + minSpanTree.T
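# Usage sketch for kruskal on a small weighted graph, assuming `sp`
# aliases numpy-style array routines (as old scipy top-level did) and
# `la` is scipy.linalg. The MST of this graph uses edges of weight
# 2, 3, and 5; the symmetrized return value double-counts each edge.
import numpy as np

A = np.array([[0, 2, 0, 6],
              [2, 0, 3, 8],
              [0, 3, 0, 5],
              [6, 8, 5, 0]], dtype=float)
mst = kruskal(A)
print(mst.sum() / 2)   # total MST weight: 10.0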
def GaussSeidel(A, b, tolerance=1.e-10, MaxSteps=100):
    """Solve the linear system A x = b using the Gauss-Seidel method,
    starting from the trivial initial guess."""

    x = np.zeros_like(b)

    Anorm = A.copy()
    bnorm = b.copy()
    n = len(b)
    for i in range(n):
        bnorm[i] /= A[i, i]
        Anorm[i, :] /= A[i, i]

    # Compute the split
    D = np.eye(n)
    AL = la.tril(D - Anorm)
    AU = la.triu(D - Anorm)
    N = np.eye(n) - AL
    P = AU

    # Compute the convergence matrix and check its spectral radius
    M = np.dot(la.inv(N), P)
    eigenvalues, eigenvectors = la.eig(M)
    rho = np.amax(np.absolute(eigenvalues))
    if rho > 1:
        print("Gauss-Seidel will not converge as the"
              " largest eigenvalue of the convergence matrix is {}".format(rho))

    for j in range(MaxSteps):
        x_old = x.copy()
        for i in range(n):
            x[i] = bnorm[i] + np.dot(AL[i, :], x) + np.dot(AU[i, :], x_old)
        if la.norm(x - x_old) < tolerance:
            print("Gauss-Seidel converged in {} iterations.".format(j))
            break

    return x
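# Usage sketch: solve a small diagonally dominant system, where
# Gauss-Seidel is guaranteed to converge, and compare against the
# direct solve. Assumes `np` is numpy and `la` is scipy.linalg, as
# the function body implies.
import numpy as np
from scipy import linalg as la

A = np.array([[4.0, 1.0, 0.0],
              [1.0, 4.0, 1.0],
              [0.0, 1.0, 4.0]])
b = np.array([1.0, 2.0, 3.0])
x = GaussSeidel(A, b)
print(np.allclose(x, la.solve(A, b)))   # expect True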
def KMeansSigma(data):
    N, D = data.shape[0], data.shape[1]
    mean = np.mean(data, axis=0)
    gap = data - mean
    S = np.dot(gap.T, gap) / (N - 1)
    Sbar = np.dot(gap.T, gap) / N
    Vars = np.zeros((D, D))
    for i in range(N):
        xi = gap[i].reshape(-1, 1)
        Vars += (np.dot(xi, xi.T) - Sbar)**2
    VarS = (N / ((N - 1)**3)) * Vars
    # boolean mask of the strict upper triangle
    I = sl.triu(np.ones((D, D)))
    for i in range(D):
        I[i, i] = 0
    I = I == 1
    Lambda = np.sum(VarS[I]) / np.sum(S[I]**2)
    Lambda = np.min([1, Lambda])
    Lambda = np.max([0, Lambda])
    C = Lambda * np.diag(np.diag(S)) + (1 - Lambda) * S
    return C
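# Usage sketch for KMeansSigma, assuming `np` is numpy and `sl` is
# scipy.linalg as the body implies. The result is a shrinkage estimate
# that interpolates between the sample covariance and its diagonal.
import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(size=(50, 3))
C = KMeansSigma(data)
print(np.allclose(C, C.T))   # the estimate stays symmetric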
def Jacobi(A, b, tolerance=1.e-10, MaxSteps=100):
    """Solve the linear system A x = b using Jacobi's method,
    starting from the trivial initial guess."""

    x = np.zeros_like(b)

    Anorm = A.copy()
    bnorm = b.copy()
    n = len(b)
    for i in range(n):
        bnorm[i] /= A[i, i]
        Anorm[i, :] /= A[i, i]

    # Compute the split
    N = np.eye(n)
    P = N - Anorm
    AL = la.tril(P)
    AU = la.triu(P)

    # Compute the convergence matrix and check its spectral radius
    M = np.dot(la.inv(N), P)
    eigenvalues, eigenvectors = la.eig(M)
    rho = np.amax(np.absolute(eigenvalues))
    if rho > 1:
        print("Jacobi will not converge as the"
              " largest eigenvalue of the convergence matrix is {}".format(rho))

    for j in range(MaxSteps):
        x_old = x.copy()
        x = bnorm + np.dot(AL + AU, x_old)
        if la.norm(x - x_old) < tolerance:
            print("Jacobi converged in {} iterations.".format(j))
            break

    return x
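# Usage sketch mirroring the Gauss-Seidel example above: Jacobi on a
# strictly diagonally dominant system converges to the direct solution.
import numpy as np
from scipy import linalg as la

A = np.array([[5.0, 1.0, 1.0],
              [1.0, 5.0, 1.0],
              [1.0, 1.0, 5.0]])
b = np.array([7.0, 7.0, 7.0])
x = Jacobi(A, b)
print(np.allclose(x, la.solve(A, b)))   # expect True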
def get_stat_from_dynamics(singlecdt, tmin=None, tmax=None):
    """Computes stationary autocorrelation vector from autocorr matrix.

    This function uses the autocorrelation matrix, which stores two-point
    autocorrelation functions between the various acquisition time-points,
    to compute the autocorrelation function under the stationarity
    hypothesis. Computation is fast, but the result is mostly unreliable
    (it depends very much on the accuracy of the dynamical autocorrelation
    estimates, which is usually quite low).
    Use set_stationary_autocorrelation instead.

    Parameters
    ----------
    singlecdt : UnivariateConditioned instance
    tmin : float (default None)
    tmax : float (default None)

    Returns
    -------
    dts, cts, res
    dts : array of floats
        time intervals
    cts : array of ints
        sample counts
    res : array of floats
        autocorrelation values

    Note
    ----
    The estimate of the autocorrelation function using this procedure
    gives very poor accuracy, and should be used only for quick inspection
    when a Univariate has been created and computed. For a better
    autocorrelation function estimate, it is necessary to parse samples
    another time, using only the sample average estimated in Univariate
    conditioned instances.
    """
    times = singlecdt.time
    autocorr = singlecdt.autocorr
    cts = singlecdt.count_two
    # Resize matrices depending on time limits
    indexlow, indexup = 0, None
    if tmin is not None:
        while indexlow < len(times) and times[indexlow] < tmin:
            indexlow += 1
    if tmax is not None:
        indexup = indexlow
        while indexup < len(times) and times[indexup] < tmax:
            indexup += 1
    sl = slice(indexlow, indexup)
    times = times[sl]
    autocorr = autocorr[sl, sl]
    cts = cts[sl, sl]
    # how many time-points
    nframes = len(times)
    # builtin int/float here: the np.int/np.float aliases were removed in numpy >= 1.24
    all_counts = np.zeros(nframes, dtype=int)
    res = np.zeros(nframes, dtype=float)
    dts = np.zeros(nframes, dtype=float)
    col = np.zeros(nframes, dtype=np.int16)
    col[-1] = 1  # initialisation
    for k in range(nframes):
        col[k] = 1
        col[k - 1] -= 1
        forward = triu(toeplitz(col))
        all_counts[k] = np.sum(forward * cts)
        res[k] = np.sum(forward * cts * autocorr) / all_counts[k]
        dts[k] = times[k] - times[0]
    return dts, all_counts, res
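# Mini-demo of the masking trick in the loop above: with col equal to the
# k-th standard basis vector, triu(toeplitz(col)) is exactly the mask of
# matrix entries (i, j) with j - i == k, i.e. all time-point pairs
# separated by k frames.
import numpy as np
from scipy.linalg import toeplitz, triu

col = np.zeros(4, dtype=np.int16)
col[2] = 1
print(triu(toeplitz(col)))
# [[0 0 1 0]
#  [0 0 0 1]
#  [0 0 0 0]
#  [0 0 0 0]]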
def toepOne(n):
    return la.triu(la.toeplitz(sp.arange(1, n + 1, 1)))
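# Usage sketch for toepOne, assuming `la` is scipy.linalg and `sp` provides
# arange (numpy-style, as old scipy top-level did). For n = 3 this yields
# the upper triangle of the Toeplitz matrix built from [1, 2, 3].
print(toepOne(3))
# [[1 2 3]
#  [0 1 2]
#  [0 0 1]]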
# (1): tril
# ---
# Make a copy of a matrix with elements above the k-th diagonal zeroed.
# k == 0 is the main diagonal, k < 0 subdiagonal and k > 0 superdiagonal.
print(linalg.tril(kron))
print(linalg.tril(kron, k=-1))
print(linalg.tril(kron, k=1))

# (2): triu
# ---
# Make a copy of a matrix with elements below the k-th diagonal zeroed.
# k == 0 is the main diagonal, k < 0 subdiagonal and k > 0 superdiagonal.
print(linalg.triu(kron))
print(linalg.triu(kron, k=-1))
print(linalg.triu(kron, k=1))

###################################################################################################
### ===================
### EIGENVALUE PROBLEMS
### ===================

# (1): eig
# ---
# Solve an ordinary or generalized eigenvalue problem of a square matrix.
def pseudoSpect(A, npts=200, s=2., gridPointSelect=100, verbose=True, lstSqSolve=True): """ original code from http://www.cs.ox.ac.uk/projects/pseudospectra/psa.m % psa.m - Simple code for 2-norm pseudospectra of given matrix A. % Typically about N/4 times faster than the obvious SVD method. % Comes with no guarantees! - L. N. Trefethen, March 1999. parameter: A: the matrix to analyze npts: number of points at the grid s: axis limits (-s ... +s) gridPointSelect: ??? verbose: prints progress messages lstSqSolve: if true, use least squares in algorithm where solve could be used (probably) instead. (replacement for ldivide in MatLab) """ from scipy.linalg import schur, triu from pylab import (meshgrid, norm, dot, zeros, eye, diag, find, linspace, arange, isreal, inf, ones, lstsq, solve, sqrt, randn, eig, all) ldiv = lambda M1, M2: lstsq(M1, M2)[ 0] if lstSqSolve else lambda M1, M2: solve(M1, M2) def planerot(x): ''' return (G,y) with a matrix G such that y = G*x with y[1] = 0 ''' G = zeros((2, 2)) xn = x / norm(x) G[0, 0] = xn[0] G[1, 0] = -xn[1] G[0, 1] = xn[1] G[1, 1] = xn[0] return G, dot(G, x) xmin = -s xmax = s ymin = -s ymax = s x = linspace(xmin, xmax, npts, endpoint=False) y = linspace(ymin, ymax, npts, endpoint=False) xx, yy = meshgrid(x, y) zz = xx + 1j * yy #% Compute Schur form and plot eigenvalues: T, Z = schur(A, output='complex') T = triu(T) eigA = diag(T) # Reorder Schur decomposition and compress to interesting subspace: select = find(eigA.real > -250) # % <- ALTER SUBSPACE SELECTION n = len(select) for i in arange(n): for k in arange(select[i] - 1, i, -1): #:-1:i G = planerot([T[k, k + 1], T[k, k] - T[k + 1, k + 1]])[0].T[::-1, ::-1] J = slice(k, k + 2) T[:, J] = dot(T[:, J], G) T[J, :] = dot(G.T, T[J, :]) T = triu(T[:n, :n]) I = eye(n) # Compute resolvent norms by inverse Lanczos iteration and plot contours: sigmin = inf * ones((len(y), len(x))) #A = eye(5) niter = 0 for i in arange(len(y)): # 1:length(y) if all(isreal(A)) and (ymax == -ymin) and (i > len(y) / 2): sigmin[i, :] = sigmin[len(y) - i, :] else: for jj in arange(len(x)): z = zz[i, jj] T1 = z * I - T T2 = T1.conj().T if z.real < gridPointSelect: # <- ALTER GRID POINT SELECTION sigold = 0 qold = zeros((n, 1)) beta = 0 H = zeros((100, 100)) q = randn(n, 1) + 1j * randn(n, 1) while norm(q) < 1e-8: q = randn(n, 1) + 1j * randn(n, 1) q = q / norm(q) for k in arange(99): v = ldiv(T1, (ldiv(T2, q))) - dot(beta, qold) #stop alpha = dot(q.conj().T, v).real v = v - alpha * q beta = norm(v) qold = q q = v / beta H[k + 1, k] = beta H[k, k + 1] = beta H[k, k] = alpha if (alpha > 1e100): sig = alpha else: sig = max(abs(eig(H[:k + 1, :k + 1])[0])) if (abs(sigold / sig - 1) < .001) or (sig < 3 and k > 2): break sigold = sig niter += 1 #print 'niter = ', niter #%text(x(jj),y(i),num2str(k)) % <- SHOW ITERATION COUNTS sigmin[i, jj] = 1. / sqrt(sig) #end # end if verbose: print 'finished line ', str(i), ' out of ', str(len(y)) return x, y, sigmin
def pseudoSpect(A, npts=200, s=2., gridPointSelect=100, verbose=True, lstSqSolve=True): """ original code from http://www.cs.ox.ac.uk/projects/pseudospectra/psa.m % psa.m - Simple code for 2-norm pseudospectra of given matrix A. % Typically about N/4 times faster than the obvious SVD method. % Comes with no guarantees! - L. N. Trefethen, March 1999. parameter: A: the matrix to analyze npts: number of points at the grid s: axis limits (-s ... +s) gridPointSelect: ??? verbose: prints progress messages lstSqSolve: if true, use least squares in algorithm where solve could be used (probably) instead. (replacement for ldivide in MatLab) """ from scipy.linalg import schur, triu from pylab import (meshgrid, norm, dot, zeros, eye, diag, find, linspace, arange, isreal, inf, ones, lstsq, solve, sqrt, randn, eig, all) ldiv = lambda M1,M2 :lstsq(M1,M2)[0] if lstSqSolve else lambda M1,M2: solve(M1,M2) def planerot(x): ''' return (G,y) with a matrix G such that y = G*x with y[1] = 0 ''' G = zeros((2,2)) xn = x / norm(x) G[0,0] = xn[0] G[1,0] = -xn[1] G[0,1] = xn[1] G[1,1] = xn[0] return G, dot(G,x) xmin = -s xmax = s ymin = -s ymax = s; x = linspace(xmin,xmax,npts,endpoint=False) y = linspace(ymin,ymax,npts,endpoint=False) xx,yy = meshgrid(x,y) zz = xx + 1j*yy #% Compute Schur form and plot eigenvalues: T,Z = schur(A,output='complex'); T = triu(T) eigA = diag(T) # Reorder Schur decomposition and compress to interesting subspace: select = find( eigA.real > -250) # % <- ALTER SUBSPACE SELECTION n = len(select) for i in arange(n): for k in arange(select[i]-1,i,-1): #:-1:i G = planerot([T[k,k+1],T[k,k]-T[k+1,k+1]] )[0].T[::-1,::-1] J = slice(k,k+2) T[:,J] = dot(T[:,J],G) T[J,:] = dot(G.T,T[J,:]) T = triu(T[:n,:n]) I = eye(n); # Compute resolvent norms by inverse Lanczos iteration and plot contours: sigmin = inf*ones((len(y),len(x))); #A = eye(5) niter = 0 for i in arange(len(y)): # 1:length(y) if all(isreal(A)) and (ymax == -ymin) and (i > len(y)/2): sigmin[i,:] = sigmin[len(y) - i,:] else: for jj in arange(len(x)): z = zz[i,jj] T1 = z * I - T T2 = T1.conj().T if z.real < gridPointSelect: # <- ALTER GRID POINT SELECTION sigold = 0 qold = zeros((n,1)) beta = 0 H = zeros((100,100)) q = randn(n,1) + 1j*randn(n,1) while norm(q) < 1e-8: q = randn(n,1) + 1j*randn(n,1) q = q/norm(q) for k in arange(99): v = ldiv(T1,(ldiv(T2,q))) - dot(beta,qold) #stop alpha = dot(q.conj().T, v).real v = v - alpha*q beta = norm(v) qold = q q = v/beta H[k+1,k] = beta H[k,k+1] = beta H[k,k] = alpha if (alpha > 1e100): sig = alpha else: sig = max(abs(eig(H[:k+1,:k+1])[0])) if (abs(sigold/sig-1) < .001) or (sig < 3 and k > 2): break sigold = sig niter += 1 #print 'niter = ', niter #%text(x(jj),y(i),num2str(k)) % <- SHOW ITERATION COUNTS sigmin[i,jj] = 1./sqrt(sig); #end # end if verbose: print 'finished line ', str(i), ' out of ', str(len(y)) return x,y,sigmin
def transform_SDRs_to_Vs(sdrs, t_limits,
                         diffusion_constant=3e-12,
                         adsorption_constant=6e-9,
                         desorption_constant=1e-4,
                         nrof_rebinding_events_truncate=20,
                         provide_varphi=False):
    # Enforce input shape (vertical vectors for time component)
    t_limits = np.reshape(t_limits, (t_limits.size, 1))

    # Extract implicit inputs
    nrof_cells, nrof_time_points = sdrs.shape
    t_centers = 0.5 * (t_limits[:-1] + t_limits[1:])

    ## Compute discretization of $\phi(\tau)$

    # Compute discretization of $\phi(\tau)$, analytical part
    phi_ini = (2 * adsorption_constant
               * (np.sqrt(t_limits[1:]) - np.sqrt(t_limits[:-1]))
               / np.sqrt(np.pi * diffusion_constant))

    # Compute discretization of $\phi(\tau)$, numerical part
    erfcx_term = lambda tau: (adsorption_constant**2 / diffusion_constant) * (
        special.erfcx(adsorption_constant * np.sqrt(tau / diffusion_constant)))
    for idx, _ in enumerate(phi_ini):
        phi_ini[idx] -= integrate.quad(erfcx_term,
                                       t_limits[idx], t_limits[idx + 1])[0]

    ## Prepare for recursive computation of v (and optionally, \varphi)

    # Invert time in SDRs to prepare approximation of temporal integral
    sdrs = np.fliplr(sdrs)

    # Initialize output variables (obtained by accumulation of terms)
    # Output variable, containing all the $v_c(\tau,T)$s
    vs = np.zeros((nrof_cells, nrof_time_points))
    # Intermediate variable (not needed), $\varphi(\tau,t)$
    # (shape must be passed as a tuple; the original passed two arguments)
    if provide_varphi:
        varphi = np.zeros((nrof_time_points, nrof_time_points))

    # Helper for Poisson PMFs
    poisson_pmf = lambda val, lam: np.exp(-lam) * lam**val / special.factorial(val)

    ## Recursively approximate v
    for rebind in range(nrof_rebinding_events_truncate):
        if rebind == 0:
            # For a single rebind, load discretized $\phi(\tau)$
            phi_rebind = phi_ini
        else:
            # For the subsequent rebinds, perform discretized convolution
            # to approximate $\phi^j(\tau)$
            phi_rebind = np.expand_dims(
                np.convolve(phi_rebind[:, 0], phi_ini[:, 0]), 1)

        # Clip for only the smallest times in free motion (indicator in
        # formulas). Note that doing this does not interfere with the
        # computation of the 0:nrof_time_points section of higher
        # convolutional powers, and avoids unnecessary computations
        phi_rebind = phi_rebind[:nrof_time_points]

        # Array representing the different values taken by the Poisson
        # distribution expression when one changes \tau and \eta,
        # dim 0 is \tau and dim 1 is \eta
        poisson_with_indicator_rebind = linalg.triu(linalg.toeplitz(
            poisson_pmf(rebind,
                        desorption_constant
                        * np.reshape(t_centers, (1, t_centers.size)))))

        # Update on $\varphi(\tau,t)$
        if provide_varphi:
            varphi += phi_rebind * poisson_with_indicator_rebind

        # Update on each of the $v_c(\tau,T)$
        vs += (np.sum(np.expand_dims(sdrs, 2)
                      * np.expand_dims(
                          poisson_with_indicator_rebind.swapaxes(0, 1), 0),
                      1, keepdims=True)
               * np.expand_dims(phi_rebind.swapaxes(0, 1), 0)
               ).swapaxes(1, 2).squeeze(axis=2)

    if provide_varphi:
        return (vs, varphi)
    return vs
import numpy as np
import scipy.linalg as sl

A = np.arange(16).reshape(4, 4)
print('A: \n', A)

# k = 0: keep the diagonal; every element above the diagonal is zeroed
print('tril(A, 0): \n', sl.tril(A))
# k = -1: the diagonal elements themselves are zeroed as well
print('tril(A, -1): \n', sl.tril(A, -1))
# k = 1: the boundary moves up to one place above the diagonal
print('tril(A, 1): \n', sl.tril(A, 1))

# triu is the opposite of tril
# k = 0: keep the diagonal; every element below the diagonal is zeroed
print('triu(A, 0): \n', sl.triu(A))
# k = 1: the diagonal elements themselves are zeroed as well
print('triu(A, 1): \n', sl.triu(A, 1))
# k = -1: the boundary moves down to one place below the diagonal
print('triu(A, -1): \n', sl.triu(A, -1))

# For both tril and triu, positive k always moves the boundary up,
# and negative k moves it down
x = np.linspace(0.0, 1.0, np1)
y = np.linspace(0.0, 1.0, np1)
F = np.zeros((nm1, nm1))
for i in range(nm1):
    F[0, i] += left(y[i + 1]) / h2
    F[nm2, i] += right(y[i + 1]) / h2
    F[i, 0] += bottom(x[i + 1]) / h2
    F[i, nm2] += top(x[i + 1]) / h2
    for j in range(nm2):
        F[i, j] += f(x[i + 1], y[i + 1])
F *= h2

D = np.ones((nm1, nm1))
D = 5 * np.eye(nm1) - sl.triu(sl.tril(D, 1), -1)

start = t.time()
U = sweep(nm1, D, F).transpose()
end = t.time()
print('time = ', end - start, sep='')

def ua(x, y):
    return x * x * y + y * y * x

Ua = np.zeros((np1, np1))
for i in range(np1):
    for j in range(np1):
        Ua[i, j] = ua(x[i], y[j])
def get_stat_from_dynamics(singlecdt, tmin=None, tmax=None): """Computes stationary autocorrelation vector from autocorr matrix. This function uses the autocorrelation matrix, that stores two-point autocorrelation functions between the various acquisition time-points, to compute the autocorrelation function in case of stationary hypothesis. Computation is fast but result is mostly unreliable (depends very much on the accuracy of the dynamical autocorrelation estimates, which is usually quite low). Use set_stationary_autocorrelation instead. Parameters ---------- singlecdt : UnivariateConditioned instance tmin : float (default None) tmax : float (default None) Returns ------- dts, cts, res dts : array of floats time intervals cts : array of ints sample counts res : array of floats autocorrelation values Note ---- The estimate of the autocorrelation function using this procedure gives very poor accuracy estimates, and should be used only for quick inspection when a Univariate has been created and computed. For a better autocorrelation function estimate, it is necessary to parse samples another time, using only the sample average estimated in Univariate conditioned instances. """ times = singlecdt.time autocorr = singlecdt.autocorr cts = singlecdt.count_two # Resize matrices depending on time limits indexlow, indexup = 0, None if tmin is not None: while indexlow < len(times) and times[indexlow] < tmin: indexlow += 1 if tmax is not None: indexup = indexlow while indexup < len(times) and times[indexup] < tmax: indexup += 1 sl = slice(indexlow, indexup) times = times[sl] autocorr = autocorr[sl, sl] cts = cts[sl, sl] # how many time-points nframes = len(times) all_counts = np.zeros(nframes, dtype=np.int) res = np.zeros(nframes, dtype=np.float) dts = np.zeros(nframes, dtype=np.float) col = np.zeros(nframes, dtype=np.int16) col[-1] = 1 # initialisation for k in range(nframes): col[k] = 1 col[k-1] -= 1 forward = triu(toeplitz(col)) all_counts[k] = np.sum(forward * cts) res[k] = np.sum(forward * cts * autocorr)/all_counts[k] dts[k] = times[k] - times[0] return dts, all_counts, res
            y[these[idx]][int(0.6 * n_samp_dec):]))

    hierarchy = []
    layer_vars = np.split(Data.represent_labels(Data.terminals),
                          2 * np.cumsum(layers))[:-1]
    for l in range(len(layer_vars) - 1):
        y_sup = layer_vars[l].argmax(0)
        y_sub = layer_vars[l + 1].argmax(0)
        sigs = [util.decompose_covariance(z[y_sup == s, :].T,
                                          y_sub[y_sup == s])[1]
                for s in np.unique(y_sup)]
        dots = np.einsum('ikl,jkl->ij', np.array(sigs), np.array(sigs))
        csim = la.triu(dots, 1) / np.sqrt(
            np.diag(dots)[:, None] * np.diag(dots)[None, :])
        foo1, foo2 = np.nonzero(np.triu(np.ones(dots.shape), 1))
        hierarchy.append(np.mean(csim[foo1, foo2]))

    ps_samps.append(PS)
    ccg_samps.append(CCGP)
    dec_samps.append(decoding)
    hier_samps.append(hierarchy)
    ka_samps.append(np.sum(Kz * Ky)
                    / np.sqrt(np.sum(Ky * Ky) * np.sum(Kz * Kz)))

all_PS.append(ps_samps)
kernel_align.append(ka_samps)
all_ccgp.append(ccg_samps)