def testMinrealBrute(self):
    for n, m, p in permutations(range(1, 6), 3):
        s = matlab.rss(n, p, m)
        sr = s.minreal()
        if s.states > sr.states:
            self.nreductions += 1
        else:
            np.testing.assert_array_almost_equal(
                np.sort(eigvals(s.A)), np.sort(eigvals(sr.A)))
            for i in range(m):
                for j in range(p):
                    ht1 = matlab.tf(
                        matlab.ss(s.A, s.B[:, i], s.C[j, :], s.D[j, i]))
                    ht2 = matlab.tf(
                        matlab.ss(sr.A, sr.B[:, i], sr.C[j, :], sr.D[j, i]))
                    try:
                        self.assert_numden_almost_equal(
                            ht1.num[0][0], ht2.num[0][0],
                            ht1.den[0][0], ht2.den[0][0])
                    except Exception as e:
                        # for larger systems, the tf minreal's
                        # the original rss, but not the balanced one
                        if n < 6:
                            raise e

    self.assertEqual(self.nreductions, 2)
def modularity_spectrum(G):
    """Return eigenvalues of the modularity matrix of G.

    Parameters
    ----------
    G : Graph
       A NetworkX Graph or DiGraph

    Returns
    -------
    evals : NumPy array
      Eigenvalues

    See Also
    --------
    modularity_matrix

    References
    ----------
    .. [1] M. E. J. Newman, "Modularity and community structure in networks",
       Proc. Natl. Acad. Sci. USA, vol. 103, pp. 8577-8582, 2006.
    """
    from scipy.linalg import eigvals
    if G.is_directed():
        return eigvals(nx.directed_modularity_matrix(G))
    else:
        return eigvals(nx.modularity_matrix(G))
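# Usage sketch for modularity_spectrum above (not from the original source;
# assumes networkx and numpy are installed). A positive leading eigenvalue
# of the modularity matrix indicates community structure.
import networkx as nx
import numpy as np

G = nx.karate_club_graph()
evals = nx.modularity_spectrum(G)   # complex array, via scipy.linalg.eigvals
print(np.max(evals.real) > 0)       # True for this clearly modular graph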
def format_eig_svd():
    def format_cplx(z):
        if z.imag < 1e-300:
            return '{0:.4f}'.format(z.real)
        return '{0:.4f}+{1:.4f}i'.format(z.real, z.imag)

    eig12 = sp.eigvals(generate_matrix(12))
    svd12 = sp.svdvals(generate_matrix(12))
    eig25 = sp.eigvals(generate_matrix(25))
    svd25 = sp.svdvals(generate_matrix(25))

    result12 = r'\begin{tabular}{cc}' + '\n'
    result12 += r' Eigenvalues&Singular values\\' + '\n'
    result12 += ' \\hline\n'
    result25 = copy.copy(result12)
    for k in range(25):
        if k < 12:
            result12 += r' ${0}$&${1:.4f}$\\'.format(format_cplx(eig12[k]), svd12[k]) + '\n'
        result25 += r' ${0}$&${1:.4f}$\\'.format(format_cplx(eig25[k]), svd25[k]) + '\n'
    result12 += '\\end{tabular}\n'
    result25 += '\\end{tabular}\n'
    print(result12)
    print(result25)
def test_dare(self):
    A = matrix([[-0.6, 0], [-0.1, -0.4]])
    Q = matrix([[2, 1], [1, 0]])
    B = matrix([[2, 1], [0, 1]])
    R = matrix([[1, 0], [0, 1]])

    X, L, G = dare(A, B, Q, R)
    # print("The solution obtained is", X)
    assert_array_almost_equal(
        A.T * X * A - X -
        A.T * X * B * solve(B.T * X * B + R, B.T * X * A) + Q,
        zeros((2, 2)))
    assert_array_almost_equal(solve(B.T * X * B + R, B.T * X * A), G)
    # check for stable closed loop
    lam = eigvals(A - B * G)
    assert_array_less(abs(lam), 1.0)

    A = matrix([[1, 0], [-1, 1]])
    Q = matrix([[0, 1], [1, 1]])
    B = matrix([[1], [0]])
    R = 2

    X, L, G = dare(A, B, Q, R)
    # print("The solution obtained is", X)
    assert_array_almost_equal(
        A.T * X * A - X -
        A.T * X * B * solve(B.T * X * B + R, B.T * X * A) + Q,
        zeros((2, 2)))
    assert_array_almost_equal(B.T * X * A / (B.T * X * B + R), G)
    # check for stable closed loop
    lam = eigvals(A - B * G)
    assert_array_less(abs(lam), 1.0)
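# Hedged cross-check of the same DARE identity using SciPy directly (a
# sketch, not part of the test above; an identity Q is used here to keep
# the problem well-posed for scipy.linalg.solve_discrete_are).
import numpy as np
from scipy.linalg import solve_discrete_are, eigvals

A = np.array([[-0.6, 0.0], [-0.1, -0.4]])
B = np.array([[2.0, 1.0], [0.0, 1.0]])
Q = np.eye(2)
R = np.eye(2)
X = solve_discrete_are(A, B, Q, R)
G = np.linalg.solve(B.T @ X @ B + R, B.T @ X @ A)  # optimal feedback gain
assert np.all(np.abs(eigvals(A - B @ G)) < 1.0)    # closed loop is stable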
def get_graph_props(g):
    travgl = g.transitivity_avglocal_undirected()
    tru = g.transitivity_undirected()
    d = g.diameter()
    asd = g.assortativity_degree()
    apl = g.average_path_length()
    omega = g.omega()
    density = g.density()
    maxd = g.maxdegree()
    medd = np.median(g.degree())
    plaw = ig.power_law_fit(g.degree())
    spnorm = max(np.abs(spl.eigvals(g.get_adjacency().data)))
    leigs = spl.eigvals(g.laplacian())
    # eigvals returns a complex array; compare real parts so the filter
    # does not raise a TypeError on complex-vs-float ordering
    algc = abs(sorted([e.real for e in leigs if e.real > 1e-10])[1])

    return [travgl,      # avg. local transitivity
            tru,         # global transitivity
            d,           # diameter
            asd,         # degree assortativity
            apl,         # avg. path length
            omega,       # max. clique
            density,
            maxd,        # max. degree
            medd,        # median degree
            plaw.alpha,  # power law exponent
            spnorm,      # largest eigenvalue of adj. matrix
            algc,        # 2nd smallest non-zero eigenvalue of laplacian
            ]
def ps_scatter_plot(A, epsilon=.001, num_pts=20):
    n = A.shape[0]
    eigs = np.empty((num_pts + 1, n), dtype=complex)
    for i in range(1, num_pts + 1):
        E = np.random.random((n, n))
        E *= epsilon / la.norm(E)
        es = la.eigvals(A + E)
        eigs[i, :] = es
        plt.plot(np.real(eigs[i]), np.imag(eigs[i]), 'b*')
    eigs[0] = la.eigvals(A.todense())
    plt.plot(np.real(eigs[0]), np.imag(eigs[0]), 'r*')
    plt.show()
    return eigs
def computeAbsoluteLimitingLinearCoefficient(n, multiplyO, multiplyN, multiplyL, multiplyR):  # {{{
    if True:  # n <= 3:
        matrix = []
        for i in range(n):
            matrix.append(multiplyO(array([0] * i + [1] + [0] * (n - 1 - i))))
        matrix = array(matrix)
        evals = eigvals(matrix)
        lam = evals[argmax(abs(evals))]
        tmatrix = matrix - lam * identity(n)
        ovecs = svd(dot(tmatrix, tmatrix))[-1][-2:]
        assert ovecs.shape == (2, n)
    else:
        ovals, ovecs = eigs(LinearOperator((n, n), matvec=multiplyO), k=2, which='LM', ncv=9)
        ovecs = ovecs.transpose()
    Omatrix = zeros((2, 2), dtype=complex128)
    for i in range(2):
        for j in range(2):
            Omatrix[i, j] = dot(ovecs[i].conj(), multiplyO(ovecs[j]))
    numerator = sqrt(trace(dot(Omatrix.transpose().conj(), Omatrix)) - 2)
    lnvecs = multiplyL(ovecs)
    rnvecs = multiplyR(ovecs)
    Nmatrix = zeros((2, 2), dtype=complex128)
    for i in range(2):
        for j in range(2):
            Nmatrix[i, j] = dot(lnvecs[i].conj(), multiplyN(rnvecs[j]))
    denominator = sqrt(trace(dot(Nmatrix.transpose().conj(), Nmatrix)))
    return numerator / denominator
def bands(self, k):
    """
    Compute the band structure in the first Bloch wavevector
    k = [-pi/a, pi/a]
    """
    # onsite energies
    onsite = self.onsite

    # consider the case where hop_inter and hop_intra are different
    # the intra-cell hopping term \Sum_i a_i^\dag b_i = \Sum_k a_k^\dag b_k e^{ik(rb - ra)}
    # the inter-cell hopping term \Sum_j b_j^\dag a_{j+1} = \Sum_k b_k^\dag a_k e^{ik(ra - rb)}
    hop_intra = self.hop_intra
    hop_inter = self.hop_inter

    Norbs = self.Norbs
    r = self.r
    a = self.a

    # construct the H_k matrix, that is the Hamiltonian expressed in the
    # spatial Fourier space: H = (a_k, b_k)^\dag H(k) (a_k, b_k)^T
    H = np.zeros((Norbs, Norbs), dtype=np.complex128)

    # onsite energy
    for i in range(Norbs):
        H[i, i] = onsite[i]

    # hopping parameters
    for i in range(Norbs):
        for j in range(i + 1, Norbs):
            H[i, j] = hop_intra[i, j] * np.exp(-1j * k * (r[i] - r[j])) + \
                      hop_inter[i, j] * np.exp(-1j * k * (a + r[i] - r[j]))
            H[j, i] = np.conj(H[i, j])

    eigvals = linalg.eigvals(H)
    return eigvals
def _default_response_times(A, n):
    """Compute a reasonable set of time samples for the response time.

    This function is used by `impulse`, `impulse2`, `step` and `step2`
    to compute the response time when the `T` argument to the function
    is None.

    Parameters
    ----------
    A : ndarray
        The system matrix, which is square.
    n : int
        The number of time samples to generate.

    Returns
    -------
    t : ndarray
        The 1-D array of length `n` of time samples at which the response
        is to be computed.
    """
    # Create a reasonable time interval.
    # TODO: This could use some more work.
    # For example, what is expected when the system is unstable?
    vals = linalg.eigvals(A)
    r = min(abs(real(vals)))
    if r == 0.0:
        r = 1.0
    tc = 1.0 / r
    t = linspace(0.0, 7 * tc, n)
    return t
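# Worked instance of the heuristic above (illustrative, not library code):
# with slowest eigenvalue real part 0.5 the time constant is 2 s, so the
# returned grid spans seven time constants, i.e. 0..14 s.
import numpy as np
from scipy import linalg

A = np.array([[-0.5, 0.0], [0.0, -10.0]])
r = min(abs(linalg.eigvals(A).real))   # 0.5, the slowest mode
t = np.linspace(0.0, 7 * (1.0 / r), 100)
print(t[-1])                           # 14.0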
def Energy_condensate_full(Q, F1, x, y, H, mu, kappa, Ns):
    if Q == 0 and F1 == 0:
        return 1e14
    m = find_minimum(Q, F1, mu, kappa, Ns)
    if m[0] < H / 2:
        return 1e14
    result = 0
    for n1 in range(Ns):
        for n2 in range(Ns):
            M, dim = HamiltonianMatrix(n1, n2, Q, F1, 0, H, mu, kappa, Ns, 'T1')
            B = np.identity(dim)
            # integer division so the slice bounds stay ints under Python 3
            B[dim // 2:dim, dim // 2:dim] = -np.identity(dim // 2)
            eig = np.absolute(np.real(lin.eigvals(np.dot(B, M))))
            result += sum(eig) / 2
            vec = [x[Ns * n1 + n2],
                   np.conjugate(y[Ns * ((Ns - n1) % Ns) + (Ns - n2) % Ns])]
            result += np.dot(vec, np.dot(np.conj(vec).T, M))
    return result - 3 * Ns ** 2 * (np.abs(F1) ** 2 - np.abs(Q) ** 2) / 2 \
        - Ns ** 2 * mu * (1. + kappa) + Ns * H
def _default_response_frequencies(A, n):
    """Compute a reasonable set of frequency points for bode plot.

    This function is used by `bode` to compute the frequency points (in
    rad/s) when the `w` argument to the function is None.

    Parameters
    ----------
    A : ndarray
        The system matrix, which is square.
    n : int
        The number of time samples to generate.

    Returns
    -------
    w : ndarray
        The 1-D array of length `n` of frequency samples (in rad/s) at which
        the response is to be computed.
    """
    vals = linalg.eigvals(A)
    # Remove poles at 0 because they don't help us determine an interesting
    # frequency range. (And if we pass a 0 to log10() below we will crash.)
    poles = [pole for pole in vals if pole != 0]
    # If there are no non-zero poles, just hardcode something.
    if len(poles) == 0:
        minpole = 1
        maxpole = 1
    else:
        minpole = min(abs(real(poles)))
        maxpole = max(abs(real(poles)))
    # A reasonable frequency range is two orders of magnitude before the
    # minimum pole (slowest) and two orders of magnitude after the maximum
    # pole (fastest).
    w = numpy.logspace(numpy.log10(minpole) - 2, numpy.log10(maxpole) + 2, n)
    return w
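# Worked instance of the frequency heuristic above (illustrative): poles
# with |Re| = 0.5 and 10 give a grid from two decades below the slowest
# pole to two decades above the fastest one.
import numpy as np
from scipy import linalg

A = np.array([[-0.5, 0.0], [0.0, -10.0]])
poles = [p for p in linalg.eigvals(A) if p != 0]
minpole = min(abs(np.real(poles)))
maxpole = max(abs(np.real(poles)))
w = np.logspace(np.log10(minpole) - 2, np.log10(maxpole) + 2, 100)
print(w[0], w[-1])   # ~5e-3 and ~1e3 rad/s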
def ar_model_check_stable(A):
    """check if this AR model is stable

    :Parameters:
        A : ndarray
            The coefficient matrix of the model
    """
    # inits and checks
    m, p = A.shape
    p /= m
    if p != round(p):
        raise ValueError('bad inputs!')
    p = int(p)  # keep the lag count integral for the index arithmetic below

    # check for stable model
    A1 = N.concatenate((
        A,
        N.concatenate((
            N.eye((p - 1) * m),
            N.zeros(((p - 1) * m, m))
        ), axis=1)
    ))
    lambdas = NL.eigvals(A1)
    rval = True
    if (N.absolute(lambdas) > 1).any():
        rval = False
    del A1, lambdas
    return rval
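# Self-contained sketch of the companion-matrix test above (the AR(2)
# coefficients are illustrative): y_t = 0.5*y_{t-1} - 0.25*y_{t-2} stacks
# into a companion matrix whose eigenvalues have modulus 0.5 < 1, so the
# model is stable.
import numpy as np

A = np.array([[0.5, -0.25]])            # m = 1, p = 2
m = A.shape[0]
p = A.shape[1] // m
A1 = np.concatenate((A, np.concatenate((np.eye((p - 1) * m),
                                        np.zeros(((p - 1) * m, m))), axis=1)))
print(np.all(np.abs(np.linalg.eigvals(A1)) <= 1))   # True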
def test_simple_tr(self):
    a = array([[1, 2, 3], [1, 2, 3], [2, 5, 6]], 'd')
    a = transpose(a).copy()
    a = transpose(a)
    w = eigvals(a)
    exact_w = [(9 + sqrt(93)) / 2, 0, (9 - sqrt(93)) / 2]
    assert_array_almost_equal(w, exact_w)
def test_simple_complex(self):
    a = [[1, 2, 3], [1, 2, 3], [2, 5, 6 + 1j]]
    w = eigvals(a)
    exact_w = [(9 + 1j + sqrt(92 + 6j)) / 2,
               0,
               (9 + 1j - sqrt(92 + 6j)) / 2]
    assert_array_almost_equal(w, exact_w)
def adjacency_spectrum(G, weight='weight'):
    """Return eigenvalues of the adjacency matrix of G.

    Parameters
    ----------
    G : graph
       A NetworkX graph

    weight : string or None, optional (default='weight')
       The edge data key used to compute each value in the matrix.
       If None, then each edge has weight 1.

    Returns
    -------
    evals : NumPy array
      Eigenvalues

    Notes
    -----
    For MultiGraph/MultiDiGraph, the edges weights are summed.
    See to_numpy_matrix for other options.

    See Also
    --------
    adjacency_matrix
    """
    from scipy.linalg import eigvals
    return eigvals(nx.adjacency_matrix(G, weight=weight).todense())
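# Usage sketch for adjacency_spectrum above (not from the original source;
# assumes networkx). For an undirected path graph the eigenvalues are the
# real values 2*cos(k*pi/5), k = 1..4.
import networkx as nx
import numpy as np

G = nx.path_graph(4)
evals = nx.adjacency_spectrum(G)   # complex dtype with ~zero imaginary parts
print(np.sort(evals.real))         # approx [-1.618, -0.618, 0.618, 1.618]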
def test_timescales(self):
    P_dense = self.bdc.transition_matrix()
    P = self.bdc.transition_matrix_sparse()
    ev = eigvals(P_dense)
    """Sort with decreasing magnitude"""
    ev = ev[np.argsort(np.abs(ev))[::-1]]
    ts = -1.0 / np.log(np.abs(ev))

    """k=None"""
    with self.assertRaises(ValueError):
        tsn = timescales(P)

    """k is not None"""
    tsn = timescales(P, k=self.k)
    self.assertTrue(np.allclose(ts[1:self.k], tsn[1:]))

    """k is not None, ncv is not None"""
    tsn = timescales(P, k=self.k, ncv=self.ncv)
    self.assertTrue(np.allclose(ts[1:self.k], tsn[1:]))

    """tau=7"""

    """k is not None"""
    tsn = timescales(P, k=self.k, tau=7)
    self.assertTrue(np.allclose(7 * ts[1:self.k], tsn[1:]))
def set_aw(sys, poles):
    """Divide the controller into input and feedback parts for anti-windup

    Usage
    =====
    [sys_in, sys_fbk] = set_aw(sys, poles)

    Inputs
    ------
    sys : controller
    poles : poles for the anti-windup filter

    Outputs
    -------
    sys_in, sys_fbk : controller in input and feedback parts
    """
    sys = ss(sys)
    den_old = poly(eigvals(sys.A))
    den = poly(poles)
    tmp = tf(den_old, den, sys.Tsamp)
    tmpss = tf2ss(tmp)
    sys_in = ss(tmp * sys)
    sys_in.Tsamp = sys.Tsamp
    sys_fbk = ss(1 - tmp)
    sys_fbk.Tsamp = sys.Tsamp
    return sys_in, sys_fbk
def _run_hamiltonian(verbose=True):
    c = classicalHamiltonian()
    if verbose:
        print(c.potential(array([-0.5, 0.5])))
        print(c.potential(array([-0.5, 0.0])))
        print(c.potential(array([0.0, 0.0])))

    xopt = optimize.fmin(c.potential, c.initialposition(), xtol=1e-10)
    # Important to restrict the step in order to avoid the discontinuity at
    # x=[0,0]
    # hessian = nd.Hessian(c.potential, step_max=1.0, step_nom=np.abs(xopt))
    step = nd.MaxStepGenerator(step_max=2, step_ratio=4, num_steps=16)
    hessian = nd.Hessian(c.potential, step=step, method='central',
                         full_output=True)
    # hessian = algopy.Hessian(c.potential)  # Does not work
    # hessian = scientific.Hessian(c.potential)  # does not work
    H, info = hessian(xopt)
    true_H = np.array([[5.23748385e-12, -2.61873829e-12],
                       [-2.61873829e-12, 5.23748385e-12]])
    if verbose:
        print(xopt)
        print('H', H)
        print('H-true_H', np.abs(H - true_H))
        print('error_estimate', info.error_estimate)

        eigenvalues = linalg.eigvals(H)
        normal_modes = c.normal_modes(eigenvalues)
        print('eigenvalues', eigenvalues)
        print('normal_modes', normal_modes)
    return H, info.error_estimate, true_H
def testMinreal(self, verbose=False):
    """Test a minreal model reduction"""
    # A = [-2, 0.5, 0; 0.5, -0.3, 0; 0, 0, -0.1]
    A = [[-2, 0.5, 0], [0.5, -0.3, 0], [0, 0, -0.1]]
    # B = [0.3, -1.3; 0.1, 0; 1, 0]
    B = [[0.3, -1.3], [0.1, 0.], [1.0, 0.0]]
    # C = [0, 0.1, 0; -0.3, -0.2, 0]
    C = [[0., 0.1, 0.0], [-0.3, -0.2, 0.0]]
    # D = [0 -0.8; -0.3 0]
    D = [[0., -0.8], [-0.3, 0.]]
    # sys = ss(A, B, C, D)

    sys = ss(A, B, C, D)
    sysr = minreal(sys)
    self.assertEqual(sysr.states, 2)
    self.assertEqual(sysr.inputs, sys.inputs)
    self.assertEqual(sysr.outputs, sys.outputs)
    np.testing.assert_array_almost_equal(
        eigvals(sysr.A), [-2.136154, -0.1638459])

    s = tf([1, 0], [1])
    h = (s + 1) * (s + 2.00000000001) / (s + 2) / (s**2 + s + 1)
    hm = minreal(h)
    hr = (s + 1) / (s**2 + s + 1)
    np.testing.assert_array_almost_equal(hm.num[0][0], hr.num[0][0])
    np.testing.assert_array_almost_equal(hm.den[0][0], hr.den[0][0])
def __init__(self, bb_=(None, None), mumu_=(None, None), met=(None, None),
             massT2=172.5**2, massW2=80.4**2, lv=utils.LorentzV):
    U = np.diag([1, 1, -1])
    S = np.array([[-1, 0, met[0]],
                  [0, -1, met[1]],
                  [0, 0, 1]])

    nus = utils.vessel()
    nus.ellipse = tuple(self.Ellipse(b, mu, massT2, massW2)
                        for b, mu in zip(bb_, mumu_))
    # identity comparison avoids elementwise numpy "== None" ambiguity
    if any(e is None for e in nus.ellipse):
        self.nunu_s = []
        return
    nus.ellipseT = tuple(np.vstack([e[:2], [0, 0, 1]]) for e in nus.ellipse)
    nus.ellipseT_inv = tuple(np.linalg.inv(eT) for eT in nus.ellipseT)
    nus.nT_inv = tuple(eT.dot(U.dot(eT.T)) for eT in nus.ellipseT)
    nus.nT = tuple(np.linalg.inv(nTi) for nTi in nus.nT_inv)

    nT_p = S.T.dot(nus.nT[1]).dot(S)
    eig = next(e.real
               for e in LA.eigvals(nus.nT_inv[0].dot(nT_p), overwrite_a=True)
               if not e.imag)
    G = nT_p - eig * nus.nT[0]
    nus.vs = [(nus.ellipse[0].dot(nus.ellipseT_inv[0]).dot(nuT),
               nus.ellipse[1].dot(nus.ellipseT_inv[1]).dot(S.dot(nuT)))
              for nuT in self.intersections(G, nus.nT[0])]
    if not nus.vs:
        nus.vs = [self.closestXY(nus.ellipse, met)]

    self.nunu_s = [(lv(), lv()) for _ in nus.vs]
    for (nu, n_), (vu, v_) in zip(self.nunu_s, nus.vs):
        nu.SetPxPyPzE(vu[0], vu[1], vu[2], 0)
        nu.SetM(0)
        n_.SetPxPyPzE(v_[0], v_[1], v_[2], 0)
        n_.SetM(0)

    self.nus = nus
    self.bb_ = bb_
    self.mumu_ = mumu_
    self.met = met
def bb_step(sys, X0=None, Tf=None, Ts=0.001):
    """Plot the step response of the continuous system sys

    Call:
    y = bb_step(sys [, Tf=final time] [, Ts=time step])

    Parameters
    ----------
    sys : continuous system in state-space form
    X0 : initial state vector (not used yet)
    Ts : sampling time
    Tf : final simulation time

    Returns
    -------
    Nothing
    """
    if Tf is None:
        vals = eigvals(sys.A)
        r = min(abs(real(vals)))
        if r < 1e-10:
            r = 0.1
        Tf = 7.0 / r
    sysd = c2d(sys, Ts)
    dstep(sysd, Tf=Tf)
def eigenvalues(A, n):
    """
    Return the eigenvalues of A in order from largest to smallest.

    Parameters
    ----------
    A : numpy.array with shape (nstates,nstates)
        The matrix for which eigenvalues are to be computed.
    n : int
        The number of largest eigenvalues to return.

    Examples
    --------

    Return all sorted eigenvalues.

    >>> from bhmm.util import testsystems
    >>> Tij = testsystems.generate_transition_matrix(nstates=3, reversible=True)
    >>> ew_sorted = eigenvalues(Tij, 3)

    Return largest eigenvalue.

    >>> ew_sorted = eigenvalues(Tij, 1)

    TODO
    ----
    Replace this with a call to the EMMA method once we use EMMA as a
    dependency.
    """
    v = linalg.eigvals(A).real
    idx = (-v).argsort()[:n]
    return v[idx]
def projection_shifts_init(A, E, B, shift_options):
    """Find starting shift parameters for low-rank ADI iteration using
    Galerkin projection on spaces spanned by LR-ADI iterates.

    See [PK16]_, pp. 92-95.

    Parameters
    ----------
    A
        The |Operator| A from the corresponding Lyapunov equation.
    E
        The |Operator| E from the corresponding Lyapunov equation.
    B
        The |VectorArray| B from the corresponding Lyapunov equation.
    shift_options
        The shift options to use (see :func:`lyap_solver_options`).

    Returns
    -------
    shifts
        A |NumPy array| containing a set of stable shift parameters.
    """
    for i in range(shift_options['init_maxiter']):
        Q = gram_schmidt(B, atol=0, rtol=0)
        shifts = spla.eigvals(A.apply2(Q, Q), E.apply2(Q, Q))
        shifts = shifts[np.real(shifts) < 0]
        if shifts.size == 0:
            # use random subspace instead of span{B} (with same dimensions)
            if shift_options['init_seed'] is not None:
                np.random.seed(shift_options['init_seed'])
                np.random.seed(np.random.random() + i)
            B = B.space.make_array(np.random.rand(len(B), B.space.dim))
        else:
            return shifts
    raise RuntimeError('Could not generate initial shifts for low-rank ADI '
                       'iteration.')
def run_hamiltonian(hessian, verbose=True):
    c = ClassicalHamiltonian()

    xopt = optimize.fmin(c.potential, c.initialposition(), xtol=1e-10)

    hessian.fun = c.potential
    hessian.full_output = True

    h, info = hessian(xopt)
    true_h = np.array([[5.23748385e-12, -2.61873829e-12],
                       [-2.61873829e-12, 5.23748385e-12]])
    eigenvalues = linalg.eigvals(h)
    normal_modes = c.normal_modes(eigenvalues)

    if verbose:
        print(c.potential([-0.5, 0.5]))
        print(c.potential([-0.5, 0.0]))
        print(c.potential([0.0, 0.0]))
        print(xopt)
        print('h', h)
        print('h-true_h', np.abs(h - true_h))
        print('error_estimate', info.error_estimate)
        print('eigenvalues', eigenvalues)
        print('normal_modes', normal_modes)
    return h, info.error_estimate, true_h
def _default_response_frequencies(A, n):
    """Compute a reasonable set of frequency points for bode plot.

    This function is used by `bode` to compute the frequency points (in
    rad/s) when the `w` argument to the function is None.

    Parameters
    ----------
    A : ndarray
        The system matrix, which is square.
    n : int
        The number of time samples to generate.

    Returns
    -------
    w : ndarray
        The 1-D array of length `n` of frequency samples (in rad/s) at which
        the response is to be computed.
    """
    vals = linalg.eigvals(A)
    minpole = min(abs(real(vals)))
    maxpole = max(abs(real(vals)))
    # A reasonable frequency range is two orders of magnitude before the
    # minimum pole (slowest) and two orders of magnitude after the maximum
    # pole (fastest).
    w = numpy.logspace(numpy.log10(minpole) - 2, numpy.log10(maxpole) + 2, n)
    return w
def step(system, X0=None, T=None, N=None):
    """Step response of continuous-time system.

    Inputs:

      system -- an instance of the LTI class or a tuple with 2, 3, or 4
                elements representing (num, den), (zero, pole, gain), or
                (A, B, C, D) representation of the system.
      X0 -- (optional, default = 0) initial state-vector.
      T -- (optional) time points (autocomputed if not given).
      N -- (optional) number of time points to autocompute (100 if not
           given).

    Outputs: (T, yout)

      T -- output time points,
      yout -- step response of system.
    """
    if isinstance(system, lti):
        sys = system
    else:
        sys = lti(*system)
    if N is None:
        N = 100
    if T is None:
        vals = linalg.eigvals(sys.A)
        tc = 1.0 / min(abs(real(vals)))
        T = arange(0, 7 * tc, 7 * tc / float(N))
    U = ones(T.shape, sys.A.dtype)
    vals = lsim(sys, U, T, X0=X0)
    return vals[0], vals[1]
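# Usage sketch (illustrative): the step response of the first-order lag
# 1/(s+1) via the public scipy.signal API, whose auto-computed horizon
# follows the same 7*tc rule used by the helper above.
from scipy import signal

t, y = signal.step(([1.0], [1.0, 1.0]))
print(t[-1], y[-1])   # ~7.0 s, with y settled near the dc gain 1.0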
def compare_eigen_methods():
    """ timing of different eigensolver methods"""
    import scipy.linalg as linalg

    print('\n *** diagonalize a real symmetric band matrix by different methods ***\n')
    mt = mytimer.create(10)
    try:
        N = int(sys.argv[1])
        S = int(sys.argv[2])
    except:
        sys.exit('supply N S (command line arguments): matrix dimension N '
                 'and number of super-diagonals S')
    np.random.seed(1)
    a = np.random.random((N, N))
    ab = GMatrix(np.random.random((S + 1, N)), storage='upper_banded')

    mt[0].start('lapack symmetric upper banded')
    print(ab.store + '\n', ab.eigvals()[:5])
    mt[0].stop()

    a = ab.restore('full')
    mt[1].start('lapack symmetric full')
    print(a.store + '\n', a.eigvals()[:5])
    mt[1].stop()

    print('linalg')
    mt[2].start('linalg general')
    print(np.sort(linalg.eigvals(a.m).real)[:5])
    mt[2].stop()

    mytimer.table()
def __init__(self, para):
    self.parms = Parameters()
    self.parms.N = para.N
    self.parms.Nin = para.Nin
    self.parms.Nout = para.Nout
    self.parms.inp_sc = para.inp_sc
    self.parms.spectr_rad = para.spectr_rad
    self.parms.leak_rate = para.leak_rate
    self.parms.inpBias = para.inpBias
    self.parms.Pscaling = para.Pscaling
    self.parms.OutScaling = para.OutScaling
    self.parms.alpha = para.alpha
    self.parms.t1 = para.t1
    self.parms.dt = para.dt
    self.parms.y_d = para.y_d

    self.P = np.eye(self.parms.N) * self.parms.Pscaling  # 100
    self.inpBias = self.parms.inpBias * np.random.randn(self.parms.N, 1)
    wtemp = np.random.randn(self.parms.N, self.parms.N)
    self.W = self.parms.spectr_rad * wtemp / max(abs(linalg.eigvals(wtemp)))
    self.V = np.random.randn(self.parms.N, self.parms.Nin) * \
        np.dot(np.ones((self.parms.N, 1)), self.parms.inp_sc)
    self.Woutp = np.random.randn(self.parms.Nout, self.parms.N)
    self.state_A = np.random.rand(self.parms.N, 1) * 2. - 1.
    self.state_B = self.state_A
    self.orgstate = self.state_A
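# The reservoir initialisation above hinges on one step: rescale a random
# recurrent matrix to a target spectral radius. A self-contained sketch of
# that step (sizes and target value are illustrative):
import numpy as np
from scipy import linalg

np.random.seed(0)
wtemp = np.random.randn(100, 100)
spectr_rad = 0.9
W = spectr_rad * wtemp / max(abs(linalg.eigvals(wtemp)))
print(np.isclose(max(abs(linalg.eigvals(W))), spectr_rad))   # True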
def z2_vanderbilt(h, nk=30, nt=100, nocc=None, full=False):
    """ Calculate Z2 invariant according to Vanderbilt algorithm"""
    out = []  # output list
    path = np.linspace(0., 1., nk)  # set of kpoints
    fo = open("WANNIER_CENTERS.OUT", "w")
    if full:
        ts = np.linspace(0., 1.0, nt, endpoint=False)
    else:
        ts = np.linspace(0., 0.5, nt, endpoint=False)
    wfall = [[occ_states2d(h, np.array([k, t, 0.])) for k in path] for t in ts]
    # select a continuous gauge for the first wave
    for it in range(len(ts) - 1):  # loop over ts
        wfall[it + 1][0] = smooth_gauge(wfall[it][0], wfall[it + 1][0])
    for it in range(len(ts)):  # loop over t points
        row = []  # empty list for this row
        t = ts[it]  # select the t point
        wfs = wfall[it]  # get set of waves
        for i in range(len(wfs) - 1):
            wfs[i + 1] = smooth_gauge(wfs[i], wfs[i + 1])  # transform into a smooth gauge
            # m = uij(wfs[i], wfs[i+1])  # matrix of wavefunctions
        m = uij(wfs[0], wfs[len(wfs) - 1])  # matrix of wavefunctions
        evals = lg.eigvals(m)  # eigenvalues of the rotation
        x = np.angle(evals)  # phase of the eigenvalues
        fo.write(str(t) + " ")  # write pumping variable
        row.append(t)  # store
        for ix in x:  # loop over phases
            fo.write(str(ix) + " ")
            row.append(ix)  # store
        fo.write("\n")
        out.append(row)  # store
    fo.close()
    return np.array(out).transpose()  # transpose the map
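# The phase extraction at the heart of z2_vanderbilt, in isolation (a toy
# sketch): for a unitary overlap matrix the eigenvalues lie on the unit
# circle, and np.angle recovers the Wannier-center phases.
import numpy as np
from scipy import linalg as lg

theta = 0.3
m = np.diag([np.exp(1j * theta), np.exp(-1j * theta)])  # toy 2x2 overlap matrix
x = np.angle(lg.eigvals(m))
print(np.sort(x))   # [-0.3, 0.3]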
def _run_hamiltonian(verbose=True):
    c = classicalHamiltonian()
    if verbose:
        print(c.potential(array([-0.5, 0.5])))
        print(c.potential(array([-0.5, 0.0])))
        print(c.potential(array([0.0, 0.0])))

    xopt = optimize.fmin(c.potential, c.initialposition(), xtol=1e-10)

    hessian = nd.Hessian(c.potential)

    H = hessian(xopt)
    true_H = np.array([[5.23748385e-12, -2.61873829e-12],
                       [-2.61873829e-12, 5.23748385e-12]])
    error_estimate = np.NAN
    if verbose:
        print(xopt)
        print('H', H)
        print('H-true_H', np.abs(H - true_H))
        # print('error_estimate', info.error_estimate)

        eigenvalues = linalg.eigvals(H)
        normal_modes = c.normal_modes(eigenvalues)
        print('eigenvalues', eigenvalues)
        print('normal_modes', normal_modes)
    return H, error_estimate, true_H
def find_zeros(self, pprint=False):
    """
    Find the zeros and poles of a complex function (C->C) inside a
    closed curve.

    input:
    """
    # total multiplicity of zeros (number of zeros * order)
    pol_unity = monicPolynomial([])
    p_unity = pol_unity.f_polynomial()
    residue = self.inner_prod(p_unity, p_unity)
    N = int(round(residue.real))
    accuracy = np.abs(N - residue)
    if pprint:
        print('N =', N, ' (accuracy =', accuracy, ')')
    # if N is zero
    if N == 0:
        n = 0
        zeros = np.array([])
    else:
        # list of FOPs
        phis = []
        pols = []
        phis.append(p_unity)
        pols.append(pol_unity)  # phi_0
        # compute arithmetic mean of nodes
        p_aux = monicPolynomial([0.]).f_polynomial()
        mu = self.inner_prod(p_unity, p_aux) / N
        if pprint:
            print('mu =', mu)
        # append first polynomial
        pol = monicPolynomial([-mu])
        phis.append(pol.f_polynomial())
        pols.append(pol)  # phi_1
        # if the algorithm quits after the first zero, it is mu
        zeros = np.array([mu])
        n = 1
        # initialization
        r = 1
        t = 0
        while r + t < N:
            if pprint:
                print(str(r + t))
            # naive criterion to check if phi_r+t+1 is regular
            prod_aux, maxpsum = self.inner_prod(phis[-1], phis[-1],
                                                compute_maxpsum=True)
            # compute eigenvalues of the next pencil
            G, G1 = self.generalized_hankel_matrices(phis, pols)
            eigv = la.eigvals(G1, G)
            # print eigv
            # check if these eigenvalues lie within the contour
            if self.points_within_contour(eigv + mu):
                # if np.abs(prod_aux)/maxpsum > eps_reg:
                if pprint:
                    print(str(r + t) + '.1')
                # compute next FOP in the regular way
                pol = monicPolynomial(eigv, coef_type='zeros')
                phis.append(pol.f_polynomial())
                pols.append(pol)
                r += t + 1
                t = 0
                n = r
                zeros = eigv + mu
            else:
                if pprint:
                    print(str(r + t) + '.2')
                c = npol.polyfromroots([mu])
                pol = monicPolynomial(npol.polymul(c, pols[-1].coef),
                                      coef_type='normal')
                pols.append(pol)
                phis.append(pol.f_polynomial())
                t += 1
    # check multiplicities
    # construct vandermonde system
    if len(zeros) > 0:
        A = np.vander(zeros).T[::-1, :]
        b = np.array([
            self.inner_prod(p_unity, lambda x, k=k: x**k)
            for k in range(n)
        ])
        # solve the Vandermonde system
        nu = la.solve(A, b)
        nu = np.round(nu).real.astype(int)
        sane = (np.sum(nu) == N)
    else:
        nu = np.array([])
        sane = True
    # for printing result
    if pprint:
        print('>>> Result of computation of zeros <<<')
        print('number of zeros = ', n - len(np.where(nu == 0)[0]))
        pstring = 'yes!' if sane == 1 else 'no!'
        print('sane? ' + pstring)
        for i, zero in enumerate(zeros):
            print('zero #' + str(i + 1) + ' = ' + str(zero) +
                  ' (multiplicity = ' + str(nu[i]) + ')')
        print('')
    # eliminate spurious zeros (the ones with zero multiplicity)
    inds_ = np.argsort(zeros.real)
    inds = np.where(nu[inds_] > 0)[0]
    if self.use_known_zeros:
        self.global_zeros = np.concatenate(
            (self.global_zeros, zeros[inds_][inds]))
        self.global_zmultiplicities = np.concatenate(
            (self.global_zmultiplicities, nu[inds_][inds]))
        iglob = np.argsort(self.global_zeros.real)
        self.global_zeros = self.global_zeros[iglob]
        self.global_zmultiplicities = self.global_zmultiplicities[iglob]
    return zeros[inds_], (n - len(np.where(nu == 0)[0]), nu[inds_], sane)
def QFIM_Gauss(R, dR, D, dD):
    """
    Calculation of the SLD based quantum Fisher information (QFI) and quantum
    Fisher information matrix (QFIM) with gaussian states.

    Parameters
    ----------
    > **R:** `array`
        -- First-order moment.

    > **dR:** `list`
        -- Derivatives of the first-order moment on the unknown parameters to
        be estimated. For example, dR[0] is the derivative vector on the first
        parameter.

    > **D:** `matrix`
        -- Second-order moment.

    > **dD:** `list`
        -- Derivatives of the second-order moment on the unknown parameters to
        be estimated. For example, dD[0] is the derivative vector on the first
        parameter.

    > **eps:** `float`
        -- Machine epsilon.

    Returns
    ----------
    **QFI or QFIM with gaussian states:** `float or matrix`
        -- For single parameter estimation (the length of drho is equal to
        one), the output is QFI and for multiparameter estimation (the length
        of drho is more than one), it returns QFIM.
    """
    para_num = len(dR)
    m = int(len(R) / 2)
    QFIM_res = np.zeros([para_num, para_num])

    C = np.array([[D[i][j] - R[i] * R[j] for j in range(2 * m)]
                  for i in range(2 * m)])
    dC = [
        np.array([[
            dD[k][i][j] - dR[k][i] * R[j] - R[i] * dR[k][j]
            for j in range(2 * m)
        ] for i in range(2 * m)]) for k in range(para_num)
    ]

    C_sqrt = sqrtm(C)
    J = np.kron([[0, 1], [-1, 0]], np.eye(m))
    B = C_sqrt @ J @ C_sqrt
    P = np.eye(2 * m)
    P = np.vstack([P[:][::2], P[:][1::2]])
    T, Q = schur(B)
    vals = eigvals(B)
    c = vals[::2].imag
    Diag = np.diagflat(c**-0.5)
    S = inv(J @ C_sqrt @ Q @ P @ np.kron([[0, 1], [-1, 0]], -Diag)).T @ P.T

    sx = np.array([[0.0, 1.0], [1.0, 0.0]])
    sy = np.array([[0.0, -1.0j], [1.0j, 0.0]])
    sz = np.array([[1.0, 0.0], [0.0, -1.0]])
    a_Gauss = [1j * sy, sz, np.eye(2), sx]

    es = [[np.eye(1, m**2, m * i + j).reshape(m, m) for j in range(m)]
          for i in range(m)]

    As = [[np.kron(s, a_Gauss[i]) / np.sqrt(2) for s in es] for i in range(4)]
    gs = [[[[np.trace(inv(S) @ dC @ inv(S.T) @ aa.T) for aa in a] for a in A]
           for A in As] for dC in dC]
    G = [
        np.zeros((2 * m, 2 * m)).astype(np.longdouble)
        for _ in range(para_num)
    ]
    for i in range(para_num):
        for j in range(m):
            for k in range(m):
                for l in range(4):
                    G[i] += np.real(gs[i][l][j][k] /
                                    (4 * c[j] * c[k] + (-1)**(l + 1)) *
                                    inv(S.T) @ As[l][j][k] @ inv(S))

    QFIM_res += np.real([[
        np.trace(G[i] @ dC[j]) + dR[i] @ inv(C) @ dR[j]
        for j in range(para_num)
    ] for i in range(para_num)])

    if para_num == 1:
        return QFIM_res[0][0]
    else:
        return QFIM_res
def time_eigvals(self, size, contig, module):
    if module == 'numpy':
        nl.eigvals(self.a)
    else:
        sl.eigvals(self.a)
def eigvalsh(a, UPLO="L", eigvals=[]):
    val = la.eigvals(a)
    val = np.sort(np.real(val))
    if eigvals:
        return val[eigvals[0]:eigvals[1] + 1]
    return val
def is_stable(self, K):
    stab = False
    if np.amax(np.abs(LA.eigvals(self.a_cl(K)))) < (1.0 - 1.0e-6):
        stab = True
    return stab
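# Usage sketch for the spectral-radius test above; a_cl(K) is assumed to be
# the closed-loop matrix A - B*K, so a stand-in triangular matrix with
# eigenvalues 0.5 and 0.3 passes the discrete-time stability check.
import numpy as np
from numpy import linalg as LA

A_cl = np.array([[0.5, 0.1], [0.0, 0.3]])
print(np.amax(np.abs(LA.eigvals(A_cl))) < (1.0 - 1.0e-6))   # True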
# V
JAC[2*M-2, :] = concatenate((zeros(M), BTOP, zeros(4*M)))
JAC[2*M-1, :] = concatenate((zeros(M), BBOT, zeros(4*M)))

B = zeros((6*M, 6*M), dtype='complex')
B[:2*M, :2*M] = eye(2*M, 2*M)
B[3*M:, 3*M:] = eye(3*M, 3*M)

### BCs on RHS matrix
B[M-2, :] = 0
B[M-1, :] = 0
B[2*M-2, :] = 0
B[2*M-1, :] = 0

eigenvalues = linalg.eigvals(JAC, B)
eigenvalues = eigenvalues[~isnan(eigenvalues*conj(eigenvalues))]
eigenvalues = eigenvalues[~isinf(eigenvalues*conj(eigenvalues))]

eigarr = zeros((len(eigenvalues), 2))
eigarr[:, 0] = real(eigenvalues)
eigarr[:, 1] = imag(eigenvalues)

savetxt("eigs.txt", eigarr)

# actualDecayRate = amax(eigarr[:, 0])
actualDecayRate = -1.0
for i in range(len(eigarr[:, 0])):
    if eigarr[i, 0] > 1.0:
        continue
def matrix_matrix_best_mat(n, kA, kB, gammaB, no_trials):
    samples_in_omega = 200
    k = kA * kB
    s = n - k
    identity_part = np.identity((k), dtype=float)
    DeltaB = int(np.round((s - 1) * (kB - 1) / (gammaB - 1 / kB)))
    while DeltaB % kB != 0:
        DeltaB = DeltaB + 1
    z = int(DeltaB / kB + (s - 1) * (kB - 1))
    workers = list(range(n))
    Choice_of_workers = list(it.combinations(workers, k))
    size_total = np.shape(Choice_of_workers)
    total_no_choices = size_total[0]
    best_mat_A = {}
    best_mat_B = {}
    mu = 0
    sigma = 1
    min_eigenvalue = np.zeros(total_no_choices * samples_in_omega)
    max_eigenvalue = np.zeros(total_no_choices * samples_in_omega)
    condition_number = np.zeros(no_trials, dtype=float)
    for trial in range(0, no_trials):
        best_mat_A[trial] = np.random.normal(mu, sigma, [kA, s])
        best_mat_B[trial] = np.random.normal(mu, sigma, [kB, s])
        matrices = {}
        matrices[0] = best_mat_A[trial]
        matrices[1] = best_mat_B[trial]
        best_mat_comb = np.zeros((kA * kB, s))
        for i in range(s):
            cum_prod = matrices[0][:, i]
            cum_prod = np.einsum('i,j->ij', cum_prod, matrices[1][:, i]).ravel()
            best_mat_comb[:, i] = cum_prod
        ind = 0
        exponent_vector = list(range(0, kB))
        for i in range(1, kA):
            m = i * z
            exponent_vector = np.concatenate(
                (exponent_vector, list(range(m, m + kB))), axis=0)
        w = np.zeros(2 * samples_in_omega, dtype=float)
        for i in range(0, samples_in_omega):
            w[i] = -np.pi + i * 2 * np.pi / samples_in_omega
        zz = samples_in_omega
        for z in range(0, zz):
            imag = 1j
            omega = np.zeros((k, s), dtype=complex)
            for i in range(0, s):
                omega[:, i] = np.power(np.exp(-imag * w[z])**i, list(range(k)))
            Generator_mat = np.concatenate(
                (identity_part, np.multiply(best_mat_comb, omega)), axis=1)
            for i in range(0, total_no_choices):
                Coding_matrix = []
                kk = list(Choice_of_workers[i])
                Coding_matrix = Generator_mat[:, kk]
                Coding_matrixT = np.transpose(Coding_matrix)
                D = np.matmul(np.conjugate(Coding_matrixT), Coding_matrix)
                eigenvalues = eigvals(D)
                eigenvalues = np.real(eigenvalues)
                min_eigenvalue[ind] = np.min(eigenvalues)
                max_eigenvalue[ind] = np.max(eigenvalues)
                ind = ind + 1
        condition_number[trial] = np.sqrt(
            np.max(max_eigenvalue) / np.min(min_eigenvalue))
    best_cond_min = np.min(condition_number)
    position = np.argmin(condition_number)
    R_A = best_mat_A[position]
    R_B = best_mat_B[position]
    return R_A, R_B, best_cond_min
def balfreq(SS, DictBalFreq):
    '''
    Method for frequency limited balancing.

    The observability and controllability Gramians over the frequencies kv
    are solved in factorised form. Balanced modes are then obtained with a
    square-root method.

    Details:

    Observability and controllability Gramians are solved in factorised form
    through explicit integration. The number of integration points determines
    both the accuracy and the maximum size of the balanced model.

    Stability over all (Nb) balanced states is achieved if:

        a. one of the Gramians is integrated through the full Nyquist range
        b. the integration points are enough.

    Input:

    - DictBalFreq: dictionary specifying integration method with keys:

        - 'frequency': defines limit frequencies for balancing. The balanced
           model will be accurate in the range [0,F], where F is the value of
           this key. Note that F units must be consistent with the units
           specified in the self.ScalingFacts dictionary.

        - 'method_low': ['gauss','trapz'] specifies whether to use Gauss
           quadrature or the trapezoidal rule in the low-frequency range [0,F]

        - 'options_low': options to use for integration in the low frequencies.
           These depend on the integration scheme (see below).

        - 'method_high': method to use for integration in the range [F,F_N],
           where F_N is the Nyquist frequency. See 'method_low'.

        - 'options_high': options to use for integration in the high
           frequencies.

        - 'check_stability': if True, the balanced model is truncated to
           eliminate unstable modes - if any is found. Note that a very
           accurate balanced model can still be obtained even if high-order
           modes are unstable. Note that this option is overridden if ""

        - 'get_frequency_response': if True, the function also returns the
           frequency response evaluated at the low-frequency range integration
           points. If True, this option also allows to automatically tune the
           balanced model.

    Future options:
        - Ncpu: for parallel run

    The following integration schemes are available:

        - 'trapz': performs integration over equally spaced points using the
           trapezoidal rule. It accepts options dictionaries with keys:
            - 'points': number of integration points to use (including
               domain boundary)

        - 'gauss': performs Gauss-Lobatto quadrature. The domain can be
           partitioned in Npart sub-domains in which a Gauss-Lobatto
           quadrature of order Ord is applied. A total number of Npart*Ord
           points is required. It accepts options dictionaries of the form:
            - 'partitions': number of partitions
            - 'order': quadrature order.

    Example:

    The following dictionary

        DictBalFreq = {'frequency': 1.2,
                       'method_low': 'trapz',
                       'options_low': {'points': 12},
                       'method_high': 'gauss',
                       'options_high': {'partitions': 2, 'order': 8},
                       'check_stability': True}

    balances the state-space model in the frequency range [0, 1.2] using

        (a) 12 equally-spaced points integration of the Gramians in the
            low-frequency range [0, 1.2] and

        (b) 2 Gauss-Lobatto 8-th order quadratures of the controllability
            Gramian in the high-frequency range.

    A total number of 28 integration points will be required, which will
    result in a balanced model with number of states

        min{ 2*28* number_inputs, 2*28* number_outputs }

    The model is finally truncated so as to retain only the first Ns stable
    modes.
    '''

    ### check input dictionary
    if 'frequency' not in DictBalFreq:
        raise NameError('Solution dictionary must include the "frequency" key')

    if 'method_low' not in DictBalFreq:
        warnings.warn('Setting default options for low-frequency integration')
        DictBalFreq['method_low'] = 'trapz'
        DictBalFreq['options_low'] = {'points': 12}

    if 'method_high' not in DictBalFreq:
        warnings.warn('Setting default options for high-frequency integration')
        DictBalFreq['method_high'] = 'gauss'
        DictBalFreq['options_high'] = {'partitions': 2, 'order': 8}

    if 'check_stability' not in DictBalFreq:
        DictBalFreq['check_stability'] = True

    if 'output_modes' not in DictBalFreq:
        DictBalFreq['output_modes'] = True

    if 'get_frequency_response' not in DictBalFreq:
        DictBalFreq['get_frequency_response'] = False

    ### get integration points and weights

    # Nyquist frequency
    kn = np.pi / SS.dt

    Opt = DictBalFreq['options_low']
    if DictBalFreq['method_low'] == 'trapz':
        kv_low, wv_low = get_trapz_weights(0., DictBalFreq['frequency'],
                                           Opt['points'], False)
    elif DictBalFreq['method_low'] == 'gauss':
        kv_low, wv_low = get_gauss_weights(0., DictBalFreq['frequency'],
                                           Opt['partitions'], Opt['order'])
    else:
        raise NameError('Invalid value %s for key "method_low"'
                        % DictBalFreq['method_low'])

    Opt = DictBalFreq['options_high']
    if DictBalFreq['method_high'] == 'trapz':
        if Opt['points'] == 0:
            warnings.warn('You have chosen no points in high frequency range!')
            kv_high, wv_high = [], []
        else:
            kv_high, wv_high = get_trapz_weights(DictBalFreq['frequency'], kn,
                                                 Opt['points'], True)
    elif DictBalFreq['method_high'] == 'gauss':
        if Opt['order'] * Opt['partitions'] == 0:
            warnings.warn('You have chosen no points in high frequency range!')
            kv_high, wv_high = [], []
        else:
            kv_high, wv_high = get_gauss_weights(DictBalFreq['frequency'], kn,
                                                 Opt['partitions'],
                                                 Opt['order'])
    else:
        raise NameError('Invalid value %s for key "method_high"'
                        % DictBalFreq['method_high'])

    ### -------------------------------------------------- loop frequencies

    ### merge vectors
    Nk_low = len(kv_low)
    kvdt = np.concatenate((kv_low, kv_high)) * SS.dt
    wv = np.concatenate((wv_low, wv_high)) * SS.dt
    zv = np.cos(kvdt) + 1.j * np.sin(kvdt)

    Eye = libsp.eye_as(SS.A)
    Zc = np.zeros((SS.states, 2 * SS.inputs * len(kvdt)))
    Zo = np.zeros((SS.states, 2 * SS.outputs * Nk_low))

    if DictBalFreq['get_frequency_response']:
        Yfreq = np.empty((SS.outputs, SS.inputs, Nk_low), dtype=np.complex_)
        kv = kv_low

    for kk in range(len(kvdt)):
        zval = zv[kk]
        Intfact = wv[kk]  # integration factor

        Qctrl = Intfact * libsp.solve(zval * Eye - SS.A, SS.B)
        kkvec = range(2 * kk * SS.inputs, 2 * (kk + 1) * SS.inputs)
        Zc[:, kkvec[:SS.inputs]] = Qctrl.real
        Zc[:, kkvec[SS.inputs:]] = Qctrl.imag

        ### ----- frequency response
        if DictBalFreq['get_frequency_response'] and kk < Nk_low:
            Yfreq[:, :, kk] = (1. / Intfact) * \
                libsp.dot(SS.C, Qctrl, type_out=np.ndarray) + SS.D

        ### ----- observability
        if kk >= Nk_low:
            continue

        Qobs = Intfact * libsp.solve(np.conj(zval) * Eye - SS.A.T, SS.C.T)
        kkvec = range(2 * kk * SS.outputs, 2 * (kk + 1) * SS.outputs)
        Zo[:, kkvec[:SS.outputs]] = Intfact * Qobs.real
        Zo[:, kkvec[SS.outputs:]] = Intfact * Qobs.imag

    # delete full matrices
    Kernel = None
    Qctrl = None
    Qobs = None

    # LRSQM (optimised)
    U, hsv, Vh = scalg.svd(np.dot(Zo.T, Zc), full_matrices=False)
    sinv = hsv**(-0.5)
    T = np.dot(Zc, Vh.T * sinv)
    Ti = np.dot((U * sinv).T, Zo.T)
    # Zc, Zo = None, None

    ### build frequency balanced model
    Ab = libsp.dot(Ti, libsp.dot(SS.A, T))
    Bb = libsp.dot(Ti, SS.B)
    Cb = libsp.dot(SS.C, T)
    SSb = libss.ss(Ab, Bb, Cb, SS.D, dt=SS.dt)

    ### Eliminate unstable modes - if any:
    if DictBalFreq['check_stability']:
        for nn in range(1, len(hsv) + 1):
            eigs_trunc = scalg.eigvals(SSb.A[:nn, :nn])
            eigs_trunc_max = np.max(np.abs(eigs_trunc))
            if eigs_trunc_max > 1. - 1e-16:
                SSb.truncate(nn - 1)
                hsv = hsv[:nn - 1]
                T = T[:, :nn - 1]
                Ti = Ti[:nn - 1, :]
                break

    outs = (SSb, hsv)
    if DictBalFreq['output_modes']:
        outs += (T, Ti, Zc, Zo, U, Vh)
    return outs
def system_norm(G, p=np.inf, hinf_tol=1e-6, eig_tol=1e-8):
    """
    Computes the system p-norm. Currently, no balancing is done on the
    system, however in the future, a scaling of some sort will be introduced.
    Currently, only H₂ and H∞-norm are understood.

    For H₂-norm, the standard grammian definition via controllability
    grammian, that can be found elsewhere is used.

    Parameters
    ----------
    G : {State,Transfer}
        System for which the norm is computed
    p : {int,np.inf}
        The norm type; `np.inf` for H∞- and `2` for H2-norm
    hinf_tol: float
        When the progress is below this tolerance the result is accepted
        as converged.
    eig_tol: float
        The algorithm relies on checking the eigenvalues of the Hamiltonian
        being on the imaginary axis or not. This value is the threshold
        such that the absolute real value of the eigenvalues smaller than
        this value will be accepted as pure imaginary eigenvalues.

    Returns
    -------
    n : float
        Resulting p-norm

    Notes
    -----
    The H∞ norm is computed via the so-called BBBS algorithm ([1]_, [2]_).

    .. [1] N.A. Bruinsma, M. Steinbuch: Fast Computation of H∞-norm of
        transfer function. System and Control Letters, 14, 1990.
        :doi:`10.1016/0167-6911(90)90049-Z`

    .. [2] S. Boyd and V. Balakrishnan. A regularity result for the singular
           values of a transfer matrix and a quadratically convergent
           algorithm for computing its L∞-norm. System and Control Letters,
           1990. :doi:`10.1016/0167-6911(90)90037-U`

    """
    # Tried the corrections given in arXiv:1707.02497, couldn't get the gains
    # mentioned in the paper.
    _check_for_state_or_transfer(G)

    if p not in (2, np.inf):
        raise ValueError('The p in p-norm is not 2 or infinity. If you'
                         ' tried the string \'inf\', use "np.inf" instead')

    T = transfer_to_state(G) if isinstance(G, Transfer) else G
    a, b, c, d = T.matrices

    # 2-norm
    if p == 2:
        # Handle trivial infinities
        if not np.allclose(T.d, np.zeros_like(T.d)) or (not T._isstable):
            return np.Inf

        if T.SamplingSet == 'R':
            x = lyapunov_eq_solver(a.T, b @ b.T)
            return np.sqrt(np.trace(c @ x @ c.T))
        else:
            x = lyapunov_eq_solver(a.T, b @ b.T, form='d')
            return np.sqrt(np.trace(c @ x @ c.T + d @ d.T))
    # ∞-norm
    elif np.isinf(p):
        if not T._isstable:
            return np.Inf

        # Initial gamma0 guess
        # Get the max of the largest svd of either
        #   - feedthrough matrix
        #   - G(iw) response at the pole with smallest damping
        #   - G(iw) at w = 0

        # Formula (4.3) given in Bruinsma, Steinbuch Sys.Cont.Let. (1990)
        if any(T.poles.imag):
            J = [np.abs(x.imag / x.real / np.abs(x)) for x in T.poles]
            ind = np.argmax(J)
            low_damp_fr = np.abs(T.poles[ind])
        else:
            low_damp_fr = np.min(np.abs(T.poles))

        f, w = frequency_response(T, w=[0, low_damp_fr], w_unit='rad/s',
                                  output_unit='rad/s')
        if T._isSISO:
            lb = np.max(np.abs(f))
        else:
            # Only evaluated at two frequencies, 0 and wb
            lb = np.max(norm(f, ord=2, axis=(0, 1)))

        # Finally
        gamma_lb = np.max([lb, norm(d, ord=2)])

        # Start a for loop with a definite end! Convergence is quartic!!
        for x in range(51):

            # (Step b1)
            test_gamma = gamma_lb * (1 + 2 * np.sqrt(np.spacing(1.)))

            # (Step b2)
            R = d.T @ d - test_gamma**2 * np.eye(d.shape[1])
            S = d @ d.T - test_gamma**2 * np.eye(d.shape[0])
            # TODO : Implement the result of Benner for the Hamiltonian later
            mat = block([[a - b @ solve(R, d.T) @ c,
                          -test_gamma * b @ solve(R, b.T)],
                         [test_gamma * c.T @ solve(S, c),
                          -(a - b @ solve(R, d.T) @ c).T]])
            eigs_of_H = eigvals(mat)

            # (Step b3)
            im_eigs = eigs_of_H[np.abs(eigs_of_H.real) <= eig_tol]
            # If none left break
            if im_eigs.size == 0:
                gamma_ub = test_gamma
                break
            else:
                # Take the ones with positive imag part
                w_i = np.sort(np.unique(np.abs(im_eigs.imag)))
                # Evaluate the cubic interpolant
                m_i = (w_i[1:] + w_i[:-1]) / 2
                f, w = frequency_response(T, w=m_i, w_unit='rad/s',
                                          output_unit='rad/s')
                if T._isSISO:
                    gamma_lb = np.max(np.abs(f))
                else:
                    gamma_lb = np.max(norm(f, ord=2, axis=(0, 1)))

        return (gamma_lb + gamma_ub) / 2
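# A brute-force cross-check of the BBBS iteration above (a sketch, not part
# of the library): for a stable SISO system the H-infinity norm is the peak
# of |G(jw)| over frequency, so a dense sweep should agree with the
# bisection result. Here G(s) = 1/(s+1) + 1/(s+2) peaks at w = 0 with 1.5.
import numpy as np
from scipy import signal

sys = signal.StateSpace([[-1.0, 0.0], [0.0, -2.0]], [[1.0], [1.0]],
                        [[1.0, 1.0]], [[0.0]])
w = np.logspace(-3, 3, 2000)
_, H = signal.freqresp(sys, w)
print(np.max(np.abs(H)))   # ~1.5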
for i in range(0, 7):
    cmat[i][i] = 1

cmat[0][1] = cmat[1][0] = spst.pearsonr(data.det, data.lam)[0]
cmat[0][2] = cmat[2][0] = spst.pearsonr(data.det, data.lamdet)[0]
cmat[0][3] = cmat[3][0] = spst.pearsonr(data.det, data.lavg)[0]
cmat[0][4] = cmat[4][0] = spst.pearsonr(data.det, data.lent)[0]
cmat[0][5] = cmat[5][0] = spst.pearsonr(data.det, data.vavg)[0]
cmat[0][6] = cmat[6][0] = spst.pearsonr(data.det, data.vent)[0]
cmat[1][2] = cmat[2][1] = spst.pearsonr(data.lam, data.lamdet)[0]
cmat[1][3] = cmat[3][1] = spst.pearsonr(data.lam, data.lavg)[0]
cmat[1][4] = cmat[4][1] = spst.pearsonr(data.lam, data.lent)[0]
cmat[1][5] = cmat[5][1] = spst.pearsonr(data.lam, data.vavg)[0]
cmat[1][6] = cmat[6][1] = spst.pearsonr(data.lam, data.vent)[0]
cmat[2][3] = cmat[3][2] = spst.pearsonr(data.lamdet, data.lavg)[0]
cmat[2][4] = cmat[4][2] = spst.pearsonr(data.lamdet, data.lent)[0]
cmat[2][5] = cmat[5][2] = spst.pearsonr(data.lamdet, data.vavg)[0]
cmat[2][6] = cmat[6][2] = spst.pearsonr(data.lamdet, data.vent)[0]
cmat[3][4] = cmat[4][3] = spst.pearsonr(data.lavg, data.lent)[0]
cmat[3][5] = cmat[5][3] = spst.pearsonr(data.lavg, data.vavg)[0]
cmat[3][6] = cmat[6][3] = spst.pearsonr(data.lavg, data.vent)[0]
cmat[4][5] = cmat[5][4] = spst.pearsonr(data.lent, data.vavg)[0]
cmat[4][6] = cmat[6][4] = spst.pearsonr(data.lent, data.vent)[0]
cmat[5][6] = cmat[6][5] = spst.pearsonr(data.vavg, data.vent)[0]

print(cmat)

b = spl.eigvals(abs(cmat))
print(b)
bvar = np.var(b)
meff = 1 + ((6) * (1 - (bvar / 7)))
print(meff)
def _ideal_tfinal_and_dt(sys, is_step=True):
    """helper function to compute ideal simulation duration tfinal and dt,
    the time increment. Usually called by _default_time_vector, whose job
    it is to choose a realistic time vector. Considers both poles and zeros.

    For discrete-time models, dt is inherent and only tfinal is computed.

    Parameters
    ----------
    sys : StateSpace or TransferFunction
        The system whose time response is to be computed
    is_step : bool
        Scales the dc value by the magnitude of the nonzero mode since
        integrating the impulse response gives
        :math:`\\int e^{-\\lambda t} = -e^{-\\lambda t}/ \\lambda`.
        Default is True.

    Returns
    -------
    tfinal : float
        The final time instance for which the simulation will be performed.
    dt : float
        The estimated sampling period for the simulation.

    Notes
    -----
    Just by evaluating the fastest mode for dt and slowest for tfinal often
    leads to unnecessary, bloated sampling (e.g., Transfer(1,[1,1001,1000]))
    since dt will be very small and tfinal will be too large though the fast
    mode hardly ever contributes. Similarly, change the numerator to [1, 2, 0]
    and the simulation would be unnecessarily long and the plot is virtually
    an L shape since the decay is so fast.

    Instead, a modal decomposition in time domain hence a truncated ZIR and
    ZSR can be used such that only the modes that have significant effect on
    the time response are taken. But the sensitivity of the eigenvalues
    complicate the matter since dlambda = <w, dA*v> with <w,v> = 1. Hence we
    can only work with simple poles with this formulation. See Golub, Van Loan
    Section 7.2.2 for simple eigenvalue sensitivity about the nonunity of
    <w,v>. The size of the response is dependent on the size of the
    eigenshapes rather than the eigenvalues themselves.

    By Ilhan Polat, with modifications by Sawyer Fuller to integrate into
    python-control 2020.08.17
    """
    sqrt_eps = np.sqrt(np.spacing(1.))
    default_tfinal = 5                  # Default simulation horizon
    default_dt = 0.1
    total_cycles = 5                    # number of cycles for oscillating modes
    pts_per_cycle = 25                  # Number of points divide a period of oscillation
    log_decay_percent = np.log(100)     # Factor of reduction for real pole decays

    if sys.is_static_gain():
        tfinal = default_tfinal
        dt = sys.dt if isdtime(sys, strict=True) else default_dt
    elif isdtime(sys, strict=True):
        dt = sys.dt
        A = _convertToStateSpace(sys).A
        tfinal = default_tfinal
        p = eigvals(A)
        # Array Masks
        # unstable
        m_u = (np.abs(p) >= 1 + sqrt_eps)
        p_u, p = p[m_u], p[~m_u]
        if p_u.size > 0:
            m_u = (p_u.real < 0) & (np.abs(p_u.imag) < sqrt_eps)
            t_emp = np.max(log_decay_percent / np.abs(np.log(p_u[~m_u]) / dt))
            tfinal = max(tfinal, t_emp)

        # zero - negligible effect on tfinal
        m_z = np.abs(p) < sqrt_eps
        p = p[~m_z]
        # Negative reals- treated as oscillary mode
        m_nr = (p.real < 0) & (np.abs(p.imag) < sqrt_eps)
        p_nr, p = p[m_nr], p[~m_nr]
        if p_nr.size > 0:
            t_emp = np.max(log_decay_percent / np.abs((np.log(p_nr) / dt).real))
            tfinal = max(tfinal, t_emp)
        # discrete integrators
        m_int = (p.real - 1 < sqrt_eps) & (np.abs(p.imag) < sqrt_eps)
        p_int, p = p[m_int], p[~m_int]
        # pure oscillatory modes
        m_w = (np.abs(np.abs(p) - 1) < sqrt_eps)
        p_w, p = p[m_w], p[~m_w]
        if p_w.size > 0:
            t_emp = total_cycles * 2 * np.pi / np.abs(np.log(p_w) / dt).min()
            tfinal = max(tfinal, t_emp)

        if p.size > 0:
            t_emp = log_decay_percent / np.abs((np.log(p) / dt).real).min()
            tfinal = max(tfinal, t_emp)

        if p_int.size > 0:
            tfinal = tfinal * 5
    else:  # cont time
        sys_ss = _convertToStateSpace(sys)
        # Improve conditioning via balancing and zeroing tiny entries
        # See <w,v> for [[1,2,0], [9,1,0.01], [1,2,10*np.pi]] before/after
        # balance
        b, (sca, perm) = matrix_balance(sys_ss.A, separate=True)
        p, l, r = eig(b, left=True, right=True)
        # Reciprocal of inner product <w,v> for each eigval, (bound the
        # ~infs by 1e12)
        # G = Transfer([1], [1,0,1]) gives zero sensitivity (bound by 1e-12)
        eig_sens = np.reciprocal(maximum(1e-12, einsum('ij,ij->j', l, r).real))
        eig_sens = minimum(1e12, eig_sens)
        # Tolerances
        p[np.abs(p) < np.spacing(eig_sens * norm(b, 1))] = 0.
        # Incorporate balancing to outer factors
        l[perm, :] *= np.reciprocal(sca)[:, None]
        r[perm, :] *= sca[:, None]
        w, v = sys_ss.C.dot(r), l.T.conj().dot(sys_ss.B)

        origin = False
        # Computing the "size" of the response of each simple mode
        wn = np.abs(p)
        if np.any(wn == 0.):
            origin = True

        dc = np.zeros_like(p, dtype=float)
        # well-conditioned nonzero poles, np.abs just in case
        ok = np.abs(eig_sens) <= 1 / sqrt_eps
        # the averaged t->inf response of each simple eigval on each i/o
        # channel. See, A = [[-1, k], [0, -2]], response sizes are
        # k-dependent (that is R/L eigenvector dependent)
        dc[ok] = norm(v[ok, :], axis=1) * norm(w[:, ok], axis=0) * eig_sens[ok]
        dc[wn != 0.] /= wn[wn != 0] if is_step else 1.
        dc[wn == 0.] = 0.
        # double the oscillating mode magnitude for the conjugate
        dc[p.imag != 0.] *= 2

        # Now get rid of noncontributing integrators and simple modes if any
        relevance = (dc > 0.1 * dc.max()) | ~ok
        psub = p[relevance]
        wnsub = wn[relevance]

        tfinal, dt = [], []
        ints = wnsub == 0.
        iw = (psub.imag != 0.) & (np.abs(psub.real) <= sqrt_eps)

        # Pure imaginary?
        if np.any(iw):
            tfinal += (total_cycles * 2 * np.pi / wnsub[iw]).tolist()
            dt += (2 * np.pi / pts_per_cycle / wnsub[iw]).tolist()
        # The rest ~ts = log(%ss value) / exp(Re(eigval)t)
        texp_mode = log_decay_percent / np.abs(psub[~iw & ~ints].real)
        tfinal += texp_mode.tolist()
        dt += minimum(
            texp_mode / 50,
            (2 * np.pi / pts_per_cycle / wnsub[~iw & ~ints])).tolist()

        # All integrators?
        if len(tfinal) == 0:
            return default_tfinal * 5, default_dt * 5

        tfinal = np.max(tfinal) * (5 if origin else 1)
        dt = np.min(dt)

    return tfinal, dt
def ispos(mat):
    evals = eigvals(mat)
    if np.all(evals >= 0.0):
        return True
    return False
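# Hedged variant of the check above for the symmetric/Hermitian case (a
# sketch): scipy's eigvals returns a complex array, and ordering comparisons
# on complex values raise a TypeError, so eigvalsh is both safer and cheaper
# when the input is known to be symmetric.
import numpy as np
from scipy.linalg import eigvalsh

def ispos_sym(mat):
    return bool(np.all(eigvalsh(mat) >= 0.0))

print(ispos_sym(np.array([[2.0, 1.0], [1.0, 2.0]])))   # True (eigs 1 and 3)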
def analyseMatrix(M):
    # enumerate yields (index, value); filter on the eigenvalue, not the index
    print('Negative eigenvalues:',
          [(v, i) for (i, v) in enumerate(linalg.eigvals(M)) if v.real < 0])
    print('Symmetric: ', np.allclose(M, M.T))
# Trigonometric functions - sinm, cosm, tanm
linalg.sinm(A_sq)
linalg.sinm(A_non_sq)

# Hyperbolic trigonometric functions - sinhm, coshm, tanhm
linalg.sinhm(A_sq)
linalg.sinhm(A_non_sq)

# Arbitrary function
from scipy import special, random, linalg

np.random.seed(1234)
A = random.rand(3, 3)
B = linalg.funm(A, lambda x: special.jv(0, x))
A
B
linalg.eigvals(A)
special.jv(0, linalg.eigvals(A))
linalg.eigvals(B)

# -----------------------------------------------------------------------------
# SPARSE EIGENVALUE PROBLEMS WITH ARPACK
# Can be used to find only smallest/largest/real/complex part eigenvalues
from scipy.linalg import eig, eigh
from scipy.sparse.linalg import eigs, eigsh

np.set_printoptions(suppress=True)
np.random.seed(0)
### EIGENVALUE PROBLEMS
### ===================

# (1): eig
# ---
# Solve an ordinary or generalized eigenvalue problem of a square matrix.
print(linalg.eig(a))

# (2): eigvals
# ---
# Compute eigenvalues from an ordinary or generalized eigenvalue problem.
print(linalg.eigvals(a))

###################################################################################################

### ==============
### DECOMPOSITIONS
### ==============

# (1.a): lu
# ---
# Compute pivoted LU decomposition of a matrix.
P, L, U = linalg.lu(a)
print(P)
# Make the scaling matrix for RHS of equation
RHS = eye(10 * vLen, 10 * vLen)
RHS[:3 * vLen, :3 * vLen] = Re * eye(3 * vLen, 3 * vLen)

# Zero all elements corresponding to p equation
RHS[3 * vLen:4 * vLen, :] = zeros((vLen, 10 * vLen))

# Apply boundary conditions to RHS
for i in range(3 * (2 * N + 1)):
    RHS[M * (i + 1) - 1, M * (i + 1) - 1] = 0
    RHS[M * (i + 1) - 2, M * (i + 1) - 2] = 0
del i

# Use library function to solve for eigenvalues/vectors
print('in linalg.eig time=', (time.time() - startTime))
eigenvals = linalg.eigvals(equations_matrix, RHS, overwrite_a=True)

# Save output
eigarray = vstack((real(eigenvals), imag(eigenvals))).T
# remove nans and infs from eigenvalues
eigarray = eigarray[~isnan(eigarray).any(1), :]
eigarray = eigarray[~isinf(eigarray).any(1), :]

savetxt('ev-k{k}{file}-redn{Nselect}.dat'.format(k=k, file=filename[:-7],
                                                 Nselect=Nselect), eigarray)

# stop the clock
print('done in', (time.time() - startTime))
def h_infinity_norm(ss, **kwargs):
    r"""
    Returns H-infinity norm of a linear system using iterative methods.

    The H-infinity norm of a MIMO system is traditionally calculated finding
    the largest SVD of the transfer function evaluated across the entire
    frequency spectrum. That can prove costly for a large number of
    evaluations, hence the iterative methods of [1] are employed.

    In the case of a SISO system the H-infinity norm corresponds to the
    maximum frequency gain.

    A scalar value is returned if the system is stable. If the system is
    unstable it returns ``np.Inf``.

    References:
        [1] Bruinsma, N. A., & Steinbuch, M. (1990). A fast algorithm to
        compute the H∞-norm of a transfer function matrix. Systems and
        Control Letters, 14(4), 287-293.
        https://doi.org/10.1016/0167-6911(90)90049-Z

    Args:
        ss (sharpy.linear.src.libss.ss): Multi input multi output system.
        **kwargs: Key-word arguments.

    Keyword Args:
        tol (float (optional)): Tolerance. Defaults to ``1e-7``.
        tol_imag_eigs (float (optional)): Tolerance to find purely imaginary
          eigenvalues. Defaults to ``1e-7``.
        iter_max (int (optional)): Maximum number of iterations.
        print_info (bool (optional)): Print status and information.
          Defaults to ``False``.

    Returns:
        float: H-infinity norm of the system.
    """
    tol = kwargs.get('tol', 1e-7)
    iter_max = kwargs.get('iter_max', 10)
    print_info = kwargs.get('print_info', False)

    # tolerance to find purely imaginary eigenvalues, i.e. those with
    # Re(eig) < tol_imag_eigs
    tol_imag_eigs = kwargs.get('tol_imag_eigs', 1e-7)

    if ss.dt is not None:
        ss = libss.disc2cont(ss)

    # 1) Compute eigenvalues of original system
    eigs = sclalg.eigvals(ss.A)

    if any(eigs.real > tol_imag_eigs):
        if print_info:
            try:
                cout.cout_wrap('System is unstable - H-inf = np.inf')
            except ValueError:
                print('System is unstable - H-inf = np.inf')
        return np.inf

    # 2) Find eigenvalue that maximises equation. If all real pick largest eig
    if np.max(np.abs(eigs.imag) < tol_imag_eigs):
        eig_m = np.max(eigs.real)
    else:
        eig_m, _ = max_eigs(eigs)

    # 3) Choose best option for gamma_lb
    max_steady_state = np.max(
        sclalg.svd(ss.transfer_function_evaluation(0), compute_uv=False))
    max_eig_m = np.max(
        sclalg.svd(ss.transfer_function_evaluation(1j * np.abs(eig_m)),
                   compute_uv=False))
    max_d = np.max(sclalg.svd(ss.D, compute_uv=False))

    gamma_lb = max(max_steady_state, max_eig_m, max_d)

    iter_num = 0

    if print_info:
        try:
            cout.cout_wrap(
                'Calculating H-inf norm\n{0:>4s} ::::: {1:^8s}'.format(
                    'Iter', 'Hinf'))
        except ValueError:
            print('Calculating H_inf norm\n{0:>4s} ::::: {1:^8s}'.format(
                'Iter', 'Hinf'))

    while iter_num < iter_max:
        if print_info:
            try:
                cout.cout_wrap('{0:>4g} ::::: {1:>8.2e}'.format(
                    iter_num, gamma_lb))
            except ValueError:
                print('{0:>4g} ::::: {1:>8.2e}'.format(iter_num, gamma_lb))

        gamma = (1 + 2 * tol) * gamma_lb

        # 4) compute hamiltonian and eigenvalues
        ham = hamiltonian(gamma, ss)
        eigs = sclalg.eigvals(ham)

        # If any eigenvalues are purely imaginary
        if any(np.abs(eigs.real) < tol_imag_eigs):
            # Select imaginary eigenvalues and those with positive values
            condition_imag = (np.abs(eigs.real) < tol_imag_eigs) * eigs.imag > 0
            imag_eigs = eigs[condition_imag].imag

            # Sort them in decreasing order
            order = np.argsort(imag_eigs)[::-1]
            imag_eigs = imag_eigs[order]

            if len(imag_eigs) == 1:
                m = imag_eigs[0]
                svdmax = np.max(
                    sclalg.svd(ss.transfer_function_evaluation(1j * m),
                               compute_uv=False))
                gamma_lb = svdmax
            else:
                m_list = [
                    0.5 * (imag_eigs[i] + imag_eigs[i + 1])
                    for i in range(len(imag_eigs) - 1)
                ]
                svdmax = [
                    np.max(
                        sclalg.svd(ss.transfer_function_evaluation(1j * m),
                                   compute_uv=False)) for m in m_list
                ]
                gamma_lb = max(svdmax)
        else:
            gamma_ub = gamma
            break

        iter_num += 1

        if iter_num == iter_max:
            raise np.linalg.LinAlgError(
                'Unconverged H-inf solution after %g iterations' % iter_num)

    hinf = 0.5 * (gamma_lb + gamma_ub)

    return hinf
def SRLS(anchors, w, r2, rescale=False, z=None, print_out=False): '''Squared range least squares (SRLS) Algorithm written by A.Beck, P.Stoica in "Approximate and Exact solutions of Source Localization Problems". :param anchors: anchor points (Nxd) :param w: weights for the measurements (Nx1) :param r2: squared distances from anchors to point x. (Nx1) :param rescale: Optional parameter. When set to True, the algorithm will also identify if there is a global scaling of the measurements. Such a situation arise for example when the measurement units of the distance is unknown and different from that of the anchors locations (e.g. anchors are in meters, distance in centimeters). :param z: Optional parameter. Use to fix the z-coordinate of localized point. :param print_out: Optional parameter, prints extra information. :return: estimated position of point x. ''' def y_hat(_lambda): lhs = ATA + _lambda * D assert A.shape[0] == b.shape[0] assert A.shape[1] == f.shape[0], 'A {}, f {}'.format(A.shape, f.shape) rhs = (np.dot(A.T, b) - _lambda * f).reshape((-1, )) assert lhs.shape[0] == rhs.shape[0], 'lhs {}, rhs {}'.format( lhs.shape, rhs.shape) try: return np.linalg.solve(lhs, rhs) except: return np.zeros((lhs.shape[1], )) def phi(_lambda): yhat = y_hat(_lambda).reshape((-1, 1)) sol = np.dot(yhat.T, np.dot(D, yhat)) + 2 * np.dot(f.T, yhat) return sol.flatten() def phi_prime(_lambda): # TODO: test this. B = np.linalg.inv(ATA + _lambda * D) C = A.T.dot(b) - _lambda * f y_prime = -B.dot(D.dot(B.dot(C)) - f) y = y_hat(_lambda) return 2 * y.T.dot(D).dot(y_prime) + 2 * f.T.dot(y_prime) from scipy import optimize from scipy.linalg import sqrtm n, d = anchors.shape assert r2.shape[1] == 1 and r2.shape[0] == n, 'r2 has to be of shape Nx1' assert w.shape[1] == 1 and w.shape[0] == n, 'w has to be of shape Nx1' if z is not None: assert d == 3, 'Dimension of problem has to be 3 for fixing z.' 
    if rescale and z is not None:
        raise NotImplementedError('Cannot run rescale for fixed z.')

    if rescale and n < d + 2:
        raise ValueError(
            'A minimum of d + 2 ranges are necessary for rescaled ranging.')
    elif n < d + 1 and z is None:
        raise ValueError(
            'A minimum of d + 1 ranges are necessary for ranging.')
    elif n < d:
        raise ValueError('A minimum of d ranges are necessary for ranging.')

    Sigma = np.diagflat(np.power(w, 0.5))

    if rescale:
        A = np.c_[-2 * anchors, np.ones((n, 1)), -r2]
    else:
        if z is None:
            A = np.c_[-2 * anchors, np.ones((n, 1))]
        else:
            A = np.c_[-2 * anchors[:, :2], np.ones((n, 1))]
    A = Sigma.dot(A)

    if rescale:
        b = -np.power(np.linalg.norm(anchors, axis=1), 2).reshape(r2.shape)
    else:
        b = r2 - np.power(np.linalg.norm(anchors, axis=1), 2).reshape(r2.shape)
    if z is not None:
        b = b + 2 * anchors[:, 2].reshape((-1, 1)) * z - z**2
    b = Sigma.dot(b)

    ATA = np.dot(A.T, A)

    if rescale:
        D = np.zeros((d + 2, d + 2))
        D[:d, :d] = np.eye(d)
    else:
        if z is None:
            D = np.zeros((d + 1, d + 1))
        else:
            D = np.zeros((d, d))
        D[:-1, :-1] = np.eye(D.shape[0] - 1)

    if rescale:
        f = np.c_[np.zeros((1, d)), -0.5, 0.].T
    elif z is None:
        f = np.c_[np.zeros((1, d)), -0.5].T
    else:
        f = np.c_[np.zeros((1, 2)), -0.5].T

    eig = np.sort(np.real(eigvalsh(a=D, b=ATA)))
    if (print_out):
        print('ATA:', ATA)
        print('rank:', np.linalg.matrix_rank(A))
        print('eigvals:', eigvals(ATA))
        print('condition number:', np.linalg.cond(ATA))
        print('generalized eigenvalues:', eig)

    if eig[-1] > 1e-10:
        lower_bound = -1.0 / eig[-1]
    else:
        print('Warning: largest eigenvalue is zero!')
        lower_bound = -1e-5

    inf = 1e5
    xtol = 1e-12

    lambda_opt = 0
    # We look for the zero of phi between lower_bound and inf,
    # so phi must have different signs at the two endpoints.
    if (phi(lower_bound) > 0) and (phi(inf) < 0):
        # brentq is a robust bracketing root finder.
        try:
            lambda_opt = optimize.brentq(phi, lower_bound, inf, xtol=xtol)
        except (ValueError, RuntimeError):
            print(
                'SRLS error: brentq did not converge even though we found an '
                'estimate for the lower and upper bound. Setting lambda to 0.')
    else:
        try:
            lambda_opt = optimize.newton(phi,
                                         lower_bound,
                                         fprime=phi_prime,
                                         maxiter=1000,
                                         tol=xtol)
            assert phi(
                lambda_opt
            ) < xtol, 'did not find solution of phi(lambda)=0:={}'.format(
                phi(lambda_opt))
        except (RuntimeError, AssertionError):
            print('SRLS error: newton did not converge. Setting lambda to 0.')

    if (print_out):
        print('phi(lower_bound)', phi(lower_bound))
        print('phi(inf)', phi(inf))
        print('phi(lambda_opt)', phi(lambda_opt))
        pos_definite = ATA + lambda_opt * D
        eig = np.sort(np.real(eigvals(pos_definite)))
        print('should be strictly bigger than 0:', eig)

    # Compute best estimate
    yhat = y_hat(lambda_opt)

    if print_out and rescale:
        print('Scaling factor :', yhat[-1])

    if rescale:
        return yhat[:d], yhat[-1]
    elif z is None:
        return yhat[:d]
    else:
        return np.r_[yhat[0], yhat[1], z]
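# Minimal usage sketch for SRLS, assuming NumPy plus `eigvals`/`eigvalsh`
# from scipy.linalg at module scope (as the function body expects). The
# anchor layout and the noiseless ranges are made up for illustration.
import numpy as np

rng = np.random.default_rng(0)
anchors = rng.uniform(0, 10, size=(6, 3))
x_true = np.array([4.0, 5.0, 6.0])
r2 = np.sum((anchors - x_true) ** 2, axis=1).reshape(-1, 1)
w = np.ones((6, 1))

x_est = SRLS(anchors, w, r2)
print(x_est)  # should be close to x_true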
def _compute_tfinal_and_dt(sys, is_step=True):
    """
    Helper function to estimate a final time and a sampling period for
    time domain simulations. It is essentially geared towards impulse
    response but is also used for step responses.

    For discrete-time models, dt is inherent to the model and only tfinal
    is computed.

    Parameters
    ----------
    sys : {State, Transfer}
        The system to be investigated
    is_step : bool
        Scales the dc value by the magnitude of the nonzero mode since
        integrating the impulse response gives ∫exp(-λt) dt = -exp(-λt)/λ.
        Default is True.

    Returns
    -------
    tfinal : float
        The final time instance for which the simulation will be performed.
    dt : float
        The estimated sampling period for the simulation.

    Notes
    -----
    Evaluating only the fastest mode for dt and the slowest for tfinal
    often leads to unnecessary, bloated sampling (e.g.,
    Transfer(1, [1, 1001, 1000])), since dt will be very small and tfinal
    too large even though the fast mode hardly contributes. Similarly,
    change the numerator to [1, 2, 0] and the simulation would be
    unnecessarily long with a plot that is virtually an L shape, since the
    decay is so fast.

    Instead, a modal decomposition in the time domain, hence a truncated
    ZIR and ZSR, can be used such that only the modes with a significant
    effect on the time response are taken. But the sensitivity of the
    eigenvalues complicates the matter, since dλ = <w, dA*v> with
    <w, v> = 1. Hence we can only work with simple poles in this
    formulation. See Golub, Van Loan Section 7.2.2 for simple eigenvalue
    sensitivity and the nonunity of <w, v>. The size of the response
    depends on the size of the eigenshapes rather than the eigenvalues
    themselves.
    """
    sqrt_eps = np.sqrt(np.spacing(1.))
    min_points = 100  # min number of points
    min_points_z = 20  # min number of points for discrete models
    max_points = 10000  # max number of points
    max_points_z = 75000  # max number of points for discrete models
    default_tfinal = 5  # Default simulation horizon
    total_cycles = 5  # number of cycles for oscillating modes
    pts_per_cycle = 25  # Number of points to divide a period of oscillation
    log_decay_percent = np.log(100)  # Reduction factor for real pole decays

    # if a static model is given, don't bother with checks
    if sys._isgain:
        if sys._isdiscrete:
            return sys._dt * min_points_z, sys._dt
        else:
            return default_tfinal, default_tfinal / min_points

    if sys._isdiscrete:
        # System already has its sampling fixed, hence we can't fall into
        # the trap mentioned above. Just get the nonintegrating slow modes
        # together with the damping.
        dt = sys._dt
        tfinal = default_tfinal
        p = eigvals(sys.a)
        # Array masks
        # unstable
        m_u = (np.abs(p) >= 1 + sqrt_eps)
        p_u, p = p[m_u], p[~m_u]
        if p_u.size > 0:
            m_u = (p_u.real < 0) & (np.abs(p_u.imag) < sqrt_eps)
            if np.any(~m_u):
                t_emp = np.max(
                    log_decay_percent / np.abs(np.log(p_u[~m_u]) / dt))
                tfinal = max(tfinal, t_emp)

        # zero - negligible effect on tfinal
        m_z = np.abs(p) < sqrt_eps
        p = p[~m_z]
        # Negative reals - treated as oscillatory modes
        m_nr = (p.real < 0) & (np.abs(p.imag) < sqrt_eps)
        p_nr, p = p[m_nr], p[~m_nr]
        if p_nr.size > 0:
            t_emp = np.max(log_decay_percent / np.abs((np.log(p_nr) / dt).real))
            tfinal = max(tfinal, t_emp)
        # discrete integrators (poles at z ≈ 1)
        m_int = (np.abs(p.real - 1) < sqrt_eps) & (np.abs(p.imag) < sqrt_eps)
        p_int, p = p[m_int], p[~m_int]
        # pure oscillatory modes
        m_w = (np.abs(np.abs(p) - 1) < sqrt_eps)
        p_w, p = p[m_w], p[~m_w]
        if p_w.size > 0:
            t_emp = total_cycles * 2 * np.pi / np.abs(np.log(p_w) / dt).min()
            tfinal = max(tfinal, t_emp)

        if p.size > 0:
            t_emp = log_decay_percent / np.abs((np.log(p) / dt).real).min()
            tfinal = max(tfinal, t_emp)

        if p_int.size > 0:
            tfinal = tfinal * 5

        # Make tfinal an integer multiple of dt
        num_samples = tfinal // dt
        if num_samples > max_points_z:
            tfinal = dt * max_points_z
        else:
            tfinal = dt * num_samples

        return tfinal, dt

    # Improve conditioning via balancing and zeroing tiny entries
    # See <w,v> for [[1,2,0], [9,1,0.01], [1,2,10*np.pi]] before/after balance
    b, (sca, perm) = matrix_balance(sys.a, separate=True)
    p, l, r = eig(b, left=True, right=True)
    # Reciprocal of the inner product <w,v> for each λ (bound the ~infs by 1e12)
    # G = Transfer([1], [1,0,1]) gives zero sensitivity (bound by 1e-12)
    eig_sens = reciprocal(maximum(1e-12, einsum('ij,ij->j', l, r).real))
    eig_sens = minimum(1e12, eig_sens)
    # Tolerances
    p[np.abs(p) < np.spacing(eig_sens * norm(b, 1))] = 0.
    # Incorporate balancing into the outer factors
    l[perm, :] *= reciprocal(sca)[:, None]
    r[perm, :] *= sca[:, None]
    w, v = sys.c @ r, l.T.conj() @ sys.b

    origin = False
    # Computing the "size" of the response of each simple mode
    wn = np.abs(p)
    if np.any(wn == 0.):
        origin = True

    dc = zeros_like(p, dtype=float)
    # well-conditioned nonzero poles, np.abs just in case
    ok = np.abs(eig_sens) <= 1 / sqrt_eps
    # the averaged t→∞ response of each simple λ on each i/o channel
    # See A = [[-1, k], [0, -2]]; response sizes are k-dependent (that is,
    # R/L eigenvector dependent)
    dc[ok] = norm(v[ok, :], axis=1) * norm(w[:, ok], axis=0) * eig_sens[ok]
    dc[wn != 0.] /= wn[wn != 0] if is_step else 1.
    dc[wn == 0.] = 0.
    # double the oscillating mode magnitude for the conjugate
    dc[p.imag != 0.] *= 2

    # Now get rid of noncontributing integrators and simple modes, if any
    relevance = (dc > 0.1 * dc.max()) | ~ok
    psub = p[relevance]
    wnsub = wn[relevance]
    tfinal, dt = [], []
    ints = wnsub == 0.
    iw = (psub.imag != 0.) & (np.abs(psub.real) <= sqrt_eps)

    # Pure imaginary?
    if np.any(iw):
        tfinal += (total_cycles * 2 * np.pi / wnsub[iw]).tolist()
        dt += (2 * np.pi / pts_per_cycle / wnsub[iw]).tolist()
    # The rest: settling time ts ≈ log(decay factor) / |Re(λ)|
    texp_mode = log_decay_percent / np.abs(psub[~iw & ~ints].real)
    tfinal += texp_mode.tolist()
    dt += minimum(texp_mode / 50,
                  (2 * np.pi / pts_per_cycle / wnsub[~iw & ~ints])).tolist()

    # All integrators?
    if len(tfinal) == 0:
        return default_tfinal * 5, default_tfinal * 5 / min_points

    tfinal = np.max(tfinal) * (5 if origin else 1)
    dt = np.min(dt)

    dt = tfinal / max_points if tfinal // dt > max_points else dt
    tfinal = dt * min_points if tfinal // dt < min_points else tfinal

    return tfinal, dt
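# Toy illustration of the heuristic above (not the full function): the
# slowest decay sets the horizon via a 100x reduction, and the oscillation
# frequency sets the step at 25 points per cycle. Matrix chosen arbitrarily.
import numpy as np
from scipy.linalg import eigvals

A = np.array([[-0.5, 2.0], [-2.0, -0.5]])
p = eigvals(A)  # -0.5 +/- 2j
tfinal = np.max(np.log(100) / np.abs(p.real))   # ~9.2 s to decay by 100x
dt = np.min(2 * np.pi / (25 * np.abs(p.imag)))  # ~0.126 s per sample
print(tfinal, dt)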
        if len(V[i]) == n - 1:
            return True
    return False

print(testH(exemple1, 7))
print(testH(exemple2, 5))
print(testH(exemple3, 4))

# ## Part 3

# In[26]:

import scipy.linalg as lg
print(lg.eigvals(A1))

# ## Part 6

# In[28]:

exemple4 = [[1, 4], [0, 2], [1, 3], [2, 4], [0, 3]]

# In[34]:

def graphe(p):
    V = [[1, 4], [0, 2], [1, 3], [2, 4], [0, 3]]
    n = 5
    for k in range(p):
def test_simple(self): a = [[1, 2, 3], [1, 2, 3], [2, 5, 6]] w = eigvals(a) exact_w = [(9 + sqrt(93)) / 2, 0, (9 - sqrt(93)) / 2] assert_array_almost_equal(w, exact_w)
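# Where the exact values in the test come from: two rows of the matrix are
# equal, so one eigenvalue is 0; the other two are roots of the quadratic
# factor of the characteristic polynomial, lambda**2 - 9*lambda - 3
# (trace 9, sum of principal 2x2 minors -3). A quick check:
import numpy as np

print(np.roots([1, -9, -3]))                         # ~[9.3219, -0.3219]
print((9 + np.sqrt(93)) / 2, (9 - np.sqrt(93)) / 2)  # same values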
# === Define the prior density === # Sigma = [[0.9, 0.3], [0.3, 0.9]] Sigma = np.array(Sigma) x_hat = np.array([8, 8]) # === Initialize the Kalman filter === # kn = Kalman(A, G, Q, R) kn.set_state(x_hat, Sigma) # === Set the true initial value of the state === # x = np.zeros(2) # == Print eigenvalues of A == # print("Eigenvalues of A:") print(eigvals(A)) # == Print stationary Sigma == # S, K = kn.stationary_values() print("Stationary prediction error variance:") print(S) # === Generate the plot === # T = 50 e1 = np.empty(T) e2 = np.empty(T) for t in range(T): # == Generate signal and update prediction == # y = multivariate_normal(mean=np.dot(G, x), cov=R) kn.update(y) # == Update state and record error == #
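# Cross-check of kn.stationary_values(): for x_{t+1} = A x_t + w,
# y_t = G x_t + v, the stationary prediction-error variance S solves a
# discrete algebraic Riccati equation, obtained by duality. Toy matrices
# here; the snippet's own A, G, Q, R are defined earlier in the script.
import numpy as np
from scipy.linalg import solve_discrete_are

A = np.array([[0.9, 0.1], [0.0, 0.8]])
G = np.eye(2)
Q = 0.1 * np.eye(2)  # state shock covariance
R = 0.2 * np.eye(2)  # measurement noise covariance
S = solve_discrete_are(A.T, G.T, Q, R)
print(S)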
# Initialize system parameters and matrices f = 1.0 # Spring Constant m1 = 0.4 m2 = 1.0 F = lambda q: np.array([[2 * f, -f, 0, -f * np.exp(-1j * q)], [ -f, 2 * f, -f, 0 ], [0, -f, 2 * f, -f], [-f * np.exp(1j * q), 0, -f, 2 * f]]) M = np.diag([m1, m1, m1, m2]) # Set-up eigenvalue matrix eig_mat = lambda q: inv(M).dot(F(q)) # Plot eigenvalues (normal mode frequencies) as a function of wavenumber x_axis = np.arange(-np.pi, np.pi, np.pi / 50) eigenlist = [eigvals(eig_mat(x)) for x in x_axis] eigenlist_2 = np.sqrt(np.abs(eigenlist)) # Figure 2.12: Frequencies of the four eigenmodes of the linear chain. plt.figure(1) plt.plot(x_axis, eigenlist_2, "k.") plt.xticks([-np.pi, -np.pi / 2, 0, np.pi / 2, np.pi], [r"$-\pi$", r"$-\frac{\pi}{2}$", "0", r"$\frac{\pi}{2}$", r"$\pi$"]) plt.xlabel('q', size='xx-large') plt.ylabel(r'$\omega$', size='xx-large') plt.show() # Print eigenvectors for q = 0 eigen_sys = eig(eig_mat(0.0)) omega = np.sqrt(np.abs(eigen_sys[0])) # Normal Mode Frequencies eigen_vecs = np.abs(eigen_sys[1]) # Eigenvectors
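# Sanity check of the dispersion plot: at q = 0 the rows of F sum to zero,
# so uniform translation is a null mode of inv(M) @ F (the acoustic branch
# with omega = 0). Uses the same f, m1, m2 values as above.
import numpy as np
from scipy.linalg import eigvals, inv

f, m1, m2 = 1.0, 0.4, 1.0
F0 = np.array([[2*f, -f, 0, -f], [-f, 2*f, -f, 0],
               [0, -f, 2*f, -f], [-f, 0, -f, 2*f]])
M = np.diag([m1, m1, m1, m2])
print(np.min(np.abs(eigvals(inv(M) @ F0))))  # ~0: the acoustic mode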
def get_ergodic(F: np.ndarray, Q: np.ndarray, B: np.ndarray=None,
        x_0: np.ndarray=None, force_diffuse: List[bool]=None,
        is_warning: bool=True) -> Tuple[np.ndarray, np.ndarray]:
    """
    Calculate the initial state covariance matrix, and identify diffuse
    states. It effectively solves a Lyapunov equation.

    Parameters:
    ----------
    F : state transition matrix
    Q : initial error covariance matrix
    B : regression matrix
    x_0 : initial x, used for calculating the ergodic mean
    force_diffuse : list of booleans of user-determined diffuse states
    is_warning : whether to show the warning message

    Returns:
    ----------
    P_0 : the initial state covariance matrix, np.inf for diffuse states
    xi_0 : the initial state mean, 0 for diffuse states
    """
    Q_ = Q.copy()
    dim = Q.shape[0]

    # If force_diffuse is not supplied, create the list
    if force_diffuse is None:
        is_diffuse = np.zeros(dim, dtype=bool)
    else:
        is_diffuse = deepcopy(force_diffuse)
        if len(is_diffuse) != dim:
            raise ValueError('force_diffuse has wrong size')

    # Check F and Q
    if F.shape[0] != F.shape[1]:
        raise TypeError('F must be a square matrix')
    if Q.shape[0] != Q.shape[1]:
        raise TypeError('Q must be a square matrix')
    if F.shape[0] != Q.shape[0]:
        raise TypeError('Q and F must be of the same size')

    # If explosive roots, use fully diffuse initialization
    # and issue a warning
    eig = linalg.eigvals(F)
    if np.any(np.abs(eig) > 1):
        if is_warning:
            warnings.warn('F contains explosive roots. Assumptions ' + \
                    'of the marginal LL correction may be violated, and ' + \
                    'results may be biased or inconsistent. Please provide ' + \
                    'user-defined xi_1_0 and P_1_0.', RuntimeWarning)
        is_diffuse_explosive = get_explosive_diffuse(F)
        is_diffuse = is_diffuse | is_diffuse_explosive

    # Modify Q_ to reflect diffuse states
    Q_ = mask_nan(is_diffuse, Q_, diag=inf_val)

    # Calculate raw P_0
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        P_0 = lyap(F, Q_, 'bilinear')

    # Clean up P_0
    for i in range(dim):
        if np.abs(P_0[i][i]) > max_val:
            is_diffuse[i] = True
    P_0 = mask_nan(is_diffuse, P_0, diag=0)

    # Enforce PSD
    P_0_PSD = get_nearest_PSD(P_0)

    # Add nan to diffuse diagonal values
    P_0_PSD += np.diag(np.array([np.nan if i else 0 for i in is_diffuse]))

    # Compute the ergodic mean
    if B is None or x_0 is None:
        Bx = np.zeros([dim, 1])
    else:
        if B.shape[0] != dim:
            raise ValueError('B has the wrong dimension')
        Bx = B.dot(x_0)
    Bx[is_diffuse] = 0
    F_star = F.copy()
    F_star[is_diffuse] = 0
    xi_0 = inv(np.eye(dim) - F_star).dot(Bx)
    return P_0_PSD, xi_0
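# For a purely stationary state (no diffuse components), the P_0 computed
# above reduces to the fixed point of the discrete Lyapunov equation
# P = F P F' + Q; a minimal sketch using SciPy directly:
import numpy as np
from scipy.linalg import solve_discrete_lyapunov

F = np.array([[0.8, 0.1], [0.0, 0.5]])  # stable: all |eig| < 1
Q = 0.1 * np.eye(2)
P_0 = solve_discrete_lyapunov(F, Q)
print(np.allclose(P_0, F @ P_0 @ F.T + Q))  # True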
def find_zeros_and_poles_(self, eps_reg=1e-15, eps_stop=1e-10, P_estimate=0,
                          pprint=False):
    """
    Find the zeros and poles of a complex function (C->C) inside a closed
    curve.

    input:
    - fun: callable, the function of which the poles have to be found
    - dfun: callable, the derivative of the function
    - contours: list of callables, the contours (R->C) that constitute a
      closed curve in the complex plane
    - dcontours: list of callables, the derivatives (R->C) of the
      respective contours
    - t_params: list of arrays, the parametrizations of the contours

    !!! Hard to get stopping right !!!
    """
    # total multiplicity of zeros (number of zeros * order)
    pol_unity = monicPolynomial([])
    p_unity = pol_unity.f_polynomial()
    s0 = int(round(self.inner_prod(p_unity, p_unity).real))
    N = s0 + 2 * P_estimate
    if pprint:
        print('N =', N)
    # list of FOPs
    phis = []
    pols = []
    phis.append(p_unity)
    pols.append(pol_unity)  # phi_0
    # if s0 is zero
    if s0 == 0:
        n = 0
        # arithmetic mean of nodes is zero
        mu = 0.
        pol = monicPolynomial([0.])
        phis.append(pol.f_polynomial())
        pols.append(pol)  # phi_1
        zeros = np.array([])
        r = 0
        t = 1
    else:
        # compute the arithmetic mean of the nodes
        p_aux = monicPolynomial([0.]).f_polynomial()
        mu = self.inner_prod(p_unity, p_aux) / N
        if pprint:
            print('mu =', mu)
        # append first polynomial
        pol = monicPolynomial([-mu])
        phis.append(pol.f_polynomial())
        pols.append(pol)  # phi_1
        # if the algorithm quits after the first zero, it is mu
        zeros = np.array([mu])
        n = 1
        # initialization
        r = 1
        t = 0
    while r + t < N:
        if pprint:
            print('1.')
        # naive criterion to check if phi_{r+t+1} is regular
        prod_aux, maxpsum = self.inner_prod(phis[-1], phis[-1],
                                            compute_maxpsum=True)
        # compute eigenvalues of the next pencil
        G, G1 = self.generalized_hankel_matrices(phis, pols)
        eigv = la.eigvals(G1, G)
        # check if these eigenvalues lie within the contour
        if self.points_within_contour(eigv + mu):
            # if np.abs(prod_aux)/maxpsum > eps_reg:
            if pprint:
                print('1.1')
            # compute the next FOP in the regular way
            pol = monicPolynomial(eigv, coef_type='zeros')
            phis.append(pol.f_polynomial())
            pols.append(pol)
            r += t + 1
            t = 0
            allsmall = True
            tau = 0
            while allsmall and (r + tau) < N:
                if pprint:
                    print('1.1.1')
                # check whether all further inner products are zero
                taupol = npol.polyfromroots([mu for _ in range(tau)])
                tauphi = monicPolynomial(
                    npol.polymul(taupol, pols[-1].coef),
                    coef_type='normal').f_polynomial()
                ip, maxpsum = self.inner_prod(tauphi, phis[r],
                                              compute_maxpsum=True)
                tau += 1
                if np.abs(ip) / maxpsum > eps_stop:
                    allsmall = False
            if allsmall:
                if pprint:
                    print('1.1.2: STOP')
                n = r
                zeros = eigv + mu
                t = N  # STOP
        else:
            if pprint:
                print('1.2')
            c = npol.polyfromroots([mu])
            pol = monicPolynomial(npol.polymul(c, pols[-1].coef),
                                  coef_type='normal')
            pols.append(pol)
            phis.append(pol.f_polynomial())
            t += 1
    # check multiplicities
    # construct the Vandermonde system
    A = np.vander(zeros).T[::-1, :]
    b = np.array(
        [self.inner_prod(p_unity, lambda x, k=k: x**k) for k in range(n)])
    # solve the Vandermonde system
    nu = la.solve(A, b)
    nu = np.round(nu).real.astype(int)
    # print the result
    if pprint:
        print('>>> Result of computation of zeros <<<')
        print('number of zeros = ', n - len(np.where(nu == 0)[0]))
        for i, zero in enumerate(zeros):
            print('zero #' + str(i + 1) + ' = ' + str(zero) +
                  ' (multiplicity = ' + str(nu[i]) + ')')
        print('')
    # eliminate spurious zeros (the ones with zero multiplicity)
    inds = np.where(nu > 0)[0]
    return zeros[inds], n, nu[inds]
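# The starting count s0 above plays the role of the argument-principle
# integral (1/(2*pi*i)) * integral of f'(z)/f(z) dz over the contour.
# Standalone sketch (independent of the class machinery) for
# f(z) = z**2 - 0.25 on the unit circle, which encloses two simple zeros
# at +/- 0.5:
import numpy as np

t = np.linspace(0, 2 * np.pi, 2001)
z = np.exp(1j * t)
integrand = (2 * z) / (z ** 2 - 0.25) * 1j * z  # dz = i z dt
s0 = np.sum(integrand[:-1]) * (t[1] - t[0]) / (2j * np.pi)
print(round(s0.real))  # 2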
def ft(theta: np.ndarray, f: Callable, T: int, x_0: np.ndarray=None,
        xi_1_0: np.ndarray=None, P_1_0: np.ndarray=None,
        force_diffuse: List[bool]=None, is_warning: bool=True,
        const_M_type: str='simple') -> Dict:
    """
    Duplicate arrays in M = f(theta) and generate a list of Mt.
    The output of f(theta) must contain all the required keys.

    Parameters:
    ----------
    theta : input of f(theta). Underlying parameters to be optimized
    f : obtained from get_f. Maps theta to M
    T : length of Mt. "Duplicates" M for T periods
    x_0 : establishes the initial state mean
    xi_1_0 : specifies the initial state mean. Overrides the calculated mean
    P_1_0 : initial state covariance
    force_diffuse : user-defined diffuse states
    is_warning : whether to display the warning about explosive roots
    const_M_type : type of list of constant matrices, default 'simple'

    Returns:
    ----------
    Mt : system matrices for a BSTS. Should contain all the required keywords.
    """
    M = f(theta)

    # If an array is 1D, convert it to 2D;
    # if a value is close to 0 or inf, set it to 0 or inf
    for key in M.keys():
        if M[key].ndim < 2:
            M[key] = M[key].reshape(1, -1)
        M[key] = clean_matrix(M[key])

    # Check validity of M
    required_keys = set(['F', 'H', 'Q', 'R'])
    M_keys = set(M.keys())
    if len(required_keys - M_keys) > 0:
        raise ValueError('f does not have required outputs: {}'.
                format(required_keys - M_keys))

    # Check dimensions of M
    for key in M_keys:
        if len(M[key].shape) < 2:
            raise TypeError('System matrices must be 2D')

    # Check PSD of R and Q (eigvalsh returns real eigenvalues
    # for symmetric input)
    if np.array_equal(M['Q'], M['Q'].T):
        eig_Q = linalg.eigvalsh(M['Q'])
        if not np.all(eig_Q >= 0):
            raise ValueError('Q is not semi-PSD')
    else:
        raise ValueError('Q is not symmetric')

    if np.array_equal(M['R'], M['R'].T):
        eig_R = linalg.eigvalsh(M['R'])
        if not np.all(eig_R >= 0):
            raise ValueError('R is not semi-PSD')
    else:
        raise ValueError('R is not symmetric')

    # Generate ft for required keys
    Ft = build_tensor(M['F'], T)
    Ht = build_tensor(M['H'], T)
    Qt = build_tensor(M['Q'], T)
    Rt = build_tensor(M['R'], T)

    # Set B if B is not given
    if 'B' not in M_keys:
        dim_xi = M['F'].shape[0]
        if 'D' not in M_keys:
            M.update({'B': np.zeros((dim_xi, 1))})
        else:
            dim_x = M['D'].shape[1]
            M.update({'B': np.zeros((dim_xi, dim_x))})
    if 'D' not in M_keys:
        dim_x = M['B'].shape[1]  # B is already defined
        dim_y = M['H'].shape[0]
        M.update({'D': np.zeros((dim_y, dim_x))})

    # Get Bt and Dt for ft
    Bt = build_tensor(M['B'], T)
    Dt = build_tensor(M['D'], T)

    # Initialization
    if P_1_0 is None or xi_1_0 is None:
        P_1_0, xi_1_0 = get_ergodic(M['F'], M['Q'], M['B'], x_0=x_0,
                force_diffuse=force_diffuse, is_warning=is_warning)

    Mt = {'Ft': Ft, 'Bt': Bt, 'Ht': Ht, 'Dt': Dt, 'Qt': Qt, 'Rt': Rt,
            'xi_1_0': xi_1_0, 'P_1_0': P_1_0}
    return Mt
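# The symmetry-plus-eigenvalue test above is the standard PSD check; a
# standalone helper (hypothetical name, with a small round-off tolerance)
# showing the same idea:
import numpy as np
from scipy import linalg

def is_psd(M, tol=1e-12):
    return np.array_equal(M, M.T) and bool(np.all(linalg.eigvalsh(M) >= -tol))

print(is_psd(np.eye(2)))                       # True
print(is_psd(np.array([[1., 2.], [2., 1.]])))  # False: eigenvalues 3, -1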
def _numeric_nullspace_dim(mat):
    """Numerically compute the nullspace dimension of a matrix."""
    mat_numeric = np.array(mat.evalf().tolist(), dtype=complex)
    eigenvals = la.eigvals(mat_numeric)
    # Counts near-zero eigenvalues, i.e. the algebraic multiplicity of 0;
    # this equals the nullspace dimension whenever the matrix is
    # diagonalizable (for defective matrices it is an upper bound).
    return np.sum(np.isclose(eigenvals, np.zeros_like(eigenvals)))
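# Quick usage sketch, assuming `mat` is a SymPy matrix (as the
# evalf()/tolist() calls imply) and `la` is scipy.linalg:
import numpy as np
import scipy.linalg as la
import sympy as sp

mat = sp.Matrix([[1, 2], [2, 4]])   # rank 1, so nullity 1
print(_numeric_nullspace_dim(mat))  # 1 (eigenvalues are 0 and 5)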
m = np.mat('0 1 2; 1 0 3; 4 -3 8')
print(sla.det(m))  # scipy.linalg
print(nla.det(m))  # numpy.linalg

# 7.8.2 Computing the matrix inverse
m = np.mat('0 1 2; 1 0 3; 4 -3 8')
print(m.I)  # inverse via the matrix object's .I attribute
print(m * m.I)  # a matrix times its inverse is the identity
print(sla.inv(m))  # scipy.linalg
print(nla.inv(m))  # numpy.linalg

# 7.8.3 Computing eigenvalues and eigenvectors
A = np.mat('0 1 2; 1 0 3; 4 -3 8')  # create a 3x3 matrix
print(sla.eigvals(A))  # returns the 3 eigenvalues
print(sla.eig(A))  # returns a tuple of the 3 eigenvalues and 3 eigenvectors
print(nla.eigvals(A))  # returns the 3 eigenvalues
print(nla.eig(A))  # returns a tuple of the 3 eigenvalues and 3 eigenvectors

# 7.8.4 Singular value decomposition
A = np.mat(np.random.randint(0, 10, (3, 4)))
print(A)
u, s, v = sla.svd(A)
print(u.shape, s.shape, v.shape)
print(u)
print(s)
print(v)
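# np.mat and its .I attribute are deprecated in modern NumPy; the inverse
# section rewritten with plain arrays (my adaptation, not part of the
# original tutorial):
import numpy as np
import scipy.linalg as sla

m = np.array([[0, 1, 2], [1, 0, 3], [4, -3, 8]])
m_inv = sla.inv(m)
print(m @ m_inv)                          # identity, up to round-off
print(np.allclose(m @ m_inv, np.eye(3)))  # True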
import math
from scipy import linalg

def crDist(cr0, cr1):
    # Distance from the generalized eigenvalues of the pencil (cr0, cr1):
    # sqrt(sum_i log(|lambda_i|)**2). The original snippet called eigvals
    # through an `eigh` module alias; scipy.linalg provides the same routine.
    eigv = linalg.eigvals(cr0, cr1)
    total = 0.0
    for val in eigv:
        total += math.log(abs(val)) ** 2  # natural log, squared
    return math.sqrt(total)
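# For symmetric positive-definite inputs this is the affine-invariant
# Riemannian distance d(A, B) = ||log(A^(-1/2) B A^(-1/2))||_F. A
# vectorized equivalent (hypothetical name cr_dist):
import numpy as np
from scipy.linalg import eigvals

def cr_dist(cr0, cr1):
    lam = np.abs(eigvals(cr0, cr1))  # moduli of generalized eigenvalues
    return float(np.sqrt(np.sum(np.log(lam) ** 2)))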