def riccati(c_yy, x, y, t):
    '''
    Description:
        Solves the discrete Lyapunov equation A O A^T - O + Q = 0.
    Parameters:
        c_yy: a whitened covariance matrix, used in the calculation of Q
        x, y: matrices used to calculate the Koopman estimators
        t: time lag
    Returns:
        The matrix O that solves the above equation.
    '''
    # c_xx, c_yy = whitening(x, y)
    K_xx, K_xy = koopman_est(x, y, t)
    evals_xx, evect_xx = np.linalg.eigh(K_xx)  # K_xx is Hermitian
    # evals_xy, evect_xy = np.linalg.eigh(K_xy)
    print('\n', evals_xx, '\n\n', evect_xx)
    try:
        v_x = evect_xx[:, list(evals_xx).index(1)]
    except ValueError:
        raise ValueError('cannot find eigenvalue = 1')
    # v_y = np.linalg.solve(K_xy, v_x) was extraneous: v_y is never used
    chi_bar = np.mean(chi(x).T, axis=1)
    gamma_bar = np.mean(gamma(y).T, axis=1)
    A = K_xx - np.outer(v_x, chi_bar)
    B = K_xy - np.outer(v_x, gamma_bar)
    Q = B.dot(np.linalg.inv(c_yy)).dot(B.T)
    return solve_discrete_lyapunov(A, Q)
def ComputeG(A0, A1, d, Q0, tau0, beta, mu):
    '''
    COMPUTEG Compute government income given mu and return tax revenues and
    policy matrices for the planner.

    Here A0, A1, d, Q0, tau0, beta and mu are all assumed to be floats.
    '''
    # Create matrices
    R = array([[0, -A0 / 2, 0, 0],
               [-A0 / 2, A1 / 2, -mu / 2, 0],
               [0, -mu / 2, 0, 0],
               [0, 0, 0, d / 2]])

    A = array([[1, 0, 0, 0],
               [0, 1, 0, 1],
               [0, 0, 0, 0],
               [-A0 / d, A1 / d, 0, A1 / d + 1 / beta]])

    B = array([0, 0, 1, 1 / d]).reshape(-1, 1)

    Q = 0

    # Use OLRP to solve the Ramsey problem.
    F, P = olrp(beta, A, B, -R, Q)

    # Need y_0 to compute government tax revenue.
    P21 = P[3, :3]
    P22 = P[3, 3]
    z0 = array([1, Q0, tau0]).reshape(-1, 1)
    u0 = -P22**(-1) * P21.dot(z0)
    y0 = vstack([z0, u0])

    # Define A_F and S matrices
    AF = A - B.dot(F)
    S = array([0, 1, 0, 0]).reshape(-1, 1).dot(
        array([0, 0, 1, 0]).reshape(1, -1))

    # Solves equation (25)
    # Omega = solve_sylvester(beta*AF.T, -linalg.inv(AF), -beta*AF.T.dot(S))
    Omega = solve_discrete_lyapunov(sqrt(beta) * AF.T,
                                    beta * AF.T.dot(S).dot(AF))

    T0 = y0.T.dot(Omega).dot(y0)

    return T0, A, B, F, P
def martin_dist_for_LDS(lds1, lds2):
    # Following formula (9) of the paper, build the joint matrix A with
    # dimension (2D, 2D) from the two state-transition matrices
    D = lds1[0].shape[0]
    A = torch.zeros((2 * D, 2 * D))
    A[:D, :D] = lds1[0]
    A[D:, D:] = lds2[0]

    # Following formula (10) of the paper, build the joint matrix C with
    # dimension (D, 2D) from the two observation matrices
    C = torch.zeros((D, 2 * D))
    C[:, :D] = lds1[1]
    C[:, D:] = lds2[1]

    # Following the paper, solve the Lyapunov equation A^T P A - P = -Q with
    # Q = C^T C. (Note: scipy's solve_discrete_lyapunov(A, Q) solves
    # A P A^T - P = -Q, i.e. the paper's equation up to a transpose of A.)
    Q = torch.mm(C.transpose(0, 1), C)
    P = solve_discrete_lyapunov(A, Q)

    # Following formula (8) of the paper, extract the blocks of P
    P11 = P[:D, :D]
    P12 = P[:D, D:]
    P21 = P[D:, :D]
    P22 = P[D:, D:]

    # Following formula (11) of the paper, compute the eigenvalues
    eig_P = np.matmul(np.linalg.inv(P11), P12)
    eig_P = np.matmul(eig_P, np.linalg.inv(P22))
    eig_P = np.matmul(eig_P, P21)
    eig_P = np.linalg.eig(eig_P)[0]

    # Following formula (12) of the paper, compute the final distance
    return np.sqrt(-np.log(np.prod(eig_P**2)))
def check_stationary_initialization_2dim(dtype=float):
    endog = np.zeros(10, dtype=dtype)

    # 2-dimensional example
    mod = MLEModel(endog, k_states=2, k_posdef=2)
    mod.ssm.initialize_stationary()
    intercept = np.array([2.3, -10.2], dtype=dtype)
    phi = np.array([[0.8, 0.1], [-0.2, 0.7]], dtype=dtype)
    sigma2 = np.array([[1.4, -0.2], [-0.2, 4.5]], dtype=dtype)
    mod['state_intercept'] = intercept
    mod['transition'] = phi
    mod['selection'] = np.eye(2).astype(dtype)
    mod['state_cov'] = sigma2

    mod.ssm._initialize_filter()
    mod.ssm._initialize_state()

    _statespace = mod.ssm._statespace
    initial_state = np.array(_statespace.initial_state)
    initial_state_cov = np.array(_statespace.initial_state_cov)

    desired = np.linalg.solve(np.eye(2).astype(dtype) - phi, intercept)
    assert_allclose(initial_state, desired)
    desired = solve_discrete_lyapunov(phi, sigma2)
    # precision reductions only required for single precision float / complex
    assert_allclose(initial_state_cov, desired, atol=1e-5)
def check_stationary_initialization_1dim(dtype=float):
    endog = np.zeros(10, dtype=dtype)

    # 1-dimensional example
    mod = MLEModel(endog, k_states=1, k_posdef=1)
    mod.ssm.initialize_stationary()
    intercept = np.array([2.3], dtype=dtype)
    phi = np.diag([0.9]).astype(dtype)
    sigma2 = np.diag([1.3]).astype(dtype)
    mod['state_intercept'] = intercept
    mod['transition'] = phi
    mod['selection'] = np.eye(1).astype(dtype)
    mod['state_cov'] = sigma2

    mod.ssm._initialize_filter()
    mod.ssm._initialize_state()

    _statespace = mod.ssm._statespace
    initial_state = np.array(_statespace.initial_state)
    initial_state_cov = np.array(_statespace.initial_state_cov)

    # precision reductions only required for float complex case

    # mean = intercept + phi * mean
    # intercept = (1 - phi) * mean
    # mean = intercept / (1 - phi)
    assert_allclose(initial_state, intercept / (1 - phi[0, 0]))
    desired = np.linalg.inv(np.eye(1) - phi).dot(intercept)
    assert_allclose(initial_state, desired)
    # var = phi**2 var + sigma2
    # var = sigma2 / (1 - phi**2)
    assert_allclose(initial_state_cov, sigma2 / (1 - phi**2))
    assert_allclose(initial_state_cov, solve_discrete_lyapunov(phi, sigma2))
def __init__(self, Y, A0, A1, U0, U1, Q, Phi, initV='unconditional',
             X0=None, V0=None, statevar_names=None):
    self.Y, self.A0, self.A1, self.U0, self.U1, self.Q, self.Phi, \
        self.initV, self.X0, self.V0, self.statevar_names = \
        Y, A0, A1, U0, U1, Q, Phi, initV, X0, V0, statevar_names
    n = self.U0.size
    if self.X0 is None:
        # initialize X0 at the unconditional mean
        self.X0 = np.mat(np.linalg.inv(np.identity(n) - self.U1) * self.U0)
    if self.V0 is None:
        if initV == 'steady_state':
            # solve the discrete Riccati equation
            self.V0 = np.mat(
                solve_discrete_are(np.array(self.U1.T), np.array(self.A1.T),
                                   np.array(self.Q), np.array(self.Phi.T)))
        elif initV == 'unconditional':
            # solve the discrete Lyapunov equation
            self.V0 = np.mat(
                solve_discrete_lyapunov(np.array(self.U1.T),
                                        np.array(self.Q)))
        elif initV == 'identity':
            self.V0 = np.mat(np.identity(n))
    if statevar_names is None:
        self.statevar_names = np.arange(len(self.X0))
    Kalman.kalmanCount += 1
def gen_results(i):
    """
    This function follows Zha to redraw coefficients from the regression and
    re-estimate the model parameters using those coefficients. Each of these
    draws can be weighted in relative importance by the marginal likelihood
    of X0 given the drawn VAR coefficients.

    Note that this function makes calls to np.block and
    la.solve_discrete_lyapunov, which as of yet are not compatible with
    numba's jit compiler.

    Input:
        i (int): Keeps track of the iteration number of the specific call.
            Used for random number generator seeding.
    """
    if current_process().pid % cpus == 0:
        pbar.update(cpus)  # Track progress on one core only

    # Get coefficient draws
    [G, BB, mx] = MLEVARsim(n, T, b_hat0, Lam, dt, lags, cn,
                            i + current_seed, noncinds)

    # Check if the matrix G is explosive; if so, discard the draw.
    # Otherwise, proceed.
    if np.all(np.abs(la.eigvals(G)) <= 1):
        Sigma = la.solve_discrete_lyapunov(G, BB)  # Sigma_j in the paper

        # Check Sigma_j for invertibility conditions
        if np.linalg.cond(Sigma) <= cond_tol:
            return process_VAR(G, Sigma, num_vars, uc, mx, BB, X0)

    return 0, 0, 0, 0, 0, 0, False
def _H2P(A, B, analog):
    """Computes the positive-definite P matrix for determining the H2-norm."""
    if analog:
        P = solve_lyapunov(A, -np.dot(B, B.T))  # AP + PA^T = -BB^T
    else:
        # Note: discretization is not performed for the user
        P = solve_discrete_lyapunov(A, np.dot(B, B.T))  # APA^T - P = -BB^T
    return P
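# A minimal sketch of how the P matrix above is typically consumed, assuming
# a readout matrix C for the same system (the helper name h2_norm is
# illustrative and not part of the original source): the squared H2 norm of
# the system is trace(C P C^T).
import numpy as np

def h2_norm(A, B, C, analog=True):
    # H2 norm via the gramian: ||G||_H2 = sqrt(trace(C P C^T))
    P = _H2P(A, B, analog)
    return np.sqrt(np.trace(C.dot(P).dot(C.T)))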
def steady_state_cost(A, B, Q, R, w_covr, e_covr, K):
    # mdot is presumably a chained matrix product helper defined elsewhere
    AK = A + np.dot(B, K)
    QK = Q + mdot(K.T, R, K)
    # Steady-state covariance of the state
    Xss = sla.solve_discrete_lyapunov(AK, w_covr + mdot(B, e_covr, B.T))
    # Steady-state cost
    css = np.trace(np.dot(QK, Xss))
    return css
def checkdstable(A):
    n = len(A)
    # P solves A^T P A - P + I = 0
    P = solve_discrete_lyapunov(A.T, np.identity(n))
    S = sqrtm(P)
    invS = np.linalg.inv(S)
    UB = S.dot(A).dot(invS)
    # Polar decomposition UB = U B, U orthogonal, B positive semidefinite
    [U, B] = polar(UB)
    B = projectPSD(B, 0, 1)
    return P, S, U, B
def P_and_Pe_associated_to_K(self, K):
    if self.is_stable(K):
        cl_map = self.a_cl(K)
        P = LA.solve_discrete_lyapunov(cl_map.T, self.Q + K.T @ self.R @ K)
        distP = LA.norm(P - self.P_opt, 2) / LA.norm(self.P_opt, 2)
    else:
        P = 100.0 * np.eye(self.n)
        distP = float("inf")
    return P, distP
def log_lik(self, p0, y=None, return_filtered=False):
    yy = np.array(self.yy)

    TT, RR, QQ, DD, ZZ, HH = self.system_matrices(p0)
    EXPeffect_f = self.get_EXPeffect(p0)

    RQR = np.dot(np.dot(RR, QQ), RR.T)
    try:
        Pt = solve_discrete_lyapunov(TT, RQR)
        Pt = TT @ Pt @ TT.T + RQR
    except Exception:
        return -1000000000.0

    initial_distribution = np.array(self.initial_distribution(p0),
                                    dtype=float).squeeze()
    strans = self.transition(p0)
    sigrq = np.asarray(np.sqrt(self.conditional_variance(p0))).squeeze()
    epsr_m = self.conditional_mean(p0).squeeze()

    nobs = yy.shape[0]
    shocks = np.zeros(nobs)
    for t in range(1, nobs):
        shocks[t] = self.construct_shock(p0, yy[t], yy[t - 1])

    epsr_m = np.atleast_1d(np.array(epsr_m))
    try:
        res = _markov_switching_learning_lik(
            yy, TT, RR, QQ, DD.squeeze(), np.asarray(ZZ, dtype=float), HH,
            EXPeffect_f, Pt, self.obs_ind, self.shock_ind,
            np.asarray(np.atleast_1d(initial_distribution), dtype=float),
            np.asarray(np.atleast_2d(strans), dtype=float),
            self.shock_scaling, np.asarray(epsr_m, dtype=float),
            sigrq**2, shocks)
    except Exception:
        res = -100000000.0, 0.0, 0.0

    loglh, filtered_mean_mat, filtered_prob_mat = res

    if return_filtered:
        # p is the pandas alias used in this module
        Ats = p.DataFrame(filtered_mean_mat, columns=self.state_names,
                          index=self.yy.index)
        xts = p.DataFrame(filtered_prob_mat,
                          columns=['pj%d' % d for d in range(nj)],
                          index=self.yy.index)
        res = p.concat([Ats, xts], axis=1)
        return loglh, res
    else:
        if np.isnan(loglh):
            loglh = -10000000000
        return loglh
def dlyap(A, Q):
    """
    Solve the discrete-time Lyapunov equation.

    Wrapper around scipy.linalg.solve_discrete_lyapunov. Pass a copy of
    input matrices to protect them from modification.
    """
    try:
        return solve_discrete_lyapunov(np.copy(A), np.copy(Q))
    except ValueError:
        return np.full_like(Q, np.inf)
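# A quick sanity check of the dlyap wrapper above (the test matrices are
# arbitrary; assumes numpy and scipy.linalg.solve_discrete_lyapunov are
# imported as in the wrapper itself):
import numpy as np

A = np.array([[0.5, 0.1],
              [0.0, 0.3]])
Q = np.eye(2)
X = dlyap(A, Q)
# X satisfies the defining equation A X A^T - X + Q = 0
assert np.allclose(A @ X @ A.T - X + Q, 0.0)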
def update_secondary_params(self):
    if self.Q is not None:  # Given QRS
        try:
            A_Eigs = linalg.eig(self.A)[0]
        except Exception as e:
            print('Error in eig ({})... Trying again!'.format(e))
            A_Eigs = linalg.eig(self.A)[0]  # Try again!
        isStable = np.max(np.abs(A_Eigs)) < 1
        if isStable:
            self.XCov = linalg.solve_discrete_lyapunov(self.A, self.Q)
            self.G = self.A @ self.XCov @ self.C.T + self.S
            self.YCov = self.C @ self.XCov @ self.C.T + self.R
            self.YCov = (self.YCov + self.YCov.T) / 2
        else:
            self.XCov = np.ones(self.A.shape)
            self.XCov[:] = np.nan
            self.YCov = np.ones(self.A.shape)
            self.YCov[:] = np.nan  # was `self.XCov[:] = np.nan`, a typo

        try:
            self.Pp = linalg.solve_discrete_are(
                self.A.T, self.C.T, self.Q, self.R,
                s=self.S)  # Solves Katayama eq. 5.42a
            self.innovCov = self.C @ self.Pp @ self.C.T + self.R
            innovCovInv = np.linalg.pinv(self.innovCov)
            self.K = (self.A @ self.Pp @ self.C.T + self.S) @ innovCovInv
            self.Kf = self.Pp @ self.C.T @ innovCovInv
            self.Kv = self.S @ innovCovInv
            self.A_KC = self.A - self.K @ self.C
        except Exception:
            print('Could not solve DARE')
            self.Pp = np.empty(self.A.shape)
            self.Pp[:] = np.nan
            self.K = np.empty((self.A.shape[0], self.R.shape[0]))
            self.K[:] = np.nan
            self.Kf = np.array(self.K)
            self.Kv = np.array(self.K)
            self.innovCov = np.empty(self.R.shape)
            self.innovCov[:] = np.nan
            self.A_KC = np.empty(self.A.shape)
            self.A_KC[:] = np.nan

        # (should give the Riccati solution)
        # Proof: Katayama Theorem 5.3 and A.3 in the PVO book
        self.P2 = self.XCov - self.Pp
    elif self.K is not None:  # Given K
        self.XCov = None
        self.G = None
        self.YCov = None
        self.Pp = None
        self.Kf = None
        self.Kv = None
        self.A_KC = self.A - self.K @ self.C
        self.P2 = None
def get_invariant_distribution(self, transition_matrix, noise_covariance):
    A = transition_matrix - np.eye(transition_matrix.shape[0])
    B = np.zeros(transition_matrix.shape[0], dtype='float')
    print("get invariant distribution")
    print(transition_matrix)
    print(noise_covariance)
    s_0 = np.linalg.solve(A, B)
    # note: the solver lives in scipy.linalg, not the scipy top level
    p_0 = scipy.linalg.solve_discrete_lyapunov(transition_matrix,
                                               noise_covariance)
    return NormalVectorDistribution(s_0, np.matrix(p_0, dtype=float))
def _terminalObj(self):
    # Terminal state
    XN = self.X_pred[-1]
    XdN = XN[self.nxs:self.nxs + self.nxd]
    Vt = 0
    # Add the terminal cost
    # Terminal Q
    # NOTE: the operands of the next product were lost to e-mail obfuscation
    # in the source ("[email protected]"); the original expression could not
    # be recovered.
    Q_lyap = [email protected]@[email protected]@self.sys.F
    Q_bar = solve_discrete_lyapunov(self.sys.F, Q_lyap, method='bilinear')
    Vt = csd.dot(XdN**2, csd.diag(Q_bar))
    self.Q_bar = Q_bar
    return Vt
def test_global_stationary():
    # Test for global stationary initialization

    # - 1-dimensional -
    endog = np.zeros(10)
    mod = sarimax.SARIMAX(endog, order=(1, 0, 0), trend='c')

    # no intercept
    intercept = 0
    phi = 0.5
    sigma2 = 2.
    mod.update(np.r_[intercept, phi, sigma2])
    init = Initialization(mod.k_states, 'stationary')
    check_initialization(mod, init, [0], np.diag([0]),
                         np.eye(1) * sigma2 / (1 - phi**2))

    # intercept
    intercept = 1.2
    phi = 0.5
    sigma2 = 2.
    mod.update(np.r_[intercept, phi, sigma2])
    init = Initialization(mod.k_states, 'stationary')
    check_initialization(mod, init, [intercept / (1 - phi)], np.diag([0]),
                         np.eye(1) * sigma2 / (1 - phi**2))

    # - n-dimensional -
    endog = np.zeros(10)
    mod = sarimax.SARIMAX(endog, order=(2, 0, 0), trend='c')

    # no intercept
    intercept = 0
    phi = [0.5, -0.2]
    sigma2 = 2.
    mod.update(np.r_[intercept, phi, sigma2])
    init = Initialization(mod.k_states, 'stationary')
    T = np.array([[0.5, 1], [-0.2, 0]])
    Q = np.diag([sigma2, 0])
    desired_cov = solve_discrete_lyapunov(T, Q)
    check_initialization(mod, init, [0, 0], np.diag([0, 0]), desired_cov)

    # intercept
    intercept = 1.2
    phi = [0.5, -0.2]
    sigma2 = 2.
    mod.update(np.r_[intercept, phi, sigma2])
    init = Initialization(mod.k_states, 'stationary')
    desired_intercept = np.linalg.inv(np.eye(2) - T).dot([intercept, 0])
    check_initialization(mod, init, desired_intercept, np.diag([0, 0]),
                         desired_cov)
def test_univariate(self):
    # Real case
    a = np.array([[0.5]])
    q = np.array([[10.]])
    actual = tools.solve_discrete_lyapunov(a, q)
    desired = solve_discrete_lyapunov(a, q)
    assert_allclose(actual, desired)

    # Complex case (where the Lyapunov equation is taken as a complex
    # function)
    a = np.array([[0.5 + 1j]])
    q = np.array([[10.]])
    actual = tools.solve_discrete_lyapunov(a, q)
    desired = solve_discrete_lyapunov(a, q)
    assert_allclose(actual, desired)

    # Complex case (where the Lyapunov equation is taken as a real
    # function)
    a = np.array([[0.5 + 1j]])
    q = np.array([[10.]])
    actual = tools.solve_discrete_lyapunov(a, q, complex_step=True)
    desired = self.solve_dicrete_lyapunov_direct(a, q, complex_step=True)
    assert_allclose(actual, desired)
def is_controllable(self):
    """Tests if the disc object is controllable.

    Returns:
        bool: Boolean, true if the disc configuration is controllable
        ndarray: Controllability gramian from the discrete Lyapunov equation
    """
    if (self.ready()):
        if (self.B is not None):
            q = np.matmul(self.B, self.B.conj().T)
            x_c = linalg.solve_discrete_lyapunov(self.A.conj().T, q)
            # eigvals returns a complex array; compare the real parts
            controllable = ((linalg.eigvals(x_c).real > 0).sum()
                            == np.shape(self.A)[0])
            return [controllable, x_c]
        else:
            print("Please set B.")
def get_p_init_lyapunov(self, Q):
    pmat = self.precalc_mat[0]
    qmat = self.precalc_mat[1]

    F = np.vstack((pmat[1, 0][:, :-self.neps],
                   qmat[1, 0][:-self.neps, :-self.neps]))
    E = np.vstack((pmat[1, 0][:, -self.neps:],
                   qmat[1, 0][:-self.neps, -self.neps:]))

    Q = E @ Q @ E.T
    p4 = sl.solve_discrete_lyapunov(F[self.dimp:, :],
                                    Q[self.dimp:, self.dimp:])

    return F @ p4 @ F.T + Q
def controllability_gramian(self):
    """Controllability gramian matrix."""
    from scipy import linalg

    B = self.B.evaluate()
    # note: scipy's discrete solver takes +BB^T; passing -BB^T (as a
    # continuous-time version would) negates the gramian
    Q = B @ B.T

    # Find Wc given A @ Wc @ A.T - Wc = -Q
    # Wc > 0 if (A, B) controllable
    Wc = linalg.solve_discrete_lyapunov(self.A.evaluate(), Q)

    # Wc should be symmetric positive semi-definite
    Wc = (Wc + Wc.T) / 2

    return Matrix(Wc)
def observability_gramian(self):
    """Observability gramian matrix."""
    from scipy import linalg

    C = self.C.evaluate()
    # note: as above, the discrete solver takes +C^T C
    Q = C.T @ C

    # Find Wo given A.T @ Wo @ A - Wo = -Q
    # Wo > 0 if (C, A) observable
    Wo = linalg.solve_discrete_lyapunov(self.A.evaluate().T, Q)

    # Wo should be symmetric positive semi-definite
    Wo = (Wo + Wo.T) / 2

    return Matrix(Wo)
def initial_state(TT, RR, C, QQ):
    """
    Compute the initial state x_0 and state covariance matrix P_0
    """
    Ns = TT.shape[0]
    e = np.linalg.eigvals(TT)
    if all(np.abs(e) < 1.0):
        x_0 = np.linalg.inv(np.eye(Ns) - TT) @ C
        P_0 = solve_discrete_lyapunov(TT, RR @ QQ @ RR.T)
    else:
        x_0 = C
        P_0 = 1e+6 * np.eye(Ns)
    return x_0, P_0
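# An illustrative call to initial_state with a made-up stable scalar system:
# for |TT| < 1 the returned covariance solves TT P TT' - P + RR QQ RR' = 0,
# which in one dimension reduces to P_0 = q / (1 - t**2).
import numpy as np

TT = np.array([[0.9]])
RR = np.eye(1)
C = np.zeros(1)
QQ = np.eye(1)
x_0, P_0 = initial_state(TT, RR, C, QQ)
assert np.allclose(x_0, 0.0)
assert np.allclose(P_0, 1.0 / (1 - 0.9**2))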
def is_observable(self):
    """Tests if the disc object is observable.

    Returns:
        bool: Boolean, true if the disc configuration is observable
        ndarray: Observability gramian from the discrete Lyapunov equation
    """
    if (self.ready()):
        if (self.C is not None):
            q = np.matmul(self.C.conj().T, self.C)
            y_o = linalg.solve_discrete_lyapunov(self.A, q)
            y_o = y_o.conj().T
            # eigvals returns a complex array; compare the real parts
            observable = ((linalg.eigvals(y_o).real > 0).sum()
                          == np.shape(self.A)[0])
            return [observable, y_o]
        else:
            print("Please set C.")
def cost_inf_K(A, B, Q, R, K):
    '''
    Arguments:
        State transition matrices (A, B)
        LQR Costs (Q, R)
        Control Gain K

    Outputs:
        cost: Infinite time horizon LQR cost of static gain K
    '''
    cl_map = A + B.dot(K)
    if np.amax(np.abs(LA.eigvals(cl_map))) < (1.0 - 1.0e-6):
        cost = np.trace(LA.solve_discrete_lyapunov(
            cl_map.T, Q + np.dot(K.T, R.dot(K))))
    else:
        cost = float("inf")
    return cost
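# A hedged consistency check for cost_inf_K (all matrices below are made-up
# test values): for the optimal gain from the discrete algebraic Riccati
# equation, the closed-loop Lyapunov cost should equal trace(P), the trace
# of the DARE solution.
import numpy as np
import scipy.linalg as LA

A = np.array([[1.0, 0.1],
              [0.0, 1.0]])
B = np.array([[0.0],
              [0.1]])
Q = np.eye(2)
R = np.eye(1)
P = LA.solve_discrete_are(A, B, Q, R)
# Optimal gain in the u = K x convention used by cost_inf_K
K = -np.linalg.solve(R + B.T @ P @ B, B.T @ P @ A)
assert np.isclose(cost_inf_K(A, B, Q, R, K), np.trace(P))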
def evaluate_F(self, F):
    """
    Given a fixed policy F, with the interpretation u = -F x, this function
    computes the matrix P_F and constant d_F associated with discounted cost
    J_F(x) = x' P_F x + d_F.

    Parameters
    ==========
    F : array_like
        A self.k x self.n array

    Returns
    =======
    P_F : array_like, dtype = float
        Matrix for discounted cost
    d_F : scalar
        Constant for discounted cost
    K_F : array_like, dtype = float
        Worst case policy
    O_F : array_like, dtype = float
        Matrix for discounted entropy
    o_F : scalar
        Constant for discounted entropy
    """
    # == Simplify names == #
    Q, R, A, B, C = self.Q, self.R, self.A, self.B, self.C
    beta, theta = self.beta, self.theta

    # == Solve for policies and costs using agent 2's problem == #
    K_F, neg_P_F = self.F_to_K(F)
    P_F = -neg_P_F
    I = np.identity(self.j)
    H = inv(I - C.T.dot(P_F.dot(C)) / theta)
    d_F = log(det(H))

    # == Compute O_F and o_F == #
    sig = -1.0 / theta
    AO = sqrt(beta) * (A - dot(B, F) + dot(C, K_F))
    O_F = solve_discrete_lyapunov(AO.T, beta * dot(K_F.T, K_F))
    ho = (trace(H - 1) - d_F) / 2.0
    tr = trace(dot(O_F, C.dot(H.dot(C.T))))
    o_F = (ho + beta * tr) / (1 - beta)

    return K_F, P_F, d_F, O_F, o_F
def riccati(self):
    # if (1 - self.epsilon) <= np.linalg.eigh(self.K_xx)[0][-1] <= 1 + self.epsilon:
    #     self.v_x = np.linalg.eigh(self.K_xx)[1][:, -1]
    # Set v_x to the eigenvector of the largest eigenvalue
    # (eigh returns eigenvectors as columns)
    self.v_x = np.linalg.eigh(self.K_xx)[1][:, -1]
    # else:
    #     raise  # Something bad

    # Are these supposed to satisfy K_xx.T(chi_bar) = chi_bar
    # and K_xy.T(chi_bar) = gamma_bar?
    self.chi_bar = np.vstack(self.x_whitened).mean(0)
    self.gamma_bar = np.vstack(self.y_whitened).mean(0)

    # Append a final element to chi_bar and gamma_bar so their shapes
    # match K_xx and K_xy
    self.chi_bar = np.insert(self.chi_bar, len(self.chi_bar), 1)
    self.gamma_bar = np.insert(self.gamma_bar, len(self.gamma_bar), 1)

    self.A = self.K_xx - np.outer(self.v_x, self.chi_bar)
    self.B = self.K_xy - np.outer(self.v_x, self.gamma_bar)
    Q = np.dot(self.B, self.B.T)
    self.O = solve_discrete_lyapunov(self.A, Q)
    return self.O
def controllability_gramian_discrete(mat_a, mat_b):
    """
    Calculates the controllability gramian of a discrete time LTI system.

    Parameters
    ----------
    mat_a : ndarray
        The state matrix.
    mat_b : ndarray
        The input matrix.

    Returns
    -------
    mat_wc : ndarray
        The controllability gramian.
    """
    mat_q = mat_b @ mat_b.T
    mat_wc = linalg.solve_discrete_lyapunov(mat_a, mat_q)
    return mat_wc
def observability_gramian_discrete(mat_a, mat_c):
    """
    Calculates the observability gramian of a discrete time LTI system.

    Parameters
    ----------
    mat_a : ndarray
        The state matrix.
    mat_c : ndarray
        The output matrix.

    Returns
    -------
    mat_wo : ndarray
        The observability gramian.
    """
    mat_q = mat_c.T @ mat_c
    mat_wo = linalg.solve_discrete_lyapunov(mat_a.T, mat_q)
    return mat_wo
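# A quick verification of the two gramian helpers above on an arbitrary
# stable example: the solutions satisfy the defining discrete Lyapunov
# equations A Wc A^T - Wc + B B^T = 0 and A^T Wo A - Wo + C^T C = 0.
import numpy as np

A = np.array([[0.4, 0.2],
              [0.0, 0.5]])
B = np.array([[1.0],
              [0.5]])
C = np.array([[1.0, 0.0]])
Wc = controllability_gramian_discrete(A, B)
Wo = observability_gramian_discrete(A, C)
assert np.allclose(A @ Wc @ A.T - Wc + B @ B.T, 0.0)
assert np.allclose(A.T @ Wo @ A - Wo + C.T @ C, 0.0)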
def test_nested():
    endog = np.zeros(10)
    mod = sarimax.SARIMAX(endog, order=(6, 0, 0))
    phi = [0.5, -0.2, 0.1, 0.0, 0.1, 0.0]
    sigma2 = 2.
    mod.update(np.r_[phi, sigma2])

    # Create the initialization object as a series of nested objects
    init1_1 = Initialization(3)
    init1_1_1 = Initialization(2, 'stationary')
    init1_1_2 = Initialization(1, 'approximate_diffuse',
                               approximate_diffuse_variance=1e9)
    init1_1.set((0, 2), init1_1_1)
    init1_1.set(2, init1_1_2)

    init1_2 = Initialization(3)
    init1_2_1 = Initialization(1, 'known', constant=[1],
                               stationary_cov=[[2.]])
    init1_2.set(0, init1_2_1)
    init1_2_2 = Initialization(1, 'diffuse')
    init1_2.set(1, init1_2_2)
    init1_2_3 = Initialization(1, 'approximate_diffuse')
    init1_2.set(2, init1_2_3)

    init = Initialization(6)
    init.set((0, 3), init1_1)
    init.set((3, 6), init1_2)

    # Check the output
    desired_cov = np.zeros((6, 6))
    T = np.array([[0.5, 1], [-0.2, 0]])
    Q = np.array([[sigma2, 0], [0, 0]])
    desired_cov[:2, :2] = solve_discrete_lyapunov(T, Q)
    desired_cov[2, 2] = 1e9
    desired_cov[3, 3] = 2.
    desired_cov[5, 5] = 1e6
    check_initialization(mod, init, [0, 0, 0, 1, 0, 0],
                         np.diag([0, 0, 0, 0, 1, 0]), desired_cov)
def _trainVARMA(self, data):
    """
    Train correlated ARMA model on white noise ARMA, with Fourier already
    removed
    @ In, data, np.array(np.array(float)), data on which to train with shape
        (# pivot values, # targets)
    @ Out, model, statsmodels.tsa.statespace.varmax.VARMAX, fitted VARMA model
    @ Out, stateDist, Distributions.MultivariateNormal, MVN from which VARMA
        noise is taken
    @ Out, initDist, Distributions.MultivariateNormal, MVN from which VARMA
        initial state is taken
    """
    Pmax = self.Pmax
    Qmax = self.Qmax
    model = sm.tsa.VARMAX(endog=data, order=(Pmax, Qmax))
    self.raiseADebug('... fitting VARMA ...')
    results = model.fit(disp=False, maxiter=1000)
    lenHist, numVars = data.shape

    # Train multivariate normal distributions using covariances; keep them
    # around so we can control the RNG.
    ## It appears "measurement" always has 0 covariance, and so is all zeros
    ## (see _generateVARMASignal); all the noise comes from the stateful
    ## properties.
    stateDist = self._trainMultivariateNormal(
        numVars, np.zeros(numVars), model.ssm['state_cov'.encode('ascii')])

    # Train initial state sampler
    ## Used to pick an initial state for the VARMA by sampling from the
    ## multivariate normal noise and using the AR and MA initial conditions.
    ## Implemented so we can control the RNG internally. Implementation taken
    ## directly from
    ## statsmodels.tsa.statespace.kalman_filter.KalmanFilter.simulate
    ## get mean
    smoother = model.ssm
    mean = np.linalg.solve(
        np.eye(smoother.k_states) -
        smoother['transition'.encode('ascii'), :, :, 0],
        smoother['state_intercept'.encode('ascii'), :, 0])
    ## get covariance
    r = smoother['selection'.encode('ascii'), :, :, 0]
    q = smoother['state_cov'.encode('ascii'), :, :, 0]
    selCov = r.dot(q).dot(r.T)
    cov = solve_discrete_lyapunov(
        smoother['transition'.encode('ascii'), :, :, 0], selCov)
    # FIXME it appears this always results in a lowest-value initial state. Why?
    initDist = self._trainMultivariateNormal(len(mean), mean, cov)
    # NOTE: uncomment this line to get a printed summary of the fit.
    #self.raiseADebug('VARMA model training summary:\n', results.summary())
    return model, stateDist, initDist
def test_multivariate(self):
    # Real case
    a = tools.companion_matrix([1, -0.4, 0.5])
    q = np.diag([10., 5.])
    actual = tools.solve_discrete_lyapunov(a, q)
    desired = solve_discrete_lyapunov(a, q)
    assert_allclose(actual, desired)

    # Complex case (where the Lyapunov equation is taken as a complex
    # function)
    a = tools.companion_matrix([1, -0.4 + 0.1j, 0.5])
    q = np.diag([10., 5.])
    actual = tools.solve_discrete_lyapunov(a, q, complex_step=False)
    desired = self.solve_dicrete_lyapunov_direct(a, q, complex_step=False)
    assert_allclose(actual, desired)

    # Complex case (where the Lyapunov equation is taken as a real
    # function)
    a = tools.companion_matrix([1, -0.4 + 0.1j, 0.5])
    q = np.diag([10., 5.])
    actual = tools.solve_discrete_lyapunov(a, q, complex_step=True)
    desired = self.solve_dicrete_lyapunov_direct(a, q, complex_step=True)
    assert_allclose(actual, desired)
def computeG(A0, A1, d, Q0, tau0, beta, mu):
    """
    Compute government income given mu and return tax revenues and
    policy matrices for the planner.

    Parameters
    ----------
    A0 : float
        A constant parameter for the inverse demand function
    A1 : float
        A constant parameter for the inverse demand function
    d : float
        A constant parameter for quadratic adjustment cost of production
    Q0 : float
        An initial condition for production
    tau0 : float
        An initial condition for taxes
    beta : float
        A constant parameter for discounting
    mu : float
        Lagrange multiplier

    Returns
    -------
    T0 : array(float)
        Present discounted value of government spending
    A : array(float)
        One of the transition matrices for the states
    B : array(float)
        Another transition matrix for the states
    F : array(float)
        Policy rule matrix
    P : array(float)
        Value function matrix
    """
    # Create matrices for solving the Ramsey problem
    R = np.array([[0, -A0/2, 0, 0],
                  [-A0/2, A1/2, -mu/2, 0],
                  [0, -mu/2, 0, 0],
                  [0, 0, 0, d/2]])

    A = np.array([[1, 0, 0, 0],
                  [0, 1, 0, 1],
                  [0, 0, 0, 0],
                  [-A0/d, A1/d, 0, A1/d + 1/beta]])

    B = np.array([0, 0, 1, 1/d]).reshape(-1, 1)

    Q = 0

    # Use LQ to solve the Ramsey problem
    # (note: this rebinds d; the adjustment-cost parameter is not needed
    # after this point)
    lq = LQ(Q, -R, A, B, beta=beta)
    P, F, d = lq.stationary_values()

    # Need y_0 to compute government tax revenue
    P21 = P[3, :3]
    P22 = P[3, 3]
    z0 = np.array([1, Q0, tau0]).reshape(-1, 1)
    u0 = -P22**(-1) * P21.dot(z0)
    y0 = np.vstack([z0, u0])

    # Define A_F and S matrices
    AF = A - B.dot(F)
    S = np.array([0, 1, 0, 0]).reshape(-1, 1).dot(np.array([[0, 0, 1, 0]]))

    # Solves equation (25)
    temp = beta * AF.T.dot(S).dot(AF)
    Omega = solve_discrete_lyapunov(np.sqrt(beta) * AF.T, temp)
    T0 = y0.T.dot(Omega).dot(y0)

    return T0, A, B, F, P
Cit = convert_arb_mat(Ct)
Dit = convert_arb_mat(Dt)

# numpy.mat + arb version
Anit = arb(1) * At
Bnit = arb(1) * Bt
Cnit = arb(1) * Ct
Dnit = arb(1) * Dt

# Solving the Lyapunov equation: W = A W At + B Bt
# Reference solvers
Wscipy = solve_discrete_lyapunov(A, B * Bt)
#ref = A*Wscipy*At + B*Bt
#print("scipy solver \"accuracy\": " + str(np.amax(Wscipy - ref)))
Wschur = dlyap_schur(A, B * Bt)
#ref = A*Wschur*At + B*Bt
#print("Schur solver accuracy: " + str(np.amax(Wschur - ref)))
Wslycot = dlyap_slycot(A, B * Bt)
#ref = A*Wslycot*At + B*Bt
#print("slycot solver \"accuracy\": " + str(np.amax(Wslycot - ref)))

# Home-grown methods
Wnaiv = lyap_naiv(A, B * Bt)
#ref = A*Wnaiv*At + B*Bt
def check_discrete_case(self, a, q):
    x = solve_discrete_lyapunov(a, q)
    assert_array_almost_equal(
        np.dot(np.dot(a, x), a.conj().transpose()) - x, -1.0 * q)
def _compute_multivariate_acovf_from_coefficients(
        coefficients, error_variance, maxlag=None,
        forward_autocovariances=False):
    r"""
    Compute multivariate autocovariances from vector autoregression
    coefficient matrices

    Parameters
    ----------
    coefficients : array or list
        The coefficients matrices. If a list, should be a list of length
        `order`, where each element is an array sized `k_endog` x `k_endog`.
        If an array, should be the coefficient matrices horizontally
        concatenated and sized `k_endog` x `k_endog * order`.
    error_variance : array
        The variance / covariance matrix of the error term. Should be sized
        `k_endog` x `k_endog`.
    maxlag : integer, optional
        The maximum autocovariance to compute. Default is `order`-1. Can be
        zero, in which case it returns the variance.
    forward_autocovariances : boolean, optional
        Whether or not to compute forward autocovariances
        :math:`E(y_t y_{t+j}')`. Default is False, so that backward
        autocovariances :math:`E(y_t y_{t-j}')` are returned.

    Returns
    -------
    autocovariances : list
        A list of the first `maxlag` autocovariance matrices. Each matrix is
        shaped `k_endog` x `k_endog`.

    Notes
    -----
    Computes

    .. math::

        \Gamma(j) = E(y_t y_{t-j}')

    for j = 1, ..., `maxlag`, unless `forward_autocovariances` is specified,
    in which case it computes:

    .. math::

        E(y_t y_{t+j}') = \Gamma(j)'

    Coefficients are assumed to be provided from the VAR model:

    .. math::

        y_t = A_1 y_{t-1} + \dots + A_p y_{t-p} + \varepsilon_t

    Autocovariances are calculated by solving the associated discrete
    Lyapunov equation of the state space representation of the VAR process.
    """
    from scipy import linalg

    # Convert coefficients to a list of matrices, for use in
    # `companion_matrix`; get dimensions
    if type(coefficients) == list:
        order = len(coefficients)
        k_endog = coefficients[0].shape[0]
    else:
        k_endog, order = coefficients.shape
        order //= k_endog

        coefficients = [
            coefficients[:k_endog, i*k_endog:(i+1)*k_endog]
            for i in range(order)
        ]

    if maxlag is None:
        maxlag = order - 1

    # Start with VAR(p): w_{t+1} = phi_1 w_t + ... + phi_p w_{t-p+1} + u_{t+1}
    # Then stack the VAR(p) into a VAR(1) in companion matrix form:
    # z_{t+1} = F z_t + v_t
    companion = companion_matrix(
        [1] + [-coefficients[i] for i in range(order)]
    ).T

    # Compute the error variance matrix for the stacked form: E v_t v_t'
    selected_variance = np.zeros(companion.shape)
    selected_variance[:k_endog, :k_endog] = error_variance

    # Compute the unconditional variance of z_t: E z_t z_t'
    stacked_cov = linalg.solve_discrete_lyapunov(companion, selected_variance)

    # The first (block) row of the variance of z_t gives the first p-1
    # autocovariances of w_t: \Gamma_i = E w_t w_t+i with \Gamma_0 = Var(w_t)
    # Note: these are okay, checked against ArmaProcess autocovariances
    autocovariances = [
        stacked_cov[:k_endog, i*k_endog:(i+1)*k_endog]
        for i in range(min(order, maxlag + 1))
    ]
    for i in range(maxlag - (order - 1)):
        stacked_cov = np.dot(companion, stacked_cov)
        autocovariances += [stacked_cov[:k_endog, -k_endog:]]

    if forward_autocovariances:
        for i in range(len(autocovariances)):
            autocovariances[i] = autocovariances[i].T

    return autocovariances
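# A small hedged example for the autocovariance helper above (input values
# are made up; assumes the companion_matrix helper it references is
# importable): for a VAR(1), the Yule-Walker recursion gives
# Gamma(1) = A_1 Gamma(0), which the output should satisfy.
import numpy as np

A1 = np.array([[0.5, 0.1],
               [0.0, 0.3]])
error_variance = np.eye(2)
acovf = _compute_multivariate_acovf_from_coefficients(
    [A1], error_variance, maxlag=1)
# Gamma(0) is the unconditional variance; Gamma(1) = A1 Gamma(0)
assert np.allclose(acovf[1], A1 @ acovf[0])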
def kalman_filter(model, return_loglike=False):
    # Parameters
    dtype = model.dtype

    # Kalman filter properties
    filter_method = model.filter_method
    inversion_method = model.inversion_method
    stability_method = model.stability_method
    conserve_memory = model.conserve_memory
    tolerance = model.tolerance
    loglikelihood_burn = model.loglikelihood_burn

    # Check for acceptable values
    if not filter_method == FILTER_CONVENTIONAL:
        warn('The pure Python version of the kalman filter only supports the'
             ' conventional Kalman filter')
    implemented_inv_methods = INVERT_NUMPY | INVERT_UNIVARIATE | SOLVE_CHOLESKY
    if not inversion_method & implemented_inv_methods:
        warn('The pure Python version of the kalman filter only performs'
             ' inversion using `numpy.linalg.inv`.')
    if not tolerance == 0:
        warn('The pure Python version of the kalman filter does not check'
             ' for convergence.')

    # Convergence (this implementation does not consider convergence)
    converged = False
    period_converged = 0

    # Dimensions
    nobs = model.nobs
    k_endog = model.k_endog
    k_states = model.k_states
    k_posdef = model.k_posdef

    # Allocate memory for variables
    filtered_state = np.zeros((k_states, nobs), dtype=dtype)
    filtered_state_cov = np.zeros((k_states, k_states, nobs), dtype=dtype)
    predicted_state = np.zeros((k_states, nobs+1), dtype=dtype)
    predicted_state_cov = np.zeros((k_states, k_states, nobs+1), dtype=dtype)
    forecast = np.zeros((k_endog, nobs), dtype=dtype)
    forecast_error = np.zeros((k_endog, nobs), dtype=dtype)
    forecast_error_cov = np.zeros((k_endog, k_endog, nobs), dtype=dtype)
    loglikelihood = np.zeros((nobs+1,), dtype=dtype)

    # Selected state covariance matrix
    selected_state_cov = (
        np.dot(
            np.dot(model.selection[:, :, 0],
                   model.state_cov[:, :, 0]),
            model.selection[:, :, 0].T
        )
    )

    # Initial values
    if model.initialization == 'known':
        initial_state = model._initial_state.astype(dtype)
        initial_state_cov = model._initial_state_cov.astype(dtype)
    elif model.initialization == 'approximate_diffuse':
        initial_state = np.zeros((k_states,), dtype=dtype)
        initial_state_cov = (
            np.eye(k_states).astype(dtype) * model._initial_variance
        )
    elif model.initialization == 'stationary':
        initial_state = np.zeros((k_states,), dtype=dtype)
        initial_state_cov = solve_discrete_lyapunov(
            np.array(model.transition[:, :, 0], dtype=dtype),
            np.array(selected_state_cov[:, :], dtype=dtype),
        )
    else:
        raise RuntimeError('Statespace model not initialized.')

    # Copy initial values to predicted
    predicted_state[:, 0] = initial_state
    predicted_state_cov[:, :, 0] = initial_state_cov
    # print(predicted_state_cov[:, :, 0])

    # Setup indices for possibly time-varying matrices
    design_t = 0
    obs_intercept_t = 0
    obs_cov_t = 0
    transition_t = 0
    state_intercept_t = 0
    selection_t = 0
    state_cov_t = 0

    # Iterate forwards
    time_invariant = model.time_invariant
    for t in range(nobs):
        # Get indices for possibly time-varying arrays
        if not time_invariant:
            if model.design.shape[2] > 1:
                design_t = t
            if model.obs_intercept.shape[1] > 1:
                obs_intercept_t = t
            if model.obs_cov.shape[2] > 1:
                obs_cov_t = t
            if model.transition.shape[2] > 1:
                transition_t = t
            if model.state_intercept.shape[1] > 1:
                state_intercept_t = t
            if model.selection.shape[2] > 1:
                selection_t = t
            if model.state_cov.shape[2] > 1:
                state_cov_t = t

        # Selected state covariance matrix
        if model.selection.shape[2] > 1 or model.state_cov.shape[2] > 1:
            selected_state_cov = (
                np.dot(
                    np.dot(model.selection[:, :, selection_t],
                           model.state_cov[:, :, state_cov_t]),
                    model.selection[:, :, selection_t].T
                )
            )

        # #### Forecast for time t
        # `forecast` $= Z_t a_t + d_t$
        #
        # *Note*: $a_t$ is given from the initialization (for $t = 0$) or
        # from the previous iteration of the filter (for $t > 0$).
        forecast[:, t] = (
            np.dot(model.design[:, :, design_t], predicted_state[:, t]) +
            model.obs_intercept[:, obs_intercept_t]
        )

        # *Intermediate calculation* (used just below and then once more)
        # `tmp1` array used here, dimension $(m \times p)$
        # $\\#_1 = P_t Z_t'$
        # $(m \times p) = (m \times m) (p \times m)'$
        tmp1 = np.dot(predicted_state_cov[:, :, t],
                      model.design[:, :, design_t].T)

        # #### Forecast error for time t
        # `forecast_error` $\equiv v_t = y_t -$ `forecast`
        forecast_error[:, t] = model.obs[:, t] - forecast[:, t]

        # #### Forecast error covariance matrix for time t
        # $F_t \equiv Z_t P_t Z_t' + H_t$
        forecast_error_cov[:, :, t] = (
            np.dot(model.design[:, :, design_t], tmp1) +
            model.obs_cov[:, :, obs_cov_t]
        )

        # Store the inverse
        if k_endog == 1 and inversion_method & INVERT_UNIVARIATE:
            forecast_error_cov_inv = 1.0 / forecast_error_cov[0, 0, t]
            determinant = forecast_error_cov[0, 0, t]
            tmp2 = forecast_error_cov_inv * forecast_error[:, t]
            tmp3 = forecast_error_cov_inv * model.design[:, :, design_t]
        elif inversion_method & SOLVE_CHOLESKY:
            U, info = lapack.dpotrf(forecast_error_cov[:, :, t])
            determinant = np.prod(U.diagonal())**2
            tmp2, info = lapack.dpotrs(U, forecast_error[:, t])
            tmp3, info = lapack.dpotrs(U, model.design[:, :, design_t])
        else:
            forecast_error_cov_inv = np.linalg.inv(
                forecast_error_cov[:, :, t])
            determinant = np.linalg.det(forecast_error_cov[:, :, t])
            tmp2 = np.dot(forecast_error_cov_inv, forecast_error[:, t])
            tmp3 = np.dot(forecast_error_cov_inv,
                          model.design[:, :, design_t])

        # #### Filtered state for time t
        # $a_{t|t} = a_t + P_t Z_t' F_t^{-1} v_t$
        # $a_{t|t} = 1.0 * \\#_1 \\#_2 + 1.0 a_t$
        filtered_state[:, t] = (
            predicted_state[:, t] + np.dot(tmp1, tmp2)
        )

        # #### Filtered state covariance for time t
        # $P_{t|t} = P_t - P_t Z_t' F_t^{-1} Z_t P_t$
        # $P_{t|t} = P_t - \\#_1 \\#_3 P_t$
        filtered_state_cov[:, :, t] = (
            predicted_state_cov[:, :, t] -
            np.dot(
                np.dot(tmp1, tmp3),
                predicted_state_cov[:, :, t]
            )
        )

        # #### Loglikelihood
        loglikelihood[t] = -0.5 * (
            np.log((2*np.pi)**k_endog * determinant) +
            np.dot(forecast_error[:, t], tmp2)
        )

        # #### Predicted state for time t+1
        # $a_{t+1} = T_t a_{t|t} + c_t$
        predicted_state[:, t+1] = (
            np.dot(model.transition[:, :, transition_t],
                   filtered_state[:, t]) +
            model.state_intercept[:, state_intercept_t]
        )

        # #### Predicted state covariance matrix for time t+1
        # $P_{t+1} = T_t P_{t|t} T_t' + Q_t^*$
        predicted_state_cov[:, :, t+1] = (
            np.dot(
                np.dot(model.transition[:, :, transition_t],
                       filtered_state_cov[:, :, t]),
                model.transition[:, :, transition_t].T
            ) + selected_state_cov
        )

        # Enforce symmetry of the predicted covariance matrix
        predicted_state_cov[:, :, t+1] = (
            predicted_state_cov[:, :, t+1] +
            predicted_state_cov[:, :, t+1].T
        ) / 2

    if return_loglike:
        return np.array(loglikelihood)
    else:
        kwargs = dict(
            (k, v) for k, v in locals().items()
            if k in _kalman_filter._fields
        )
        kwargs['model'] = _statespace(
            initial_state=initial_state, initial_state_cov=initial_state_cov
        )
        kfilter = _kalman_filter(**kwargs)
        return FilterResults(model, kfilter)
def test_mixed_stationary():
    # More specific tests when one or more blocks are initialized as
    # stationary
    endog = np.zeros(10)
    mod = sarimax.SARIMAX(endog, order=(2, 1, 0))
    phi = [0.5, -0.2]
    sigma2 = 2.
    mod.update(np.r_[phi, sigma2])

    init = Initialization(mod.k_states)
    init.set(0, 'diffuse')
    init.set((1, 3), 'stationary')
    desired_cov = np.zeros((3, 3))
    T = np.array([[0.5, 1], [-0.2, 0]])
    Q = np.diag([sigma2, 0])
    desired_cov[1:, 1:] = solve_discrete_lyapunov(T, Q)
    check_initialization(mod, init, [0, 0, 0], np.diag([1, 0, 0]),
                         desired_cov)

    init.clear()
    init.set(0, 'diffuse')
    init.set(1, 'stationary')
    init.set(2, 'approximate_diffuse')
    T = np.array([[0.5]])
    Q = np.diag([sigma2])
    desired_cov = np.diag([0, np.squeeze(solve_discrete_lyapunov(T, Q)),
                           1e6])
    check_initialization(mod, init, [0, 0, 0], np.diag([1, 0, 0]),
                         desired_cov)

    init.clear()
    init.set(0, 'diffuse')
    init.set(1, 'stationary')
    init.set(2, 'stationary')
    desired_cov[2, 2] = 0
    check_initialization(mod, init, [0, 0, 0], np.diag([1, 0, 0]),
                         desired_cov)

    # Test with a VAR model
    endog = np.zeros((10, 2))
    mod = varmax.VARMAX(endog, order=(1, 0), )
    intercept = [1.5, -0.1]
    transition = np.array([[0.5, -0.2], [0.1, 0.8]])
    cov = np.array([[1.2, -0.4], [-0.4, 0.4]])
    tril = np.tril_indices(2)
    params = np.r_[intercept, transition.ravel(),
                   np.linalg.cholesky(cov)[tril]]
    mod.update(params)

    # > stationary, global
    init = Initialization(mod.k_states, 'stationary')
    desired_intercept = np.linalg.solve(np.eye(2) - transition, intercept)
    desired_cov = solve_discrete_lyapunov(transition, cov)
    check_initialization(mod, init, desired_intercept, np.diag([0, 0]),
                         desired_cov)

    # > diffuse, global
    init.set(None, 'diffuse')
    check_initialization(mod, init, [0, 0], np.eye(2), np.diag([0, 0]))

    # > stationary, individually
    init.unset(None)
    init.set(0, 'stationary')
    init.set(1, 'stationary')
    a, Pinf, Pstar = init(model=mod)
    desired_intercept = [intercept[0] / (1 - transition[0, 0]),
                         intercept[1] / (1 - transition[1, 1])]
    desired_cov = np.diag([cov[0, 0] / (1 - transition[0, 0]**2),
                           cov[1, 1] / (1 - transition[1, 1]**2)])
    check_initialization(mod, init, desired_intercept, np.diag([0, 0]),
                         desired_cov)