def find_rank_loop(sdpRelaxation, x_mat, base_level=0):
    """Helper function to detect rank loop in the solution matrix.

    :param sdpRelaxation: The SDP relaxation.
    :type sdpRelaxation: :class:`ncpol2sdpa.SdpRelaxation`.
    :param x_mat: The solution of the moment matrix.
    :type x_mat: :class:`numpy.array`.
    :param base_level: Optional parameter for specifying the lower level
                       relaxation against which the rank loop should be
                       tested.
    :type base_level: int.

    :returns: list of int -- the ranks of the solution matrix in order of
              increasing degree.
    """
    from numpy.linalg import matrix_rank
    ranks = []
    if sdpRelaxation.hierarchy != "npa":
        raise Exception("The detection of rank loop is only implemented for "
                        "the NPA hierarchy")
    if base_level == 0:
        levels = range(1, sdpRelaxation.level + 1)
    else:
        levels = [base_level]
    for level in levels:
        base_monomials = \
            pick_monomials_up_to_degree(sdpRelaxation.monomial_sets[0], level)
        ranks.append(matrix_rank(x_mat[:len(base_monomials),
                                       :len(base_monomials)]))
    if x_mat.shape != (len(base_monomials), len(base_monomials)):
        ranks.append(matrix_rank(x_mat))
    return ranks
def binaryMatrixTest(n, e, M, Q):
    # e is the binary sequence
    # convert the input string into an epsilon vector
    epsilon = np.array([int(x) for x in list(e)])
    print(epsilon)
    N = n // (Q * M)
    slice = Q * M  # block size chosen for partitioning the binary sequence
    Ranks = []
    # collect the rank of the matrix formed by each block of the partition
    Ranks += [matrix_rank(np.reshape(epsilon[0:slice], (M, Q)))]
    for k in range(1, N):
        # original slicing was off by one (k*slice + 1); blocks must tile
        # the sequence exactly
        Ranks += [matrix_rank(np.reshape(epsilon[k * slice:(k + 1) * slice],
                                         (M, Q)))]
    # print(Ranks)
    FM = 0
    FM_1 = 0
    # count the ranks equal to M and to M - 1
    for i in range(len(Ranks)):
        if Ranks[i] == M:
            FM += 1
        if Ranks[i] == M - 1:
            FM_1 += 1
    ki_carre = ((FM - 0.2888 * N) ** 2) / (0.2888 * N) \
        + ((FM_1 - 0.5776 * N) ** 2) / (0.5776 * N) \
        + ((N - FM - FM_1 - 0.1336 * N) ** 2) / (0.1336 * N)
    P_value = np.exp((-1) * ki_carre / 2)
    # print(P_value)
    return P_value
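# A hedged demo of binaryMatrixTest above on a pseudo-random bit string,
# assuming numpy (np) and numpy.linalg.matrix_rank are imported at module
# level as the function requires. 38912 bits gives 38 blocks of 32x32, the
# minimum NIST SP 800-22 recommends for this test; P-values well above 0.01
# are consistent with randomness.
import numpy as np

rng = np.random.RandomState(0)
bits = ''.join(str(b) for b in rng.randint(0, 2, 38912))
print(binaryMatrixTest(38912, bits, 32, 32))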
def test():
    from numpy import array
    from numpy.linalg import matrix_rank
    from mfpy.materials.linearelastic import LinearElastic

    # Test create
    nodes = [array((0, 0)), array((1, 0)), array((1, 1)), array((0, 1))]
    enm = [0, 1, 2, 3]
    mat = LinearElastic(lmbda=0, mu=1.0, rho=1)
    elem = QuadRI(nodes, enm, mat, thickness=1)

    # Test internal force
    u = array([-1, 0, +1, 0, -1, 0, +1, 0])
    fint = elem.calc_internal_force([], u)
    K = elem.calc_linear_stiffness([], u)
    print("Rank =", matrix_rank(K))
    print(fint)
    print(K.dot(u))

    elem = Quad(nodes, enm, mat, thickness=1)
    fint = elem.calc_internal_force([], u)
    K = elem.calc_linear_stiffness([], u)
    print("Rank =", matrix_rank(K))
    print(fint)
    print(K.dot(u))

    # Test lumped mass matrix
    M = elem.calc_lumped_mass()
def test_reduced_rank():
    # Test matrices with reduced rank
    rng = np.random.RandomState(20120714)
    for i in range(100):
        # Make a rank deficient matrix
        X = rng.normal(size=(40, 10))
        X[:, 0] = X[:, 1] + X[:, 2]
        # Assert that matrix_rank detected deficiency
        assert_equal(matrix_rank(X), 9)
        X[:, 3] = X[:, 4] + X[:, 5]
        assert_equal(matrix_rank(X), 8)
def read_train_images(_data, _class):
    # Read each image
    for i in _data:
        img = cv2.imread(i, 0)
        img_flat = img.flatten()
        # Build our train_array list
        train_array.append(img_flat)

    # Calculate the mean of the set
    mean = [sum(i) for i in zip(*train_array)]
    length = len(_data)
    mean = [float(i) / length for i in mean]

    # Subtract the mean from each image (this can be negative, so Python
    # transforms each vector to int)
    # This is Omega
    list_norm_train = [i - mean for i in train_array]

    # Transform the list to an array
    norm_train_array = numpy.asarray(list_norm_train)

    # Compute the covariance matrix of the array
    cov = numpy.dot(norm_train_array, norm_train_array.T)

    # This line will help us to define how many eigenvectors we can
    # calculate. # of eigs = rank - 1
    print(matrix_rank(cov))

    eigval, eigvec = lin.eigs(cov, 38)
    print("size of the eigen vector " + str(len(eigvec[:, 0])))
    print("size of the eigen vector matrix " + str(len(eigvec)))
    print("number of eigenvalues " + str(len(eigval)))

    # Each eigvec[:, i] is an eigenvector of size 40
    # We need to find u_i = A * eigvec[:, i]
    A = norm_train_array.T
    # use every eigenvector column (the original range had a misplaced
    # parenthesis)
    for i in range(len(eigvec[0])):
        u.append(numpy.dot(A, eigvec[:, i]))
    for i, val in enumerate(u):
        u[i] = u[i] / numpy.linalg.norm(u[i])

    # We're only keeping 75% of the number of eigenvectors u[i]
    # This will correspond to the largest eigenvalues
    for i in range(1, (int(0.75 * len(u)) + 4)):
        u_reduced.append(u[i])
    # u_reduced[i] are called Eigenfaces

    # Now let's represent each face in this basis
    sigma = []
    omega = []
    for i, val in enumerate(list_norm_train):
        for j, val in enumerate(u_reduced):
            w = numpy.dot(u_reduced[j].T, list_norm_train[i])
            sigma.append(w.real)
        sigma_array = numpy.asarray(sigma)
        omega.append(sigma_array)

    # print(omega)
    print("size of eigenvector " + str(len(omega[0])))
    print("size of omega " + str(len(omega)))
    # return eigval, eigvec
    return omega
def reachable_form(xsys):
    """Convert a system into reachable canonical form

    Parameters
    ----------
    xsys : StateSpace object
        System to be transformed, with state `x`

    Returns
    -------
    zsys : StateSpace object
        System in reachable canonical form, with state `z`
    T : matrix
        Coordinate transformation: z = T * x
    """
    # Check to make sure we have a SISO system
    if not issiso(xsys):
        raise ControlNotImplemented(
            "Canonical forms for MIMO systems not yet supported")

    # Create a new system, starting with a copy of the old one
    zsys = StateSpace(xsys)

    # Generate the system matrices for the desired canonical form
    zsys.B = zeros(shape(xsys.B))
    zsys.B[0, 0] = 1.0
    zsys.A = zeros(shape(xsys.A))
    Apoly = poly(xsys.A)                # characteristic polynomial
    for i in range(0, xsys.states):
        zsys.A[0, i] = -Apoly[i+1] / Apoly[0]
        if (i+1 < xsys.states):
            zsys.A[i+1, i] = 1.0

    # Compute the reachability matrices for each set of states
    Wrx = ctrb(xsys.A, xsys.B)
    Wrz = ctrb(zsys.A, zsys.B)

    if matrix_rank(Wrx) != xsys.states:
        raise ValueError("System not controllable to working precision.")

    # Transformation from one form to another
    Tzx = solve(Wrx.T, Wrz.T).T  # matrix right division, Tzx = Wrz * inv(Wrx)

    # Check to make sure inversion was OK.  Note that since we are inverting
    # Wrx and we already checked its rank, this exception should never occur
    if matrix_rank(Tzx) != xsys.states:    # pragma: no cover
        raise ValueError("Transformation matrix singular to working precision.")

    # Finally, compute the output matrix
    zsys.C = solve(Tzx.T, xsys.C.T).T  # matrix right division, zsys.C = xsys.C * inv(Tzx)

    return zsys, Tzx
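# A minimal usage sketch for reachable_form, assuming an older python-control
# where StateSpace exposes a .states attribute, as the function above expects.
# The 2-state SISO system below is a made-up example for illustration.
from control import StateSpace

A = [[0.0, 1.0], [-2.0, -3.0]]
B = [[0.0], [1.0]]
C = [[1.0, 0.0]]
D = [[0.0]]
sys = StateSpace(A, B, C, D)
zsys, T = reachable_form(sys)
# In reachable canonical form the first row of A carries the (negated)
# characteristic-polynomial coefficients and B is the first unit vector.
print(zsys.A)
print(zsys.B)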
def fftconvolve(in1, in2):
    """Convolve two N-dimensional arrays using FFT.

    This is a modified version of the scipy.signal.fftconvolve. The new
    feature is derived from the fftconvolve algorithm used in the IDL
    package.

    Parameters
    ----------
    in1 : array_like
        First input.
    in2 : array_like
        Second input. Should have the same number of dimensions as `in1`;
        if sizes of `in1` and `in2` are not equal then `in1` has to be the
        larger array.

    Returns
    -------
    out : array
        An N-dimensional array containing a subset of the discrete linear
        convolution of `in1` with `in2`.
    """
    in1 = asarray(in1)
    in2 = asarray(in2)

    if matrix_rank(in1) == matrix_rank(in2) == 0:  # scalar inputs
        return in1 * in2
    elif not in1.ndim == in2.ndim:
        raise ValueError("in1 and in2 should have the same rank")
    elif in1.size == 0 or in2.size == 0:  # empty arrays
        return array([])

    s1 = np.array(in1.shape)
    s2 = np.array(in2.shape)
    # np.complex is a removed alias; np.complexfloating is the correct
    # abstract dtype for this check
    complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                      np.issubdtype(in2.dtype, np.complexfloating))

    fsize = s1
    fslice = tuple([slice(0, int(sz)) for sz in fsize])
    if not complex_result:
        ret = irfftn(rfftn(in1, fsize) * rfftn(in2, fsize),
                     fsize)[fslice].copy()
        ret = ret.real
    else:
        ret = ifftn(fftn(in1, fsize) * fftn(in2, fsize))[fslice].copy()

    shift = array([int(floor(fsize[0] / 2.0)), int(floor(fsize[1] / 2.0))])
    ret = roll(roll(ret, -shift[0], axis=0), -shift[1], axis=1)
    return ret
def geneigh(A, B, tol=1e-12):
    """
    Solves the generalized eigenvalue problem also in the case where A and B
    share a common null-space. The eigenvalues corresponding to the
    null-space are given a NaN value. The null-space is defined with the
    tolerance tol.
    """
    # first check if there is a null-space issue
    if lg.matrix_rank(B, tol) == np.shape(B)[0]:
        return eigh(A, B)
    # first diagonalize the overlap matrix B
    Be, Bv = eigh(B)
    # rewrite the A matrix in the B-matrix eigenspace
    At = np.dot(np.conj(Bv.T), np.dot(A, Bv))
    Bt = np.diag(Be)
    # detect the shared null-space, given by the first n null eigenvalues of B
    try:
        idx = next(i for i, v in enumerate(Be) if v > tol)
    except StopIteration:
        raise(RuntimeError('geneigh: Rank of B < B.shape[0] but null-space '
                           'could not be found!'))
    # check that the B matrix null-space is shared by A.
    m = np.amax(abs(At[0:idx, :].flatten()))
    if m > tol:
        warnings.warn('Maximum non-diagonal element in A written in B '
                      'null-space is bigger than the tolerance \'' + str(tol)
                      + '\'.', UserWarning)
    # diagonalize the non-null-space part of the problem
    Et, Vt = eigh(At[idx:, idx:], Bt[idx:, idx:])
    # define Ut, the change of basis in the non-truncated space
    Ut = np.zeros(np.shape(A), A.dtype)
    Ut[0:idx, 0:idx] = np.eye(idx)
    Ut[idx:, idx:] = Vt
    U = np.dot(Bv, Ut)
    E = np.concatenate((float('NaN') * np.ones(idx), Et))
    return E, U
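# A small sanity check for geneigh, assuming numpy (np), numpy.linalg (lg),
# scipy.linalg.eigh, and warnings are in scope as the function above expects.
# A and B below share a one-dimensional null-space, so the first eigenvalue
# comes back NaN and the remaining two are finite.
import numpy as np

B = np.diag([0.0, 1.0, 2.0])
A = np.diag([0.0, 3.0, 4.0])
E, U = geneigh(A, B)
print(E)  # NaN for the shared null-space, then 2.0 and 3.0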
def f_test(V, R, beta, r, df_d):
    """Arbitrary F test.

    Args:
        V (array): K-by-K variance-covariance matrix.
        R (array): K-by-K test matrix.
        beta (array): Length-K vector of coefficient estimates.
        r (array): Length-K vector of null hypotheses.
        df_d (int): Denominator degrees of freedom.

    Returns:
        tuple: A tuple containing:
            - **F** (float): F-stat.
            - **pF** (float): p-score for ``F``.
    """
    Rbr = (R.dot(beta) - r)
    if Rbr.ndim == 1:
        Rbr = Rbr.reshape(-1, 1)
    middle = la.inv(R.dot(V).dot(R.T))
    df_n = matrix_rank(R)
    # Can't just squeeze, or we get a 0-d array
    F = (Rbr.T.dot(middle).dot(Rbr) / df_n).flatten()[0]
    pF = 1 - stats.f.cdf(F, df_n, df_d)
    return F, pF
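# A hedged usage example for f_test: a joint test of beta = 0 with K = 3
# coefficients. V, beta, and df_d are made-up values; the sketch assumes
# numpy.linalg is bound to `la`, scipy.stats to `stats`, and matrix_rank is
# in scope, as the function above requires. R is taken as the identity so
# that R.dot(V).dot(R.T) stays invertible.
import numpy as np

V = np.diag([0.5, 0.2, 0.1])      # toy variance-covariance matrix
R = np.eye(3)                     # K-by-K test matrix: test all coefficients
beta = np.array([1.0, 0.4, -0.3])
r = np.zeros(3)
F, pF = f_test(V, R, beta, r, df_d=100)
print(F, pF)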
def glm_diagnostics(B_4d, design, data_4d):
    """
    Return a tuple of the MRSS in 3 dimensions, fitted values in 4
    dimensions, and residuals in 4 dimensions.

    Parameters
    ----------
    B_4d: numpy array of 4 dimensions
        The estimated coefficients
    design: numpy array
        The design matrix used to get the estimated coefficients
    data_4d: numpy array of 4 dimensions
        The corresponding image data

    Returns
    -------
    diagnostics : tuple
        MRSS (3d), fitted values (4d), and residuals (4d).
    """
    B_2d = np.reshape(B_4d, (-1, B_4d.shape[-1])).T
    data_2d = np.reshape(data_4d, (-1, data_4d.shape[-1]))

    fitted = design.dot(B_2d)
    residuals = data_2d.T - fitted
    df = design.shape[0] - npl.matrix_rank(design)
    MRSS = (residuals**2).sum(0) / df

    MRSS_3d = np.reshape(MRSS.T, data_4d.shape[:-1])
    fitted_4d = np.reshape(fitted.T, data_4d.shape)
    residuals_4d = np.reshape(residuals.T, data_4d.shape)
    return MRSS_3d, fitted_4d, residuals_4d
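# A hedged synthetic check of glm_diagnostics on random 4-D data, assuming
# numpy (np) and numpy.linalg (npl) are in scope as the function above uses.
# The betas are fit first with a pseudoinverse, exactly as the related
# estimation snippets in this collection do.
import numpy as np
import numpy.linalg as npl

data_4d = np.random.rand(3, 3, 3, 20)
design = np.column_stack((np.ones(20), np.arange(20)))
B = npl.pinv(design).dot(np.reshape(data_4d, (-1, 20)).T)
B_4d = np.reshape(B.T, (3, 3, 3, 2))
MRSS_3d, fitted_4d, residuals_4d = glm_diagnostics(B_4d, design, data_4d)
print(MRSS_3d.shape, fitted_4d.shape)   # (3, 3, 3) (3, 3, 3, 20)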
def t_stat(self):
    """
    betas, t statistic and significance test given data, design matrix,
    contrast

    This is OLS estimation; we assume the errors to have independent and
    identical normal distributions around zero for each $i$ in
    $\epsilon_i$ (i.i.d).
    """
    if self.design is None:
        self.get_design_matrix()
    if self.t_values is None:
        y = self.data.T
        X = self.design
        c = [0, 0, 1]
        c = np.atleast_2d(c).T
        beta = npl.pinv(X).dot(y)
        fitted = X.dot(beta)
        errors = y - fitted
        RSS = (errors**2).sum(axis=0)
        df = X.shape[0] - npl.matrix_rank(X)
        MRSS = RSS / df
        SE = np.sqrt(MRSS * c.T.dot(npl.pinv(X.T.dot(X)).dot(c)))
        try:
            SE[SE == 0] = np.amin(SE[SE != 0])
        except ValueError:
            pass
        t = c.T.dot(beta) / SE
        self.t_values = abs(t[0])
        self.t_indices = np.array(self.t_values).argsort()[::-1][:self.t_values.size]
    return self.t_indices
def significant(X, Y, beta):
    """
    Calculates t statistics for the first two entries of the given beta
    estimates. In particular, this function calculates t values for beta
    gain and beta loss for a single voxel.

    Parameters:
    -----------
    X: Design matrix
    Y: Data matrix for a single voxel
    beta: beta gain/loss estimates from OLS regression of a single voxel,
          1-d array of length = 2

    Returns:
    --------
    t1, t2: t value for beta gain, t value for beta loss, type: double

    Example use for ith voxel: significant(X, Y[:,i], beta[:,i])
    """
    y_hat = X.dot(beta)
    residuals = Y - y_hat
    RSS = np.sum(residuals ** 2)
    df = X.shape[0] - npl.matrix_rank(X)
    MRSS = RSS / df
    s2 = MRSS
    v_cov = s2 * npl.inv(X.T.dot(X))
    numerator1 = beta[0]
    denominator1 = np.sqrt(v_cov[0, 0])
    t1 = numerator1 / denominator1
    numerator2 = beta[1]
    denominator2 = np.sqrt(v_cov[1, 1])
    t2 = numerator2 / denominator2
    return t1, t2
def t_stat(data, X_matrix):
    """
    Return the estimated betas, t-values, degrees of freedom, and p-values
    for the glm_multi regression

    Parameters
    ----------
    data_4d: numpy array of 4 dimensions
        The image data of one subject, one run
    X_matrix: numpy array
        The design matrix for glm_multi

    Note that the fourth dimension of `data_4d` (time or the number of
    volumes) must be the same as the number of rows that X has.

    Returns
    -------
    beta: estimated beta values
    t: t-values of the betas
    df: degrees of freedom
    p: p-values corresponding to the t-values and degrees of freedom
    """
    beta = glm_beta(data, X_matrix)

    # Calculate the parameters - b hat
    beta = np.reshape(beta, (-1, beta.shape[-1])).T

    fitted = X_matrix.dot(beta)
    # Residual error
    y = np.reshape(data, (-1, data.shape[-1]))
    errors = y.T - fitted
    # Residual sum of squares
    RSS = (errors**2).sum(axis=0)
    df = X_matrix.shape[0] - npl.matrix_rank(X_matrix)
    # Mean residual sum of squares
    MRSS = RSS / df
    # calculate bottom half of t statistic
    Cov_beta = npl.pinv(X_matrix.T.dot(X_matrix))
    SE = np.zeros(beta.shape)
    for i in range(X_matrix.shape[-1]):
        c = np.zeros(X_matrix.shape[-1])
        c[i] = 1
        c = np.atleast_2d(c).T
        SE[i, :] = np.sqrt(MRSS *
                           c.T.dot(npl.pinv(X_matrix.T.dot(X_matrix)).dot(c)))

    zeros = np.where(SE == 0)
    SE[zeros] = 1
    t = beta / SE
    # zero out the entries where SE was zero (t[:, zeros] would mis-index)
    t[zeros] = 0

    # Get p value for t value using CDF of t distribution
    ltp = t_dist.cdf(abs(t), df)
    p = 1 - ltp  # upper tail

    return beta.T, t, df, p
def __LDL__(A, combined=False):
    import numpy.linalg as nplinalg
    assert A.shape[0] == A.shape[1]
    L = np.zeros(A.shape)
    D = np.zeros(A.shape)
    n = A.shape[0]
    for i in range(n):
        for j in range(n):
            if i == j:
                D[i, i] = A[i, i]
                for k in range(i):
                    D[i, i] -= (L[i, k] ** 2) * D[k, k]
                L[i, i] = 1
            elif j <= i:
                L[i, j] = A[i, j]
                for k in range(j):
                    L[i, j] -= L[i, k] * D[k, k] * L[j, k]
                L[i, j] *= 1 / D[j, j]
    if combined:
        return np.dot(L, np.sqrt(D[:, :nplinalg.matrix_rank(A)]))
    else:
        return L, D
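# A quick check of __LDL__ on a small symmetric positive-definite matrix,
# assuming numpy is imported as np as the function above uses: the
# reconstruction L D L^T should reproduce A up to floating-point error.
import numpy as np

A = np.array([[4.0, 2.0, 1.0],
              [2.0, 3.0, 0.5],
              [1.0, 0.5, 2.0]])
L, D = __LDL__(A)
print(np.allclose(L.dot(D).dot(L.T), A))  # True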
def comp(M):
    """Returns a basis for the space orthogonal to the range of M"""
    I = eye(M.shape[0])
    Q, R = qr(concatenate((M, I), axis=1))
    return Q[:, matrix_rank(M):]
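# A short sketch exercising comp, assuming eye, qr, concatenate, and
# matrix_rank are in scope (e.g. from numpy and numpy.linalg) as the
# function above requires. M has rank 1, so the complement has 2 columns.
import numpy as np

M = np.array([[1.0], [2.0], [3.0]])
N = comp(M)
print(N.shape)                        # (3, 2)
print(np.allclose(N.T.dot(M), 0.0))   # columns are orthogonal to range(M)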
def Regression_Calculation():
    Y = matrix(Get_Y_Matrix(con), dtype=float)
    X = matrix(Get_X_Matrix(con), dtype=float)
    if matrix_rank(X) == 32:
        return (X.T * X).I * X.T * Y.T
    else:
        return numpy.linalg.pinv(X) * Y.T
def test_glm():
    # Read in the image data.
    img = nib.load(pathtoclassdata + "ds114_sub009_t2r1.nii")
    data = img.get_data()[..., 4:]
    # Read in the convolutions.
    convolved = np.loadtxt(pathtoclassdata + "ds114_sub009_t2r1_conv.txt")[4:]
    # Create design matrix.
    actual_design = np.ones((len(convolved), 2))
    actual_design[:, 1] = convolved

    # Calculate betas, copied from the exercise.
    data_2d = np.reshape(data, (-1, data.shape[-1]))
    actual_B = npl.pinv(actual_design).dot(data_2d.T)
    actual_B_4d = np.reshape(actual_B.T, img.shape[:-1] + (-1,))

    # Run function.
    exp_B_4d, exp_design = glm(data, convolved)
    assert_almost_equal(actual_B_4d, exp_B_4d)
    assert_almost_equal(actual_design, exp_design)

    # Pick a single voxel to check diagnostics.
    # Calculate actual fitted values, residuals, and MRSS of voxel.
    actual_fitted = actual_design.dot(actual_B_4d[42, 32, 19])
    actual_residuals = data[42, 32, 19] - actual_fitted
    actual_MRSS = np.sum(actual_residuals**2) / (actual_design.shape[0] -
                                                 npl.matrix_rank(actual_design))
    # Calculate using glm_diagnostics function.
    exp_MRSS, exp_fitted, exp_residuals = glm_diagnostics(exp_B_4d,
                                                          exp_design, data)
    assert_almost_equal(actual_fitted, exp_fitted[42, 32, 19])
    assert_almost_equal(actual_residuals, exp_residuals[42, 32, 19])
    assert_almost_equal(actual_MRSS, exp_MRSS[42, 32, 19])
def test_glm_mrss():
    img = nib.load(project_path +
                   'data/ds114/sub009/BOLD/task002_run001/ds114_sub009_t2r1.nii')
    data_int = img.get_data()
    data = data_int.astype(float)
    convolved1 = np.loadtxt(project_path +
                            'data/ds114/sub009/behav/task002_run001/ds114_sub009_t2r1_conv.txt')
    X_matrix = np.ones((len(convolved1), 2))
    X_matrix[:, 1] = convolved1

    data_2d = np.reshape(data, (-1, data.shape[-1]))
    B = npl.pinv(X_matrix).dot(data_2d.T)
    B_4d = np.reshape(B.T, img.shape[:-1] + (-1,))
    test_B_4d = glm_beta(data, X_matrix)

    # Pick a single voxel to check the mrss function.
    # Calculate actual fitted values, residuals, and MRSS of voxel.
    fitted = X_matrix.dot(B_4d[12, 22, 10])
    residuals = data[12, 22, 10] - fitted
    MRSS = np.sum(residuals**2) / (X_matrix.shape[0] -
                                   npl.matrix_rank(X_matrix))
    # Calculate using glm_mrss function.
    test_MRSS, test_fitted, test_residuals = glm_mrss(test_B_4d, X_matrix,
                                                      data)
    assert_almost_equal(MRSS, test_MRSS[12, 22, 10])
    assert_almost_equal(fitted, test_fitted[12, 22, 10])
    assert_almost_equal(residuals, test_residuals[12, 22, 10])
def glm_util(design_matrix, response):
    """
    Fits a generalized linear model to a set of training data.

    Parameters
    ----------
    design_matrix : np.ndarray
        2-D array with rows that correspond to observations and columns
        that correspond to regressors. Let the shape of design_matrix be
        (N, P)
    response : np.ndarray
        1- or 2-D array representing the response variable. Let the shape
        of response be (N, X)

    Return
    ------
    regression_coefficients : np.ndarray
        Array of shape (P, X) containing the coefficient of the regressor
        for the individual data point
    df : int
        Degrees of freedom, which is the difference of the number of
        independent regressors from the number of observations
    MRSS : float
        Mean residual sum of squares, a commonly used measure of a
        predictive model's accuracy (lower is better)
    """
    regression_coefficients = npl.pinv(design_matrix).dot(response)
    prediction = design_matrix.dot(regression_coefficients)
    error = response - prediction
    RSS = (error ** 2).sum(0)
    df = int(design_matrix.shape[0] - npl.matrix_rank(design_matrix))
    MRSS = RSS / df
    return (regression_coefficients, df, MRSS)
def simplex(graph, wp_trajs, withODs=False):
    """Build simplex constraints from waypoint trajectories wp_trajs
    wp_trajs is given by WP.get_wp_trajs()[1]

    Parameters:
    -----------
    graph: Graph object
    wp_trajs: list of waypoint trajectories with paths along this trajectory
        [(wp_traj, path_list, flow)]
    """
    if wp_trajs is None:
        return None, None
    n = len(wp_trajs)
    I, J, r, i = [], [], matrix(0.0, (n, 1)), 0
    for wp_traj, path_ids, flow in wp_trajs:
        r[i] = flow
        for id in path_ids:
            I.append(i)
            J.append(graph.indpaths[id])
        i += 1
    U = spmatrix(1.0, I, J, (n, graph.numpaths))
    if not withODs:
        return U, r
    else:
        U1, r1 = path.simplex(graph)
        U, r = matrix([U, U1]), matrix([r, r1])
        if la.matrix_rank(U) < U.size[0]:
            logging.info('Remove redundant constraint(s)')
            ind = find_basis(U.trans())
            return U[ind, :], r[ind]
        return U, r
def train(x, y):
    """
    Build the linear least squares weight vector W

    :param x: NxD matrix containing N attribute vectors for training
    :param y: NxK matrix containing N class vectors for training
    """
    # D = Number of attributes (plus one for the bias term)
    D = x.shape[1] + 1
    # K = Number of classes
    K = y.shape[1]

    # Build the sums of xi*xi' and xi*yi'
    sum1 = np.zeros((D, D))  # init placeholder
    sum2 = np.zeros((D, K))
    i = 0
    for x_i in x:                      # loop over all vectors
        x_i = np.append(1, x_i)        # augment vector with a 1
        y_i = y[i]
        sum1 += np.outer(x_i, x_i)     # find xi*xi'
        sum2 += np.outer(x_i, y_i)     # find xi*yi'
        i += 1

    # Check that the condition number is finite
    # and therefore sum1 is nonsingular (invertible)
    while matrix_rank(sum1) != D:
        # Naive choice of sigma.
        # Could cause inaccuracies when sum1 has small values
        # However, in most cases the matrix WILL be invertible
        sum1 = sum1 + 0.001 * np.eye(D)

    # Return weight vector
    # Weight vector multiplies sums and inverse of sum1
    return np.dot(inv(sum1), sum2)
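# A hedged example of the train routine above on a tiny two-class problem,
# assuming numpy (np) plus matrix_rank and inv from numpy.linalg are in
# scope as the function expects. Rows of x are attribute vectors, rows of y
# are one-hot class vectors.
import numpy as np

x = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
y = np.array([[1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0]])
W = train(x, y)                         # shape (D, K) = (3, 2); row 0 is the bias
scores = np.append(1, [0.9, 0.2]).dot(W)
print(scores.argmax())                  # predicted class index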
def geneigh(A, B, tol=1e-12):
    """
    Solves the generalized eigenvalue problem also in the case where A and B
    share a common null-space. The eigenvalues corresponding to the
    null-space are given a NaN value. The null-space is defined with the
    tolerance tol.
    """
    # first check if there is a null-space issue
    if matrix_rank(B, tol) == shape(B)[0]:
        return eigh(A, B)
    # first diagonalize the overlap matrix B
    Be, Bv = eigh(B)
    # rewrite the A matrix in the B-matrix eigenspace
    At = dot(conj(Bv.T), dot(A, Bv))
    Bt = diag(Be)
    # detect the shared null-space, given by the first n null eigenvalues of B
    idx = find(Be > tol)
    idx = idx[0]
    # check that the B matrix null-space is shared by A.
    m = amax(abs(At[0:idx, :].flatten()))
    if m > tol:
        warnings.warn('Maximum non-diagonal element in A written in B '
                      'null-space is bigger than the tolerance \'' + str(tol)
                      + '\'.', UserWarning)
    # diagonalize the non-null-space part of the problem
    Et, Vt = eigh(At[idx:, idx:], Bt[idx:, idx:])
    # define Ut, the change of basis in the non-truncated space
    Ut = zeros(shape(A), A.dtype)
    Ut[0:idx, 0:idx] = eye(idx)
    Ut[idx:, idx:] = Vt
    U = dot(Bv, Ut)
    E = append(float('NaN') * ones(idx), Et)
    return E, U
def qr_decomposition(X, job=1):
    """Performs the QR decomposition using LINPACK, BLAS and LAPACK
    Fortran subroutines.

    Parameters
    ----------
    X : array_like, shape (n_samples, n_features)
        The matrix to decompose

    job : int, optional (default=1)
        Whether to perform pivoting. 0 is False, any other value
        will be coerced to 1 (True).

    Returns
    -------
    X : np.ndarray, shape=(n_samples, n_features)
        The matrix

    rank : int
        The rank of the matrix

    qraux : np.ndarray, shape=(n_features,)
        Contains further information required to recover the orthogonal
        part of the decomposition.

    pivot : np.ndarray, shape=(n_features,)
        The pivot array, or None if not ``job``
    """
    X = check_array(X, dtype='numeric', order='F', copy=True)
    n, p = X.shape

    # check on size
    _validate_matrix_size(n, p)
    rank = matrix_rank(X)

    # validate job:
    job_ = 0 if not job else 1

    # np.int is a removed alias; the builtin int gives the same dtype here
    qraux, pivot, work = (np.zeros(p, dtype=np.double, order='F'),
                          # can't use arange, because need fortran order
                          # ('order' not kw in arange)
                          np.array([i for i in range(1, p + 1)],
                                   dtype=int, order='F'),
                          np.zeros(p, dtype=np.double, order='F'))

    # sanity checks
    assert qraux.shape[0] == p, 'expected qraux to be of length %i' % p
    assert pivot.shape[0] == p, 'expected pivot to be of length %i' % p
    assert work.shape[0] == p, 'expected work to be of length %i' % p

    # call the fortran module IN PLACE
    _safecall(dqrsl.dqrdc, X, n, n, p, qraux, pivot, work, job_)

    # do returns
    return (X, rank, qraux,
            (pivot - 1) if job_ else None)  # subtract one because pivot started at 1 for the fortran
def chi(matrix_P, matrix_Q, matrix_A):
    P = np.transpose(np.loadtxt(matrix_P))
    Q = np.transpose(np.loadtxt(matrix_Q))

    # flags for testing the entries
    test_size = True
    test_entry = True

    # testing the size of the entries:
    # if bigger than 4x4, take the 4x4 upper-left part of the matrix;
    # if too small, mark the size test as failed
    if P.shape[0] >= 4 and P.shape[1] >= 4:
        P = P[0:4, 0:4]
    else:
        test_size = False
    if Q.shape[0] >= 4 and Q.shape[1] >= 4:
        Q = Q[0:4, 0:4]
    else:
        test_size = False

    # test that all the entries are integers,
    # that Q and P have the same determinant in absolute value,
    # and that P and Q are of full rank
    if test_size:
        for i in range(4):
            for j in range(4):
                if P[i, j] != int(P[i, j]):
                    test_entry = False
                if Q[i, j] != int(Q[i, j]):
                    test_entry = False
        if abs(determinant(P, 4)) != abs(determinant(Q, 4)):
            test_entry = False
        if lin.matrix_rank(P) != 4 or lin.matrix_rank(Q) != 4:
            test_entry = False

    # Compute the matrix A if P and Q passed all the tests
    if test_size and test_entry:
        file = open(matrix_A, 'w')
        list_A = matrix_computation(P, Q, file)
        file.close()
    else:
        print('Wrong Entries')
def w_update(weights, x_white, bias1, lrate1):
    """ Update rule for infomax
    This function receives parameters to update W1
    * Input
    W1: unmixing matrix (must be a square matrix)
    Xwhite1: whitened data
    bias1: current estimated bias
    lrate1: current learning rate
    startW1: in case update blows up it will start again from startW1
    * Output
    W1: updated mixing matrix
    bias: updated bias
    lrate1: updated learning rate
    """
    NVOX = x_white.shape[1]
    NCOMP = x_white.shape[0]
    block1 = int(np.floor(np.sqrt(NVOX / 3)))
    permute1 = permutation(NVOX)
    for start in range(0, NVOX, block1):
        if start + block1 < NVOX:
            tt2 = start + block1
        else:
            tt2 = NVOX
            block1 = NVOX - start

        # unmixed = dot(weights, x_white[:, permute1[start:tt2]]) + \
        #     dot(bias1, ib1[:, 0:block1])
        unmixed = dot(weights, x_white[:, permute1[start:tt2]]) + bias1
        logit = 1 - (2 / (1 + np.exp(-unmixed)))
        weights = weights + lrate1 * dot(block1 * np.eye(NCOMP) +
                                         dot(logit, unmixed.T), weights)
        bias1 = bias1 + lrate1 * logit.sum(axis=1).reshape(bias1.shape)
        # Checking if W blows up
        if (np.isnan(weights)).any() or np.max(np.abs(weights)) > MAX_W:
            print("Numeric error! restarting with lower learning rate")
            lrate1 = lrate1 * ANNEAL
            weights = np.eye(NCOMP)
            bias1 = np.zeros((NCOMP, 1))
            error = 1
            if lrate1 > 1e-6 and \
               matrix_rank(x_white) < NCOMP:
                print("Data 1 is rank deficient"
                      ". I cannot compute " + str(NCOMP) + " components.")
                return (None, None, None, 1)
            if lrate1 < 1e-6:
                print("Weight matrix may"
                      " not be invertible...")
                return (None, None, None, 1)
            break
        else:
            error = 0

    return (weights, bias1, lrate1, error)
def w_update(unmixer, x_white, bias1, lrate1):
    """ Update rule for infomax
    This function receives parameters to update W1
    * Input
    W1: unmixing matrix (must be a square matrix)
    Xwhite1: whitened data
    bias1: current estimated bias
    lrate1: current learning rate
    startW1: in case update blows up it will start again from startW1
    * Output
    W1: updated mixing matrix
    bias: updated bias
    lrate1: updated learning rate
    """
    nvox1 = x_white.shape[1]
    ncomp1 = x_white.shape[0]
    block1 = int(np.floor(np.sqrt(nvox1 / 3)))
    ib1 = np.ones((1, block1))
    permute1 = permutation(nvox1)
    for start in range(0, nvox1, block1):
        if start + block1 < nvox1:
            tt2 = start + block1
        else:
            tt2 = nvox1
            block1 = nvox1 - start

        unmixed = dot(unmixer, x_white[:, permute1[start:tt2]]) + \
            dot(bias1, ib1[:, 0:block1])
        logit = 1 / (1 + np.exp(-unmixed))
        unmixer = unmixer + lrate1 * dot(block1 * np.eye(ncomp1) +
                                         dot(1 - 2 * logit, unmixed.T),
                                         unmixer)
        bias1 = (bias1.T + lrate1 * (1 - 2 * logit).sum(axis=1)).T
        # Checking if W blows up
        if np.isnan(np.sum(unmixer)) or np.max(np.abs(unmixer)) > MAX_W:
            print("Numeric error! restarting with lower learning rate")
            lrate1 = lrate1 * ANNEAL
            unmixer = np.eye(ncomp1)
            bias1 = np.zeros((ncomp1, 1))
            error = 1
            if lrate1 > 1e-6 and \
               matrix_rank(x_white) < ncomp1:
                print("Data 1 is rank deficient"
                      ". I cannot compute " + str(ncomp1) + " components.")
                return (None, None, None, 1)
            if lrate1 < 1e-6:
                print("Weight matrix may"
                      " not be invertible...")
                return (None, None, None, 1)
            break
        else:
            error = 0

    return (unmixer, bias1, lrate1, error)
def get_R_rank(self):
    """Get the rank of the R matrix.

    Returns
    -------
    rank : int
        The rank of the R matrix
    """
    return matrix_rank(self.get_R())
def feedback(self, other=1, sign=-1):
    """Feedback interconnection between two LTI systems."""
    other = _convertToStateSpace(other)

    # Check to make sure the dimensions are OK
    if (self.inputs != other.outputs) or (self.outputs != other.inputs):
        raise ValueError("State space systems don't have compatible "
                         "inputs/outputs for feedback.")

    # Figure out the sampling time to use
    if self.dt is None and other.dt is not None:
        dt = other.dt       # use dt from second argument
    elif (other.dt is None and self.dt is not None) or \
            timebaseEqual(self, other):
        dt = self.dt        # use dt from first argument
    else:
        raise ValueError("Systems have different sampling times")

    A1 = self.A
    B1 = self.B
    C1 = self.C
    D1 = self.D
    A2 = other.A
    B2 = other.B
    C2 = other.C
    D2 = other.D

    F = eye(self.inputs) - sign * D2 * D1
    if matrix_rank(F) != self.inputs:
        raise ValueError(
            "I - sign * D2 * D1 is singular to working precision.")

    # Precompute F\D2 and F\C2 (E = inv(F))
    # We can solve two linear systems in one pass, since the
    # coefficients matrix F is the same. Thus, we perform the LU
    # decomposition (cubic runtime complexity) of F only once!
    # The remaining back substitutions are only quadratic in runtime.
    E_D2_C2 = solve(F, concatenate((D2, C2), axis=1))
    E_D2 = E_D2_C2[:, :other.inputs]
    E_C2 = E_D2_C2[:, other.inputs:]

    T1 = eye(self.outputs) + sign * D1 * E_D2
    T2 = eye(self.inputs) + sign * E_D2 * D1

    A = concatenate(
        (concatenate(
            (A1 + sign * B1 * E_D2 * C1, sign * B1 * E_C2), axis=1),
         concatenate(
            (B2 * T1 * C1, A2 + sign * B2 * D1 * E_C2), axis=1)),
        axis=0)
    B = concatenate((B1 * T2, B2 * D1 * T2), axis=0)
    C = concatenate((T1 * C1, sign * D1 * E_C2), axis=1)
    D = D1 * T2

    return StateSpace(A, B, C, D, dt)
def __init__(self, A):
    if not isinstance(A, matrix):
        A = mat(A)
    self.__A = A
    self.__proj = None
    U, S, VH = svd(A)
    rank = matrix_rank(A)
    self.__rank = rank
    B = U[:, 0:rank]
    self.__basis = B
    self.__proj = Operator(func=lambda x: B * B.H * x)
def get_driver_nodes(G):
    """Return the number of driver nodes and the driver nodes of a
    (directed or undirected) graph G

    Basic Idea:
    Given a graph G, it has been proved that the number of driver nodes
    (N_D) is determined by the maximum geometric multiplicity of the
    adjacency matrix A, i.e.
        N_D = max{N - rank(\lambda_i I_N - A)}
    where \lambda_i is an eigenvalue of A.
    For an undirected network, the above equation reduces to
        N_D = max{\delta(\lambda_i)}
    the maximum algebraic multiplicity of \lambda_i.

    Parameters:
    -----------
    G: directed or undirected network

    Returns:
    --------
    ND: the number of driver nodes
    driverNodes: the list of driver nodes

    Note:
    -----
    If you only want to get THE NUMBER of driver nodes, and do not care
    which nodes are drivers, please consider the other function
    get_number_of_driver_nodes(G); it is much more efficient.

    References:
    -----------
    Yuan Z, Zhao C, Di Z, et al. Exact controllability of complex
    networks[J]. Nature communications, 2013, 4.
    """
    N = G.number_of_nodes()
    A = (nx.adjacency_matrix(G)).todense()  # get adjacency matrix A of G
    all_eigs = LA.eigvals(A)                # get eigenvalues of A
    # any two eigenvalues closer than 1e-8 are considered identical
    L = list(set(np.round(all_eigs, 8)))
    ND = -1
    ND_lambda = 0.0
    IN = np.eye(N)
    for my_lambda in L:
        # get geometric multiplicity for each lambda of A
        miu_lambda = N - LA.matrix_rank(my_lambda * IN - A, tol=1e-8)
        if miu_lambda > ND:
            ND = miu_lambda
            ND_lambda = my_lambda
    # get the middle matrix A - \lambda * I_N
    middle_matrix = A - ND_lambda * np.eye(N)
    middle_matrix = np.round(middle_matrix, 8)
    # rref gets the reduced row echelon form
    (reduced_matrix, pivot_array) = sympy.Matrix(middle_matrix).rref()
    all_nodes = list(G.nodes())
    pivot_nodes = [all_nodes[i] for i in pivot_array]
    driver_nodes = [x for x in G.nodes() if x not in pivot_nodes]
    return (ND, driver_nodes)
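# A small usage sketch, assuming networkx (nx), numpy (np), numpy.linalg
# (LA), and sympy are imported as the function above expects. For an
# undirected star graph the eigenvalue 0 has geometric multiplicity 2, so
# two driver nodes are needed.
import networkx as nx

G = nx.star_graph(3)          # hub 0 with leaves 1, 2, 3
ND, drivers = get_driver_nodes(G)
print(ND, drivers)            # ND == 2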
# vector rank
from numpy import array
from numpy.linalg import matrix_rank

# rank
v1 = array([1, 2, 3])
print(v1)
vr1 = matrix_rank(v1)
print(vr1)

# zero rank
v2 = array([0, 0, 0, 0, 0])
print(v2)
vr2 = matrix_rank(v2)
print(vr2)
def testData(rank, n, A):
    # A = np.random.rand(n, n)
    print("Called with RANK = " + str(rank) + ", SIZE = " + str(n))
    B = low_rank_approx(A, rank)
    nTrain = 10000
    nTest = 1000

    # training data
    x_train = np.random.rand(nTrain, n)
    y_train = batch_mult(A, x_train)

    # test data
    x_test = np.random.rand(nTest, n)
    y_test = batch_mult(A, x_test)

    power = closestPower(n)

    # build neural net
    model = Sequential()
    model.add(Dense(n, input_dim=n, bias=False, init='he_normal'))
    model.add(Activation('linear'))
    model.add(Dense(n, bias=False, init='he_normal'))
    model.add(Activation('linear'))
    # model.add(Dense(power, bias=False, init='he_normal'))
    # model.add(Activation('linear'))
    model.add(Dense(n - 1, bias=False, init='he_normal'))
    model.add(Activation('linear'))
    # model.add(Dense(power/4, bias=False, init='he_normal'))
    # model.add(Activation('linear'))
    model.add(Dense(rank, bias=False, init='he_normal'))
    model.add(Activation('linear'))
    model.add(Dense(n - 1, bias=False, init='he_normal'))
    model.add(Activation('linear'))
    model.add(Dense(n - 1, bias=False, init='he_normal'))
    model.add(Activation('linear'))
    model.add(Dense(rank, bias=False, init='he_normal'))
    model.add(Activation('linear'))
    # model.add(Dense(power/4, bias=False, init='he_normal'))
    # model.add(Activation('linear'))
    # model.add(Dense(power/2, bias=False, init='he_normal'))
    # model.add(Activation('linear'))
    # model.add(Dense(power, bias=False, init='he_normal'))
    # model.add(Activation('linear'))
    model.add(Dense(n, bias=False, init='he_normal'))
    model.add(Activation('linear'))
    model.compile(loss='mean_squared_error', optimizer='adam',
                  metrics=["accuracy"])

    # train the neural net using the training data
    history = model.fit(
        x_train,
        y_train,
        nb_epoch=30,
        batch_size=30,
    )
    plt.plot(history.history['loss'][1:])
    plt.show()
    plt.clf()

    score = model.evaluate(x_test, y_test, batch_size=30)
    print('\n score: ', score)

    C = model.predict(np.identity(n))
    print('Dylan\'s errors (SVD): ',
          np.linalg.norm(A - B, 2) / np.linalg.norm(A, 2),
          np.linalg.norm(A - B, 'fro') / np.linalg.norm(A, 'fro'))
    print('Dylan\'s errors (NN): ',
          np.linalg.norm(A - C.T, 2) / np.linalg.norm(A, 2),
          np.linalg.norm(A - C.T, 'fro') / np.linalg.norm(A, 'fro'))

    rankC = matrix_rank(C)
    rankB = matrix_rank(B)
    rankA = matrix_rank(A)
    print("\nNN Rank is " + str(rankC))
    print("SVD Rank is " + str(rankB))
    print("Original Rank is " + str(rankA))

    # return abs(np.linalg.norm(A-B, 2)/np.linalg.norm(A, 2) -
    #            np.linalg.norm(A-C.T, 2)/np.linalg.norm(A, 2))
    return C
def t_stat_mult_regression_single(data_4d, X, c=()):
    """
    Return four values: the estimated beta, t-value, degrees of freedom,
    and p-value for the given t-value

    Parameters
    ----------
    data_4d: numpy array of 4 dimensions
        The image data of one subject
    X: numpy array
        the matrix to be put into the glm_multiple function
    c: numpy array of 1 dimension
        The contrast vector of the weights of the beta vector. If not
        entered, it will be set as np.array([0,1,...]), which corresponds
        to beta_1

    Note that the fourth dimension of `data_4d` (time or the number of
    volumes) must be the same as the number of rows that X has.

    Returns
    -------
    beta: estimated beta values
    t: numpy array of 1 dimension
        t-value of the betas
    df: int
        degrees of freedom
    p: numpy array of 1 dimension
        p-value corresponding to the t-value and degrees of freedom
    """
    # Make sure y, X, c are all arrays
    beta, X = glm_multiple(data_4d, X)

    # dealing with no c put in
    if len(c) == 0:
        c = np.zeros(X.shape[-1])
        c[1] = 1
    c = np.atleast_2d(c).T  # As column vector

    # Calculate the parameters - b hat
    beta = np.reshape(beta, (-1, beta.shape[-1])).T

    fitted = X.dot(beta)
    # Residual error
    y = np.reshape(data_4d, (-1, data_4d.shape[-1]))
    errors = y.T - fitted
    # Residual sum of squares
    RSS = (errors**2).sum(axis=0)

    df = X.shape[0] - npl.matrix_rank(X)
    # Mean residual sum of squares
    MRSS = RSS / df
    # calculate bottom half of t statistic
    SE = np.sqrt(MRSS * c.T.dot(npl.pinv(X.T.dot(X)).dot(c)))
    zeros = np.where(SE == 0)
    SE[zeros] = 1
    t = c.T.dot(beta) / SE
    # zero out the entries where SE was zero (t[:, zeros] would mis-index)
    t[zeros] = 0

    # Get p value for t value using cumulative density function
    # (CDF) of t distribution
    ltp = t_dist.cdf(abs(t), df)  # lower tail p
    p = 1 - ltp                   # upper tail p

    return beta.T, t, df, p
solution = solveset(d, x)
print(d)
l = len(solution)
solution = list(solution)
k = 0
store_c = zeros([n, n])
energy = empty(n)
for i in range(0, l):
    if i > 0:
        if abs(solution[i] - solution[i - 1]) < eps:
            continue
    M = con_M + E * float(solution[i])
    rank = matrix_rank(M)
    n_zero = n - rank
    for j in range(k, k + n_zero):
        energy[j] = solution[i]
    record_p = function.Gauss(M)
    flag = 0
    for I in range(k, k + n_zero):
        for J in range(0, n):
            if J == record_p[flag]:
                store_c[I, J] = 1
            elif J not in record_p:
                store_c[I, J] = -M[J, record_p[flag]] / M[J, J]
        flag += 1
    k += n_zero
def LinApp_Solve(AA, BB, CC, DD, FF, GG, HH, JJ, KK, LL, MM, NN, Z0, Sylv):
    """
    This code takes Uhlig's original code and puts it in the form of a
    function. This version outputs the policy function coefficients: PP,
    QQ and UU for X, and RR, SS and VV for Y.

    Inputs overview:
    The matrices of derivatives: AA - MM.
    The autoregression coefficient matrix NN from the law of motion for Z.
    Z0 is the Z-point about which the linearization is taken. For
    linearizing about the steady state this is Zbar and normally Zbar = 0.
    Sylv is an indicator variable telling the program to use the built-in
    function sylvester() to solve for QQ and SS, if possible. Default is
    to use Sylv=1.

    Parameters
    ----------
    AA : array_like, dtype=float, shape=(ny, nx)
        The matrix represented above by :math:`A`. It is the matrix of
        derivatives of the Y equations with respect to :math:`X_t`
    BB : array_like, dtype=float, shape=(ny, nx)
        The matrix represented above by :math:`B`. It is the matrix of
        derivatives of the Y equations with respect to :math:`X_{t-1}`.
    CC : array_like, dtype=float, shape=(ny, ny)
        The matrix represented above by :math:`C`. It is the matrix of
        derivatives of the Y equations with respect to :math:`Y_t`
    DD : array_like, dtype=float, shape=(ny, nz)
        The matrix represented above by :math:`D`. It is the matrix of
        derivatives of the Y equations with respect to :math:`Z_t`
    FF : array_like, dtype=float, shape=(nx, nx)
        The matrix represented above by :math:`F`. It is the matrix of
        derivatives of the model's characterizing equations with respect
        to :math:`X_{t+1}`
    GG : array_like, dtype=float, shape=(nx, nx)
        The matrix represented above by :math:`G`. It is the matrix of
        derivatives of the model's characterizing equations with respect
        to :math:`X_t`
    HH : array_like, dtype=float, shape=(nx, nx)
        The matrix represented above by :math:`H`. It is the matrix of
        derivatives of the model's characterizing equations with respect
        to :math:`X_{t-1}`
    JJ : array_like, dtype=float, shape=(nx, ny)
        The matrix represented above by :math:`J`. It is the matrix of
        derivatives of the model's characterizing equations with respect
        to :math:`Y_{t+1}`
    KK : array_like, dtype=float, shape=(nx, ny)
        The matrix represented above by :math:`K`. It is the matrix of
        derivatives of the model's characterizing equations with respect
        to :math:`Y_t`
    LL : array_like, dtype=float, shape=(nx, nz)
        The matrix represented above by :math:`L`. It is the matrix of
        derivatives of the model's characterizing equations with respect
        to :math:`Z_{t+1}`
    MM : array_like, dtype=float, shape=(nx, nz)
        The matrix represented above by :math:`M`. It is the matrix of
        derivatives of the model's characterizing equations with respect
        to :math:`Z_t`
    NN : array_like, dtype=float, shape=(nz, nz)
        The autocorrelation matrix for the exogenous state vector z.
    Z0 : array, dtype=float, shape=(nz,)
        the Z-point about which the linearization is taken. For
        linearizing about the steady state this is Zbar and normally
        Zbar = 0.
    Sylv : binary, dtype=int
        an indicator variable telling the program to use the built-in
        function sylvester() to solve for QQ and SS, if possible. Default
        is to use Sylv=1.

    Returns
    -------
    P : 2D-array, dtype=float, shape=(nx, nx)
        The matrix :math:`P` in the law of motion for endogenous state
        variables described above.
    Q : 2D-array, dtype=float, shape=(nx, nz)
        The matrix :math:`Q` in the law of motion for exogenous state
        variables described above.
    R : 2D-array, dtype=float, shape=(ny, nx)
        The matrix :math:`R` in the law of motion for endogenous state
        variables described above.
    S : 2D-array, dtype=float, shape=(ny, nz)
        The matrix :math:`S` in the law of motion for exogenous state
        variables described above.

    References
    ----------
    .. [1] Uhlig, H. (1999): "A toolkit for analyzing nonlinear dynamic
       stochastic models easily," in Computational Methods for the Study
       of Dynamic Economies, ed. by R. Marimon, pp. 30-61. Oxford
       University Press.
    """
    # The original coding we did used the np.matrix form for our matrices
    # so we make sure to set our inputs to numpy matrices.
    AA = np.matrix(AA)
    BB = np.matrix(BB)
    CC = np.matrix(CC)
    DD = np.matrix(DD)
    FF = np.matrix(FF)
    GG = np.matrix(GG)
    HH = np.matrix(HH)
    JJ = np.matrix(JJ)
    KK = np.matrix(KK)
    LL = np.matrix(LL)
    MM = np.matrix(MM)
    NN = np.matrix(NN)
    Z0 = np.array(Z0)
    # Tolerance level to use
    TOL = .000001

    # Here we use matrices to get pertinent dimensions.
    nx = FF.shape[1]
    l_equ = CC.shape[0]
    ny = CC.shape[1]
    nz = min(NN.shape)

    # The following if and else blocks form the
    # Psi, Gamma, Theta, Xi, Delta mats
    if l_equ == 0:
        if CC.any():
            # This block makes sure you don't throw an error with an
            # empty CC.
            CC_plus = la.pinv(CC)
            CC_0 = _nullSpaceBasis(CC.T)
        else:
            CC_plus = np.mat([])
            CC_0 = np.mat([])
        Psi_mat = FF
        Gamma_mat = -GG
        Theta_mat = -HH
        Xi_mat = np.mat(vstack((hstack((Gamma_mat, Theta_mat)),
                                hstack((eye(nx), zeros((nx, nx)))))))
        Delta_mat = np.mat(vstack((hstack((Psi_mat, zeros((nx, nx)))),
                                   hstack((zeros((nx, nx)), eye(nx))))))
    else:
        CC_plus = la.pinv(CC)
        CC_0 = _nullSpaceBasis(CC.T)
        if l_equ != ny:
            Psi_mat = vstack((zeros((l_equ - ny, nx)),
                              FF - dot(dot(JJ, CC_plus), AA)))
            Gamma_mat = vstack((dot(CC_0, AA), dot(dot(JJ, CC_plus), BB)
                                - GG + dot(dot(KK, CC_plus), AA)))
            Theta_mat = vstack((dot(CC_0, BB),
                                dot(dot(KK, CC_plus), BB) - HH))
        else:
            CC_inv = la.inv(CC)
            Psi_mat = FF - dot(JJ.dot(CC_inv), AA)
            Gamma_mat = dot(JJ.dot(CC_inv), BB) - GG \
                + dot(dot(KK, CC_inv), AA)
            Theta_mat = dot(KK.dot(CC_inv), BB) - HH
        Xi_mat = vstack((hstack((Gamma_mat, Theta_mat)),
                         hstack((eye(nx), zeros((nx, nx))))))
        Delta_mat = vstack((hstack((Psi_mat, np.mat(zeros((nx, nx))))),
                            hstack((zeros((nx, nx)), eye(nx)))))

    # Now we need the generalized eigenvalues/vectors for Xi with respect
    # to Delta. That is eVals and eVecs below.
    eVals, eVecs = la.eig(Xi_mat, Delta_mat)
    if npla.matrix_rank(eVecs) < nx:
        print("Error: Xi is not diagonalizable, stopping...")

    # From here we diagonalize Xi, form Lambda/Omega and find P.
    else:
        Xi_sortabs = np.sort(abs(eVals))
        Xi_sortindex = np.argsort(abs(eVals))
        Xi_sortedVec = np.array([eVecs[:, i] for i in Xi_sortindex]).T
        Xi_sortval = eVals[Xi_sortindex]
        Xi_select = np.arange(0, nx)
        if np.imag(Xi_sortval[nx - 1]).any():
            if (abs(Xi_sortval[nx - 1] - sp.conj(Xi_sortval[nx])) < TOL):
                drop_index = 1
                cond_1 = (abs(np.imag(Xi_sortval[drop_index - 1])) > TOL)
                cond_2 = drop_index < nx
                while cond_1 and cond_2:
                    drop_index += 1
                if drop_index >= nx:
                    print("There is an error. Too many complex eigenvalues."
                          + " Quitting...")
                else:
                    print("Dropping the lowest real eigenvalue. Beware of"
                          + " sunspots!")
                    Xi_select = np.array([np.arange(0, drop_index - 1),
                                          np.arange(drop_index, nx + 1)])
        # Here Uhlig computes stuff if the user chose "Manual roots";
        # I skip it.
        if max(abs(Xi_sortval[Xi_select])) > 1 + TOL:
            print("It looks like we have unstable roots. "
                  "This might not work...")
        if abs(max(abs(Xi_sortval[Xi_select])) - 1) < TOL:
            print("Check the model to make sure you have a unique steady"
                  + " state; we are having problems with convergence.")
        Lambda_mat = np.diag(Xi_sortval[Xi_select])
        Omega_mat = Xi_sortedVec[nx:2 * nx, Xi_select]

        if npla.matrix_rank(Omega_mat) < nx:
            print("Omega matrix is not invertible, can't solve for P; we"
                  + " proceed with the QZ-method instead.")

            # ~~~~~~~~~ QZ-method codes from SOLVE_QZ ~~~~~~~~ #
            Delta_up, Xi_up, UUU, VVV = la.qz(Delta_mat, Xi_mat,
                                              output='complex')
            UUU = UUU.T
            Xi_eigval = np.diag(np.diag(Xi_up) /
                                np.maximum(np.diag(Delta_up), TOL))
            Xi_sortabs = np.sort(abs(np.diag(Xi_eigval)))
            Xi_sortindex = np.argsort(abs(np.diag(Xi_eigval)))
            Xi_sortval = Xi_eigval[Xi_sortindex, Xi_sortindex]
            Xi_select = np.arange(0, nx)
            stake = max(abs(Xi_sortval[Xi_select])) + TOL
            Delta_up, Xi_up, UUU, VVV = qzdiv(stake, Delta_up, Xi_up,
                                              UUU, VVV)

            # Check conditions from line 49-109
            if np.imag(Xi_sortval[nx - 1]).any():
                if (abs(Xi_sortval[nx - 1] - sp.conj(Xi_sortval[nx])) < TOL):
                    print("Problem: You have complex eigenvalues! And this"
                          + " means the PP matrix will contain complex"
                          + " numbers by this method.")
                drop_index = 1
                cond_1 = (abs(np.imag(Xi_sortval[drop_index - 1])) > TOL)
                cond_2 = drop_index < nx
                while cond_1 and cond_2:
                    drop_index += 1
                if drop_index >= nx:
                    print("There is an error. Too many complex eigenvalues."
                          + " Quitting...")
                else:
                    print("Dropping the lowest real eigenvalue. Beware of"
                          + " sunspots!")
                    for i in range(drop_index, nx + 1):
                        Delta_up, Xi_up, UUU, VVV = qzswitch(i, Delta_up,
                                                             Xi_up, UUU, VVV)
                    Xi_select1 = np.arange(0, drop_index - 1)
                    Xi_select = np.append(Xi_select1,
                                          np.arange(drop_index, nx + 1))

            if Xi_sortval[max(Xi_select)] < 1 - TOL:
                print('There are stable roots NOT used. Proceeding with the'
                      + ' smallest root.')
            if max(abs(Xi_sortval[Xi_select])) > 1 + TOL:
                print("It looks like we have unstable roots. "
                      "This might not work...")
            if abs(max(abs(Xi_sortval[Xi_select])) - 1) < TOL:
                print("Check the model to make sure you have a unique steady"
                      + " state; we are having problems with convergence.")
            # End of checking conditions

            # Lambda_mat = np.diag(Xi_sortval[Xi_select])  # to help sol_out.m

            VVV = VVV.conj().T
            VVV_2_1 = VVV[nx:2 * nx, 0:nx]
            VVV_2_2 = VVV[nx:2 * nx, nx:2 * nx]
            UUU_2_1 = UUU[nx:2 * nx, 0:nx]
            VVV = VVV.conj().T

            if abs(la.det(UUU_2_1)) < TOL:
                print("One necessary condition for computing P is NOT"
                      + " satisfied, but we proceed anyways...")
            if abs(la.det(VVV_2_1)) < TOL:
                print("VVV_2_1 matrix, used to compute for P, is not"
                      + " invertible; we are in trouble but we proceed"
                      + " anyways...")

            PP = np.matrix(la.solve(-VVV_2_1, VVV_2_2))
            PP_imag = np.imag(PP)
            PP = np.real(PP)
            if (sum(sum(abs(PP_imag))) / sum(sum(abs(PP))) > .000001).any():
                print("A lot of P is complex. We will continue with the"
                      + " real part and hope we don't lose too much"
                      + " information.")
            # ~~~~~~~~~ End of QZ-method ~~~~~~~~~ #

        # This follows the original uhlig.py file
        else:
            PP = dot(dot(Omega_mat, Lambda_mat), la.inv(Omega_mat))
            PP_imag = np.imag(PP)
            PP = np.real(PP)
            if (sum(sum(abs(PP_imag))) / sum(sum(abs(PP))) > .000001).any():
                print("A lot of P is complex. We will continue with the"
                      + " real part and hope we don't lose too much"
                      + " information.")

    # The code from here to the end was from the Uhlig file calc_qrs.m.
    # I think for python it fits better here than in a separate file.

    # The if and else below make RR and VV depending on our model's setup.
    if l_equ == 0:
        RR = zeros((0, nx))
        VV = hstack((kron(NN.T, FF) + kron(eye(nz), (dot(FF, PP) + GG)),
                     kron(NN.T, JJ) + kron(eye(nz), KK)))
    else:
        RR = -dot(CC_plus, (dot(AA, PP) + BB))
        VV = sp.vstack((hstack((kron(eye(nz), AA), kron(eye(nz), CC))),
                        hstack((kron(NN.T, FF) +
                                kron(eye(nz),
                                     dot(FF, PP) + dot(JJ, RR) + GG),
                                kron(NN.T, JJ) + kron(eye(nz), KK)))))

    # Now we use LL, NN, RR, VV to get the QQ, RR, SS, VV matrices.
    # first try using the Sylvester equation solver
    if Sylv:
        if ny > 0:
            PM = (FF - la.solve(JJ.dot(CC), AA))
            if npla.matrix_rank(PM) < nx + ny:
                Sylv = 0
                print("Sylvester equation solver condition is not satisfied;"
                      + " proceed with the original method...")
        else:
            if npla.matrix_rank(FF) < nx:
                Sylv = 0
                print("Sylvester equation solver condition is not satisfied;"
                      + " proceed with the original method...")
        print("Using Sylvester equation solver...")
        if ny > 0:
            Anew = la.solve(PM, (FF.dot(PP) + GG + JJ.dot(RR) -
                                 la.solve(KK.dot(CC), AA)))
            Bnew = NN
            Cnew1 = la.solve(JJ.dot(CC), DD.dot(NN)) + \
                la.solve(KK.dot(CC), DD) - LL.dot(NN) - MM
            Cnew = la.solve(PM, Cnew1)
            QQ = la.solve_sylvester(Anew, Bnew, Cnew)
            SS = la.solve(-CC, (AA.dot(QQ) + DD))
        else:
            Anew = la.solve(FF, (FF.dot(PP) + GG))
            Bnew = NN
            Cnew = la.solve(FF, (-LL.dot(NN) - MM))
            QQ = la.solve_sylvester(Anew, Bnew, Cnew)
            SS = np.zeros((0, nz))  # empty matrix

    # then the Uhlig's way
    else:
        '''
        # This code is from Spencer Lyon's 2012 version
        q_eqns = sp.shape(FF)[0]
        m_states = sp.shape(FF)[1]
        l_equ = sp.shape(CC)[0]
        n_endog = sp.shape(CC)[1]
        k_exog = min(sp.shape(sp.mat(NN))[0], sp.shape(sp.mat(NN))[1])
        sp.mat(LL.T)
        sp.mat(NN)
        sp.dot(sp.mat(LL), sp.mat(NN))
        LLNN_plus_MM = sp.dot(sp.mat(LL), sp.mat(NN)) + sp.mat(MM.T)
        QQSS_vec = sp.dot(la.inv(sp.mat(VV)), sp.mat(LLNN_plus_MM))
        QQSS_vec = -QQSS_vec
        if max(abs(QQSS_vec)) == sp.inf:
            print("We have issues with Q and S. Entries are undefined."
                  " Probably because V is not invertible.")
        QQ = sp.reshape(QQSS_vec[0:m_states * k_exog],
                        (m_states, k_exog))
        SS = sp.reshape(QQSS_vec[(m_states * k_exog):
                                 ((m_states + n_endog) * k_exog)],
                        (n_endog, k_exog))
        '''
        # this code is from Yulong Li's 2015 version
        if npla.matrix_rank(VV) < nz * (nx + ny):
            print("Sorry but V is not invertible. Can't solve for Q and S;"
                  + " but we proceed anyways...")
        LL = sp.mat(LL)
        NN = sp.mat(NN)
        LLNN_plus_MM = dot(LL, NN) + MM
        if DD.any():
            impvec = vstack([DD, LLNN_plus_MM])
        else:
            impvec = LLNN_plus_MM
        impvec = np.reshape(impvec, ((nx + ny) * nz, 1), 'F')
        QQSS_vec = np.matrix(la.solve(-VV, impvec))
        if (max(abs(QQSS_vec)) == sp.inf).any():
            print("We have issues with Q and S. Entries are undefined."
                  + " Probably because V is not invertible.")
        # Build QQ SS
        QQ = np.reshape(np.matrix(QQSS_vec[0:nx * nz, 0]), (nx, nz), 'F')
        SS = np.reshape(QQSS_vec[(nx * nz):((nx + ny) * nz), 0],
                        (ny, nz), 'F')

    return np.array(PP), np.array(QQ), np.array(RR), np.array(SS)
def _rank_test(S, Sjk):
    if len(Sjk) > S.shape[0] + 1:
        return False
    else:
        return len(Sjk) - matrix_rank(S[:, Sjk]) == 1
def linear_equation_solver():
    """
    Solve a linear matrix equation, or system of linear scalar equations

    Examples:
    ------------------------------
    Solve the system of equations
    ``3 * x0 + x1 = 9``
    ``x0 + 2 * x1 = 8``

    you should input like this:
    N of variables: 2
    A: 3,1;1,2
    B: 9,8

    Solve the system of equations
    ``4 * x0 + 3 * x1 + 2 * x2 = 25``
    ``-2 * x0 + 2 * x1 + 3 * x2 = -10``
    ``3 * x0 - 5 * x1 + 2 * x2 = -4``

    you should input like this:
    N of variables: 3
    A: 4,3,2;-2,2,3;3,-5,2
    B: 25,-10,-4
    """
    while True:
        N_of_vars = input('N of variables: ')
        try:
            N_of_vars = int(N_of_vars)
        except ValueError:
            print('Invalid input! try again!\n')
            continue
        if N_of_vars >= 2:
            break
        else:
            print('N of variables should be equal to or greater than 2\n')

    while True:
        A = input('A: ')
        try:
            A = array([[int(j) for j in i.split(',')]
                       for i in A.split(';')])
        except ValueError:
            print('Invalid input! try again!\n')
            continue
        if A.shape == (N_of_vars, N_of_vars):
            break
        else:
            print('A is not a N-square!\n')

    while True:
        B = input('B: ')
        try:
            B = array([int(i) for i in B.split(',')])
        except ValueError:
            print('Invalid input! try again!\n')
            continue
        if B.shape == (N_of_vars, ):
            break
        else:
            print('B is not N!\n')

    rank_A = matrix_rank(A)
    rank_A_B = matrix_rank(column_stack((A, B)))

    print('the system ', end='')
    if rank_A == rank_A_B:
        if rank_A == N_of_vars:
            print('has a unique solution: {}'.format(solve(A, B)))
        elif rank_A < N_of_vars:
            print('has infinitely many solutions.')
    elif rank_A < rank_A_B:
        print('is inconsistent.')
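# The consistency check above is the Rouché-Capelli test: the system is
# solvable iff rank(A) == rank([A | b]). A hedged, non-interactive
# restatement using numpy directly, on the first example from the docstring.
import numpy as np
from numpy.linalg import matrix_rank, solve

A = np.array([[3, 1], [1, 2]])
b = np.array([9, 8])
if matrix_rank(A) == matrix_rank(np.column_stack((A, b))) == A.shape[0]:
    print(solve(A, b))   # unique solution [2. 3.]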
def t_stat_mult_regression(data_4d, X):
    """
    Return four values: the estimated beta, t-value, degrees of freedom,
    and p-value for the given t-value

    Parameters
    ----------
    data_4d: numpy array of 4 dimensions
        The image data of one subject
    X: numpy array
        the matrix to be put into the glm_multiple function

    Note that the fourth dimension of `data_4d` (time or the number of
    volumes) must be the same as the number of rows that X has.

    Returns
    -------
    beta: estimated beta values
    t: numpy array of 2 dimensions
        t-value of the betas
    df: int
        degrees of freedom
    p: numpy array of 2 dimensions
        p-value corresponding to the t-value and degrees of freedom
    """
    beta, X = glm_multiple(data_4d, X)

    # Calculate the parameters - b hat
    beta = np.reshape(beta, (-1, beta.shape[-1])).T

    fitted = X.dot(beta)
    # Residual error
    y = np.reshape(data_4d, (-1, data_4d.shape[-1]))
    errors = y.T - fitted
    # Residual sum of squares
    RSS = (errors**2).sum(axis=0)
    df = X.shape[0] - npl.matrix_rank(X)
    # Mean residual sum of squares
    MRSS = RSS / df
    # calculate bottom half of t statistic
    Cov_beta = npl.pinv(X.T.dot(X))
    SE = np.zeros(beta.shape)
    for i in range(X.shape[-1]):
        c = np.zeros(X.shape[-1])
        c[i] = 1
        c = np.atleast_2d(c).T
        SE[i, :] = np.sqrt(MRSS * c.T.dot(npl.pinv(X.T.dot(X)).dot(c)))

    zeros = np.where(SE == 0)
    SE[zeros] = 1
    t = beta / SE
    # zero out the entries where SE was zero (t[:, zeros] would mis-index)
    t[zeros] = 0

    # Get p value for t value using cumulative density function
    # (CDF) of t distribution
    ltp = t_dist.cdf(abs(t), df)  # lower tail p
    p = 1 - ltp                   # upper tail p

    return beta.T, t, df, p
def dense_matrix_rank(M):
    return matrix_rank(M)
# In[8]:
x + y

# In[9]:
x = np.matrix([[3, 4, 5], [3, 5, 6], [2, 8, 6]])

# In[10]:
from numpy import linalg as la

# In[11]:
la.matrix_rank(x)

# In[ ]:
def rand(self, dims=None):
    """"""
    if dims is not None:
        dims = set(dims)
    hps = [
        hp for i, hp in enumerate(self)
        if (not hp.fixed) and ((dims is None) or (i in dims))
    ]
    mat = [np.ones([len(self.scores), 1])] + [hp.as_matrix() for hp in hps]
    mat = np.concatenate(mat, axis=1)
    d = mat.shape[1] - 1
    interactmat = []
    for i, vec1 in enumerate(mat.T):
        for j, vec2 in enumerate(mat.T):
            if i <= j:
                interactmat.append((vec1 * vec2)[:, None])
    X = np.concatenate(interactmat, axis=1)
    n, d2 = X.shape
    I = np.eye(d2)
    I[0, 0] = 0
    XTXinv = la.inv(X.T.dot(X) + .05 * I)
    # TODO maybe: L1 regularization on the interactions
    mean = XTXinv.dot(X.T).dot(self.scores)
    H = X.dot(XTXinv).dot(X.T)
    epsilon_hat = self.scores - H.dot(self.scores)
    dof = np.trace(np.eye(n) - H)
    s_squared = epsilon_hat.dot(epsilon_hat) / dof
    cov = s_squared * XTXinv
    eigenvals, eigenvecs = la.eig(cov)
    eigenvals = np.diag(np.abs(eigenvals))
    eigenvecs = np.real(eigenvecs)
    cov = eigenvecs.dot(eigenvals).dot(eigenvecs.T)
    cov += .05**2 * np.eye(len(cov))
    if la.matrix_rank(cov) < len(cov):
        print('WARNING: indefinite covariance matrix')
        return {}
    rand_dict = DefaultDict(dict)
    vals = np.random.multivariate_normal(mean, cov)
    bias = vals[0]
    lins = vals[1:d + 1]
    bilins = np.zeros([d, d])
    bilins[np.tril_indices(d)] = vals[d + 1:]
    bilins = .5 * bilins + .5 * bilins.T
    eigenvals, eigenvecs = la.eig(bilins)
    eigenvals = -np.diag(np.abs(eigenvals))
    eigenvecs = np.real(eigenvecs)
    bilins = eigenvecs.dot(eigenvals).dot(eigenvecs.T)
    if la.matrix_rank(bilins) < len(bilins):
        print('WARNING: indefinite interaction matrix')
        return {}
    rand_dict = DefaultDict(dict)
    vals = np.clip(.5 * la.inv(bilins).dot(lins), 0, 1)
    i = 0
    for hp in hps:
        if isinstance(hp, NumericHyperparam):
            rand_dict[hp.section][hp.option] = hp.denormalize(vals[i])
            i += 1
        else:
            rand_dict[hp.section][hp.option] = hp.denormalize(
                vals[i:i + len(hp.bounds) - 1])
            i += len(hp.bounds) - 1
    return rand_dict
# print(ratings_list)

## Creating the dataframe from the ratings data
ratings_df = pd.DataFrame(ratings_list,
                          columns=['UserID', 'BookID', 'Rating'],
                          dtype=float)

## Normalizing the values of the ratings to between 0 and 1
ratings_df.loc[:, 'Rating'] = sk.minmax_scale(ratings_df.loc[:, 'Rating'])
## print(ratings_df.loc[:, 'Rating'])

## Creating the user-item matrix from the dataframe and finding its rank to
## reduce the dimension of the matrix
R_df = ratings_df.pivot(index='UserID', columns='BookID',
                        values='Rating').fillna(0)
U_R_matrix = R_df.as_matrix()
rank = matrix_rank(U_R_matrix)

## Performing the truncated SVD on the user-item matrix
svd = TruncatedSVD(n_components=rank, n_iter=7)
transformed_mat = svd.fit_transform(U_R_matrix)
Sigma_mat = np.diag(svd.singular_values_)

## Printing the values of the decomposed components
print("VT")
print(svd.components_)
print("Sigma")
print(svd.singular_values_)

## If we want to know the U component of the decomposition, then we need to
## use the randomized_svd method, which TruncatedSVD uses in its
## implementation
## U, Sigma, VT = randomized_svd(U_R_matrix, n_components=rank)
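# A hedged sketch of recovering the U factor mentioned in the comment above,
# using sklearn.utils.extmath.randomized_svd on a small synthetic matrix
# (the real U_R_matrix would be used in the script's context).
import numpy as np
from sklearn.utils.extmath import randomized_svd

M = np.random.rand(6, 4)
U, Sigma, VT = randomized_svd(M, n_components=2)
print(U.shape, Sigma.shape, VT.shape)   # (6, 2) (2,) (2, 4)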
                  [beta[i] * (alpha[i] - 1), beta[i] * alpha[i]]])
    G = np.array([[alpha[i]],
                  [beta[i] * alpha[i]]])
    H = np.array([1, 1])

    w, v = LA.eig(F)
    print("eigenvalues of F")
    print(w)
    mags = np.absolute(w)
    if mags[0] > 1.0 or mags[1] > 1.0:
        print("These params are not stable")
    else:
        print("these params are stable")

    # determine observability
    max_iters = 5
    j = 1
    cur_obs = H
    O = H
    rank = 0
    while rank < 2 and j < max_iters:
        cur_obs = np.dot(cur_obs, F)
        O = np.vstack((O, cur_obs))
        rank = matrix_rank(O)
        print("rank is " + str(rank))
        print(O)
        j = j + 1
    if j < max_iters:
        print("system is observable with " + str(j) + " observations")
    else:
        print("system is not observable")
            project_path +
            'fig/linear_model/mosaic/middle_slice/%s_withPCA_middle_slice_%s'
            % (d_path['type'] + str(name), str(k)) + '.png')
        # plt.show()
        plt.clf()
        plt.close()

    # Residuals
    MRSS_dict = {}
    MRSS_dict['ds005' + d_path['type']] = {}
    MRSS_dict['ds005' + d_path['type']]['drifts'] = {}
    MRSS_dict['ds005' + d_path['type']]['pca'] = {}
    for z in MRSS_dict['ds005' + d_path['type']]:
        MRSS_dict['ds005' + d_path['type']][z]['MRSS'] = []

    residuals = Y - X.dot(betas)
    df = X.shape[0] - npl.matrix_rank(X)
    MRSS = np.sum(residuals**2, axis=0) / df

    residuals_pca = Y - X_pca.dot(B_pca)
    df_pca = X_pca.shape[0] - npl.matrix_rank(X_pca)
    MRSS_pca = np.sum(residuals_pca**2, axis=0) / df_pca

    MRSS_dict['ds005' + d_path['type']]['drifts']['mean_MRSS'] = np.mean(MRSS)
    MRSS_dict['ds005' + d_path['type']]['pca']['mean_MRSS'] = np.mean(MRSS_pca)

    # Save the mean MRSS values to compare the performance
    # of the design matrices
    for design_matrix, beta, mrss, name in \
            [(X, betas, MRSS, 'drifts'), (X_pca, B_pca, MRSS_pca, 'pca')]:
        MRSS_dict['ds005' + d_path['type']][name]['p-values'] = []
        MRSS_dict['ds005' + d_path['type']][name]['t-test'] = []

    with open(project_path + 'txt_output/MRSS/ds005%s_MRSS.json'
              % (d_path['type']), 'w') as file_out:
        json.dump(MRSS_dict, file_out)
# ga_ga_corr = np.dot(ga_map, ga_map.T) / npix
# ga_ps_corr = np.dot(ga_map, ps_map.T) / npix
# ga_cm_corr = np.dot(ga_map, cm_map.T) / npix
# ps_ps_corr = np.dot(ps_map, ps_map.T) / npix
# ps_cm_corr = np.dot(ps_map, cm_map.T) / npix
cm_cm_corr = np.dot(cm_map, cm_map.T) / npix
# fg_fg_corr = np.dot(fg_map, fg_map.T) / npix
# fg_cm_corr = np.dot(fg_map, cm_map.T) / npix
tt_tt_corr = np.dot(tt_map, tt_map.T) / npix

rpca = R_pca(tt_tt_corr, mu=1.0e6, lmbda=None)
L, S = rpca.fit(tol=1.0e-14, max_iter=20000, iter_print=100)
print(matrix_rank(L))
print(matrix_rank(S))

# plt.figure()
# plt.subplot(221)
# plt.imshow(tt_tt_corr, origin='lower')
# plt.colorbar()
# plt.subplot(222)
# plt.imshow(tt_tt_corr-L-S, origin='lower')
# plt.colorbar()
# plt.subplot(223)
# plt.imshow(L, origin='lower')
# plt.colorbar()
# plt.subplot(224)
# plt.imshow(S, origin='lower')
# plt.colorbar()
def llr_pvalue(X, llrf):
    # degrees of freedom of the model: rank of the design matrix minus the intercept
    df_model = float(la.matrix_rank(X) - 1)
    # stats.chisqprob was removed from scipy; chi2.sf is the upper-tail probability
    return stats.chi2.sf(llrf, df_model)
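# A hedged usage sketch (X_demo and the llrf value below are made up for
# illustration): with an intercept column, df_model = rank(X) - 1 counts the
# non-intercept regressors, and the p-value is the chi-square upper tail of
# the likelihood-ratio statistic.
import numpy as np

X_demo = np.column_stack([np.ones(50), np.random.randn(50, 2)])  # rank 3 -> df_model = 2
print(llr_pvalue(X_demo, llrf=7.38))  # ~0.025 for a chi-square with 2 df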
def LinApp_Solve(AA, BB, CC, DD, FF, GG, HH, JJ, KK, LL, MM, NN, Z0, Sylv):
    """
    This code takes Uhlig's original code and puts it in the form of a
    function.  This version outputs the policy function coefficients: PP,
    QQ and UU for X, and RR, SS and VV for Y.

    Inputs overview:
    The matrices of derivatives: AA - MM.
    The autoregression coefficient matrix NN from the law of motion for Z.
    Z0 is the Z-point about which the linearization is taken.  For
    linearizing about the steady state this is Zbar and normally Zbar = 0.
    Sylv is an indicator variable telling the program to use the built-in
    function sylvester() to solve for QQ and SS, if possible.  Default is
    to use Sylv=1.  This option is now disabled and we always set Sylv=1.

    Parameters
    ----------
    AA : array_like, dtype=float, shape=(ny, nx)
        The matrix represented above by :math:`A`. It is the matrix of
        derivatives of the Y equations with respect to :math:`X_t`
    BB : array_like, dtype=float, shape=(ny, nx)
        The matrix represented above by :math:`B`. It is the matrix of
        derivatives of the Y equations with respect to :math:`X_{t-1}`.
    CC : array_like, dtype=float, shape=(ny, ny)
        The matrix represented above by :math:`C`. It is the matrix of
        derivatives of the Y equations with respect to :math:`Y_t`
    DD : array_like, dtype=float, shape=(ny, nz)
        The matrix represented above by :math:`D`. It is the matrix of
        derivatives of the Y equations with respect to :math:`Z_t`
    FF : array_like, dtype=float, shape=(nx, nx)
        The matrix represented above by :math:`F`. It is the matrix of
        derivatives of the model's characterizing equations with respect
        to :math:`X_{t+1}`
    GG : array_like, dtype=float, shape=(nx, nx)
        The matrix represented above by :math:`G`. It is the matrix of
        derivatives of the model's characterizing equations with respect
        to :math:`X_t`
    HH : array_like, dtype=float, shape=(nx, nx)
        The matrix represented above by :math:`H`. It is the matrix of
        derivatives of the model's characterizing equations with respect
        to :math:`X_{t-1}`
    JJ : array_like, dtype=float, shape=(nx, ny)
        The matrix represented above by :math:`J`. It is the matrix of
        derivatives of the model's characterizing equations with respect
        to :math:`Y_{t+1}`
    KK : array_like, dtype=float, shape=(nx, ny)
        The matrix represented above by :math:`K`. It is the matrix of
        derivatives of the model's characterizing equations with respect
        to :math:`Y_t`
    LL : array_like, dtype=float, shape=(nx, nz)
        The matrix represented above by :math:`L`. It is the matrix of
        derivatives of the model's characterizing equations with respect
        to :math:`Z_{t+1}`
    MM : array_like, dtype=float, shape=(nx, nz)
        The matrix represented above by :math:`M`. It is the matrix of
        derivatives of the model's characterizing equations with respect
        to :math:`Z_t`
    NN : array_like, dtype=float, shape=(nz, nz)
        The autocorrelation matrix for the exogenous state vector z.
    Z0 : array, dtype=float, shape=(nz,)
        The Z-point about which the linearization is taken. For
        linearizing about the steady state this is Zbar and normally
        Zbar = 0.
    Sylv: binary, dtype=int
        An indicator variable telling the program to use the built-in
        function sylvester() to solve for QQ and SS, if possible.
        Default is to use Sylv=1.

    Returns
    -------
    P : 2D-array, dtype=float, shape=(nx, nx)
        The matrix :math:`P` in the law of motion for endogenous state
        variables described above.
    Q : 2D-array, dtype=float, shape=(nx, nz)
        The matrix :math:`Q` in the law of motion for exogenous state
        variables described above.
    R : 2D-array, dtype=float, shape=(ny, nx)
        The matrix :math:`R` in the law of motion for endogenous state
        variables described above.
    S : 2D-array, dtype=float, shape=(ny, nz)
        The matrix :math:`S` in the law of motion for exogenous state
        variables described above.

    References
    ----------
    .. [1] Uhlig, H. (1999): "A toolkit for analyzing nonlinear dynamic
       stochastic models easily," in Computational Methods for the Study
       of Dynamic Economies, ed. by R. Marimon, pp. 30-61. Oxford
       University Press.

    """
    # The coding for Uhlig's solution for QQ and SS gives incorrect results,
    # so we will use scipy's Sylvester equation solver regardless of the
    # value chosen for Sylv.

    # The original coding we did used the np.matrix form for our matrices so
    # we make sure to set our inputs to numpy matrices.
    AA = np.matrix(AA)
    BB = np.matrix(BB)
    CC = np.matrix(CC)
    DD = np.matrix(DD)
    FF = np.matrix(FF)
    GG = np.matrix(GG)
    HH = np.matrix(HH)
    JJ = np.matrix(JJ)
    KK = np.matrix(KK)
    LL = np.matrix(LL)
    MM = np.matrix(MM)
    NN = np.matrix(NN)
    Z0 = np.array(Z0)
    # Tolerance level to use
    TOL = .000001
    # Here we use matrices to get pertinent dimensions.
    nx = FF.shape[1]
    l_equ = CC.shape[0]
    ny = CC.shape[1]
    nz = min(NN.shape)
    # The following if and else blocks form the
    # Psi, Gamma, Theta, Xi, Delta mats
    if l_equ == 0:
        if CC.any():
            # This block makes sure you don't throw an error with an empty CC.
            CC_plus = la.pinv(CC)
            CC_0 = _nullSpaceBasis(CC.T)
        else:
            CC_plus = np.mat([])
            CC_0 = np.mat([])
        Psi_mat = FF
        Gamma_mat = -GG
        Theta_mat = -HH
        Xi_mat = np.mat(
            vstack((hstack(
                (Gamma_mat, Theta_mat)), hstack((eye(nx), zeros((nx, nx)))))))
        Delta_mat = np.mat(
            vstack((hstack((Psi_mat, zeros(
                (nx, nx)))), hstack((zeros((nx, nx)), eye(nx))))))
    else:
        CC_plus = la.pinv(CC)
        CC_0 = _nullSpaceBasis(CC.T)
        if l_equ != ny:
            Psi_mat = vstack((zeros((l_equ - ny, nx)), FF \
                            - dot(dot(JJ, CC_plus), AA)))
            Gamma_mat = vstack((dot(CC_0, AA), dot(dot(JJ, CC_plus), BB) \
                            - GG + dot(dot(KK, CC_plus), AA)))
            Theta_mat = vstack((dot(CC_0, BB), dot(dot(KK, CC_plus), BB) - HH))
        else:
            CC_inv = la.inv(CC)
            Psi_mat = FF - dot(JJ.dot(CC_inv), AA)
            Gamma_mat = dot(JJ.dot(CC_inv), BB) - GG + dot(dot(KK, CC_inv), AA)
            Theta_mat = dot(KK.dot(CC_inv), BB) - HH
        Xi_mat = vstack((hstack((Gamma_mat, Theta_mat)), \
                         hstack((eye(nx), zeros((nx, nx))))))
        Delta_mat = vstack((hstack((Psi_mat, np.mat(zeros((nx, nx))))),\
                            hstack((zeros((nx, nx)), eye(nx)))))
    # Now we need the generalized eigenvalues/vectors for Xi with respect to
    # Delta. That is eVals and eVecs below.
    eVals, eVecs = la.eig(Xi_mat, Delta_mat)
    if npla.matrix_rank(eVecs) < nx:
        print("Error: Xi is not diagonalizable, stopping...")
    # From here to line 158 we diagonalize Xi, form Lambda/Omega and find P.
    else:
        Xi_sortindex = np.argsort(abs(eVals))
        Xi_sortedVec = np.array([eVecs[:, i] for i in Xi_sortindex]).T
        Xi_sortval = eVals[Xi_sortindex]
        Xi_select = np.arange(0, nx)
        if np.imag(Xi_sortval[nx - 1]).any():
            if (abs(Xi_sortval[nx - 1] - sp.conj(Xi_sortval[nx])) < TOL):
                drop_index = 1
                cond_1 = (abs(np.imag(Xi_sortval[drop_index - 1])) > TOL)
                cond_2 = drop_index < nx
                while cond_1 and cond_2:
                    drop_index += 1
                if drop_index >= nx:
                    print("There is an error. Too many complex eigenvalues." +
                          " Quitting...")
                else:
                    print("Dropping the lowest real eigenvalue. Beware of" +
                          " sunspots!")
                    # np.append keeps Xi_select a flat index array (the
                    # original built a ragged array of two aranges)
                    Xi_select = np.append(np.arange(0, drop_index - 1),
                                          np.arange(drop_index, nx + 1))
        # Here Uhlig computes stuff if user chose "Manual roots" I skip it.
        if max(abs(Xi_sortval[Xi_select])) > 1 + TOL:
            print("It looks like we have unstable roots." +
                  " This might not work...")
This might not work...") if abs(max(abs(Xi_sortval[Xi_select])) - 1) < TOL: print("Check the model to make sure you have a unique steady" + " state we are having problems with convergence.") Lambda_mat = np.diag(Xi_sortval[Xi_select]) Omega_mat = Xi_sortedVec[nx:2 * nx, Xi_select] if npla.matrix_rank(Omega_mat) < nx: print("Omega matrix is not invertible, Can't solve for P; we" + " proceed with QZ-method instead.") #~~~~~~~~~ QZ-method codes from SOLVE_QZ ~~~~~~~~# Delta_up, Xi_up, UUU, VVV = la.qz(Delta_mat, Xi_mat, output='complex') UUU = UUU.T Xi_eigval = np.diag( np.diag(Xi_up) / np.maximum(np.diag(Delta_up), TOL)) Xi_sortindex = np.argsort(abs(np.diag(Xi_eigval))) Xi_sortval = Xi_eigval[Xi_sortindex, Xi_sortindex] Xi_select = np.arange(0, nx) stake = max(abs(Xi_sortval[Xi_select])) + TOL Delta_up, Xi_up, UUU, VVV = qzdiv(stake, Delta_up, Xi_up, UUU, VVV) #Check conditions from line 49-109 if np.imag(Xi_sortval[nx - 1]).any(): if (abs(Xi_sortval[nx - 1] - sp.conj(Xi_sortval[nx])) < TOL): print( "Problem: You have complex eigenvalues! And this means" + " PP matrix will contain complex numbers by this method." ) drop_index = 1 cond_1 = (abs(np.imag(Xi_sortval[drop_index - 1])) > TOL) cond_2 = drop_index < nx while cond_1 and cond_2: drop_index += 1 if drop_index >= nx: print("There is an error. Too many complex eigenvalues." + " Quitting...") else: print("Dropping the lowest real eigenvalue. Beware of" + " sunspots!") for i in range(drop_index, nx + 1): Delta_up, Xi_up, UUU, VVV = qzswitch( i, Delta_up, Xi_up, UUU, VVV) Xi_select1 = np.arange(0, drop_index - 1) Xi_select = np.append(Xi_select1, np.arange(drop_index, nx + 1)) if Xi_sortval[max(Xi_select)] < 1 - TOL: print('There are stable roots NOT used. Proceeding with the' + ' smallest root.') if max(abs(Xi_sortval[Xi_select])) > 1 + TOL: print( "It looks like we have unstable roots. This might not work..." ) if abs(max(abs(Xi_sortval[Xi_select])) - 1) < TOL: print("Check the model to make sure you have a unique steady" + " state we are having problems with convergence.") #End of checking conditions #Lambda_mat = np.diag(Xi_sortval[Xi_select]) # to help sol_out.m VVV = VVV.conj().T VVV_2_1 = VVV[nx:2 * nx, 0:nx] VVV_2_2 = VVV[nx:2 * nx, nx:2 * nx] UUU_2_1 = UUU[nx:2 * nx, 0:nx] VVV = VVV.conj().T if abs(la.det(UUU_2_1)) < TOL: print( "One necessary condition for computing P is NOT satisfied," + " but we proceed anyways...") if abs(la.det(VVV_2_1)) < TOL: print( "VVV_2_1 matrix, used to compute for P, is not invertible; we" + " are in trouble but we proceed anyways...") PP = np.matrix(la.solve(-VVV_2_1, VVV_2_2)) PP_imag = np.imag(PP) PP = np.real(PP) if (sum(sum(abs(PP_imag))) / sum(sum(abs(PP))) > .000001).any(): print( "A lot of P is complex. We will continue with the" + " real part and hope we don't lose too much information.") #~~~~~~~~~ End of QZ-method ~~~~~~~~~# #This follows the original uhlig.py file else: PP = dot(dot(Omega_mat, Lambda_mat), la.inv(Omega_mat)) PP_imag = np.imag(PP) PP = np.real(PP) if (sum(sum(abs(PP_imag))) / sum(sum(abs(PP))) > .000001).any(): print( "A lot of P is complex. We will continue with the" + " real part and hope we don't lose too much information.") # The if and else below make RR depending on our model's setup. if l_equ == 0: RR = zeros((0, nx)) #empty matrix else: RR = -dot(CC_plus, (dot(AA, PP) + BB)) # Now we use Sylvester equation solver to find QQ and SS matrices. 
    '''
    This code was written by Kerk Phillips in 2020
    and updated in July 2021.
    '''
    CCinv = npla.inv(CC)
    # if ny>0:
    #     PM = npla.inv(FF-np.matmul(np.matmul(JJ,CCinv), AA))
    #     if npla.matrix_rank(PM)< nx+ny:
    #         print("Sylvester equation solver condition is not satisfied")
    # else:
    #     PM = npla.inv(FF)
    #     if npla.matrix_rank(FF)< nx:
    #         print("Sylvester equation solver condition is not satisfied")
    if ny > 0:
        JCAP = np.matmul(np.matmul(JJ, CCinv), np.matmul(AA, PP))
        JCB = np.matmul(np.matmul(JJ, CCinv), BB)
        KCA = np.matmul(np.matmul(KK, CCinv), AA)
        KCD = np.matmul(np.matmul(KK, CCinv), DD)
        JCDN = np.matmul(np.matmul(JJ, CCinv), np.matmul(DD, NN))
        Dnew = FF.dot(PP) + GG - JCAP - JCB - KCA
        Fnew = FF - np.matmul(np.matmul(JJ, CCinv), AA)
        Gnew = NN
        Hnew = KCD - LL.dot(NN) + JCDN - MM
        # Cast the problem as a standard Sylvester equation
        # Asyl @ QQ + QQ @ Bsyl = Csyl and solve for QQ.
        Asyl = npla.inv(Dnew).dot(Fnew)
        Bsyl = npla.inv(Gnew)
        Csyl = np.matmul(npla.inv(Dnew).dot(Hnew), npla.inv(Gnew))
        QQ = la.solve_sylvester(Asyl, Bsyl, Csyl)
        SS = la.solve(-CC, (AA.dot(QQ) + DD))
    else:
        Dnew = FF.dot(PP) + GG
        Fnew = FF
        Gnew = NN
        Hnew = -LL.dot(NN) - MM
        Asyl = npla.inv(Dnew).dot(Fnew)
        Bsyl = npla.inv(Gnew)
        Csyl = np.matmul(npla.inv(Dnew).dot(Hnew), npla.inv(Gnew))
        QQ = la.solve_sylvester(Asyl, Bsyl, Csyl)
        SS = np.zeros((0, nz))  #empty matrix

    return np.array(PP), np.array(QQ), np.array(RR), np.array(SS)
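# A self-contained sketch (toy matrices, not model-specific) of the Sylvester
# step above: scipy.linalg.solve_sylvester(A, B, C) returns the X satisfying
# A @ X + X @ B = C, which is exactly how QQ is recovered from Asyl, Bsyl,
# and Csyl.
import numpy as np
from scipy.linalg import solve_sylvester

A_toy = np.array([[2.0, 0.0], [0.0, 3.0]])
B_toy = np.array([[1.0, 0.5], [0.0, 1.0]])
C_toy = np.ones((2, 2))
X_toy = solve_sylvester(A_toy, B_toy, C_toy)
print(np.allclose(A_toy @ X_toy + X_toy @ B_toy, C_toy))  # True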
def FactorAnalysis(c2, d, k_):
    # This function computes the Low Rank Diagonal Conditional Correlation
    #  INPUT:
    #   c2          :[matrix](n_ x n_) correlation matrix
    #   d           :[matrix](m_ x n_) matrix of constraints
    #   k_          :[scalar] rank of matrix beta
    #  OUTPUT:
    #   c2_LRD      :[matrix](n_ x n_) shrunk matrix of the form
    #                beta@beta.T + I - diag(beta@beta.T), where beta is an
    #                n_ x k_ matrix
    #   beta        :[matrix](n_ x k_) low rank matrix: n_ x k_
    #   iter        :[scalar] number of iterations
    #   constraint  :[scalar] boolean indicator, equal to 1 in case the
    #                constraint is satisfied, i.e. d@beta = 0

    ## Code
    CONDITIONAL = 1
    if npsum(abs(d.flatten())) == 0:
        CONDITIONAL = 0

    n_ = c2.shape[0]
    if k_ > n_ - matrix_rank(d):
        raise Warning('k_ has to be <= c2.shape[0] - matrix_rank(d)')

    NmaxIter = 1000
    eps1 = 1e-9
    eta = 0.01
    gamma = 0.1
    constraint = 0
    # initialize output
    c2_LRD = c2
    dist = 0.0  # the original "dist = zeros" assigned the function itself
    iter = 0
    # 0. Initialize
    Diag_lambda2, e = eig(c2)
    lambda2 = Diag_lambda2
    lambda2_ord, order = sort(lambda2)[::-1], argsort(lambda2)[::-1]
    lam = np.real(sqrt(lambda2_ord[:k_]))
    e_ord = e[:, order]
    beta = np.real(e_ord[:n_, :k_] @ np.diagflat(maximum(lam, eps1)))
    c = c2
    for j in range(NmaxIter):
        # 1. Conditional PC
        a = c - eye(n_) + np.diagflat(diag(beta @ beta.T))
        if CONDITIONAL == 1:
            lambda2, E = ConditionalPC(a, d)
            lambda2 = lambda2[:k_]
            E = E[:, :k_]
            lam = sqrt(lambda2)
        else:
            # if there aren't constraints: standard PC using the covariance matrix
            Diag_lambda2, e = eig(a)
            lambda2 = Diag_lambda2
            lambda2_ord, order = sort(lambda2)[::-1], argsort(lambda2)[::-1]
            e_ord = e[:, order]
            E = e_ord[:, :k_]
            lam = sqrt(lambda2_ord[:k_])
        # 2. loadings
        beta_new = E @ np.diagflat(maximum(lam, eps1))
        # 3. Rows length
        l_n = sqrt(npsum(beta_new**2, 1))
        # 4. Rows scaling
        beta_new[l_n > 1, :] = beta_new[l_n > 1, :] / tile(
            l_n[l_n > 1, np.newaxis] * (1 + gamma), (1, k_))
        # 5. reconstruction
        c = beta_new @ beta_new.T + eye(n_, n_) - diag(
            diag(beta_new @ beta_new.T))
        # 6. check for convergence
        distance = 1 / n_ * npsum(sqrt(npsum((beta_new - beta)**2, 1)))
        if distance <= eta:
            c2_LRD = c
            dist = distance
            iter = j
            beta = beta_new.copy()
            if d.shape == (1, 1):
                tol = npmax(abs(d * beta))
            else:
                tol = npmax(abs(d.dot(beta)))
            if tol < 1e-9:
                constraint = 1
            break
        else:
            beta = beta_new.copy()
    beta = np.real(beta)
    c2_LRD = np.real(c2_LRD)
    c2_LRD = (c2_LRD + c2_LRD.T) / 2
    return c2_LRD, beta, dist, iter, constraint
# --- Compute the SVD ---
U, S, VT = la.svd(G, full_matrices=True, compute_uv=True)
V = np.transpose(VT)

# Display semilog plot of singular values
fig0, ax0 = plt.subplots(1, 1)
ax0.semilogy(S, 'ko', mfc='None')
ax0.set_xlabel('i')
ax0.set_ylabel(r'$s_{i}$')
ax0.set_xticks(range(0, S.size+1, 5))
fig0.savefig('c3fshaw_sing.pdf')
plt.close()

# --- Plot the column of V corresponding to the smallest nonzero singular value ---
p = la.matrix_rank(G)
print('M =', M, 'and', 'N =', N)
print('System rank', 'p =', p, '\n')

fig1, ax1 = plt.subplots(1, 1)
ax1.step(theta, V[:, p-1], 'k-', where='mid')
ax1.set_xlabel(r'$\theta$ [rad]')
ax1.set_ylabel('Intensity')
ax1.set_xlim(-2, 2)
fig1.savefig('c3fV_18.pdf')
plt.close()

# --- Plot the column of V corresponding to the largest nonzero singular value ---
fig2, ax2 = plt.subplots(1, 1)
ax2.step(theta, V[:, 0], 'k-', where='mid')
from numpy import array from numpy.linalg import matrix_rank M0 = array([[0, 0], [0, 0]]) # [[0 0] # [0 0]] print(M0) mr0 = matrix_rank(M0) # 0 print(mr0) M1 = array([[1, 2], [1, 2]]) # [[1 2] # [1 2]] print(M1) mr1 = matrix_rank(M1) # 1 print(mr1) M2 = array([[1, 2], [3, 4]]) # [[1 2] # [3 4]] print(M2) mr2 = matrix_rank(M2) # 2 print(mr2)
def __contains__(self, x):
    '''If the column vector x lies in Col(A), __contains__ returns True.'''
    C = hstack((self.__A, x))
    return matrix_rank(C) == self.__rank
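# A minimal standalone illustration of the rank trick used above: a vector x
# lies in Col(A) exactly when appending it as an extra column leaves the rank
# unchanged.
import numpy as np
from numpy.linalg import matrix_rank

A_demo = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
in_col = np.array([[1.0], [2.0], [3.0]])   # = col1 + 2*col2, so it is in Col(A)
out_col = np.array([[1.0], [0.0], [0.0]])  # not a combination of the columns
print(matrix_rank(np.hstack((A_demo, in_col))) == matrix_rank(A_demo))   # True
print(matrix_rank(np.hstack((A_demo, out_col))) == matrix_rank(A_demo))  # False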
def HCRB(rho, drho, W, eps=1e-8):
    """
    Calculation of the Holevo Cramer-Rao bound (HCRB) via a semidefinite
    program (SDP).

    Parameters
    ----------
    > **rho:** `matrix`
        -- Density matrix.

    > **drho:** `list`
        -- Derivatives of the density matrix with respect to the unknown
        parameters to be estimated. For example, drho[0] is the derivative
        with respect to the first parameter.

    > **W:** `matrix`
        -- Weight matrix.

    > **eps:** `float`
        -- Machine epsilon.

    Returns
    ----------
    **HCRB:** `float`
        -- The value of the Holevo Cramer-Rao bound.
    """
    if type(drho) != list:
        raise TypeError("Please make sure drho is a list!")

    if len(drho) == 1:
        print(
            "In the single-parameter scenario, the HCRB is equivalent to the"
            " QFI. This function will return the value of the QFI."
        )
        f = QFIM(rho, drho, eps=eps)
        return f
    elif matrix_rank(W) == 1:
        print(
            "For a rank-one weight matrix, the HCRB is equivalent to the"
            " QFIM. This function will return the value of Tr(WF^{-1})."
        )
        F = QFIM(rho, drho, eps=eps)
        return np.trace(np.dot(W, np.linalg.pinv(F)))
    else:
        dim = len(rho)
        num = dim * dim
        para_num = len(drho)

        Lambda = [np.identity(dim)] + suN_generator(dim)
        # normalize each basis element (a plain Python list cannot be
        # divided elementwise, as the original "Lambda / np.sqrt(2)" tried)
        Lambda = [item / np.sqrt(2) for item in Lambda]
        vec_drho = [[] for i in range(para_num)]
        for pi in range(para_num):
            vec_drho[pi] = np.array([
                np.real(np.trace(np.dot(drho[pi], Lambda[i])))
                for i in range(len(Lambda))
            ])

        S = np.zeros((num, num), dtype=np.complex128)
        for a in range(num):
            for b in range(num):
                S[a][b] = np.trace(np.dot(Lambda[a], np.dot(Lambda[b], rho)))

        accu = len(str(int(1 / eps))) - 1
        lu, d, perm = sp.linalg.ldl(S.round(accu))
        R = np.dot(lu, sp.linalg.sqrtm(d)).conj().T
        # ============ optimization variables ================
        V = cp.Variable((para_num, para_num))
        X = cp.Variable((num, para_num))
        # ================ add constraints ===================
        constraints = [
            cp.bmat([[V, X.T @ R.conj().T], [R @ X, np.identity(num)]]) >> 0
        ]
        for i in range(para_num):
            for j in range(para_num):
                if i == j:
                    constraints += [X[:, i].T @ vec_drho[j] == 1]
                else:
                    constraints += [X[:, i].T @ vec_drho[j] == 0]

        prob = cp.Problem(cp.Minimize(cp.trace(W @ V)), constraints)
        prob.solve()

        return prob.value
print(pesos.shape)
print(np.matmul(inputs, pesos.T))

#%% Exercise 25 - Challenge
# Use the two matrices below to solve exercises 25 through 28
a = np.array([[3, 2, -1], [6, 4, -2], [5, 0, 3]])
b = np.array(([[2, 3, 2], [3, -4, -2], [4, -1, 1]]))

# In linear algebra, the rank of a matrix A is the dimension of the vector space generated (or
# spanned) by its columns. This corresponds to the maximum number of linearly independent columns
# of A, which in turn is identical to the dimension of the space spanned by its rows. Rank is
# therefore a measure of the "non-degeneracy" of the system of linear equations and of the linear
# transformation encoded by A. There are several equivalent definitions of rank, and the rank of a
# matrix is one of its most fundamental characteristics.
linalg.matrix_rank(a)

#%% Exercise 26 - Compute a * b
np.dot(a, b)

#%% Exercise 27 - What is the second eigenvector of B?
eig_vals, eig_vecs = linalg.eig(b)
eig_vecs[:, 1]

#%% Exercise 28 - Solve Bx = b where b = [14, -1, 11]
_b = np.array([14, -1, 11])
linalg.solve(b, _b)

#%% Exercise 29 - Compute the inverse of the matrix below
a_matriz = [[3,2,1],\
    [2,-1,0],\
def t_stat(data_4d, convolved, c=[0, 1]):
    """
    Return four values: the estimated betas, t-value, degrees of freedom,
    and p-value for the given t-value

    Parameters
    ----------
    data_4d: numpy array of 4 dimensions
        The image data of one subject
    convolved: numpy array of 1 dimension
        The convolved time course
    c: numpy array of 1 dimension
        The contrast vector of weights for the beta vector. Default is
        [0, 1], which corresponds to beta_1

    Note that the fourth dimension of `data_4d` (time or the number of
    volumes) must be the same as the length of `convolved`.

    Returns
    -------
    beta: estimated beta values

    t: numpy array of 1 dimension
        t-value of the betas

    df: int
        degrees of freedom

    p: numpy array of 1 dimension
        p-value corresponding to the t-value and degrees of freedom
    """
    # Make sure y, X, c are all arrays
    beta, X = glm(data_4d, convolved)
    c = np.atleast_2d(c).T  # As column vector
    # Calculate the parameters - b hat
    beta = np.reshape(beta, (-1, beta.shape[-1])).T
    fitted = X.dot(beta)
    # Residual error
    y = np.reshape(data_4d, (-1, data_4d.shape[-1]))
    errors = y.T - fitted
    # Residual sum of squares
    RSS = (errors**2).sum(axis=0)
    df = X.shape[0] - npl.matrix_rank(X)
    # Mean residual sum of squares
    MRSS = RSS / df
    # calculate bottom half of t statistic
    SE = np.sqrt(MRSS * c.T.dot(npl.pinv(X.T.dot(X)).dot(c)))
    zeros = np.where(SE == 0)
    SE[zeros] = 1
    t = c.T.dot(beta) / SE
    t[:, zeros] = 0
    # Get p value for t value using the cumulative distribution function
    # (CDF) of the t distribution
    ltp = t_dist.cdf(abs(t), df)  # lower tail p
    p = 1 - ltp  # upper tail p

    return beta, t, df, p
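# A small self-contained sketch of the degrees-of-freedom step above: df is
# n minus the rank of the design matrix, and the rank (not the column count)
# is what matters when columns are collinear.
import numpy as np
import numpy.linalg as npl

n = 20
X_demo = np.column_stack([np.ones(n), np.arange(n), 2.0 * np.arange(n)])  # third column duplicates the second
print(X_demo.shape[1], npl.matrix_rank(X_demo))  # 3 columns, rank 2
print(n - npl.matrix_rank(X_demo))               # df = 18, not 17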
def full_rank(M): return matrix_rank(M) == min(M.shape)
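# Hypothetical quick check for the helper above: a matrix has full rank when
# its rank equals its smaller dimension.
import numpy as np

print(full_rank(np.array([[1, 0], [0, 1], [1, 1]])))  # True: rank 2 == min(3, 2)
print(full_rank(np.array([[1, 2], [2, 4]])))          # False: rank 1 < 2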
import numpy as np
import numpy.linalg as LA

A = np.arange(1, 17).reshape(4, 4)
B = np.eye(4, 4)
print("Determinant of A:", LA.det(A))
print("Rank of A:", LA.matrix_rank(A))
print("Transpose of A:", A.transpose())
# A itself is singular (rank 2), so we invert A + 10B instead
print("Inverse of A + 10B:", LA.inv(A + 10 * B))
print("A squared:\n", A.dot(A))
print("Product of A and B:", A.dot(B))
print("Horizontal concatenation of A and B:", np.c_[A, B])
print("Vertical concatenation of A and B:", np.r_[A, B])
print("A1 (top-left 2x2 block of A):", A[0:2, 0:2])
def solvePQRS(AA=None, BB=None, CC=None, DD=None, FF=None, GG=None, HH=None,
              JJ=None, KK=None, LL=None, MM=None, NN=None):
    """
    This function mimics the behavior of Harald Uhlig's solve.m and
    calc_qrs.m files in Uhlig's toolkit.

    In order to use this function, the user must have log-linearized the
    model they are dealing with to be in the following form (assume that y
    corresponds to the model's "jump variables", z represents the exogenous
    state variables and x is for endogenous state variables. nx, ny, nz
    correspond to the number of variables in each category.)

    The inputs to this function are the matrices found in the following
    equations.

    .. math::

        Ax_t + Bx_{t-1} + Cy_t + Dz_t = 0

        E\{Fx_{t+1} + Gx_t + Hx_{t-1} + Jy_{t+1} + Ky_t + Lz_{t+1}
        + Mz_t\} = 0

    The purpose of this function is to find the recursive equilibrium law
    of motion defined by the following equations.

    .. math::

        X_t = PX_{t-1} + Qz_t

        Y_t = RX_{t-1} + Sz_t

    Following the outline given in Uhlig (1999), we solve for :math:`P` and
    :math:`Q` using the following set of equations:

    .. math::

        FP^2 + GP + H = 0

        FQN + (FP + G)Q + (LN + M) = 0

    Once :math:`P` and :math:`Q` are known, one can solve for :math:`R` and
    :math:`S` using the following equations:

    .. math::

        R = -C^{-1}(AP + B)

        S = -C^{-1}(AQ + D)

    Parameters
    ----------
    AA : array_like, dtype=float, shape=(ny, nx)
        The matrix represented above by :math:`A`. It is the matrix of
        derivatives of the Y equations with respect to :math:`X_t`
    BB : array_like, dtype=float, shape=(ny, nx)
        The matrix represented above by :math:`B`. It is the matrix of
        derivatives of the Y equations with respect to :math:`X_{t-1}`.
    CC : array_like, dtype=float, shape=(ny, ny)
        The matrix represented above by :math:`C`. It is the matrix of
        derivatives of the Y equations with respect to :math:`Y_t`
    DD : array_like, dtype=float, shape=(ny, nz)
        The matrix represented above by :math:`D`. It is the matrix of
        derivatives of the Y equations with respect to :math:`Z_t`
    FF : array_like, dtype=float, shape=(nx, nx)
        The matrix represented above by :math:`F`. It is the matrix of
        derivatives of the model's characterizing equations with respect
        to :math:`X_{t+1}`
    GG : array_like, dtype=float, shape=(nx, nx)
        The matrix represented above by :math:`G`. It is the matrix of
        derivatives of the model's characterizing equations with respect
        to :math:`X_t`
    HH : array_like, dtype=float, shape=(nx, nx)
        The matrix represented above by :math:`H`. It is the matrix of
        derivatives of the model's characterizing equations with respect
        to :math:`X_{t-1}`
    JJ : array_like, dtype=float, shape=(nx, ny)
        The matrix represented above by :math:`J`. It is the matrix of
        derivatives of the model's characterizing equations with respect
        to :math:`Y_{t+1}`
    KK : array_like, dtype=float, shape=(nx, ny)
        The matrix represented above by :math:`K`. It is the matrix of
        derivatives of the model's characterizing equations with respect
        to :math:`Y_t`
    LL : array_like, dtype=float, shape=(nx, nz)
        The matrix represented above by :math:`L`. It is the matrix of
        derivatives of the model's characterizing equations with respect
        to :math:`Z_{t+1}`
    MM : array_like, dtype=float, shape=(nx, nz)
        The matrix represented above by :math:`M`. It is the matrix of
        derivatives of the model's characterizing equations with respect
        to :math:`Z_t`
    NN : array_like, dtype=float, shape=(nz, nz)
        The autocorrelation matrix for the exogenous state vector z.

    Returns
    -------
    P : array_like, dtype=float, shape=(nx, nx)
        The matrix :math:`P` in the law of motion for endogenous state
        variables described above.
    Q : array_like, dtype=float, shape=(nx, nz)
        The matrix :math:`Q` in the law of motion for endogenous state
        variables described above.
    R : array_like, dtype=float, shape=(ny, nx)
        The matrix :math:`R` in the law of motion for the jump variables
        described above.
    S : array_like, dtype=float, shape=(ny, nz)
        The matrix :math:`S` in the law of motion for the jump variables
        described above.

    References
    ----------
    .. [1] Uhlig, H. (1999): "A toolkit for analyzing nonlinear dynamic
       stochastic models easily," in Computational Methods for the Study
       of Dynamic Economies, ed. by R. Marimon, pp. 30-61. Oxford
       University Press.

    """
    # The original coding we did used the np.matrix form for our matrices so
    # we make sure to set our inputs to numpy matrices.
    AA = np.matrix(AA)
    BB = np.matrix(BB)
    CC = np.matrix(CC)
    DD = np.matrix(DD)
    FF = np.matrix(FF)
    GG = np.matrix(GG)
    HH = np.matrix(HH)
    JJ = np.matrix(JJ)
    KK = np.matrix(KK)
    LL = np.matrix(LL)
    MM = np.matrix(MM)
    NN = np.matrix(NN)

    # Tolerance level to use
    TOL = .000001

    # Here we use matrices to get pertinent dimensions.
    nx = FF.shape[1]
    l_equ = CC.shape[0]
    ny = CC.shape[1]
    nz = LL.shape[1]
    k_exog = min(NN.shape)

    # The following if and else blocks form the
    # Psi, Gamma, Theta, Xi, Delta mats
    if l_equ == 0:
        if CC.any():
            # This block makes sure you don't throw an error with an empty CC.
            CC_plus = la.pinv(CC)
            CC_0 = _nullSpaceBasis(CC.T)
        else:
            CC_plus = np.mat([])
            CC_0 = np.mat([])
        Psi_mat = FF
        Gamma_mat = -GG
        Theta_mat = -HH
        Xi_mat = np.mat(vstack((hstack((Gamma_mat, Theta_mat)),
                                hstack((eye(nx), zeros((nx, nx)))))))
        Delta_mat = np.mat(vstack((hstack((Psi_mat, zeros((nx, nx)))),
                                   hstack((zeros((nx, nx)), eye(nx))))))
    else:
        CC_plus = la.pinv(CC)
        CC_0 = _nullSpaceBasis(CC.T)
        Psi_mat = vstack((zeros((l_equ - ny, nx)),
                          FF - dot(dot(JJ, CC_plus), AA)))
        if CC_0.size == 0:
            # This block makes sure you don't throw an error with an empty CC.
            Gamma_mat = vstack((dot(CC_0, AA), dot(dot(JJ, CC_plus), BB) -
                                GG + dot(dot(KK, CC_plus), AA)))
            # dot(CC_0, BB) here; the original had dot(CC_0, AA), which does
            # not match Uhlig's Theta (compare LinApp_Solve above)
            Theta_mat = vstack((dot(CC_0, BB), dot(dot(KK, CC_plus), BB) - HH))
        else:
            Gamma_mat = dot(dot(JJ, CC_plus), BB) - GG +\
                dot(dot(KK, CC_plus), AA)
            Theta_mat = dot(dot(KK, CC_plus), BB) - HH
        Xi_mat = vstack((hstack((Gamma_mat, Theta_mat)),
                         hstack((eye(nx), zeros((nx, nx))))))
        Delta_mat = vstack((hstack((Psi_mat, np.mat(zeros((nx, nx))))),
                            hstack((zeros((nx, nx)), eye(nx)))))

    # Now we need the generalized eigenvalues/vectors for Xi with respect to
    # Delta. That is eVals and eVecs below.
    eVals, eVecs = la.eig(Xi_mat, Delta_mat)
    if npla.matrix_rank(eVecs) < nx:
        print('Error: Xi is not diagonalizable, stopping')

    # From here to line 158 we diagonalize Xi, form Lambda/Omega and find P.
    else:
        Xi_sortindex = np.argsort(abs(eVals))
        Xi_sortedVec = np.array([eVecs[:, i] for i in Xi_sortindex]).T
        # sort the eigenvalues themselves, not just their absolute values,
        # so the complex-eigenvalue checks below are meaningful
        Xi_sortval = eVals[Xi_sortindex]
        Xi_select = np.arange(0, nx)
        if np.imag(Xi_sortval[nx - 1]).any():
            if (abs(Xi_sortval[nx - 1] - sp.conj(Xi_sortval[nx])) < TOL):
                drop_index = 1
                cond_1 = (abs(np.imag(Xi_sortval[drop_index - 1])) > TOL)
                cond_2 = drop_index < nx
                while cond_1 and cond_2:
                    drop_index += 1
                if drop_index >= nx:
                    print('There is an error. Too many complex eigenvalues.' +
                          ' Quitting')
                else:
                    print('Dropping the lowest real eigenvalue. Beware of' +
                          ' sunspots')
                    # np.append keeps Xi_select a flat index array
                    Xi_select = np.append(np.arange(0, drop_index - 1),
                                          np.arange(drop_index + 1, nx))
        if max(abs(Xi_sortval[Xi_select])) > 1 + TOL:
            print('It looks like we have unstable roots.' +
                  ' This might not work')
        if abs(max(abs(Xi_sortval[Xi_select])) - 1) < TOL:
            print('Check the model to make sure you have a unique steady' +
                  ' state; we are having problems with convergence.')
        Lambda_mat = np.diag(Xi_sortval[Xi_select])
        Omega_mat = Xi_sortedVec[nx:2 * nx, Xi_select]
        if npla.matrix_rank(Omega_mat) < nx:
            print("Omega matrix is not invertible, can't solve for P")
        else:
            PP = dot(dot(Omega_mat, Lambda_mat), la.inv(Omega_mat))
            PP_imag = np.imag(PP)
            PP = np.real(PP)
            if (sum(sum(abs(PP_imag))) / sum(sum(abs(PP))) > .000001).any():
                print("A lot of P is complex. We will continue with the" +
                      " real part and hope we don't lose too much" +
                      " information")

    # The code from here to the end was from the Uhlig file calc_qrs.m.
    # The if and else below make RR and VV depending on our model's setup.
    if l_equ == 0:
        RR = zeros((0, nx))
        VV = hstack((kron(NN.T, FF) + kron(eye(k_exog), (dot(FF, PP) + GG)),
                     kron(NN.T, JJ) + kron(eye(k_exog), KK)))
    else:
        RR = - dot(CC_plus, (dot(AA, PP) + BB))
        VV = sp.vstack((hstack((kron(eye(k_exog), AA),
                                kron(eye(k_exog), CC))),
                        hstack((kron(NN.T, FF) +
                                kron(eye(k_exog),
                                     dot(FF, PP) + dot(JJ, RR) + GG),
                                kron(NN.T, JJ) + kron(eye(k_exog), KK)))))

    # Now we use LL, NN, RR, VV to get the QQ, RR, SS matrices.
    if (npla.matrix_rank(VV) < k_exog * (nx + ny)):
        print("Sorry but V is not invertible. Can't solve for Q and S")
    else:
        LL = sp.mat(LL)
        NN = sp.mat(NN)
        LLNN_plus_MM = dot(LL, NN) + MM

        if DD.any():
            # impvec = vstack([DD.T, np.reshape(LLNN_plus_MM,
            #                                   (nx * k_exog, 1), 'F')])
            DD = np.array(DD).T.ravel().reshape((-1, 1))
            LLNN_plus_MM = np.array(LLNN_plus_MM).reshape((-1, 1))
            impvec = np.vstack((DD, LLNN_plus_MM))
        else:
            impvec = LLNN_plus_MM.flatten()

        QQSS_vec = np.matrix(la.solve(-VV, impvec))

        if (max(abs(QQSS_vec)) == sp.inf).any():
            print("We have issues with Q and S. Entries are undefined." +
                  " Probably because V is not invertible.")

        QQ = np.reshape(np.matrix(QQSS_vec[0:nx * k_exog, 0]),
                        (nx, k_exog), 'F')
        SS = np.reshape(QQSS_vec[(nx * k_exog):((nx + ny) * k_exog), 0],
                        (ny, k_exog), 'F')

        # Build WW - we don't use this, but Uhlig defines it so we do too.
        WW = sp.vstack((
            hstack((eye(nx), zeros((nx, k_exog)))),
            hstack((dot(RR, la.pinv(PP)),
                    (SS - dot(dot(RR, la.pinv(PP)), QQ)))),
            hstack((zeros((k_exog, nx)), eye(k_exog)))))

    return PP, QQ, RR, SS
def expectMax(self, X, init='mc', update='mcw', maxIter=10, convEps=0.01,
              verbose=False):
    '''
    Performs maximum likelihood to estimate the distribution parameters
    mu, Sigma, and w.

    PARAMETERS
    ----------
    X {N,D}: matrix of training data

    RETURNS
    -------
    lnP_history: learning curve
    '''
    # debug: save training data
    # self.X = X

    N, dim = X.shape
    if dim != self.D:
        raise ValueError(
            'GMM: training data dimensions not compatible with GMM')

    if 'm' in init or 'c' in init:
        if N >= self.M:
            # k-means requires more observations than means to run
            # (the keyword is n_clusters in current scikit-learn; older
            # versions used k)
            clusters = cluster.KMeans(n_clusters=self.M).fit(X)

    # initialize distribution parameters
    if 'm' in init:
        if N >= self.M:
            self._setMu(clusters.cluster_centers_)
        else:
            # set means randomly from data
            iRandObs = np.random.randint(N, size=(self.M, self.D))
            iCol = np.tile(np.arange(self.D), (self.M, 1))
            self._setMu(X[iRandObs, iCol])

    if 'c' in init:
        # if more than one observation and not enough for kmeans,
        # reinitialize with covariance of data
        if N > 1 and N < self.M:
            # each row represents a variable, each column an observation
            cov = np.cov(X.T)

            # corner case: for univariate gaussian, turn into array
            if self.D == 1:
                cov = np.asarray([[cov]])

            # add constant along diagonal to rank-deficient covariance
            # matrices
            # taken from GMM library netlab3.3 (matlab code)
            # GMM_WIDTH = 1.0 is arbitrary
            if nlinalg.matrix_rank(cov) < self.D:
                cov += 1.0 * np.eye(self.D)
            self._setSigma(np.tile(cov, (self.M, 1, 1)))

        elif N >= self.M:
            # get cluster labels for training data
            labels = np.asarray(clusters.labels_, dtype=int)
            cov = np.zeros([self.M, self.D, self.D])

            # for each cluster
            for l in range(0, self.M):
                # Pick out data points belonging to this centre
                c = X[labels == l]

                if len(c) > 0:
                    diffs = c - self._mu[l, :]
                    cov[l, :, :] = np.dot(diffs.T, diffs) / len(c)
                else:
                    # at this point self.M number of mixtures is probably
                    # too complex a model for the data; continue anyway
                    # each row represents a variable, each column an
                    # observation
                    cov[l, :, :] = np.cov(X.T)

                # corner case: for univariate gaussian, turn into array
                if self.D == 1:
                    cov = np.asarray([[cov]])

                # add constant along diagonal to rank-deficient covariance
                # matrices
                # taken from GMM library netlab3.3 (matlab code)
                # GMM_WIDTH = 1.0 is arbitrary
                if nlinalg.matrix_rank(cov[l, :, :]) < self.D:
                    cov[l, :, :] += 1.0 * np.eye(self.D)

            self._setSigma(cov)

    # Expectation Maximization
    lnP_history = []
    for i in range(maxIter):
        # Expectation step
        lnP, posteriors = self._expect(X, verbose)
        lnP_history.append(lnP.sum())
        if verbose:
            print("EM iteration %d, lnP = %f" % (i, lnP_history[-1]))

        if i > 0 and abs(lnP_history[-1] - lnP_history[-2]) < convEps:
            # if little improvement, stop training
            break

        # Maximization Step
        self._maximize(X, posteriors, update)

        # only keep covariance diagonals
        if self.covType == 'diag':
            self._Sigma *= np.eye(self.D)

    if verbose:
        if i < maxIter - 1:
            print("EM converged in %d steps" % len(lnP_history))
        else:
            print("EM did not converge (maxIter reached)")

    return lnP_history
def calc_compression_ratio(square_mat, k): r = matrix_rank(square_mat) # r=rank n = len(square_mat) return ((2 * k * n) + k) / ((2 * n * r) + n)
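# A quick usage sketch with made-up numbers: a rank-k SVD approximation of an
# n x n matrix stores 2*k*n + k values, which the function above compares
# against the 2*n*r + n values charged to the exact rank-r factorization.
import numpy as np
from numpy.linalg import matrix_rank

mat = np.random.rand(100, 100)  # almost surely full rank
print(matrix_rank(mat))                    # 100
print(calc_compression_ratio(mat, k=10))   # 2010 / 20100 = 0.1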
# R = r[1]
#
# print('Q:\n',Q)
# print("R:\n",R)
# print(np.matmul(Q, Q.T))  # identity matrix

r = la.svd(m)
print(r[0])
print(r[1])
print(r[2])
print(np.matmul(r[0], r[0].T))

print(la.norm(m, 2))
print(la.cond(m, 2))  # product of the norm of m and the norm of its inverse
"""
Vector norms:
    0-norm: the number of nonzero entries.
    1-norm: the sum of the absolute values of the entries.
    2-norm: the square root of the sum of squared entries.
Matrix norms in numpy:
    0-norm: not defined.
    1-norm: the maximum absolute column sum.
    2-norm: the square root of the largest eigenvalue of M @ M.T.
"""
print(la.det(m))
print(la.matrix_rank(m))
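# A small numeric check of the norm facts above (m_demo stands in for the m
# used earlier): the matrix 2-norm is the square root of the largest
# eigenvalue of M @ M.T, and the matrix 1-norm is the maximum absolute
# column sum.
import numpy as np
import numpy.linalg as la

m_demo = np.array([[1.0, 2.0], [3.0, 4.0]])
print(np.isclose(la.norm(m_demo, 2),
                 np.sqrt(max(la.eigvals(m_demo @ m_demo.T).real))))  # True
print(np.isclose(la.norm(m_demo, 1), abs(m_demo).sum(axis=0).max()))  # True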