def find_factors(idat, odat, k=None):
    """
    A routine to compute the main predictors (linear combinations of
    coordinates) in idat to predict odat.

    *Parameters*
        idat: d x n data matrix, with n measurements, each of dimension d
        odat: q x n data matrix, with n measurements, each of dimension q

    *Returns*
        **Depending on whether or not** *k* **is provided, the returned value
        is different**

        * if k is given, compute the first k regressors and return an
          orthogonal matrix that contains the regressors in its columns,
          i.e. reg[:, 0] is the first regressor
        * if k is not given or None, return a vector v whose k-th entry gives
          the fraction of the total predictable variance that can be explained
          using only the first k regressors.

    **NOTE**
        #. idat and odat must have zero mean
        #. To interpret the regressors, it is advisable that the columns of
           idat have the same variance
    """
    # transform into z-scores
    u, s, v = svd(idat, full_matrices=False)
    su = dot(diag(1. / s), u.T)
    z = dot(su, idat)
    # ! Note that the covariance of z is *NOT* 1, but 1/n; z*z.T = 1 !
    # least-squares regression:
    A = dot(odat, pinv(z))
    uA, sigma_A, vA = svd(A, full_matrices=False)
    if k is None:
        vk = cumsum(sigma_A**2) / sum(sigma_A**2)
        return vk
    else:
        # choose k predictors
        sigma_A1 = sigma_A.copy()
        sigma_A1[k:] = 0
        A1 = reduce(dot, [uA, diag(sigma_A1), vA])
        B = dot(A1, su)
        uB, sigma_B, vB = svd(B, full_matrices=False)
        regs = vB[:k, :].T
        return regs
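# --- Usage sketch for find_factors (added example, not part of the original
# code). Assumes the same flat namespace as above (e.g. `from pylab import *`
# providing svd, dot, randn, ...). Builds zero-mean data where odat depends on
# only two directions of idat, so two regressors should already explain
# essentially all of the predictable variance.
def _demo_find_factors():
    idat = randn(5, 200)
    idat -= idat.mean(axis=1)[:, None]      # zero mean, as required
    mixing = randn(3, 2)
    odat = dot(mixing, idat[:2, :])         # odat depends on 2 directions only
    odat -= odat.mean(axis=1)[:, None]
    v = find_factors(idat, odat)            # fraction of variance per k
    regs = find_factors(idat, odat, k=2)    # first two regressors, in columns
    print v[:3], regs.shape                 # v[1] should be close to 1; (5, 2)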
def pc_pm_std(data, ndim):
    """
    This is a helper function. It returns the direction of the ndim-th
    principal axis of the data, scaled by the standard deviation of the
    scores along that axis.

    Parameters:
    -----------
    data : `array` (*n*-by-*d*)
        the data on which the principal component analysis is performed.
    ndim : `integer`
        the (zero-based) index of the principal axis to use, i.e. to compute
        the first principal component, ndim=0

    Returns:
    --------
    std_pc : `array` (1-by-*d*)
        the vector that points in the direction of the *ndim*-th principal
        axis, and has the length of the standard deviation of the scores
        along this axis.
    """
    u, s, v = svd(data.T, full_matrices=False)
    direction = u[:, ndim:ndim + 1]
    scale = std(dot(direction.T, data.T))
    return scale * direction.T
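# --- Usage sketch for pc_pm_std (added example). Assumes a pylab-style flat
# namespace (randn, array, svd, std, dot). Draws anisotropic 2D samples whose
# major axis is the x-axis, so the first principal direction should come out
# close to (+-1, 0) with a scale near 3.
def _demo_pc_pm_std():
    data = randn(1000, 2) * array([3.0, 0.5])   # n-by-d, elongated along x
    data -= data.mean(axis=0)
    print pc_pm_std(data, 0)                    # roughly [[+-3, 0]]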
def homog2D(xPrime, x): """ Compute the 3x3 homography matrix mapping a set of N 2D homogeneous points (3xN) to another set (3xN) """ numPoints = xPrime.shape[1] assert numPoints >= 4 A = None for i in range(0, numPoints): xiPrime = xPrime[:, i] xi = x[:, i] Ai_row0 = pl.concatenate((pl.zeros(3), -xiPrime[2] * xi, xiPrime[1] * xi)) Ai_row1 = pl.concatenate((xiPrime[2] * xi, pl.zeros(3), -xiPrime[0] * xi)) Ai = pl.row_stack((Ai_row0, Ai_row1)) if A is None: A = Ai else: A = pl.vstack((A, Ai)) U, S, V = pl.svd(A) V = V.T h = V[:, -1] H = pl.reshape(h, (3, 3)) return H
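# --- Usage sketch for homog2D (added example). Assumes `import pylab as pl`,
# as in the functions above. Maps four homogeneous 2D points through a known
# homography and checks that homog2D recovers it up to scale and sign (the
# DLT result has unit Frobenius norm, so normalize before comparing).
def _demo_homog2D():
    H_true = pl.array([[1.0, 0.2, 3.0],
                       [-0.1, 0.9, 5.0],
                       [0.001, 0.002, 1.0]])
    x = pl.vstack([pl.rand(2, 4) * 10.0, pl.ones((1, 4))])  # 3x4 homogeneous
    xPrime = H_true.dot(x)
    H_est = homog2D(xPrime, x)
    if H_est[2, 2] < 0:                                     # fix sign ambiguity
        H_est = -H_est
    print H_est / pl.norm(H_est) - H_true / pl.norm(H_true)  # ~ zeros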
def reduceDim(fullmat, n=1):
    """
    reduces the dimension of a d x d - matrix to a (d-n)x(d-n) matrix,
    keeping the largest singular values unchanged.
    """
    u, s, v = svd(fullmat)
    return dot(u[:-n, :-n], dot(diag(s[:-n]), v[:-n, :-n]))
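# --- Usage sketch for reduceDim (added example). Assumes a pylab-style flat
# namespace (randn, svd). Note that the truncation above also clips rows and
# columns of u and v, so the reduced matrix only approximately retains the
# leading singular values; this mainly shows the shapes involved.
def _demo_reduceDim():
    m = randn(5, 5)
    small = reduceDim(m, n=2)
    print small.shape                                       # (3, 3)
    print svd(m, compute_uv=False)[:3], svd(small, compute_uv=False)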
def homog3D(points2d, points3d):
    """
    Compute a matrix relating homogeneous 3D points (4xN) to homogeneous
    2D points (3xN)

    Not sure why anyone would do this. Note that the returned transformation
    is *NOT* an isometry. But it's here... so deal with it.
    """
    numPoints = points2d.shape[1]
    assert numPoints >= 6  # a 3x4 DLT needs 2N >= 11 constraints, i.e. 6 points
    A = None
    for i in range(0, numPoints):
        xiPrime = points2d[:, i]
        xi = points3d[:, i]
        Ai_row0 = pl.concatenate((pl.zeros(4), -xiPrime[2] * xi, xiPrime[1] * xi))
        Ai_row1 = pl.concatenate((xiPrime[2] * xi, pl.zeros(4), -xiPrime[0] * xi))
        Ai = pl.row_stack((Ai_row0, Ai_row1))
        if A is None:
            A = Ai
        else:
            A = pl.vstack((A, Ai))
    U, S, V = pl.svd(A)
    V = V.T
    h = V[:, -1]
    P = pl.reshape(h, (3, 4))
    return P
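# --- Usage sketch for homog3D (added example). Assumes `import pylab as pl`.
# Projects six random 3D points through a known 3x4 camera matrix and checks
# that homog3D recovers it up to scale and sign.
def _demo_homog3D():
    P_true = pl.rand(3, 4)
    X = pl.vstack([pl.rand(3, 6) * 10.0, pl.ones((1, 6))])   # 4x6 homogeneous
    x = P_true.dot(X)                                        # 3x6 projections
    P_est = homog3D(x, X)
    if P_est[0, 0] * P_true[0, 0] < 0:                       # fix sign ambiguity
        P_est = -P_est
    print P_est / pl.norm(P_est) - P_true / pl.norm(P_true)  # ~ zeros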
def matDist(mat1, mat2, nidx=100):
    """
    returns distances between two lists of matrices mat1 and mat2.

    output: (d(mat1,mat1), d(mat2,mat2), d(mat2,mat1), d(mat1,mat2))

    d(mat1,mat2) and d(mat2,mat1) should be the same up to random variance
    (when d(mat1,mat1) and d(mat2,mat2) have the same width in "FWHM sense")

    nidx: nidx matrices are compared to nidx matrices each, that is each
    returned list has length nidx**2
    """
    # pick up a random matrix from mat1
    # compute distances from out-of-sample mat1
    # compute distances from sample of same size in mat2
    # repeat; for random matrix from mat2
    d_11 = []
    d_22 = []
    d_12 = []
    d_21 = []
    nidx1 = nidx
    nidx2 = nidx
    # for d_11 and d_12
    for nmat in randint(0, len(mat1), nidx1):
        refmat = mat1[nmat]
        for nmat_x in randint(0, len(mat1), nidx1):
            if nmat_x == nmat:
                nmat_x = (nmat - 1) if nmat > 0 else (nmat + 1)
            # svd(..., False, False) returns only the singular values;
            # [0] is the spectral norm of the difference
            d_11.append(svd(mat1[nmat_x] - refmat, False, False)[0])
        # ... I could use a []-statement, but I do not want to reformat a list
        # of lists ...
        for nmat_x in randint(0, len(mat2), nidx1):
            d_12.append(svd(mat2[nmat_x] - refmat, False, False)[0])
    for nmat in randint(0, len(mat2), nidx2):
        refmat = mat2[nmat]
        for nmat_x in randint(0, len(mat2), nidx2):
            if nmat_x == nmat:
                nmat_x = (nmat - 1) if nmat > 0 else (nmat + 1)
            d_22.append(svd(mat2[nmat_x] - refmat, False, False)[0])
        # ... I could use a []-statement, but I do not want to reformat a list
        # of lists ...
        for nmat_x in randint(0, len(mat1), nidx2):
            d_21.append(svd(mat1[nmat_x] - refmat, False, False)[0])
    return (d_11, d_22, d_21, d_12)
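# --- Usage sketch for matDist (added example). Assumes a pylab-style flat
# namespace (randn, randint, svd, mean). Two populations of 3x3 matrices with
# different offsets: the cross-population distances d_21/d_12 should sit
# visibly above the within-population ones.
def _demo_matDist():
    mat1 = [randn(3, 3) for _ in range(50)]
    mat2 = [randn(3, 3) + 2.0 for _ in range(50)]
    d_11, d_22, d_21, d_12 = matDist(mat1, mat2, nidx=10)
    print mean(d_11), mean(d_22), mean(d_21), mean(d_12)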
def singularValueDecomposition(self): """This method calculates the svd of the raw data. """ self.U = [] self.S = [] self.V = [] self.U, self.S, self.V = plab.svd(self.dockedOpt.TT)
def get_transformation_matrix(matrix):
    # Get the vector p and the values that are in there by taking the SVD.
    # Since D is diagonal with the singular values sorted from large to small
    # on the diagonal, the optimal q in min ||Dq|| is q = [[0]..[1]].
    # Therefore, p = Vq means p is the last column in V (svd returns V
    # transposed, so that is the last row of the returned array).
    U, D, V = svd(matrix)
    p = V[8][:]
    return inv(array([[p[0], p[1], p[2]],
                      [p[3], p[4], p[5]],
                      [p[6], p[7], p[8]]]))
def reduceDimDat(fulldat, n=1): """ reduces the dimension of a given data set by removing the lowest principal component. data must be given in D X N - format (D: dimension, N: number of measurements) """ raise NotImplementedError, \ 'Wait a minute - this function in raw form does not make much sense here ...' u, s, v = svd(fulldat, full_matrices=False)
def plotEllipse(pos, P, edge, face, transparency):
    U, s, Vh = pl.svd(P)
    orient = math.atan2(U[1, 0], U[0, 0]) * 180 / math.pi
    ellipsePlot = Ellipse(xy=pos,
                          width=2.0 * math.sqrt(s[0]),
                          height=2.0 * math.sqrt(s[1]),
                          angle=orient,
                          facecolor=face, edgecolor=edge,
                          alpha=transparency, zorder=0)
    ax = pl.gca()
    ax.add_patch(ellipsePlot)
    return ellipsePlot
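# --- Usage sketch for plotEllipse (added example). Assumes `import pylab as
# pl`, `import math`, and `from matplotlib.patches import Ellipse`, matching
# the names used above. Draws the 1-sigma ellipse of a sample covariance on
# top of the scattered points.
def _demo_plotEllipse():
    data = pl.randn(500, 2).dot(pl.array([[2.0, 0.0], [1.0, 0.5]]))
    P = pl.cov(data.T)                        # 2x2 covariance matrix
    pl.scatter(data[:, 0], data[:, 1], s=2)
    plotEllipse(data.mean(axis=0), P, 'k', 'r', 0.3)
    pl.axis('equal')
    pl.show()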
def fun(xp, threshold=1e-10, debug=False):
    # projector, scale, x_ref and cv_i are taken from the enclosing scope
    x = np.dot(projector, xp)
    if np.linalg.norm(x - xp) > threshold:
        if debug:
            print "debug:", svd(projector, 0, 0)
        print "threshold exceeded:", x, xp
        print "(norm of", x - xp, ":", np.linalg.norm(x - xp), ">", threshold, ")"
        return 0
    return scale * np.exp(-0.5 * np.dot(x - x_ref, np.dot(cv_i, x - x_ref)))
def kern(mat, threshold=1e-8):
    """
    returns an orthonormal basis of the kernel (null space) of the matrix,
    or None if the kernel is trivial

    Parameters:
        mat: matrix to be analyzed
        threshold: min. singular value to be considered as "different from 0"
    """
    u, s, v = svd(mat, full_matrices=True)
    # count singular values below threshold, plus the rows of v that have no
    # corresponding singular value at all (for wide matrices)
    dim_k = len(find(s < threshold)) + v.shape[0] - len(s)
    if dim_k > 0:
        return v[-dim_k:, :].T
    else:
        return None
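# --- Usage sketch for kern (added example). Assumes a pylab-style flat
# namespace (array, svd, find, dot, norm). Builds a 3x3 rank-2 matrix whose
# kernel is spanned by (1, -1, 0), up to sign and normalization.
def _demo_kern():
    m = array([[1.0, 1.0, 0.0],
               [0.0, 0.0, 1.0],
               [2.0, 2.0, 3.0]])       # column 0 == column 1 -> rank 2
    k = kern(m)
    print k                            # ~ +-[0.707, -0.707, 0]^T
    print norm(dot(m, k))              # ~ 0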
def perspectiveTransform(image, x1, y1, x2, y2, x3, y3, x4, y4, M, N):
    # Construct the matrix mat_M
    x1_a, y1_a = 0, 0
    x2_a, y2_a = M, 0
    x3_a, y3_a = M, N
    x4_a, y4_a = 0, N
    mat_M = array([[x1, y1, 1, 0, 0, 0, -x1_a * x1, -x1_a * y1, -x1_a],
                   [0, 0, 0, x1, y1, 1, -y1_a * x1, -y1_a * y1, -y1_a],
                   [x2, y2, 1, 0, 0, 0, -x2_a * x2, -x2_a * y2, -x2_a],
                   [0, 0, 0, x2, y2, 1, -y2_a * x2, -y2_a * y2, -y2_a],
                   [x3, y3, 1, 0, 0, 0, -x3_a * x3, -x3_a * y3, -x3_a],
                   [0, 0, 0, x3, y3, 1, -y3_a * x3, -y3_a * y3, -y3_a],
                   [x4, y4, 1, 0, 0, 0, -x4_a * x4, -x4_a * y4, -x4_a],
                   [0, 0, 0, x4, y4, 1, -y4_a * x4, -y4_a * y4, -y4_a]])
    # Get the vector p and the values that are in there by taking the SVD.
    # Since D is diagonal with the singular values sorted from large to small
    # on the diagonal, the optimal q in min ||Dq|| is q = [[0]..[1]].
    # Therefore, p = Vq means p is the last column in V.
    U, D, V = svd(mat_M)
    p = V[8][:]
    # P is the resulting matrix that describes the transformation
    P = array([[p[0], p[1], p[2]],
               [p[3], p[4], p[5]],
               [p[6], p[7], p[8]]])
    # Create the new image (renamed from b to avoid shadowing other names)
    result = array([zeros(M, float)] * N)
    for i in range(0, M):
        for j in range(0, N):
            or_coor = dot(inv(P), [[i], [j], [1]])
            or_coor_h = (or_coor[1][0] / or_coor[2][0],
                         or_coor[0][0] / or_coor[2][0])
            # pV: external pixel-value interpolation helper
            result[j][i] = pV(image, or_coor_h[0], or_coor_h[1], 'linear')
    return result
def generate(numDims, numClasses, k, numPatternsPerClass, numPatterns, numTests, numSVDSamples, keep): LOGGER.info('N dims=%s', numDims) LOGGER.info('N classes=%s', numClasses) LOGGER.info('k=%s', k) LOGGER.info('N vectors per class=%s', numPatternsPerClass) LOGGER.info('N training vectors=%s', numPatterns) LOGGER.info('N test vectors=%s', numTests) LOGGER.info('N SVD samples=%s', numSVDSamples) LOGGER.info('N reduced dims=%s', int(keep*numDims)) LOGGER.info('Generating data') numpy.random.seed(42) data0 = numpy.zeros((numPatterns + numTests, numDims)) class0 = numpy.zeros((numPatterns + numTests), dtype='int') c = 0 for i in range(numClasses): pt = 5*i*numpy.ones((numDims)) for _j in range(numPatternsPerClass): data0[c] = pt+5*numpy.random.random((numDims)) class0[c] = i c += 1 if 0: # Change this to visualize the output import pylab pylab.ion() pylab.figure() _u, _s, vt = pylab.svd(data0[:numPatterns]) tmp = numpy.zeros((numPatterns, 2)) for i in range(numPatterns): tmp[i] = numpy.dot(vt, data0[i])[:2] pylab.scatter(tmp[:, 0], tmp[:, 1]) ind = numpy.random.permutation(numPatterns + numTests) train_data = data0[ind[:numPatterns]] train_class = class0[ind[:numPatterns]] test_data = data0[ind[numPatterns:]] test_class = class0[ind[numPatterns:]] return train_data, train_class, test_data, test_class
def triangulate(points2d, cameras): """ Compute the N-view triangulation of corresponding 2D image points, given calibrated camera poses points2d: 2N-by-M matrix of 2D image points (N: number of cameras, M: number of image points) cameras: length-N list of Camera objects """ N = len(cameras) M = points2d.shape[1] pointsNorm = pl.zeros(points2d.shape) for camInd, camera in enumerate(cameras): points = points2d[camInd * 2 : (camInd * 2) + 2, :] pointsNorm[camInd * 2 : (camInd * 2) + 2, :] = camera.normalize(points) X = pl.zeros((3, M)) for pointInd in range(M): A = pl.zeros((3 * N, 4)) AStartRow = 0 skewsyms = pl.zeros((3, 3, N)) for camInd, camera in enumerate(cameras): skewsyms[:, :, camInd] = skewsym( homogeneous.homogenize(pointsNorm[camInd * 2 : (camInd * 2) + 2, pointInd]) ) A[AStartRow : AStartRow + 3, 0:4] = skewsyms[:, :, camInd].dot(camera.wHc[0:3, 0:4]) AStartRow += 3 U, S, VT = pl.svd(A) V = VT.T X[:, pointInd] = homogeneous.dehomogenize(V[:, -1]) return X
def _calculate_svd(pp, r_0, beta, N_piercepoints):
    """
    Returns the result (U) of the svd for the K-L vectors

    Parameters
    ----------
    pp : array
        Array of piercepoint locations
    r_0 : float
        Scale size of amp fluctuations (m)
    beta : float
        Power-law index for amp structure function (5/3 => pure Kolmogorov
        turbulence)
    N_piercepoints : int
        Number of piercepoints

    Returns
    -------
    C : array
        C matrix
    pinvC : array
        Pseudoinverse (pinv) of the C matrix
    U : array
        Unitary matrix
    """
    import numpy as np
    from pylab import kron, concatenate, pinv, norm, newaxis, find, amin, svd, eye

    D = np.resize(pp, (N_piercepoints, N_piercepoints, 3))
    D = np.transpose(D, (1, 0, 2)) - D
    D2 = np.sum(D**2, axis=2)
    C = -(D2 / r_0**2)**(beta / 2.0) / 2.0
    pinvC = pinv(C, rcond=1e-3)
    U, S, V = svd(C)

    return C, pinvC, U
def ksvd(self, X, n_components, dictionary=None, max_err=0, max_iter=10,
         approx=False, preserve_dc=False):
    (n_samples, n_features) = X.shape
    # if we're not given a dictionary for starters, make our own
    if dictionary is None:
        dictionary = np.random.rand(n_samples, n_components)
    # make the first dictionary element constant; remove the mean from the
    # rest of the dictionary elements
    if preserve_dc:
        dictionary[:, 0] = 1
        for i in range(1, n_components):
            dictionary[:, i] -= np.mean(dictionary[:, i])
    # normalize the dictionary regardless
    for i in range(n_components):
        dictionary[:, i] /= np.linalg.norm(dictionary[:, i])
    print("running ksvd on %d %d-dimensional vectors with K=%d"
          % (n_features, n_samples, n_components))
    # algorithm stuff
    # one coefficient column per signal (column of X); the original allocated
    # (n_components, n_samples), which mismatches X - dot(dictionary, code)
    code = np.zeros((n_components, n_features))
    err = np.inf
    iter_num = 0
    while iter_num < max_iter and err > max_err:
        # batch omp, woo!
        print("starting omp...")
        # X = omp(dictionary, Y, T, max_err)
        print("omp complete!")
        print('average l0 "norm" for ksvd iteration %d after omp was %f'
              % (iter_num, len(np.nonzero(code)[0]) / n_features))
        # dictionary update -- protip: update dictionary columns in random
        # order
        atom_indices = range(n_components)
        if preserve_dc:
            atom_indices = atom_indices[1:]
        np.random.shuffle(atom_indices)
        unused_atoms = []
        for (i, j) in zip(atom_indices, xrange(n_components)):
            if False:
                if j % 25 == 0:
                    print("ksvd: iteration %d, updating atom %d of %d"
                          % (iter_num + 1, j, n_components))
            # find nonzero entries
            x_using = np.nonzero(code[i, :])[0]
            if len(x_using) == 0:
                unused_atoms.append(i)
                continue
            if not approx:
                # Non-approximate K-SVD, as described in the original K-SVD
                # paper
                # compute residual error ... here's a trick passing almost all
                # the work to BLAS
                code[i, x_using] = 0
                Residual_err = X[:, x_using] - np.dot(dictionary, code[:, x_using])
                # update dictionary and weights -- sparsity-restricted rank-1
                # approximation
                U, s, Vt = pl.svd(Residual_err)
                dictionary[:, i] = U[:, 0]
                code[i, x_using] = s[0] * Vt.T[:, 0]
            else:
                # Approximate K-SVD: d = E_i g, g = E_i^T d, with
                # E_i = X - dictionary*code restricted to the signals using
                # atom i (atom i itself was just zeroed). The original indexed
                # dictionary[:, x_using], which mixes up signal and atom axes.
                dictionary[:, i] = 0
                g = code[i, x_using]
                d = np.dot(X[:, x_using], g) \
                    - np.dot(dictionary, np.dot(code[:, x_using], g))
                d = d / np.linalg.norm(d)
                g = np.dot(X[:, x_using].T, d) \
                    - np.dot(code[:, x_using].T, np.dot(dictionary.T, d))
                dictionary[:, i] = d
                code[i, x_using] = g
        # fill in values for unused atoms
        # unused column -> replace by signal in training data with worst
        # representation
        Repr_err = X - np.dot(dictionary, code)
        Repr_err_norms = (np.linalg.norm(Repr_err[:, n])
                          for n in range(n_features))
        err_indices = sorted(zip(Repr_err_norms, xrange(n_features)),
                             reverse=True)
        for (unused_index, err_tuple) in zip(unused_atoms, err_indices):
            (err, err_idx) = err_tuple
            d = X[:, err_idx].copy()
            if preserve_dc:
                d -= np.mean(d)
            d /= np.linalg.norm(d)
            dictionary[:, unused_index] = d
        # compute maximum representation error
        Repr_err_norms = [np.linalg.norm(Repr_err[:, n])
                          for n in range(n_features)]
        err = max(Repr_err_norms)
        print("maximum representation error: %f" % (err))
        # advance the iteration counter (missing in the original, which could
        # never terminate via max_iter)
        iter_num += 1
    return dictionary, code
def svd(self): self.U, self.s, self.Vh = pylab.svd(self.fish) self.decomposed = True return self.U, self.s, self.Vh
def _fit_tec_screen(station_names, source_names, pp, airmass, rr, weights,
    times, height, order, r_0, beta, outQueue):
    """
    Fits a screen to given TEC values using Karhunen-Loève base vectors

    Parameters
    ----------
    station_names: array
        Array of station names
    source_names: array
        Array of source names
    pp: array
        Array of piercepoint locations
    airmass: array
        Array of airmass values (note: not currently used)
    rr: array
        Array of TEC values to fit screen to
    weights: array
        Array of weights
    times: array
        Array of times
    height: float
        Height of screen (m)
    order: int
        Order of screen (i.e., number of KL base vectors to keep)
    r_0: float
        Scale size of phase fluctuations (m)
    beta: float
        Power-law index for phase structure function (5/3 =>
        pure Kolmogorov turbulence)
    """
    import numpy as np
    from pylab import kron, concatenate, pinv, norm, newaxis, find, amin, svd, eye

    logging.info('Fitting screens...')

    # Initialize arrays
    N_stations = len(station_names)
    N_sources = len(source_names)
    N_times = len(times)
    N_piercepoints = N_sources * N_stations
    tec_fit_white_all = np.zeros((N_times, N_sources, N_stations))
    tec_residual_all = np.zeros((N_times, N_sources, N_stations))

    for k in range(N_times):
        D = np.resize(pp[k, :, :], (N_piercepoints, N_piercepoints, 3))
        D = np.transpose(D, (1, 0, 2)) - D
        D2 = np.sum(D**2, axis=2)
        C = -(D2 / r_0**2)**(beta / 2.0) / 2.0
        pinvC = pinv(C, rcond=1e-3)
        U, S, V = svd(C)
        invU = pinv(np.dot(np.transpose(U[:, :order]),
                           np.dot(weights[:, :, k], U[:, :order])), rcond=1e-3)

        # Calculate screen
        rr1 = np.dot(np.transpose(U[:, :order]), np.dot(weights[:, :, k], rr[:, k]))
        tec_fit = np.dot(pinvC, np.dot(U[:, :order], np.dot(invU, rr1)))
        tec_fit_white_all[k, :, :] = tec_fit.reshape((N_sources, N_stations))

        # compare against the k-th time slot only; the original subtracted the
        # fit from all of rr, which cannot be reshaped to (N_sources, N_stations)
        residual = rr[:, k] - np.dot(C, tec_fit)
        tec_residual_all[k, :, :] = residual.reshape((N_sources, N_stations))

    outQueue.put([tec_fit_white_all, tec_residual_all, times])
# generate random data with random uncertainties in y x = pylab.arange(0.5 / N, 1.0, 1.0 / N) dy = pylab.array([0.1 * random.lognormvariate(0.0, 1.0) for i in range(N)]) y = [sum(a[j] * x[i]**j for j in range(M)) for i in range(N)] y = pylab.array([random.gauss(y[i], dy[i]) for i in range(N)]) # construct vector b and design matrix X b = pylab.zeros(N) X = pylab.zeros((N, M)) for i in range(N): b[i] = y[i] / dy[i] for j in range(M): X[i, j] = x[i]**j / dy[i] # compute fit parameters ahat and covariance matrix Sigma (U, w, VT) = pylab.svd(X) wmax = max(w) Winv = pylab.zeros((M, N)) Sigma = pylab.zeros((M, M)) eps = 1e-6 for j in range(M): if w[j] > eps * wmax: Winv[j, j] = 1.0 / w[j] else: Winv[j, j] = 0.0 Sigma[j, j] = Winv[j, j]**2 ahat = pylab.dot(VT.T, pylab.dot(Winv, pylab.dot(U.T, b))) Sigma = pylab.dot(VT.T, pylab.dot(Sigma, VT)) # compute chi-square and p-value of the fit chisq = pylab.norm(pylab.dot(X, ahat) - b)**2
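# --- Added sketch: the p-value mentioned in the comment above can be obtained
# from the chi-square survival function with N - M degrees of freedom. This
# assumes scipy is available; it is not part of the original script.
from scipy.stats import chi2
pvalue = chi2.sf(chisq, N - M)
print "chisq =", chisq, "p-value =", pvalue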
def robust_combined_algo(y, u, f, p, s_tol, dt):
    """
    Subspace Identification for stochastic systems with input

    Robust combined algorithm from chapter 4 of (1),
    assuming a system of the form:

    x(k+1) = A x(k) + B u(k) + w(k)
    y(k)   = C x(k) + D u(k) + v(k)

    E[(w_p; v_p) (w_q^T v_q^T)] = (Q S; S^T R) delta_pq

    and given y and u. Find the order of the system
    and A, B, C, D, Q, S, R

    See page 131, and generally chapter 4, of (1)

    A different implementation of the algorithm is
    presented in 6.1 of (1)

    (1) Subspace Identification for Linear
    Systems, by Van Overschee and Moor. 1996
    """
    #pylint: disable=too-many-arguments, too-many-locals

    # for this algorithm, we need future and past
    # to be more than 1
    assert f > 1
    assert p > 1

    # setup matrices
    y = pl.matrix(y)
    n_y = y.shape[0]
    u = pl.matrix(u)
    n_u = u.shape[0]
    w = pl.vstack([y, u])
    n_w = w.shape[0]

    # make sure the input is column vectors
    assert y.shape[0] < y.shape[1]
    assert u.shape[0] < u.shape[1]
    W = block_hankel(w, f + p)
    U = block_hankel(u, f + p)
    Y = block_hankel(y, f + p)

    W_p = W[:n_w*p, :]
    W_pp = W[:n_w*(p+1), :]

    Y_f = Y[n_y*f:, :]
    U_f = U[n_u*f:, :]  # was U[n_y*f:, :]; U is stacked in blocks of n_u rows

    Y_fm = Y[n_y*(f+1):, :]
    U_fm = U[n_u*(f+1):, :]

    # step 1, calculate the oblique and orthogonal projections
    #------------------------------------------
    #TODO fix explanation
    # Y_p = G_i Xd_p + Hd_i U_p
    # After the oblique projection, U_p component is eliminated,
    # without changing the Xd_p component:
    # Proj_perp_(U_p) Y_p = W1 O_i W2 = G_i Xd_p
    O_i = Y_f*project_oblique(U_f, W_p)
    Z_i = Y_f*project(pl.vstack([W_p, U_f]))     # vstack takes a sequence
    Z_ip = Y_fm*project(pl.vstack([W_pp, U_fm]))

    #TODO fix explanation
    # step 2, calculate the SVD of the weighted oblique projection
    #------------------------------------------
    # given: W1 O_i W2 = G_i Xd_p
    # want to solve for G_i, but know product, and not Xd_p
    # so can only find Xd_p up to a similarity transformation
    U0, s0, VT0 = pl.svd(O_i*project_perp(U_f))  #pylint: disable=unused-variable

    # step 3, determine the order by inspecting the singular
    #------------------------------------------
    # values in S and partition the SVD accordingly to obtain U1, S1
    #print s0
    n_x = pl.find(s0/s0.max() > s_tol)[-1] + 1
    U1 = U0[:, :n_x]
    S1 = pl.matrix(pl.diag(s0[:n_x]))
    # VT1 = VT0[:n_x, :n_x]

    # step 4, determine Gi and Gim
    #------------------------------------------
    G_i = U1*pl.matrix(pl.diag(pl.sqrt(s0[:n_x])))  # was sqrt(s1[...]): s1 undefined
    G_im = G_i[:-n_y, :]

    # step 5, solve the linear equations for A and C
    #------------------------------------------
    # Recompute G_i and G_im from A and C
    #TODO figure out what K (contains B and D) and the rhos (residuals) are in
    # terms of knowns; the intended relation is roughly
    #   pl.vstack([G_im.I*Z_ip, Y_f[:n_y, :]]) ==
    #       AC_stack*(G_i.I*Z_i) + K*U_f + pl.vstack([rho_w, rho_v])
    #TODO not done
    raise NotImplementedError(
        'step 5 of the robust combined algorithm is not implemented yet')
def subspace_det_algo1(y, u, f, p, s_tol, dt):
    """
    Subspace Identification for deterministic systems
    algorithm 1 from (1)

    assuming a system of the form:

    x(k+1) = A x(k) + B u(k)
    y(k)   = C x(k) + D u(k)

    and given y and u. Find A, B, C, D

    See page 52 of (1).

    (1) Subspace Identification for Linear
    Systems, by Van Overschee and Moor. 1996
    """
    # pylint: disable=too-many-arguments, too-many-locals
    # for this algorithm, we need future and past
    # to be more than 1
    assert f > 1
    assert p > 1

    # setup matrices
    y = np.matrix(y)
    n_y = y.shape[0]
    u = np.matrix(u)
    n_u = u.shape[0]
    w = pl.vstack([y, u])
    n_w = w.shape[0]

    # make sure the input is column vectors
    assert y.shape[0] < y.shape[1]
    assert u.shape[0] < u.shape[1]
    W = block_hankel(w, f + p)
    U = block_hankel(u, f + p)
    Y = block_hankel(y, f + p)

    W_p = W[:n_w*p, :]
    W_pp = W[:n_w*(p+1), :]

    Y_f = Y[n_y*f:, :]
    U_f = U[n_u*f:, :]  # was U[n_y*f:, :]; U is stacked in blocks of n_u rows

    Y_fm = Y[n_y*(f+1):, :]
    U_fm = U[n_u*(f+1):, :]

    # step 1, calculate the oblique projections
    # ------------------------------------------
    # Y_p = G_i Xd_p + Hd_i U_p
    # After the oblique projection, U_p component is eliminated,
    # without changing the Xd_p component:
    # Proj_perp_(U_p) Y_p = W1 O_i W2 = G_i Xd_p
    O_i = Y_f*project_oblique(U_f, W_p)
    O_im = Y_fm*project_oblique(U_fm, W_pp)

    # step 2, calculate the SVD of the weighted oblique projection
    # ------------------------------------------
    # given: W1 O_i W2 = G_i Xd_p
    # want to solve for G_i, but know product, and not Xd_p
    # so can only find Xd_p up to a similarity transformation
    W1 = np.matrix(pl.eye(O_i.shape[0]))
    W2 = np.matrix(pl.eye(O_i.shape[1]))
    U0, s0, VT0 = pl.svd(W1*O_i*W2)  # pylint: disable=unused-variable

    # step 3, determine the order by inspecting the singular
    # ------------------------------------------
    # values in S and partition the SVD accordingly to obtain U1, S1
    # print s0
    n_x = pl.where(s0/s0.max() > s_tol)[0][-1] + 1
    U1 = U0[:, :n_x]
    # S1 = np.matrix(pl.diag(s0[:n_x]))
    # VT1 = VT0[:n_x, :n_x]

    # step 4, determine Gi and Gim
    # ------------------------------------------
    G_i = W1.I*U1*np.matrix(pl.diag(pl.sqrt(s0[:n_x])))
    G_im = G_i[:-n_y, :]  # check

    # step 5, determine Xd_ip and Xd_p
    # ------------------------------------------
    # only know Xd up to a similarity transformation
    Xd_i = G_i.I*O_i
    Xd_ip = G_im.I*O_im

    # step 6, solve the set of linear eqs
    # for A, B, C, D
    # ------------------------------------------
    Y_ii = Y[n_y*p:n_y*(p+1), :]
    U_ii = U[n_u*p:n_u*(p+1), :]

    a_mat = np.matrix(pl.vstack([Xd_ip, Y_ii]))
    b_mat = np.matrix(pl.vstack([Xd_i, U_ii]))
    ss_mat = a_mat*b_mat.I
    A_id = ss_mat[:n_x, :n_x]
    B_id = ss_mat[:n_x, n_x:]
    assert B_id.shape[0] == n_x
    assert B_id.shape[1] == n_u
    C_id = ss_mat[n_x:, :n_x]
    assert C_id.shape[0] == n_y
    assert C_id.shape[1] == n_x
    D_id = ss_mat[n_x:, n_x:]
    assert D_id.shape[0] == n_y
    assert D_id.shape[1] == n_u

    if np.linalg.matrix_rank(C_id) == n_x:
        T = C_id.I  # try to make C identity, want it to look like state feedback
    else:
        T = np.matrix(pl.eye(n_x))

    Q_id = pl.zeros((n_x, n_x))
    R_id = pl.zeros((n_y, n_y))
    sys = ss.StateSpaceDiscreteLinear(
        A=T.I*A_id*T, B=T.I*B_id, C=C_id*T, D=D_id,
        Q=Q_id, R=R_id, dt=dt)
    return sys
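# --- Usage sketch for subspace_det_algo1 (added example). Assumes the
# module-level helpers used above (block_hankel, project_oblique, ss, np, pl)
# are available, as in the original source. Identifies a noise-free scalar
# system from simulated data; the eigenvalue of the recovered A matrix is
# invariant under the similarity transformation and should be near 0.9.
def _demo_subspace_det_algo1():
    dt = 0.1
    t = np.arange(0, 10, dt)
    u = np.matrix(np.sin(2 * np.pi * t))           # 1 x N input
    x = 0.0
    y = []
    for k in range(u.shape[1]):
        y.append(0.5 * x)                          # y = C x, with C = 0.5, D = 0
        x = 0.9 * x + 1.0 * u[0, k]                # x+ = A x + B u
    y = np.matrix(y)                               # 1 x N output
    sys = subspace_det_algo1(y, u, f=5, p=5, s_tol=0.1, dt=dt)
    print sys.A                                    # ~ [[0.9]]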
def _fit_phase_screen(station_names, source_names, pp, airmass, rr, weights,
    times, height, order, r_0, beta, outQueue):
    """
    Fits a screen to given phase values using Karhunen-Loève base vectors

    Parameters
    ----------
    station_names: array
        Array of station names
    source_names: array
        Array of source names
    pp: array
        Array of piercepoint locations
    airmass: array
        Array of airmass values (note: not currently used)
    rr: array
        Array of phase values to fit screen to
    weights: array
        Array of weights
    times: array
        Array of times
    height: float
        Height of screen (m)
    order: int
        Order of screen (i.e., number of KL base vectors to keep)
    r_0: float
        Scale size of phase fluctuations (m)
    beta: float
        Power-law index for phase structure function (5/3 =>
        pure Kolmogorov turbulence)
    """
    import numpy as np
    from pylab import kron, concatenate, pinv, norm, newaxis, find, amin, svd, eye

    logging.info('Fitting screens...')

    # Initialize arrays
    N_stations = len(station_names)
    N_sources = len(source_names)
    N_times = len(times)
    N_piercepoints = N_sources * N_stations
    real_fit_white_all = np.zeros((N_times, N_sources, N_stations))
    imag_fit_white_all = np.zeros((N_times, N_sources, N_stations))
    phase_fit_white_all = np.zeros((N_times, N_sources, N_stations))
    real_residual_all = np.zeros((N_times, N_sources, N_stations))
    imag_residual_all = np.zeros((N_times, N_sources, N_stations))
    phase_residual_all = np.zeros((N_times, N_sources, N_stations))

    # Change phase to real/imag
    rr_real = np.cos(rr)
    rr_imag = np.sin(rr)

    for k in range(N_times):
        try:
            D = np.resize(pp[k, :, :], (N_piercepoints, N_piercepoints, 3))
            D = np.transpose(D, (1, 0, 2)) - D
            D2 = np.sum(D**2, axis=2)
            C = -(D2 / r_0**2)**(beta / 2.0) / 2.0
            pinvC = pinv(C, rcond=1e-3)
            U, S, V = svd(C)
            invU = pinv(np.dot(np.transpose(U[:, :order]),
                               np.dot(weights[:, :, k], U[:, :order])), rcond=1e-3)

            # Calculate real screen; residuals compare against the k-th time
            # slot only (the original subtracted each fit from the full array,
            # which cannot be reshaped to (N_sources, N_stations))
            rr1 = np.dot(np.transpose(U[:, :order]), np.dot(weights[:, :, k], rr_real[:, k]))
            real_fit = np.dot(pinvC, np.dot(U[:, :order], np.dot(invU, rr1)))
            real_fit_white_all[k, :, :] = real_fit.reshape((N_sources, N_stations))
            residual = rr_real[:, k] - np.dot(C, real_fit)
            real_residual_all[k, :, :] = residual.reshape((N_sources, N_stations))

            # Calculate imag screen
            rr1 = np.dot(np.transpose(U[:, :order]), np.dot(weights[:, :, k], rr_imag[:, k]))
            imag_fit = np.dot(pinvC, np.dot(U[:, :order], np.dot(invU, rr1)))
            imag_fit_white_all[k, :, :] = imag_fit.reshape((N_sources, N_stations))
            residual = rr_imag[:, k] - np.dot(C, imag_fit)
            imag_residual_all[k, :, :] = residual.reshape((N_sources, N_stations))

            # Calculate phase screen
            phase_fit = np.dot(pinvC, np.arctan2(np.dot(C, imag_fit), np.dot(C, real_fit)))
            phase_fit_white_all[k, :, :] = phase_fit.reshape((N_sources, N_stations))
            residual = rr[:, k] - np.dot(C, phase_fit)
            phase_residual_all[k, :, :] = residual.reshape((N_sources, N_stations))
        except:
            # Set screen to zero if fit did not work
            logging.debug('Screen fit failed for timeslot {}'.format(k))
            real_fit_white_all[k, :, :] = np.zeros((N_sources, N_stations))
            real_residual_all[k, :, :] = np.ones((N_sources, N_stations))
            imag_fit_white_all[k, :, :] = np.zeros((N_sources, N_stations))
            imag_residual_all[k, :, :] = np.ones((N_sources, N_stations))
            phase_fit_white_all[k, :, :] = np.zeros((N_sources, N_stations))
            phase_residual_all[k, :, :] = np.ones((N_sources, N_stations))

    outQueue.put([real_fit_white_all, real_residual_all,
                  imag_fit_white_all, imag_residual_all,
                  phase_fit_white_all, phase_residual_all, times])
def create_cm(dim, eigList1=None, eigList2=None):
    """
    returns two real-valued commuting matrices of dimension dim x dim
    the eigenvalues of each matrix can be given; a single complex number
    will be interpreted as a pair of complex conjugates. With this
    restriction, the (internally augmented) lists must have the length
    of dim
    """
    if eigList1 is None:
        eigList1 = rand(dim)
    if eigList2 is None:
        eigList2 = rand(dim)
    # order 1st array such that complex numbers are first
    EL1 = array(eigList1)
    imPos1 = find(iscomplex(EL1))
    rePos1 = find(isreal(EL1))  # shorter than set comparisons :D
    EL1 = hstack([EL1[imPos1], EL1[rePos1]])
    # order 2nd array such that complex numbers are last
    EL2 = array(eigList2)
    imPos2 = find(iscomplex(EL2))
    rePos2 = find(isreal(EL2))  # shorter than set comparisons :D
    EL2 = hstack([EL2[rePos2], EL2[imPos2]])
    # now: make eigenvalues of list #2, where a block is in list #1,
    # pairwise equal, and other way round
    EL2[1:2 * len(imPos1):2] = EL2[0:2 * len(imPos1):2]
    if len(imPos2) > 0:
        # guard needed: for len(imPos2) == 0 the slice below would start at
        # -0 == 0 and overwrite the whole list
        EL1[-2 * len(imPos2) + 1::2] = EL1[-2 * len(imPos2)::2]
    if len(imPos2) * 2 + len(imPos1) * 2 > dim:
        raise ValueError(
            'too many complex eigenvalues - cannot create commuting matrices')
    # augment lists
    ev1 = []
    nev1 = 0
    for elem in EL1:
        if elem.imag != 0.:
            ev1.append(array([[elem.real, -elem.imag],
                              [elem.imag, elem.real]]))
            nev1 += 2
        else:
            ev1.append(elem)
            nev1 += 1
    if nev1 != dim:
        raise ValueError(
            'number of given eigenvalues #1 (complex: x2) does not match dim!')
    ev2 = []
    nev2 = 0
    for elem in EL2:
        if elem.imag != 0.:
            ev2.append(array([[elem.real, -elem.imag],
                              [elem.imag, elem.real]]))
            nev2 += 2
        else:
            ev2.append(elem)
            nev2 += 1
    if nev2 != dim:
        raise ValueError(
            'number of given eigenvalues #2 (complex: x2) does not match dim!')
    u, s, v = svd(randn(dim, dim))
    # create a coordinate system v that is not orthogonal but not too skew
    v = v + .2 * rand(dim, dim) - .1
    # create the commuting matrices from block diagonal matrices
    # (blockdiag: external helper that builds a block-diagonal matrix)
    cm1 = dot(inv(v), dot(blockdiag(ev1), v))
    cm2 = dot(inv(v), dot(blockdiag(ev2), v))
    return cm1, cm2
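# --- Usage sketch for create_cm (added example). Assumes the flat pylab
# namespace used above plus the external blockdiag helper that create_cm
# relies on. Verifies the defining property: the two returned matrices
# commute.
def _demo_create_cm():
    cm1, cm2 = create_cm(4, eigList1=[2.0 + 1.0j, 0.5, 1.5],
                         eigList2=[0.3 + 0.5j, 0.1, 0.2])
    print norm(dot(cm1, cm2) - dot(cm2, cm1))   # ~ 0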
def fit_screen_to_tec(station_names, source_names, pp, airmass, rr, times,
    height, order, r_0, beta):
    """
    Fits a screen to given TEC values using Karhunen-Loève base vectors

    Keyword arguments:
    station_names -- array of station names
    source_names -- array of source names
    pp -- array of piercepoint locations
    airmass -- array of airmass values
    rr -- array of TEC solutions
    times -- array of times
    height -- height of screen (m)
    order -- order of screen (i.e., number of KL base vectors to keep)
    r_0 -- scale size of phase fluctuations (m)
    beta -- power-law index for phase structure function (5/3 =>
        pure Kolmogorov turbulence)
    """
    import numpy as np
    from pylab import kron, concatenate, pinv, norm, newaxis, find, amin, svd, eye
    try:
        import progressbar
    except ImportError:
        import losoto.progressbar as progressbar

    logging.info('Fitting screens to TEC values...')
    N_stations = len(station_names)
    N_sources = len(source_names)
    N_times = len(times)

    tec_fit_all = np.zeros((N_times, N_sources, N_stations))
    residual_all = np.zeros((N_times, N_sources, N_stations))

    A = concatenate([kron(eye(N_sources), np.ones((N_stations, 1))),
        kron(np.ones((N_sources, 1)), eye(N_stations))], axis=1)

    N_piercepoints = N_sources * N_stations
    P = eye(N_piercepoints) - np.dot(np.dot(A, pinv(np.dot(A.T, A))), A.T)

    pbar = progressbar.ProgressBar(maxval=N_times).start()
    ipbar = 0
    for k in range(N_times):
        try:
            D = np.resize(pp[k, :, :], (N_piercepoints, N_piercepoints, 3))
            D = np.transpose(D, (1, 0, 2)) - D
            D2 = np.sum(D**2, axis=2)
            C = -(D2 / r_0**2)**(beta / 2.0) / 2.0
            P1 = eye(N_piercepoints) - np.ones((N_piercepoints, N_piercepoints)) / N_piercepoints
            C1 = np.dot(np.dot(P1, C), P1)
            U, S, V = svd(C1)

            B = np.dot(P, np.dot(np.diag(airmass[k, :]), U[:, :order]))
            pinvB = pinv(B, rcond=1e-3)

            rr1 = np.dot(P, rr[:, k])
            tec_fit = np.dot(U[:, :order], np.dot(pinvB, rr1))
            tec_fit_all[k, :, :] = tec_fit.reshape((N_sources, N_stations))

            residual = rr1 - np.dot(P, tec_fit)
            residual_all[k, :, :] = residual.reshape((N_sources, N_stations))
        except:
            # Set screen to zero if fit did not work
            logging.debug('Tecscreen fit failed for timeslot {0}'.format(k))
            tec_fit_all[k, :, :] = np.zeros((N_sources, N_stations))
            residual_all[k, :, :] = np.ones((N_sources, N_stations))

        pbar.update(ipbar)
        ipbar += 1
    pbar.finish()

    return tec_fit_all, residual_all