import numpy as np
from numpy.linalg import svd


def nearest_rotation_matrix(M):
    """
    Compute the orthogonal matrix which is closest to *M* in the
    Frobenius norm. Used to constrain an estimate of a rotation matrix.

    The approach is given by Zhang in Appendix C.
    """
    # NumPy's svd returns "V" as the (conjugate) transpose of the
    # canonical "V" of the SVD, so the projection is simply U @ V.
    # Note: np.dot is required here; with plain ndarrays, U * V would
    # be an elementwise product, not a matrix product.
    U, _, V = svd(M)
    return np.dot(U, V)
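
# A hedged usage sketch (not part of the original snippet): perturb a
# known rotation and project the corrupted estimate back onto the set
# of orthogonal matrices. The projection U @ V minimises the Frobenius
# distance to M among orthogonal matrices.
theta = 0.3
R = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]])
noisy = R + 0.01 * np.ones((2, 2))               # slightly corrupted estimate
R_hat = nearest_rotation_matrix(noisy)
print(np.allclose(R_hat @ R_hat.T, np.eye(2)))   # True: orthogonal again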
def rvs(self, mean=None, cov=1, size=1, random_state=None):
    """
    Draw random samples from a multivariate Laplace distribution.

    Parameters
    ----------
    %(_mvl_doc_default_callparams)s
    size : integer, optional
        Number of samples to draw (default 1).
    %(_doc_random_state)s

    Returns
    -------
    rvs : ndarray or scalar
        Random variates of size (`size`, `N`), where `N` is the
        dimension of the random variable.

    Notes
    -----
    %(_mvl_doc_callparams_note)s
    """
    # Check preconditions on arguments
    mean = np.array(mean)
    cov = np.array(cov)

    if size is None:
        shape = []
    elif isinstance(size, (int, np.integer)):
        shape = [size]
    else:
        shape = size

    if len(mean.shape) != 1:
        raise ValueError("mean must be 1 dimensional")
    if (len(cov.shape) != 2) or (cov.shape[0] != cov.shape[1]):
        raise ValueError("cov must be 2 dimensional and square")
    if mean.shape[0] != cov.shape[0]:
        raise ValueError("mean and cov must have same length")

    # Compute the shape of the output and create a matrix of independent
    # standard Laplace-distributed random numbers. The matrix has rows
    # with the same length as mean and as many rows as are necessary to
    # form a matrix of shape final_shape.
    final_shape = list(shape[:])
    final_shape.append(mean.shape[0])

    random_state = self._get_random_state(random_state)

    # Standard Laplace samples, coloured below via the SVD of cov
    x = random_state.laplace(loc=0.0, scale=1.0,
                             size=final_shape).reshape(-1, mean.shape[0])

    dim, mean, cov = self._process_parameters(None, mean, cov)
    (u, s, v) = svd(cov)
    x = np.dot(x, np.sqrt(s)[:, None] * v)
    x += mean

    return x
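
# A minimal standalone sketch (an assumption-laden illustration, not the
# original class) of the colouring step above: for rows z with iid
# unit-variance components, z @ (sqrt(s)[:, None] * v) has covariance
# v.T @ diag(s) @ v = cov, since the SVD of a symmetric PSD cov has
# u = v.T. A Laplace with scale 1/sqrt(2) has unit variance; the rvs
# above uses scale=1.0, so its output covariance is 2 * cov.
import numpy as np
from numpy.linalg import svd

rng = np.random.default_rng(0)
cov = np.array([[2.0, 0.6], [0.6, 1.0]])
_, s, v = svd(cov)
z = rng.laplace(0.0, 1.0 / np.sqrt(2.0), size=(200000, 2))
x = z @ (np.sqrt(s)[:, None] * v)
print(np.cov(x, rowvar=False))   # approximately cov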
import numpy as np
from numpy.linalg import svd


def inv_cov(cov):
    """Invert a covariance matrix via its SVD, regularising the spectrum
    so that the condition number does not exceed 1e8."""
    U, S, V = svd(cov)
    eps = 0.0
    # Old condition number; if it is too large, choose eps so that the
    # shifted spectrum S + eps has the target condition number nc.
    oc = np.max(S) / np.min(S)
    if oc > 1e8:
        nc = np.min([oc, 1e8])
        eps = np.min(S) * (oc - nc) / (nc - 1.0)
    # LI.T @ LI = U @ diag(1 / (S + eps)) @ U.T, the regularised inverse.
    LI = np.dot(np.diag(1.0 / (np.sqrt(np.absolute(S) + eps))), U.T)
    covI = np.dot(LI.T, LI)
    return covI
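
# A quick hedged check of inv_cov (this test matrix is an illustration,
# not from the original code) on a deliberately ill-conditioned
# covariance whose eigenvalues span 12 orders of magnitude: the
# regularised inverse stays capped near 1e8 where a plain
# np.linalg.inv amplifies the tiny eigenvalue to 1e12.
cov = np.diag([1.0, 1e-12])
print(inv_cov(cov))            # largest entry capped near 1e8
print(np.linalg.inv(cov))      # contains 1e12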
import numpy as np
from numpy.linalg import svd


def estimate_camera_params(homographies):
    # Stack the constraints from each homography into a design matrix;
    # each homography contributes two rows (Zhang's orthogonality and
    # equal-norm constraints). Plausible implementations of the helper
    # functions used here are sketched after this snippet.
    n = len(homographies)
    design_matrix = np.empty((2 * n, 6))
    for i, homography in enumerate(homographies):
        design_matrix[2 * i] = homography_vector(homography, 0, 1)
        design_matrix[2 * i + 1] = homography_vector(homography, 0, 0) \
            - homography_vector(homography, 1, 1)

    # Extract the last right singular vector (ordered by decreasing
    # singular value) and unflatten it to get the image of the
    # absolute conic
    _, _, V = svd(design_matrix)
    B = unflatten_symmetric(V[-1])

    return extract_camera_params(B)
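
# The helpers above are not shown in this snippet. Below is a hedged
# sketch of plausible implementations following Zhang's "A Flexible New
# Technique for Camera Calibration"; the names homography_vector and
# unflatten_symmetric come from the call sites, and the exact bodies and
# flattening order b = (B11, B12, B22, B13, B23, B33) are assumptions.
# homography_vector(H, i, j) builds the vector v_ij with
# v_ij . b = h_i^T B h_j, where h_i is the i-th column of H.
import numpy as np

def homography_vector(H, i, j):
    # Columns i and j of the homography (assumed 3x3 array)
    hi, hj = H[:, i], H[:, j]
    return np.array([hi[0] * hj[0],
                     hi[0] * hj[1] + hi[1] * hj[0],
                     hi[1] * hj[1],
                     hi[2] * hj[0] + hi[0] * hj[2],
                     hi[2] * hj[1] + hi[1] * hj[2],
                     hi[2] * hj[2]])

def unflatten_symmetric(b):
    # Rebuild the symmetric 3x3 matrix B from its 6 free entries
    B11, B12, B22, B13, B23, B33 = b
    return np.array([[B11, B12, B13],
                     [B12, B22, B23],
                     [B13, B23, B33]])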
import numpy as np
from numpy.linalg import svd


def robust_log_det(c):
    """
    Computes the logarithm of the determinant of a positive definite
    matrix in a fashion that is more robust to ill-conditioning than
    taking the logarithm of np.linalg.det.

    .. note:: Specifically, we compute the SVD of c and return the sum
        of the logs of its singular values, which equal its eigenvalues
        since c is positive definite. np.linalg.det, by contrast,
        computes an LU decomposition of c and takes the product of its
        diagonal elements, which is subject to underflow error when
        those elements are small.

    Parameters
    ----------
    c : (d, d) np.array
        Square input matrix for computing log-determinant.

    Returns
    -------
    d : float
        Log-determinant of the input matrix.
    """
    u, s, v = svd(c)
    return np.sum(np.log(s))
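
# A small demonstration of the underflow the docstring describes (a
# hedged illustration, not part of the original module): the determinant
# of 0.01 * I in 500 dimensions is 1e-1000, which underflows float64,
# so log(det) is -inf while the sum-of-logs route is exact.
c = 0.01 * np.eye(500)
print(np.log(np.linalg.det(c)))   # -inf: det underflowed to 0.0
print(robust_log_det(c))          # -2302.58... == 500 * log(0.01)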
import numpy as np
from numpy.linalg import svd


def SVDFactorise(cov, max_cn=1e8):
    """Factorise a covariance matrix via SVD, regularising its spectrum
    so that the condition number does not exceed max_cn. Returns the
    inverse, square-root factors, determinant and spectrum in a dict."""
    U, S, V = svd(cov)
    eps = 0.0
    # Old condition number; if it exceeds max_cn, choose eps so that
    # the shifted spectrum S + eps has condition number max_cn.
    oc = np.max(S) / np.min(S)
    if oc > max_cn:
        nc = np.min([oc, max_cn])
        eps = np.min(S) * (oc - nc) / (nc - 1.0)
    # L @ L.T reconstructs the regularised covariance;
    # LI.T @ LI is its inverse.
    L = np.dot(U, np.diag(np.sqrt(S + eps)))
    LI = np.dot(np.diag(1.0 / (np.sqrt(np.absolute(S) + eps))), U.T)
    covI = np.dot(LI.T, LI)

    res = {}
    res['inv'] = covI.copy()
    res['L'] = L.copy()
    res['det'] = np.prod(S + eps)
    res['log_det'] = np.sum(np.log(S + eps))
    res['LI'] = LI.copy()
    res['eigen_vals'] = S + eps
    res['u'] = U.copy()
    res['v'] = V.copy()
    return res
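
# A hedged usage sketch (the test matrix is an illustration): the factor
# L colours iid unit-variance noise into samples whose covariance
# matches the (regularised) input, since L @ L.T = U @ diag(S + eps) @ U.T.
rng = np.random.default_rng(0)
cov = np.array([[2.0, 0.6], [0.6, 1.0]])
f = SVDFactorise(cov)
z = rng.standard_normal((100000, 2))
x = z @ f['L'].T
print(np.cov(x, rowvar=False))   # approximately cov
print(f['inv'] @ cov)            # approximately the identity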
import pickle

import numpy as np
from numpy.linalg import svd

from data_utility import load_cifar10

X, Y, validation_X, validation_Y, test_X, test_Y = load_cifar10(center=True)

print('computing zca matrix')
covariance = np.dot(X.T, X) / len(X)
U, S, V = svd(covariance)
epsilon = 1e-4
zca_matrix = np.dot(np.dot(U, np.diag(1.0 / (S + epsilon) ** 0.5)), U.T)

print('whitening')
X = np.dot(X, zca_matrix)
X = X.astype(np.float32)
validation_X = np.dot(validation_X, zca_matrix)
validation_X = validation_X.astype(np.float32)
test_X = np.dot(test_X, zca_matrix)
test_X = test_X.astype(np.float32)

print('dumping')
path = 'whitened-cifar'
BATCH_SIZE = 10000
for i in range(len(X) // BATCH_SIZE):
    with open('%s/training%d' % (path, i), 'wb') as f:
        pickle.dump(
            {
                'data': X[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
                'labels': Y[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]
            },
            f)
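
# A hedged sanity check (not in the original script): after ZCA
# whitening, the empirical covariance of X should be approximately the
# identity, up to the epsilon regularisation of small singular values.
whitened_cov = np.dot(X.T, X) / len(X)
off_diag = whitened_cov - np.diag(np.diag(whitened_cov))
print('mean diagonal:', np.mean(np.diag(whitened_cov)))   # close to 1.0
print('max |off-diagonal|:', np.max(np.abs(off_diag)))    # close to 0.0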