def _execute(self, x):
    #----------------------------------------------------
    # similar algorithm to that within self.stop_training()
    #  refer there for notes & comments on code
    #----------------------------------------------------
    N = self.data.shape[0]
    Nx = x.shape[0]
    W = numx.zeros((Nx, N), dtype=self.dtype)

    k, r = self.k, self.r
    d_out = self.output_dim
    Q_diag_idx = numx.arange(k)

    # do we need to automatically determine the regularization term?
    auto_reg = r is None

    for row in range(Nx):
        #find nearest neighbors of x in M
        M_xi = self.data - x[row]
        nbrs = numx.argsort((M_xi**2).sum(1))[:k]
        M_xi = M_xi[nbrs]

        #find corrected covariance matrix Q
        Q = mult(M_xi, M_xi.T)
        if auto_reg and k > d_out:
            # automatic mode: recompute the correction for every test
            # point as the sum of the unused variances (as in
            # _stop_training), and apply it exactly once per point
            sig2 = svd(M_xi, compute_uv=0)**2
            r = numx.sum(sig2[d_out:])
        if r is not None:
            Q[Q_diag_idx, Q_diag_idx] += r

        #solve for weights
        w = self._refcast(numx_linalg.solve(Q, numx.ones(k)))
        w /= w.sum()
        W[row, nbrs] = w

    #multiply weights by result of SVD from training
    return numx.dot(W, self.training_projection)
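# Usage sketch (not part of the module), assuming these methods are the
# execute/stop_training pair of mdp.nodes.LLENode and follow the standard
# MDP train / stop_training / execute protocol.  The synthetic data, k=12,
# and output_dim=2 below are arbitrary example values.
import numpy as np
import mdp

x = np.random.RandomState(0).rand(500, 3)     # 500 training points in 3-D
node = mdp.nodes.LLENode(k=12, output_dim=2)  # 12 neighbors, 2 output dims
node.train(x)
node.stop_training()    # _stop_training below: builds training_projection
y = node.execute(x)     # _execute above: y.shape == (500, 2)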
def _stop_training(self):
    Cumulator._stop_training(self)

    if self.verbose:
        msg = ('training LLE on %i points'
               ' in %i dimensions...' % (self.data.shape[0],
                                         self.data.shape[1]))
        print msg

    # some useful quantities
    M = self.data
    N = M.shape[0]
    k = self.k
    r = self.r

    # indices of diagonal elements
    W_diag_idx = numx.arange(N)
    Q_diag_idx = numx.arange(k)

    if k > N:
        err = ('k=%i must be less than or '
               'equal to number of training points N=%i' % (k, N))
        raise TrainingException(err)

    # determines number of output dimensions: if desired_variance
    # is specified, we need to learn it from the data. Otherwise,
    # it's easy
    learn_outdim = False
    if self.output_dim is None:
        if self.desired_variance is None:
            self.output_dim = self.input_dim
        else:
            learn_outdim = True

    # do we need to automatically determine the regularization term?
    auto_reg = r is None

    # determine number of output dims, precalculate useful stuff
    if learn_outdim:
        Qs, sig2s, nbrss = self._adjust_output_dim()

    # build the weight matrix
    #XXX future work:
    #XXX   for faster implementation, W should be a sparse matrix
    W = numx.zeros((N, N), dtype=self.dtype)

    if self.verbose:
        print ' - constructing [%i x %i] weight matrix...' % W.shape

    for row in range(N):
        if learn_outdim:
            Q = Qs[row, :, :]
            nbrs = nbrss[row, :]
        else:
            # -----------------------------------------------
            #  find k nearest neighbors
            # -----------------------------------------------
            M_Mi = M - M[row]
            nbrs = numx.argsort((M_Mi**2).sum(1))[1:k + 1]
            M_Mi = M_Mi[nbrs]
            # compute covariance matrix of distances
            Q = mult(M_Mi, M_Mi.T)

        # -----------------------------------------------
        #  compute weight vector based on neighbors
        # -----------------------------------------------

        #Covariance matrix may be nearly singular:
        # add a diagonal correction to prevent numerical errors
        if auto_reg:
            # automatic mode: correction is equal to the sum of
            # the (d_in - d_out) unused variances (as in deRidder &
            # Duin)
            if learn_outdim:
                sig2 = sig2s[row, :]
            else:
                sig2 = svd(M_Mi, compute_uv=0)**2
            r = numx.sum(sig2[self.output_dim:])
            Q[Q_diag_idx, Q_diag_idx] += r
        else:
            # Roweis et al instead use "a correction that
            #   is small compared to the trace", e.g.:
            # r = 0.001 * float(Q.trace())
            # this is equivalent to assuming 0.1% of the variance is unused
            Q[Q_diag_idx, Q_diag_idx] += r * Q.trace()

        #solve for weight
        # weight is w such that sum(Q_ij * w_j) = 1 for all i
        # XXX refcast is due to numpy bug: floats become double
        w = self._refcast(numx_linalg.solve(Q, numx.ones(k)))
        w /= w.sum()

        #update row of the weight matrix
        W[nbrs, row] = w

    if self.verbose:
        msg = (' - finding [%i x %i] null space of weight matrix\n'
               '     (may take a while)...' % (self.output_dim, N))
        print msg

    self.W = W.copy()
    #to find the null space, we need the bottom d+1
    #  eigenvectors of (W-I).T*(W-I)
    #Compute this using the svd of (W-I):
    W[W_diag_idx, W_diag_idx] -= 1.

    #XXX future work:
    #XXX   use of upcoming ARPACK interface for bottom few eigenvectors
    #XXX   of a sparse matrix will significantly increase the speed
    #XXX   of the next step
    if self.svd:
        sig, U = nongeneral_svd(W.T, range=(2, self.output_dim + 1))
    else:
        # the following code does the same computation, but uses
        # symeig, which computes only the required eigenvectors, and
        # is much faster. However, it could also be more unstable...
        WW = mult(W, W.T)
        # regularizes the eigenvalues, does not change the eigenvectors:
        WW[W_diag_idx, W_diag_idx] += 0.1
        sig, U = symeig(WW, range=(2, self.output_dim + 1), overwrite=True)

    self.training_projection = U
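# Illustrative sketch (not the module's code): the core of the loop
# above in plain NumPy, for a single point.  It finds the k nearest
# neighbors of M[i], builds the local covariance matrix Q, applies the
# Roweis-style trace correction, and solves Q w = 1 with sum(w) == 1.
# The helper name and reg default are hypothetical example choices.
import numpy as np

def _lle_weights_sketch(M, i, k, reg=0.001):
    """Reconstruction weights of M[i] from its k nearest neighbors."""
    M_Mi = M - M[i]
    # index 0 of the sort is the point itself (distance zero): skip it
    nbrs = np.argsort((M_Mi**2).sum(axis=1))[1:k + 1]
    G = M_Mi[nbrs]                        # (k, d) centered neighbors
    Q = np.dot(G, G.T)                    # local covariance matrix
    Q.flat[::k + 1] += reg * np.trace(Q)  # diagonal regularization
    w = np.linalg.solve(Q, np.ones(k))    # solve sum_j Q_ij w_j = 1
    return nbrs, w / w.sum()              # enforce sum(w) == 1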