def __test(d, m):
    """Check that normalize_dictionary yields unit-norm atoms while
    preserving the direction of each original column."""

    # Random codebook: 2*d rows, m atoms
    basis = numpy.random.randn(2 * d, m)

    # Round-trip: columns -> diagonal form -> normalize -> columns
    unit_basis = _cdl.diags_to_columns(
        _cdl.normalize_dictionary(_cdl.columns_to_diags(basis)))

    # 1. every normalized column must have unit length
    unit_lengths = numpy.sum(unit_basis**2, axis=0)**0.5
    assert EQ(unit_lengths, numpy.ones_like(unit_lengths))

    # 2. direction must be unchanged: the projection of each original
    #    column onto its normalized counterpart recovers the original norm
    original_lengths = numpy.sum(basis**2, axis=0)**0.5
    overlap = numpy.sum(basis * unit_basis, axis=0)
    assert EQ(original_lengths, overlap)
def fit(self, X):
    """Fit the model to the data in X.

    Parameters
    ----------
    X : array-like, shape [n_samples, n_features_y, n_features_x]
        Training data patches.

    Returns
    -------
    self : object
        Returns the instance itself.
    """
    patch_width = X.shape[2]

    # The spatial l1 penalty needs the patch geometry and the
    # non-negativity flag forwarded to the solver.
    extra_args = {}
    if self.penalty == 'l1_space':
        extra_args.update(height=X.shape[1],
                          width=X.shape[2],
                          pad_data=self.pad_data,
                          nonneg=self.nonneg)

    encoder, diag_dict, diagnostics = _cdl.learn_dictionary(
        self.data_generator(X),
        self.n_atoms,
        reg=self.penalty,
        alpha=self.alpha,
        max_steps=self.n_iter,
        verbose=self.verbose,
        D=self.fft_components_,
        **extra_args)

    # Save the diagonal-form dictionary; it is fed back in as D above
    # on subsequent calls to fit.
    self.fft_components_ = diag_dict

    # Convert back to per-atom 2d patches and reorder the axes
    patches = _cdl.vectors_to_patches(_cdl.diags_to_columns(diag_dict),
                                      patch_width,
                                      pad_data=self.pad_data)
    self.components_ = patches.swapaxes(1, 2).swapaxes(0, 1)

    self.encoder_ = encoder
    self.diagnostics_ = diagnostics
    return self
def __test(d, m):
    """Verify proj_l2_ball: columns already inside the unit ball are left
    untouched; columns outside are pulled onto the sphere without
    changing direction."""

    # Start from a unit-norm dictionary in column form
    raw = numpy.random.randn(2 * d, m)
    unit_cols = _cdl.diags_to_columns(
        _cdl.normalize_dictionary(_cdl.columns_to_diags(raw)))

    # Rescale so some columns land inside the ball and some outside
    scales = 2.0**numpy.linspace(-4, 4, m)
    scaled = unit_cols * scales

    # Vectorize, project into the l2 ball, and restore column layout
    projected = _cdl.vector_to_columns(
        _cdl.proj_l2_ball(_cdl.columns_to_vector(scaled), m), m)

    scaled_norms = numpy.sum(scaled**2, axis=0)**0.5
    projected_norms = numpy.sum(projected**2, axis=0)**0.5
    inner = numpy.sum(scaled * projected, axis=0)

    for k in range(m):
        # 1. projected norm is at most 1 (fudge factor for roundoff)
        assert projected_norms[k] <= 1.0 + 1e-10
        # 2. columns with scale < 1 were untouched
        assert scales[k] > 1.0 or EQ(projected[:, k], scaled[:, k])
        # 3. columns with scale >= 1 preserve their direction
        assert scales[k] < 1.0 or EQ(inner[k], scaled_norms[k])
def fit(self, X):
    """Fit the model to the data in X.

    Parameters
    ----------
    X : array-like, shape [n_samples, n_features_y, n_features_x]
        Training data patches.

    Returns
    -------
    self : object
        Returns the instance itself.
    """
    width = X.shape[2]

    # Extra solver arguments are only needed for the spatial l1 penalty:
    # patch geometry plus the non-negativity flag.
    if self.penalty == 'l1_space':
        extra_args = {'height': X.shape[1],
                      'width': X.shape[2],
                      'pad_data': self.pad_data,
                      'nonneg': self.nonneg}
    else:
        extra_args = {}

    encoder, D, diagnostics = _cdl.learn_dictionary(self.data_generator(X),
                                                    self.n_atoms,
                                                    reg=self.penalty,
                                                    alpha=self.alpha,
                                                    max_steps=self.n_iter,
                                                    verbose=self.verbose,
                                                    D=self.fft_components_,
                                                    **extra_args)

    # Retain the learned dictionary in diagonal form; it is supplied
    # as the D argument when fit is called again.
    self.fft_components_ = D

    # Rearrange into patch form, then swap axes to [n_atoms, y, x]
    D = _cdl.diags_to_columns(D)
    D = _cdl.vectors_to_patches(D, width, pad_data=self.pad_data)
    self.components_ = D.swapaxes(1, 2).swapaxes(0, 1)

    self.encoder_ = encoder
    self.diagnostics_ = diagnostics
    return self
def __test(d, m):
    """columns_to_diags and diags_to_columns must be exact inverses."""
    original = numpy.random.randn(2 * d, m)
    recovered = _cdl.diags_to_columns(_cdl.columns_to_diags(original))
    assert EQ(original, recovered)