def __test(d, m):
    # Generate a random dictionary
    X = numpy.random.randn(2 * d, m)

    # Convert to diagonals
    Xdiags = _cdl.columns_to_diags(X)

    # Normalize
    N_Xdiag = _cdl.normalize_dictionary(Xdiags)

    # Convert back
    N_X = _cdl.diags_to_columns(N_Xdiag)

    # 1. verify unit norms
    norms = numpy.sum(N_X**2, axis=0)**0.5
    assert EQ(norms, numpy.ones_like(norms))

    # 2. verify that directions are correct:
    #    projection onto the normalized basis should equal norm
    #    of the original basis
    norms_orig = numpy.sum(X**2, axis=0)**0.5
    projection = numpy.sum(X * N_X, axis=0)

    assert EQ(norms_orig, projection)
    pass
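# The EQ helper used throughout these tests is not defined in this section.
# A minimal sketch of the approximate-equality check the assertions assume;
# the tolerances here are illustrative assumptions, not taken from the source:
def EQ(a, b, rtol=1e-5, atol=1e-8):
    '''Approximate elementwise equality, within tolerance.'''
    return numpy.allclose(a, b, rtol=rtol, atol=atol)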
def set_codebook(self, D):
    '''Clobber the existing codebook and encoder with a new one.'''
    self.components_ = D

    extra_args = {}
    if self.penalty == 'l1_space':
        extra_args['height'] = D.shape[1]
        extra_args['width'] = D.shape[2]
        extra_args['pad_data'] = self.pad_data
        extra_args['nonneg'] = self.nonneg

        D = D.swapaxes(0, 1).swapaxes(1, 2)
        D = _cdl.patches_to_vectors(D, pad_data=self.pad_data)

    D = _cdl.columns_to_diags(D)
    self.fft_components_ = D

    encoder, D, diagnostics = _cdl.learn_dictionary([],
                                                    self.n_atoms,
                                                    reg=self.penalty,
                                                    alpha=self.alpha,
                                                    max_steps=0,
                                                    verbose=False,
                                                    D=D,
                                                    **extra_args)
    self.encoder_ = encoder
    return self
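# Usage sketch (hypothetical): installing a pre-trained codebook so that
# subsequent encoding reuses it instead of learning one from scratch.
# Note max_steps=0 in the call above: learn_dictionary is invoked purely
# to construct an encoder around the supplied codebook, not to update it.
# The class name and constructor arguments below are illustrative
# assumptions, not taken from this file:
#
#   model = ConvolutionalDictionaryLearning(n_atoms=32, penalty='l1',
#                                           alpha=1e-2)
#   model.set_codebook(D_pretrained)
#   codes = model.transform(X_new)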
def __test(d, m):
    # Build a random dictionary in diagonal form
    X = numpy.random.randn(2 * d, m)

    # Normalize the dictionary and convert back to columns
    X_norm = _cdl.diags_to_columns(
        _cdl.normalize_dictionary(_cdl.columns_to_diags(X)))

    # Rescale the normalized dictionary so that some atoms land inside
    # the unit ball and some outside
    R = 2.0**numpy.linspace(-4, 4, m)
    X_scale = X_norm * R

    # Vectorize
    V_scale = _cdl.columns_to_vector(X_scale)

    # Project
    V_proj = _cdl.proj_l2_ball(V_scale, m)

    # Rearrange the projected vector back into columns
    X_proj = _cdl.vector_to_columns(V_proj, m)

    # Compute norms
    X_scale_norms = numpy.sum(X_scale**2, axis=0)**0.5
    X_proj_norms = numpy.sum(X_proj**2, axis=0)**0.5

    Xdot = numpy.sum(X_scale * X_proj, axis=0)

    for k in range(m):
        # 1. verify norm is at most 1
        #    allow some fudge factor here for numerical instability
        assert X_proj_norms[k] <= 1.0 + 1e-10

        # 2. verify that points with R < 1 were untouched
        assert R[k] > 1.0 or EQ(X_proj[:, k], X_scale[:, k])

        # 3. verify that points with R >= 1.0 preserve direction
        assert R[k] < 1.0 or EQ(Xdot[k], X_scale_norms[k])
        pass
    pass
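# What the three assertions above imply about the projection: each atom is
# left alone if its l2 norm is at most 1, and rescaled onto the unit sphere
# otherwise. A naive per-column reference sketch (not the library's
# implementation, which operates on the vectorized form):
def _proj_l2_ball_columns(X):
    '''Project each column of X onto the unit l2 ball.'''
    norms = numpy.sum(X**2, axis=0)**0.5
    scale = 1.0 / numpy.maximum(norms, 1.0)   # shrink only columns with norm > 1
    return X * scale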
def __test(d, m):
    # Generate a random 2d-by-m matrix
    X = numpy.random.randn(2 * d, m)

    # Cut it in half to get the real and imag components
    A = X[:d, :]
    B = X[d:, :]

    # Convert to its diagonal-block form
    Q = _cdl.columns_to_diags(X)

    for k in range(m):
        for j in range(d):
            # Verify A
            assert (EQ(A[j, k], Q[j, k * d + j]) and
                    EQ(A[j, k], Q[d + j, m * d + k * d + j]))

            # Verify B
            assert (EQ(B[j, k], -Q[j, m * d + k * d + j]) and
                    EQ(B[j, k], Q[d + j, k * d + j]))
            pass
        pass
    pass
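# The indexing above encodes the familiar real-matrix representation of a
# complex number. For a single column with real part a and imaginary part b
# (d = 1, m = 1), the assertions pin Q down to
#
#     Q = [[ a, -b],
#          [ b,  a]]
#
# so multiplying Q against a stacked real/imaginary vector performs complex
# multiplication. For d > 1, Q is 2d-by-2dm and each of the four d-by-d
# blocks per column is diagonal, which is where columns_to_diags gets its name.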
def __test(d, m):
    X = numpy.random.randn(2 * d, m)

    Q = _cdl.columns_to_diags(X)

    X_back = _cdl.diags_to_columns(Q)

    assert EQ(X, X_back)
    pass
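# A plausible nose-style driver for the round-trip check above; in this
# codebase each __test helper presumably sits under its own test generator.
# The (d, m) grid is an illustrative assumption, not taken from the source:
def test_diags_roundtrip():
    for d in [1, 2, 4, 8]:
        for m in [1, 2, 4, 8]:
            yield __test, d, m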