def test_ksvd_normal_input(self):
    """Fit KSVD ('normal' method) on synthetic data and check that the
    training error, the recovered dictionary, and the reconstruction
    from sparse codes are all within tolerance."""
    sparsity = 4
    num_samples, num_features, num_atoms = 512, 32, 24
    iter_budget = 500
    true_dict, data = generate_dictionary_and_samples(
        num_samples, num_features, num_atoms, sparsity)
    model = KSVD(n_components=num_atoms,
                 transform_n_nonzero_coefs=sparsity,
                 max_iter=iter_budget,
                 method='normal')
    model.fit(data)
    # Training must converge to a small residual within the iteration budget.
    self.assertTrue(model.error_[-1] < 10)
    self.assertTrue(model.n_iter_ <= iter_budget)
    # The learned dictionary should be close to the generating one.
    dict_gap = np.linalg.norm(model.components_ - true_dict, ord='fro')
    self.assertTrue(dict_gap < 15)
    # Reconstructing from the sparse codes should approximate the data.
    codes = model.transform(data)
    rebuilt = codes @ model.components_
    rebuild_gap = np.linalg.norm(rebuilt - data, ord='fro')
    self.assertTrue(rebuild_gap < 15)
def test_transform_with_mask(self):
    """Transform data containing missing entries and verify the masked
    reconstruction error does not exceed the initial training error."""
    sparsity = 4
    num_samples, num_features, num_atoms = 128, 32, 16
    iter_budget = 10
    missing_value = 0
    _, data = generate_dictionary_and_samples(
        num_samples, num_features, num_atoms, sparsity)
    # Knock out small entries to simulate missing observations.
    data[data < 0.1] = missing_value
    observed = np.where(data == missing_value, 0, 1)
    model = KSVD(n_components=num_atoms,
                 transform_n_nonzero_coefs=sparsity,
                 max_iter=iter_budget,
                 missing_value=missing_value,
                 method='normal')
    model.fit(data)
    # Error over observed entries only must be bounded by the first
    # training error.
    codes = model.transform(data)
    residual = observed * (data - codes.dot(model.components_))
    self.assertTrue(np.linalg.norm(residual, 'fro') <= model.error_[0])
    self.assertTrue(model.n_iter_ <= iter_budget)
def test_sparse_encode_with_l21_norm(self):
    """Encode samples against the true dictionary using the L2,1-norm
    solver and verify the reconstruction residual is small."""
    sparsity = 5
    num_samples, num_features, num_atoms = 128, 64, 32
    true_dict, data = generate_dictionary_and_samples(
        num_samples, num_features, num_atoms, sparsity)
    codes = sparse_encode_with_l21_norm(data, true_dict)
    # Residual of the encoded approximation must stay under tolerance.
    residual = np.linalg.norm(data - codes.dot(true_dict), 'fro')
    self.assertTrue(residual < 50)
def test_approximate_ksvd(self):
    """Fit KSVD with the 'approximate' update and verify the training
    error is non-increasing over iterations."""
    sparsity = 5
    num_samples, num_features, num_atoms = 128, 32, 16
    iter_budget = 10
    _, data = generate_dictionary_and_samples(
        num_samples, num_features, num_atoms, sparsity)
    model = KSVD(n_components=num_atoms,
                 transform_n_nonzero_coefs=sparsity,
                 max_iter=iter_budget,
                 method='approximate')
    model.fit(data)
    # Final error must not exceed the initial one, within the budget.
    self.assertTrue(model.error_[-1] <= model.error_[0])
    self.assertTrue(model.n_iter_ <= iter_budget)
def test_approximate_ksvd_warm_start(self):
    """Repeated single-iteration fits (warm start) must keep the
    training error monotonically non-increasing across calls."""
    sparsity = 5
    num_samples, num_features, num_atoms = 128, 32, 16
    _, data = generate_dictionary_and_samples(
        num_samples, num_features, num_atoms, sparsity)
    model = KSVD(n_components=num_atoms,
                 transform_n_nonzero_coefs=sparsity,
                 max_iter=1,
                 method='approximate')
    # Start from the trivial bound: the norm of the raw data.
    last_error = np.linalg.norm(data, 'fro')
    for _ in range(10):
        model.fit(data)
        # Each warm-started fit must not worsen the error.
        self.assertTrue(model.error_[-1] <= last_error)
        last_error = model.error_[-1]
def test_ksvd_input_with_missing_values(self):
    """Fit KSVD on data with missing entries and verify the training
    error decreases within the iteration budget."""
    sparsity = 4
    num_samples, num_features, num_atoms = 128, 32, 16
    iter_budget = 100
    missing_value = 0
    _, data = generate_dictionary_and_samples(
        num_samples, num_features, num_atoms, sparsity)
    # Knock out small entries to simulate missing observations.
    data[data < 0.1] = missing_value
    model = KSVD(n_components=num_atoms,
                 transform_n_nonzero_coefs=sparsity,
                 max_iter=iter_budget,
                 missing_value=missing_value,
                 method='normal')
    model.fit(data)
    # Final error must not exceed the initial one, within the budget.
    self.assertTrue(model.error_[-1] <= model.error_[0])
    self.assertTrue(model.n_iter_ <= iter_budget)
def test_transform(self):
    """After a full fit, transform() must produce codes whose
    reconstruction error is no worse than the final training error."""
    sparsity = 4
    num_samples, num_features, num_atoms = 128, 32, 24
    iter_budget = 500
    _, data = generate_dictionary_and_samples(
        num_samples, num_features, num_atoms, sparsity)
    model = KSVD(n_components=num_atoms,
                 transform_n_nonzero_coefs=sparsity,
                 max_iter=iter_budget,
                 method='normal')
    model.fit(data)
    # Codes produced post-fit should reconstruct at least as well as
    # the last recorded training error.
    codes = model.transform(data)
    residual = np.linalg.norm(data - codes.dot(model.components_), 'fro')
    self.assertTrue(residual <= model.error_[-1])
    self.assertTrue(model.n_iter_ <= iter_budget)
def test_sparse_encode_with_mask(self):
    """Masked OMP encoding against the true dictionary should yield a
    small residual over the observed (mask == 1) entries."""
    sparsity = 5
    num_samples, num_features, num_atoms = 128, 64, 32
    true_dict, data = generate_dictionary_and_samples(
        num_samples, num_features, num_atoms, sparsity)
    # Random mask: keep roughly 80% of the entries as observed.
    observed = np.where(
        np.random.rand(data.shape[0], data.shape[1]) < 0.8, 1, 0)
    codes = sparse_encode_with_mask(
        data, true_dict, observed, algorithm='omp', n_nonzero_coefs=sparsity)
    # Residual restricted to observed entries must stay under tolerance.
    residual = observed * (data - codes.dot(true_dict))
    self.assertTrue(np.linalg.norm(residual, 'fro') < 50)
def test_sparse_encode_with_no_mask(self):
    """With an all-ones mask, masked encoding must match plain
    sparse_encode: both reconstructions have (numerically) equal error."""
    sparsity = 3
    num_samples, num_features, num_atoms = 64, 32, 10
    true_dict, data = generate_dictionary_and_samples(
        num_samples, num_features, num_atoms, sparsity)
    full_mask = np.ones(data.shape)
    plain_codes = sparse_encode(
        data, true_dict, algorithm='omp', n_nonzero_coefs=sparsity)
    masked_codes = sparse_encode_with_mask(
        data, true_dict, full_mask, algorithm='omp', n_nonzero_coefs=sparsity)
    # The two encodings should reconstruct equally well (up to fp noise).
    err_plain = np.linalg.norm(data - plain_codes.dot(true_dict), 'fro')
    err_masked = np.linalg.norm(data - masked_codes.dot(true_dict), 'fro')
    self.assertTrue(abs(err_plain - err_masked) < 1e-8)