def ksvd(Y, D, X, n_cycles=1, verbose=True):
    """K-SVD dictionary update step.

    For each atom, takes the residual restricted to the samples that use
    that atom and replaces the atom (and its code row) with the rank-1
    approximation obtained from a truncated randomized SVD.

    Parameters
    ----------
    Y : ndarray, shape (n_features, n_samples)
        Data matrix being approximated.
    D : ndarray, shape (n_features, n_atoms)
        Dictionary; updated in place.
    X : ndarray, shape (n_atoms, n_samples)
        Sparse codes; updated in place.
    n_cycles : int
        Number of full passes over all atoms.
    verbose : bool
        If True, write a progress percentage to stdout.

    Returns
    -------
    D, X, unused_atoms : the updated dictionary and codes, plus the list of
        atom indices that had no nonzero coefficient (appended once per cycle).
    """
    n_atoms = D.shape[1]
    n_features, n_samples = Y.shape
    unused_atoms = []
    # fast_dot was removed from scikit-learn; np.dot is equivalent.
    R = Y - np.dot(D, X)

    for c in range(n_cycles):
        for k in range(n_atoms):
            if verbose:
                sys.stdout.write("\r" + "k-svd..." + ":%3.2f%%" % ((k / float(n_atoms)) * 100))
                sys.stdout.flush()
            # find all the datapoints that use the kth atom
            omega_k = X[k, :] != 0
            if not np.any(omega_k):
                unused_atoms.append(k)
                continue
            # the residual due to all the other atoms but k
            Rk = R[:, omega_k] + np.outer(D[:, k], X[k, omega_k])
            # rank-1 update: best atom/code pair for this restricted residual
            U, S, V = randomized_svd(Rk, n_components=1, n_iter=10, flip_sign=False)
            D[:, k] = U[:, 0]
            X[k, omega_k] = V[0, :] * S[0]
            # update the residual
            R[:, omega_k] = Rk - np.outer(D[:, k], X[k, omega_k])
    # print() form works on both Python 2 and 3 (the original `print ""`
    # statement is a syntax error under Python 3)
    print("")
    return D, X, unused_atoms
def approx_ksvd(Y, D, X, n_cycles=1, verbose=True):
    """Approximate K-SVD dictionary update.

    Same structure as `ksvd`, but instead of an exact rank-1 SVD each atom
    is updated by one step of alternating optimization: D[:, k] from the
    residual and the current codes, then X[k, :] from the new atom.

    Parameters
    ----------
    Y : ndarray, shape (n_features, n_samples)
        Data matrix being approximated.
    D : ndarray, shape (n_features, n_atoms)
        Dictionary; updated in place.
    X : ndarray, shape (n_atoms, n_samples)
        Sparse codes; updated in place.
    n_cycles : int
        Number of full passes over all atoms.
    verbose : bool
        If True, write a progress percentage to stdout.

    Returns
    -------
    D, X, unused_atoms : the updated dictionary and codes, plus the list of
        atom indices that had no nonzero coefficient (appended once per cycle).
    """
    n_atoms = D.shape[1]
    n_features, n_samples = Y.shape
    unused_atoms = []
    # fast_dot was removed from scikit-learn; np.dot is equivalent.
    R = Y - np.dot(D, X)

    for c in range(n_cycles):
        for k in range(n_atoms):
            if verbose:
                sys.stdout.write("\r" + "k-svd..." + ":%3.2f%%" % ((k / float(n_atoms)) * 100))
                sys.stdout.flush()
            # find all the datapoints that use the kth atom
            omega_k = X[k, :] != 0
            if not np.any(omega_k):
                unused_atoms.append(k)
                continue
            # the residual due to all the other atoms but k
            Rk = R[:, omega_k] + np.outer(D[:, k], X[k, omega_k])
            # update of D[:, k]: project the residual onto the current codes,
            # then renormalize the atom
            D[:, k] = np.dot(Rk, X[k, omega_k])
            D[:, k] = normalize(D[:, k])
            # update of X[k, :]: project the residual onto the new atom
            X[k, omega_k] = np.dot(Rk.T, D[:, k])
            # update the residual
            R[:, omega_k] = Rk - np.outer(D[:, k], X[k, omega_k])
    # print() form works on both Python 2 and 3 (the original `print ""`
    # statement is a syntax error under Python 3)
    print("")
    return D, X, unused_atoms
def nn_ksvd(Y, D, X, n_cycles=1, verbose=True):
    """Non-negative K-SVD dictionary update.

    Single pass over the atoms. Each atom's restricted residual is first
    approximated by a rank-1 SVD, the factors are clipped to be
    non-negative, then refined with `n_cycles` alternating non-negative
    least-squares updates before being written back.

    Parameters
    ----------
    Y : ndarray, shape (n_features, n_samples)
        Data matrix being approximated.
    D : ndarray, shape (n_features, n_atoms)
        Dictionary; updated in place.
    X : ndarray, shape (n_atoms, n_samples)
        Sparse codes; updated in place.
    n_cycles : int
        NOTE: unlike `ksvd`/`approx_ksvd`, here this is the number of inner
        alternating-projection iterations per atom, not outer passes.
    verbose : bool
        If True, write a progress percentage to stdout.

    Returns
    -------
    D, X, unused_atoms : the updated dictionary and codes, plus the list of
        atom indices that had no nonzero coefficient.
    """
    n_atoms = D.shape[1]
    n_features, n_samples = Y.shape
    unused_atoms = []
    # fast_dot was removed from scikit-learn; np.dot is equivalent.
    R = Y - np.dot(D, X)

    for k in range(n_atoms):
        if verbose:
            sys.stdout.write("\r" + "k-svd..." + ":%3.2f%%" % ((k / float(n_atoms)) * 100))
            sys.stdout.flush()
        # find all the datapoints that use the kth atom
        omega_k = X[k, :] != 0
        if not np.any(omega_k):
            unused_atoms.append(k)
            continue
        # the residual due to all the other atoms but k
        Rk = R[:, omega_k] + np.outer(D[:, k], X[k, omega_k])
        try:
            U, S, V = randomized_svd(Rk, n_components=1, n_iter=50, flip_sign=False)
        except Exception:
            # narrowed from a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; keep the best-effort skip
            warnings.warn('SVD error')
            continue
        d = U[:, 0]
        x = V[0, :] * S[0]
        # projection to the constraint set
        d[d < 0] = 0
        x[x < 0] = 0
        dTd = np.dot(d, d)
        xTx = np.dot(x, x)
        # skip atoms whose clipped factors vanished entirely
        if dTd <= np.finfo('float').eps or xTx <= np.finfo('float').eps:
            continue
        # alternating non-negative refinement of the rank-1 pair
        # NOTE(review): the divisors are only checked before this loop;
        # clipping inside an iteration could in principle zero them — confirm
        for j in range(n_cycles):
            d = np.dot(Rk, x) / np.dot(x, x)
            d[d < 0] = 0
            x = np.dot(d.T, Rk) / np.dot(d, d)
            x[x < 0] = 0
        # normalize the atom and move the scale into the codes
        _norm = norm(d)
        d = d / _norm
        x = x * _norm
        D[:, k] = d
        X[k, omega_k] = x
        # update the residual
        R[:, omega_k] = Rk - np.outer(D[:, k], X[k, omega_k])
    # print() form works on both Python 2 and 3 (the original `print ""`
    # statement is a syntax error under Python 3)
    print("")
    return D, X, unused_atoms
def global_error(X, D, sparse_coder, n_class_atoms, n_jobs=1):
    """
    Compute the approximation error of the dataset under each
    class-specific sub-dictionary.

    The dataset is first encoded over the joint dictionary, then for each
    class the per-sample squared reconstruction error using only that
    class's atoms is recorded.

    Parameters
    ----------
    X : ndarray, shape (n_features, n_samples)
        Data matrix.
    D : ndarray, shape (n_features, total_atoms)
        Joint dictionary (all classes' atoms concatenated).
    sparse_coder : callable
        `sparse_coder(X, D)` returning codes Z of shape (total_atoms, n_samples).
    n_class_atoms : sequence of int
        Number of atoms belonging to each class.
    n_jobs : int
        If > 1, temporarily raise the OpenBLAS thread count for the dots.

    Returns
    -------
    E : ndarray, shape (n_classes, n_samples)
        E[c, i] is the squared error of sample i under class c's atoms.
    """
    Z = sparse_coder(X, D)
    n_samples = X.shape[1]
    n_classes = len(n_class_atoms)
    E = np.zeros((n_classes, n_samples))

    if n_jobs > 1:
        set_openblas_threads(n_jobs)
    for c in range(n_classes):
        c_idx = get_class_atoms(c, n_class_atoms)
        # per-sample squared reconstruction error using only class c's atoms
        # (fast_dot was removed from scikit-learn; np.dot is equivalent)
        E[c, :] = np.sum((np.dot(D[:, c_idx], Z[c_idx, :]) - X) ** 2, axis=0)
    if n_jobs > 1:
        # restore single-threaded BLAS
        set_openblas_threads(1)
    return E
def approx_error_proc(X, Z, D):
    """Return the squared Frobenius norm of the residual, ||X - D Z||_F^2.

    Parameters
    ----------
    X : ndarray, shape (n_features, n_samples) — data matrix.
    Z : ndarray, shape (n_atoms, n_samples) — sparse codes.
    D : ndarray, shape (n_features, n_atoms) — dictionary.
    """
    # fast_dot was removed from scikit-learn; np.dot is equivalent.
    # The squared Frobenius norm is computed directly as the sum of
    # squared entries of the residual.
    residual = X - np.dot(D, Z)
    error = np.sum(residual ** 2)
    return error
def approx_error(D, Z, X, n_jobs=1):
    """Compute the approximation error ||X - D Z||_{F}^{2}.

    Parameters
    ----------
    D : ndarray, shape (n_features, n_atoms) — dictionary.
    Z : ndarray, shape (n_atoms, n_samples) — sparse codes.
    X : ndarray, shape (n_features, n_samples) — data matrix.
    n_jobs : int
        If > 1, temporarily raise the OpenBLAS thread count for the dot.

    Returns
    -------
    float — the squared Frobenius norm of the residual.
    """
    if n_jobs > 1:
        set_openblas_threads(n_jobs)
    # fast_dot was removed from scikit-learn; np.dot is equivalent.
    residual = X - np.dot(D, Z)
    error = np.sum(residual ** 2)
    if n_jobs > 1:
        # bug fix: restore single-threaded BLAS afterwards, matching
        # global_error's convention (the original left the thread count raised)
        set_openblas_threads(1)
    return error