import numpy as np
from sklearn.utils import shuffle


def sec_rot_invar(raw_data, D_0=None, n_filters=20, n_theta=6, n_sections=6,
                  eta=1e-2, sparsity=10, n_epochs=4, EV_SCORE=True):
    '''Online rotation-invariant dictionary learning with an LMS update.

    n_filters: number of dictionary atoms
    n_theta: number of oriented realizations of each filter
    n_sections: number of angular sections used by `shift_filters`
    '''
    # data = shuffle(raw_data).T  # optional initial shuffle
    data = raw_data.T
    m, n = data.shape

    # Initialize the base dictionary with centered, unit-norm random atoms.
    if D_0 is None:
        D_base = 1 - 2 * np.random.rand(m, n_filters)
        D_base -= np.mean(D_base, axis=0, keepdims=True)
        D_base /= np.linalg.norm(D_base, axis=0)
        D_t = D_base
    else:
        D_t = D_0

    # Expand each base atom into its n_theta rotated copies.
    Theta_t = np.zeros(n_filters, dtype=int)
    D_r = shift_filters(D_t, n_theta, Theta_t, n_sections)
    D_r -= np.mean(D_r, axis=0, keepdims=True)
    D_r /= np.linalg.norm(D_r, axis=0, keepdims=True)

    losses = []
    for epoch in range(n_epochs):
        for t in range(n):
            x_t = data[:, t]

            # Select the rotated atoms that correlate most with x_t.
            idx_t, alphas_t, theta_t = rot_invar_omp(D_r, x_t, sparsity, n_theta)

            # Extract the selected columns of D_r and store them at the
            # corresponding base positions of D_t.
            d_t = D_r[:, idx_t]
            D_t[:, idx_t // n_theta] = d_t

            # Dense coefficient vector (currently unused).
            Alpha_t = np.zeros((n_filters, 1))
            Alpha_t[idx_t // n_theta, 0] = alphas_t

            # Dictionary update: LMS step on each active atom, followed by
            # projection onto the unit ball.
            for j in range(len(alphas_t)):
                col = idx_t[j] // n_theta
                D_t[:, col] += eta * (x_t - D_t[:, col] * alphas_t[j]) * alphas_t[j]
                D_t[:, col] /= max(np.linalg.norm(D_t[:, col], ord=2), 1.)

            # Rotate D_t back to regenerate the expanded dictionary D_r.
            Theta_t[idx_t // n_theta] = theta_t
            D_r = shift_filters(D_t, n_theta, Theta_t, n_sections)

            if EV_SCORE and (t % 500 == 0):
                losses.append(score_rot_invar_dic(data, D_r, n_theta, sparsity))

        # Reshuffle the samples between epochs.
        data = shuffle(data.T).T
    return D_r, losses
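# Minimal usage sketch (assumes shift_filters, rot_invar_omp and
# score_rot_invar_dic, defined elsewhere in this module/repo, are available
# at call time; the random data below is illustrative only).
def _demo_sec_rot_invar(m=64, n_samples=2000):
    raw_data = np.random.randn(n_samples, m)  # one patch per row
    D_r, losses = sec_rot_invar(raw_data, n_filters=20, n_theta=6,
                                n_sections=6, sparsity=5, n_epochs=1)
    print(D_r.shape)  # (m, n_filters * n_theta)
    return D_r, losses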
def code_samples_rot_invar_dic(data, D_r, n_theta, sparsity):
    '''Encode every sample and return one (k * n_theta)-dimensional
    coefficient vector per sample, stacked into an (n, k * n_theta) matrix.'''
    m, n = data.shape
    m, ktheta = D_r.shape
    k = ktheta // n_theta
    coef_samples = []
    for t in range(n):
        x_t = data[:, t]
        coeffs = np.zeros((k, n_theta))
        idx_t, alphas_t, theta_t = rot_invar_omp(D_r, x_t, sparsity, n_theta)
        for i in range(len(alphas_t)):
            # Row = base filter, column = orientation.
            coeffs[idx_t[i] // n_theta, idx_t[i] % n_theta] += np.abs(alphas_t[i])
        coef_samples.append(coeffs.flatten())
    return np.vstack(coef_samples)
def code_rot_invar_dic(data, D_r, n_theta, sparsity):
    '''Accumulate squared coefficients over all samples into a (k, n_theta)
    activation map: row = base filter, column = orientation.'''
    m, n = data.shape
    m, ktheta = D_r.shape
    k = ktheta // n_theta
    activation_map = np.zeros((k, n_theta))
    for t in range(n):
        x_t = data[:, t]
        idx_t, alphas_t, theta_t = rot_invar_omp(D_r, x_t, sparsity, n_theta)
        for i in range(len(alphas_t)):
            activation_map[idx_t[i] // n_theta, idx_t[i] % n_theta] += alphas_t[i] ** 2
    return activation_map
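# Minimal interpretation sketch (not part of the learning code): reading
# per-filter orientation statistics off an activation map. The random map
# below is a stand-in for the output of code_rot_invar_dic.
def _demo_activation_map(k=20, n_theta=6):
    activation_map = np.random.rand(k, n_theta)    # placeholder map
    dominant_theta = activation_map.argmax(axis=1)  # preferred orientation per filter
    total_energy = activation_map.sum(axis=1)       # overall usage per filter
    return dominant_theta, total_energy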
def score_rot_invar_dic(data, D_r, n_theta, sparsity, mask=None):
    '''Average reconstruction loss
        L = 1/(2n) * sum_t || mask * (x_t - D_r[:, idx_t] @ alphas_t) ||^2,
    optionally restricted to the pixels selected by `mask`.'''
    m, n = data.shape
    L = 0
    mask_e = 1 if mask is None else mask.flatten()
    for t in range(n):
        x_t = data[:, t]
        idx_t, alphas_t, theta_t = rot_invar_omp(D_r, x_t, sparsity, n_theta)
        d_t = D_r[:, idx_t]
        e_t = (x_t - np.dot(d_t, alphas_t)) * mask_e
        L += np.dot(e_t, e_t) / (2 * n)
    return L
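# Minimal scoring sketch (assumes rot_invar_omp and a learned D_r are
# available; the centered box mask below is illustrative only).
def _demo_score(data, D_r, n_theta=6, sparsity=10):
    full = score_rot_invar_dic(data, D_r, n_theta, sparsity)
    # Restrict the error to a region of interest.
    side = int(np.sqrt(data.shape[0]))
    mask = np.zeros((side, side), dtype=bool)
    mask[side // 4: 3 * side // 4, side // 4: 3 * side // 4] = True
    masked = score_rot_invar_dic(data, D_r, n_theta, sparsity, mask)
    return full, masked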
def sq_rot_invar(raw_data, mask, D_0=None, n_filters=20, n_theta=6,
                 eta=1e-2, sparsity=10, n_epochs=4, EV_SCORE=True):
    '''Rotation-invariant dictionary learning on square patches restricted
    to the support of `mask` (LMS update).

    n_filters: number of dictionary atoms
    n_theta: number of oriented realizations of each filter
    '''
    # data = shuffle(raw_data).T  # optional initial shuffle
    data = raw_data.T
    m, n = data.shape

    # Correction factor for centering: only the pixels inside the mask count.
    effective_dim = mask.sum()
    dummy_dim = mask.shape[0] * mask.shape[1]
    dim_ratio = float(dummy_dim) / effective_dim
    patch_size = mask.shape
    mask_D = np.repeat(mask.reshape((m, 1)), n_filters, axis=1)

    # Initialize masked, centered, unit-norm random atoms.
    if D_0 is None:
        D_base = 1 - 2 * np.random.rand(m, n_filters)
        D_base -= np.mean(D_base, axis=0, keepdims=True) * dim_ratio
        D_base *= mask_D
        D_base /= np.linalg.norm(D_base, axis=0)
        D_t = D_base
    else:
        D_t = mask_D * D_0

    Theta_t = np.zeros(n_filters, dtype=int)
    D_r = rotate_filters(D_t, n_theta, Theta_t, patch_size)
    D_r -= np.mean(D_r, axis=0, keepdims=True) * dim_ratio
    D_r /= np.linalg.norm(D_r, axis=0, keepdims=True)

    losses = []
    for epoch in range(n_epochs):
        for t in range(n):
            x_t = data[:, t]

            # Select the rotated atoms that correlate most with x_t.
            idx_t, alphas_t, theta_t = rot_invar_omp(D_r, x_t, sparsity, n_theta)
            d_t = D_r[:, idx_t]

            # Dictionary update: LMS step on each active atom, followed by
            # projection onto the unit ball.
            D_t[:, idx_t // n_theta] = d_t
            for j in range(len(alphas_t)):
                col = idx_t[j] // n_theta
                D_t[:, col] += eta * (x_t - D_t[:, col] * alphas_t[j]) * alphas_t[j]
                D_t[:, col] /= max(np.linalg.norm(D_t[:, col], ord=2), 1.)

            # Rotate D_t back to regenerate the expanded dictionary D_r.
            Theta_t[idx_t // n_theta] = theta_t
            D_r = rotate_filters(D_t, n_theta, Theta_t, patch_size)

            if EV_SCORE and (t % 500 == 0):
                losses.append(score_rot_invar_dic(data, D_r, n_theta, sparsity, mask))

        # Reshuffle the samples between epochs.
        data = shuffle(data.T).T
    return D_r, losses
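# Minimal usage sketch (assumes rotate_filters and rot_invar_omp, defined
# elsewhere in this module/repo, are available). A circular mask keeps the
# patch support rotation-symmetric; the data here is random and illustrative.
def _demo_sq_rot_invar(side=12, n_samples=2000):
    yy, xx = np.ogrid[:side, :side]
    c = (side - 1) / 2.
    mask = ((yy - c) ** 2 + (xx - c) ** 2) <= (side / 2.) ** 2
    raw_data = np.random.randn(n_samples, side * side)
    D_r, losses = sq_rot_invar(raw_data, mask, n_filters=20, n_theta=6,
                               sparsity=5, n_epochs=1)
    return D_r, losses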
def sec_rot_invar_rotation(raw_data, D_0=None, n_filters=20, n_theta=6,
                           n_sections=6, eta=0.0003, sparsity=10, n_epochs=4,
                           EV_SCORE=True):
    '''Rotation-update variant of `sec_rot_invar`: instead of an LMS step,
    the selected atoms are rotated in the plane spanned by the current
    reconstruction and the sample.

    n_filters: number of dictionary atoms
    n_theta: number of oriented realizations of each filter
    '''
    data = raw_data.T
    m, n = data.shape

    # Initialize the base dictionary with centered, unit-norm random atoms.
    if D_0 is None:
        D_base = 1 - 2 * np.random.rand(m, n_filters)
        D_base -= np.mean(D_base, axis=0, keepdims=True)
        D_base /= np.linalg.norm(D_base, axis=0)
        D_t = D_base
    else:
        D_t = D_0

    Theta_t = np.zeros(n_filters, dtype=int)
    D_r = shift_filters(D_t, n_theta, Theta_t, n_sections)
    D_r -= np.mean(D_r, axis=0, keepdims=True)
    D_r /= np.linalg.norm(D_r, axis=0, keepdims=True)

    losses = []
    for epoch in range(n_epochs):
        for t in range(n):
            x_t = data[:, t]

            # Select the rotated atoms that correlate most with x_t.
            idx_t, alphas_t, theta_t = rot_invar_omp(D_r, x_t, sparsity, n_theta)
            d_t = D_r[:, idx_t]
            D_t[:, idx_t // n_theta] = d_t

            # Dense coefficient vector (currently unused).
            Alpha_t = np.zeros((n_filters, 1))
            Alpha_t[idx_t // n_theta, 0] = alphas_t

            # Rotation update: move the reconstruction y_t toward x_t via the
            # rotation exp(2 * eta' * S) in the plane span{x_t, y_t}, where
            # S = x_t y_t^T - y_t x_t^T is skew-symmetric and lmbd is the sine
            # of the angle between x_t and y_t (assuming x_t is unit-norm).
            eta_prime = eta * m
            y_t = np.dot(d_t, alphas_t)
            y_t /= np.linalg.norm(y_t)
            lmbd = np.sqrt(1 - np.dot(y_t, x_t) ** 2)
            half_S = np.outer(x_t, y_t)
            S = half_S - half_S.T
            update = (np.identity(m)
                      + np.sin(2 * eta_prime * lmbd) / lmbd * S
                      + (1 - np.cos(2 * eta_prime * lmbd)) / lmbd ** 2 * np.dot(S, S))
            D_t[:, idx_t // n_theta] = np.dot(update, d_t)

            # Rotate D_t back to regenerate the expanded dictionary D_r.
            Theta_t[idx_t // n_theta] = theta_t
            D_r = shift_filters(D_t, n_theta, Theta_t, n_sections)

            if EV_SCORE and (t % 500 == 0):
                losses.append(score_rot_invar_dic(data, D_r, n_theta, sparsity))

        # Reshuffle the samples between epochs.
        data = shuffle(data.T).T
    return D_r, losses
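# Sanity-check sketch (assumption: x and y unit-norm, as the rotation formula
# above requires). Since S = x y^T - y x^T satisfies S^3 = -lmbd^2 * S, the
# update matrix equals the matrix exponential exp(2 * eta' * S) and is
# therefore orthogonal; this quick numeric check verifies that.
def _check_rotation_update(m=16, eta_prime=0.05, seed=0):
    rng = np.random.RandomState(seed)
    x = rng.randn(m)
    x /= np.linalg.norm(x)
    y = rng.randn(m)
    y /= np.linalg.norm(y)
    lmbd = np.sqrt(1 - np.dot(y, x) ** 2)
    S = np.outer(x, y) - np.outer(y, x)
    update = (np.identity(m)
              + np.sin(2 * eta_prime * lmbd) / lmbd * S
              + (1 - np.cos(2 * eta_prime * lmbd)) / lmbd ** 2 * np.dot(S, S))
    assert np.allclose(np.dot(update, update.T), np.identity(m), atol=1e-10)
    return update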