def update_L(func, x, L, beta=0.9):
    value, gradient, hessian = func(x, 2)
    # largest eigenvalue in absolute value; see
    # https://stackoverflow.com/questions/12167654/fastest-way-to-compute-k-largest-eigenvalues-and-corresponding-eigenvectors-with
    n = hessian.shape[0]
    evals_large, _ = largest_eigh(hessian, eigvals=(n - 1, n - 1))
    evals_small, _ = largest_eigh(hessian, eigvals=(0, 0))
    return max([L, abs(evals_large[0]), abs(evals_small[0])])
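# A minimal, hedged usage sketch for update_L. The quadratic objective below is
# hypothetical and exists only to show the expected func(x, 2) contract of
# returning (value, gradient, hessian).
import numpy as np
from scipy.linalg import eigh as largest_eigh

A = np.array([[4.0, 1.0], [1.0, 3.0]])  # symmetric, so it is its own Hessian

def quad_func(x, order):
    # f(x) = 0.5 * x^T A x; gradient is A x; Hessian is A (order is ignored)
    return 0.5 * x @ A @ x, A @ x, A

L_new = update_L(quad_func, np.ones(2), L=1.0)
print(L_new)  # max of the old L and the spectral radius of A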
def largest_eigenvalue(mat):
    D = np.dot(mat.T, mat)
    N = len(D)
    k = 1
    eigvalues = largest_eigh(D, eigvals=(N - k, N - 1), eigvals_only=True)
    print('D shape: ' + str(D.shape))
    return eigvalues[0]
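# Quick sanity check (a sketch, not part of the original code): the largest
# eigenvalue of mat.T @ mat equals the squared spectral norm of mat.
import numpy as np

mat = np.random.rand(6, 4)
lam = largest_eigenvalue(mat)
assert np.isclose(lam, np.linalg.norm(mat, ord=2) ** 2)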
def get_eig(S, m):
    # the m largest eigenvalues and eigenvectors, returned ascending by eigh
    val, vec = largest_eigh(S, eigvals=(len(S) - m, len(S) - 1))
    sorted_vec = np.fliplr(vec)  # reorder eigenvector columns largest-first
    val = np.flip(val)  # flip eigenvalues to descending order
    matrix = np.diag(val)  # diagonal matrix of the sorted eigenvalues
    return matrix, sorted_vec
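# A short usage sketch for get_eig, assuming a symmetric input S and the
# numpy / largest_eigh imports used throughout these snippets.
import numpy as np

S = np.cov(np.random.rand(5, 20))  # 5x5 symmetric covariance matrix
Lam, V = get_eig(S, m=2)
print(np.diag(Lam))  # the 2 largest eigenvalues, sorted descending
# columns of V are the matching eigenvectors, largest-eigenvalue first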
def spectralCluster(s, k, sigma=.12):
    # sigma is a tuning parameter; sweep over it to find a good value
    pointSet = np.asarray(list(zip(s[0], s[1])))
    n = len(pointSet)

    ### Step 1 ###
    # build the affinity matrix A from squared pairwise distances
    a = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            a[i][j] = np.power(
                np.linalg.norm(np.subtract(pointSet[i], pointSet[j])), 2)
    a = np.exp(np.divide(np.multiply(a, -1), 2 * sigma**2))
    # zero the diagonal
    for i in range(n):
        a[i][i] = 0

    ### Step 2 ###
    # D is a diagonal matrix of A's row sums
    d = np.zeros((n, n))
    for i in range(n):
        d[i][i] = np.sum(a[i])
    # L = D^(-1/2) A D^(-1/2)  (equation given in the paper)
    da = np.matmul(np.power(np.linalg.inv(d), 0.5), a)
    L = np.matmul(da, np.power(np.linalg.inv(d), 0.5))

    ### Step 3 ###
    # find the k largest eigenvalues and eigenvectors of L
    w, x = largest_eigh(L, eigvals=(n - k, n - 1))

    ### Step 4 ###
    # Y is X with each row normalized to unit length
    norms = np.linalg.norm(x, axis=1)
    y = x / norms[:, None]

    ### Step 5 ###
    # cluster the rows of Y with k-means
    kmeans = KMeans(n_clusters=k, max_iter=3000).fit(y)
    lab = np.expand_dims(np.asarray(kmeans.labels_), 1)

    ### Step 6 ###
    # assign each original point to its cluster
    labeledPoints = np.append(pointSet, lab, axis=1)
    points = labeledPoints[labeledPoints[:, 2].argsort()]
    split = np.bincount(points[:, 2].astype(int))
    start = 0
    end = 0
    plot = []
    for j in split:  # split per cluster for plotting
        end += j
        plot.append(points[start:end])
        start = end
    return plot, y  # k arrays of points per cluster, plus the Y matrix
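# A hedged end-to-end sketch on toy data: two well-separated 2-D blobs, passed
# in the (xs, ys) form spectralCluster expects. Assumes numpy, largest_eigh
# and sklearn's KMeans are imported as the function requires.
import numpy as np
from sklearn.cluster import KMeans  # used inside spectralCluster

rng = np.random.default_rng(0)
pts = np.vstack([rng.normal(0, .05, (30, 2)), rng.normal(1, .05, (30, 2))])
clusters, Y = spectralCluster((pts[:, 0], pts[:, 1]), k=2, sigma=.12)
print([len(c) for c in clusters])  # roughly [30, 30]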
def get_k_largest_eigenfaces(X, N, k, average, gil_sized, johnny_sized):
    # assumes num_of_pics, path, plt and matplotlib from the surrounding script
    vals_good, vecs_good = largest_eigh(
        ((np.dot(np.transpose(X), X)) / num_of_pics), eigvals=(N - k, N - 1))
    good_first_gil = average
    good_first_johnny = average
    for col in range(0, k):
        eigenvec_pic_good = np.ndarray.reshape(vecs_good[:, col], (50, 45))
        # eigenvec_pic_final_good = eigenvec_pic_good * (255 / np.max(eigenvec_pic_good))
        # elementwise product with the eigenface; note a standard eigenface
        # projection would use the scalar coefficient np.sum(face * eigenface)
        good_first_gil = good_first_gil + (
            gil_sized * eigenvec_pic_good) * eigenvec_pic_good
        good_first_johnny = good_first_johnny + (
            johnny_sized * eigenvec_pic_good) * eigenvec_pic_good
    plt.gray()
    matplotlib.image.imsave(
        path + "\johnny.jpg" + "_%d_first_johnny_rec.jpg" % (k),
        good_first_johnny)
    matplotlib.image.imsave(path + "\gil.jpg" + "_%d_first_gil_rec.jpg" % (k),
                            good_first_gil)
def MDS(codebook, dimensions):
    num_points = len(codebook)
    # matrix of SQUARED distances (really this is P^2)
    P = dist.squareform(dist.pdist(codebook, 'euclidean'))**2
    I = np.identity(num_points)
    ONE = np.ones((num_points, num_points))
    J = I - (1. / num_points) * ONE  # centering matrix for MDS
    # double centering; J and P are ndarrays, so use matrix products, not '*'
    B = -0.5 * J.dot(P).dot(J)
    # the `dimensions` largest eigenvalues (L) and corresponding eigenvectors (E)
    (L, E) = largest_eigh(B, eigvals=(num_points - dimensions, num_points - 1))
    # sort from highest to lowest and take square roots
    L = np.sqrt(np.flipud(L))
    # sort eigenvectors from largest to smallest eigenvalue
    E = np.fliplr(E)
    # scale each eigenvector column by sqrt(eigenvalue); the sign of an
    # eigenvector is arbitrary, so the -1 just fixes the orientation
    return -1 * (E * L)
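# A small hedged check of MDS, assuming scipy.spatial.distance is imported as
# dist (as the function body requires): three collinear points embedded in one
# dimension should keep their pairwise distances.
import numpy as np
import scipy.spatial.distance as dist
from scipy.linalg import eigh as largest_eigh

codebook = np.array([[0.0], [1.0], [3.0]])
coords = MDS(codebook, dimensions=1)
print(dist.squareform(dist.pdist(coords)))  # ~ [[0,1,3],[1,0,2],[3,2,0]]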
start = time.time()
print((time.time() - start) * 100)

# use a dot product to build a symmetric test matrix for the new method
np.set_printoptions(suppress=True)
# fix the random seed so the timings are reproducible; change it (say to 2) or
# loop over several seeds to collect more measurements
np.random.seed(3)
N = 5000
k = 10
X = np.random.random((N, N)) - 2.5
X = np.dot(X, X.T)  # create a symmetric matrix

# Benchmark the dense routine
start = time.perf_counter()
evals_large, evecs_large = largest_eigh(X, eigvals=(N - k, N - 1))
elapsed = time.perf_counter() - start
print("eigh elapsed time: ", elapsed)
print(X)

# Benchmark the sparse routine
start = time.perf_counter()
evals_large_sparse, evecs_large_sparse = largest_eigsh(X, k, which='LM')
elapsed = time.perf_counter() - start
print("eigsh elapsed time: ", elapsed)

# approach 2: NumPy universal functions
import numpy as np
import time
import sys  # to inspect the memory occupied by a list vs. a numpy array
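# A minimal sketch of the list-vs-array memory comparison the sys import above
# is for; exact numbers vary by platform and Python build.
import sys
import numpy as np

n = 1000
py_list = list(range(n))
np_array = np.arange(n)
# per-element size * count for the list (ignores the list object's own overhead)
print("list bytes :", sys.getsizeof(py_list[0]) * n)
print("array bytes:", np_array.itemsize * np_array.size)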
times = np.zeros((N, 4))
H = np.random.random((N, N)) - 0.5
H = np.dot(H, H.T)  # create a symmetric matrix
for i in range(N - 50):
    print(i)
    i = i + 15  # note: rebinding the loop variable does not skip iterations
    n = i
    conv = 1
    times[i - 2][0] = i

    # Benchmark the dense routine
    start = time.perf_counter()
    evals_large, evecs_large = largest_eigh(H[0:n, 0:n],
                                            eigvals=(n - k, n - 1))
    elapsed = time.perf_counter() - start
    times[i - 2][1] = elapsed
    # print("eigh elapsed time: ", elapsed)

    # Benchmark the sparse routine
    start = time.perf_counter()
    evals_large_sparse, evecs_large_sparse = largest_eigsh(H[0:n, 0:n], k,
                                                           which='LM')
    elapsed = time.perf_counter() - start
    times[i - 2][2] = elapsed
    # print("eigsh elapsed time: ", elapsed)

    start = time.perf_counter()
    phi0 = np.random.rand(n)
    # print(la.eig(H)[1].T)
current_pic_vec = np.ndarray.reshape(current_pic, (1, d))
if X.all() == 1:  # X still holds its all-ones placeholder: start the stack
    X = current_pic_vec
    continue
else:
    X = np.vstack((X, current_pic_vec))
    continue

print("Calc Pn : number of images = ", num_of_pics)

# calculate the first 10 eigenfaces
N = d
k = 10
vals, vecs = largest_eigh(((np.dot(np.transpose(X), X)) / num_of_pics),
                          eigvals=(N - k, N - 1))
print("calculate first 10 eigenfaces : dim of the first eigenvector is: ",
      vecs.shape)
plt.gray()
for col in range(0, 10):
    eigenvec_pic = np.ndarray.reshape(vecs[:, col], (50, 45))
    eigenvec_pic_final = eigenvec_pic * (255 / np.max(eigenvec_pic))
    newpath = path + "\%d_eigenvector.jpg" % (col + 1)
    matplotlib.image.imsave(newpath + "_lower_contrast.jpg",
                            eigenvec_pic_final)
    plt.imsave(newpath, eigenvec_pic_final, vmin=0, vmax=255)

# pre-processing own pics
print("Pre Processing own images")
johnny = io.imread(path + "\johnny.jpg", as_grey=True)
gil = io.imread(path + "\gil.jpg", as_grey=True)
data = np.genfromtxt('./toy_data/traizines.csv', delimiter=',',
                     skip_header=True)
A = data[:, 1:]
b = data[:, 0]
poly = PolynomialFeatures(poly_degree)
A = poly.fit_transform(A)[:, 1:]
A = (A - A.mean(axis=0)) / A.std(axis=0)
b = b - b.mean(axis=0)
m, n = A.shape
print('')
print(' * dim A =', A.shape)
print(' * max_lam(AAt) = %.4e' %
      largest_eigh(np.dot(A, A.transpose()), eigvals=(m - 1, m - 1))[0][0])
print('')

# find lam1_max, and determine lam1 and lam2
Atb = np.dot(A.transpose(), b)
lam1_max = LA.norm(Atb, np.inf) / alpha
lam1 = alpha * c_lam * lam1_max
lam2 = (1 - alpha) * c_lam * lam1_max

# -------------------- #
#  ssnal_elastic_core  #
# -------------------- #
print('')
print(' * start ssnal_elastic')
out_core = ssnal_elastic_core(A=A,
def calculate_eigs(square_matrix):
    # full spectrum, returned in ascending order by scipy.linalg.eigh
    N = len(square_matrix)
    sorted_eigen_values, sorted_eigen_vectors = largest_eigh(
        square_matrix, eigvals=(0, N - 1))
    return sorted_eigen_values, sorted_eigen_vectors
def calculate_K_largest_eigs(square_matrix, K):
    # the K largest eigenvalues/eigenvectors, ascending within the slice
    N = len(square_matrix)
    evalues_large, evectors_large = largest_eigh(square_matrix,
                                                 eigvals=(N - K, N - 1))
    return evalues_large, evectors_large
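# Usage sketch for the two helpers above (a sketch under the same import
# assumptions as the benchmark snippet below).
import numpy as np
from scipy.linalg import eigh as largest_eigh

M = np.random.rand(6, 6)
M = M @ M.T  # symmetric, as eigh requires
all_vals, _ = calculate_eigs(M)
top_vals, _ = calculate_K_largest_eigs(M, K=2)
assert np.allclose(top_vals, all_vals[-2:])  # tail of the ascending spectrum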
import numpy as np
from time import perf_counter
from scipy.linalg import eigh as largest_eigh
from scipy.sparse.linalg import eigsh as largest_eigsh

np.set_printoptions(suppress=True)
np.random.seed(0)
N = 500
k = 10
X = np.random.random((N, N)) - 0.5
X = np.dot(X, X.T)  # create a symmetric matrix

# Benchmark the dense routine
start = perf_counter()
evals_large, evecs_large = largest_eigh(X, eigvals=(N - k, N - 1))
elapsed = perf_counter() - start
print("eigh elapsed time: ", elapsed)

# Benchmark the sparse routine
start = perf_counter()
evals_large_sparse, evecs_large_sparse = largest_eigsh(X, k, which='LM')
elapsed = perf_counter() - start
print("eigsh elapsed time: ", elapsed)
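# Note: newer SciPy releases replace eigh's eigvals keyword with
# subset_by_index (eigvals was deprecated around SciPy 1.5). A hedged
# equivalent of the dense call above:
from scipy.linalg import eigh

evals_large, evecs_large = eigh(X, subset_by_index=[N - k, N - 1])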
data = np.genfromtxt('./toy_data/traizines.csv', delimiter=',',
                     skip_header=True)
A = data[:, 1:]
b = data[:, 0]
poly = PolynomialFeatures(poly_degree)
A = poly.fit_transform(A)[:, 1:]
A = (A - A.mean(axis=0)) / A.std(axis=0)
b -= b.mean(axis=0)
m, n = A.shape
print('')
print(' * dim A =', A.shape)
print(' * max_lam(AAt) = %.4e' %
      largest_eigh(np.dot(A, A.T), eigvals=(m - 1, m - 1))[0][0])
# TODO MM use np.linalg.norm(A, ord=2) ** 2?
# TODO TB largest_eigh is more efficient: with a 1000x1000 matrix it took
#         0.3 s, while np.linalg.norm took 1.7 s
print('')

# find lam1_max, and determine lam1 and lam2
lam1_max = LA.norm(A.T @ b, ord=np.inf) / alpha
lam1 = alpha * c_lam * lam1_max
lam2 = (1 - alpha) * c_lam * lam1_max

# -------------------- #
#  ssnal_elastic_core  #
# -------------------- #
print('')
print(' * start ssnal_elastic')
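# The two TODOs above propose equivalent computations of max_lam(AAt); a small
# hedged check that they agree (the quoted timings are the authors', not
# re-measured here).
import numpy as np
from scipy.linalg import eigh as largest_eigh

A_demo = np.random.rand(50, 20)  # hypothetical stand-in for A
m_demo = A_demo.shape[0]
via_eigh = largest_eigh(np.dot(A_demo, A_demo.T),
                        eigvals=(m_demo - 1, m_demo - 1))[0][0]
via_norm = np.linalg.norm(A_demo, ord=2) ** 2  # squared largest singular value
assert np.isclose(via_eigh, via_norm)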