def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    m, n, rank = 5, 4, 2
    matrix = rnd.randn(m, n)

    cost, egrad = create_cost_egrad(backend, matrix, rank)
    manifold = FixedRankEmbedded(m, n, rank)
    problem = pymanopt.Problem(manifold, cost=cost, egrad=egrad)
    if quiet:
        problem.verbosity = 0

    solver = ConjugateGradient()
    left_singular_vectors, singular_values, right_singular_vectors = \
        solver.solve(problem)
    low_rank_approximation = (left_singular_vectors @
                              np.diag(singular_values) @
                              right_singular_vectors)

    if not quiet:
        u, s, vt = la.svd(matrix, full_matrices=False)
        indices = np.argsort(s)[-rank:]
        low_rank_solution = (u[:, indices] @
                             np.diag(s[indices]) @
                             vt[indices, :])

        print("Analytic low-rank solution:")
        print()
        print(low_rank_solution)
        print()
        print("Rank-{} approximation:".format(rank))
        print()
        print(low_rank_approximation)
        print()
        print("Frobenius norm error:",
              la.norm(low_rank_approximation - low_rank_solution))
        print()
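# A minimal sketch of the `create_cost_egrad` helper referenced above, covering
# only an autograd backend. The old-style `@pymanopt.function.Autograd`
# decorator and the (u, s, vt) argument unpacking follow the API used elsewhere
# in this collection; the exact backend dispatch is an assumption.
import autograd.numpy as np
import pymanopt


def create_cost_egrad(backend, matrix, rank):
    assert backend == "Autograd", "only the autograd backend is sketched here"

    @pymanopt.function.Autograd
    def cost(u, s, vt):
        # Squared Frobenius distance between the rank-k factorization
        # u @ diag(s) @ vt and the target matrix.
        return np.linalg.norm(u @ np.diag(s) @ vt - matrix) ** 2

    # egrad stays None: pymanopt differentiates the autograd cost itself.
    return cost, None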
def fixedrank(A, YT, r):
    """Solve AX = YT on the manifold of rank-r matrices, with a row-wise
    Huber penalty on the left singular factor of X.

    Relies on the module-level names N, huber, lambd, and maxiter.
    """
    # Instantiate a manifold
    manifold = FixedRankEmbedded(N, r, r)

    # Define the cost function (here using autograd.numpy)
    def cost(X):
        U = X[0]
        cst = 0
        for n in range(N):
            cst = cst + huber(U[n, :])
        Mat = np.matmul(np.matmul(X[0], np.diag(X[1])), X[2])
        fidelity = LA.norm(np.subtract(np.matmul(A, Mat), YT))
        return cst + lambd * fidelity**2

    problem = Problem(manifold=manifold, cost=cost)
    solver = ConjugateGradient(maxiter=maxiter)

    # Let Pymanopt do the rest
    Xopt = solver.solve(problem)
    Sol = np.dot(np.dot(Xopt[0], np.diag(Xopt[1])), Xopt[2])
    return Sol
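# The function above relies on module-level names (N, huber, lambd, maxiter,
# LA, and the pymanopt imports) that are not shown. A hypothetical setup,
# purely for illustration -- the dimensions, weight, and Huber threshold below
# are assumptions, not values from the original code:
import autograd.numpy as np
from autograd.numpy import linalg as LA
from pymanopt import Problem
from pymanopt.manifolds import FixedRankEmbedded
from pymanopt.solvers import ConjugateGradient

N = 50          # number of rows of X
lambd = 10.0    # weight of the data-fidelity term
maxiter = 200   # solver iteration cap


def huber(row, delta=1.0):
    # Smooth Huber penalty on the norm of one row of U.
    norm = np.sqrt(np.sum(row ** 2) + 1e-12)
    return np.where(norm <= delta,
                    0.5 * norm ** 2,
                    delta * (norm - 0.5 * delta))


# Example call: A maps R^N to R^20, YT stacks r target columns.
A = np.random.randn(20, N)
YT = np.random.randn(20, 2)
Sol = fixedrank(A, YT, r=2)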
def setUp(self):
    self.m = m = 10
    self.n = n = 5
    self.k = k = 3
    self.manifold = FixedRankEmbedded(m, n, k)

    u, s, vt = self.manifold.random_point()
    matrix = (u * s) @ vt

    @pymanopt.function.autograd(self.manifold)
    def cost(u, s, vt):
        return np.linalg.norm((u * s) @ vt - matrix) ** 2

    self.cost = cost
def setUp(self):
    self.m = m = 20
    self.n = n = 10
    self.rank = rank = 3
    A = np.random.normal(size=(m, n))
    self.manifold = Product([FixedRankEmbedded(m, n, rank), Euclidean(n)])

    @pymanopt.function.autograd(self.manifold)
    def cost(u, s, vt, x):
        return np.linalg.norm(((u * s) @ vt - A) @ x) ** 2

    self.cost = cost
    self.gradient = self.cost.get_gradient_operator()
    self.hessian = self.cost.get_hessian_operator()

    self.problem = pymanopt.Problem(self.manifold, self.cost)
def setUp(self):
    self.m = m = 20
    self.n = n = 10
    self.rank = rank = 3
    A = np.random.randn(m, n)

    @pymanopt.function.Autograd
    def cost(u, s, vt, x):
        return np.linalg.norm(((u * s) @ vt - A) @ x) ** 2

    self.cost = cost
    self.gradient = self.cost.compute_gradient()
    self.hvp = self.cost.compute_hessian_vector_product()

    self.manifold = Product([FixedRankEmbedded(m, n, rank), Euclidean(n)])
    self.problem = pymanopt.Problem(self.manifold, self.cost)
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    m, n, rank = 5, 4, 2
    matrix = np.random.normal(size=(m, n))

    manifold = FixedRankEmbedded(m, n, rank)
    cost, euclidean_gradient = create_cost_and_derivates(
        manifold, matrix, backend
    )
    problem = pymanopt.Problem(
        manifold, cost, euclidean_gradient=euclidean_gradient
    )

    optimizer = ConjugateGradient(
        verbosity=2 * int(not quiet), beta_rule="PolakRibiere"
    )
    (
        left_singular_vectors,
        singular_values,
        right_singular_vectors,
    ) = optimizer.run(problem).point
    low_rank_approximation = (
        left_singular_vectors
        @ np.diag(singular_values)
        @ right_singular_vectors
    )

    if not quiet:
        u, s, vt = np.linalg.svd(matrix, full_matrices=False)
        indices = np.argsort(s)[-rank:]
        low_rank_solution = (
            u[:, indices] @ np.diag(s[indices]) @ vt[indices, :]
        )

        print("Analytic low-rank solution:")
        print()
        print(low_rank_solution)
        print()
        print(f"Rank-{rank} approximation:")
        print()
        print(low_rank_approximation)
        print()
        print(
            "Frobenius norm error:",
            np.linalg.norm(low_rank_approximation - low_rank_solution),
        )
        print()
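# A minimal sketch of the `create_cost_and_derivates` helper referenced above
# (the spelling follows the original identifier), covering only the autograd
# backend of the pymanopt 2.x API; other backends would also return a
# hand-written Euclidean gradient instead of None.
import autograd.numpy as np
import pymanopt


def create_cost_and_derivates(manifold, matrix, backend):
    assert backend == "autograd", "only the autograd backend is sketched here"

    @pymanopt.function.autograd(manifold)
    def cost(u, s, vt):
        # Squared Frobenius distance to the target matrix.
        return np.linalg.norm(u @ np.diag(s) @ vt - matrix) ** 2

    # With autograd the Euclidean gradient is derived automatically.
    return cost, None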
import autograd.numpy as np

from pymanopt import Problem
from pymanopt.manifolds import FixedRankEmbedded
from pymanopt.solvers import ConjugateGradient

# Let A be a (5 x 4) matrix to be approximated
A = np.random.randn(5, 4)
k = 2

# (a) Instantiation of a manifold
# points on the manifold are parameterized via their singular value
# decomposition (u, s, vt) where
# u is a 5 x 2 matrix with orthonormal columns,
# s is a vector of length 2,
# vt is a 2 x 4 matrix with orthonormal rows,
# so that u*diag(s)*vt is a 5 x 4 matrix of rank 2.
manifold = FixedRankEmbedded(A.shape[0], A.shape[1], k)

# (b) Definition of a cost function (here using autograd.numpy)
# Note that the cost must be defined in terms of u, s and vt, where
# X = u * diag(s) * vt.
def cost(usv):
    delta = 0.5
    u = usv[0]
    s = usv[1]
    vt = usv[2]
    X = np.dot(np.dot(u, np.diag(s)), vt)
    return np.sum(np.sqrt((X - A)**2 + delta**2) - delta)

# define the Pymanopt problem
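# A minimal completion of the snippet above, assuming the same old-style
# pymanopt API that its imports reference; verbosity=0 just silences logs.
problem = Problem(manifold=manifold, cost=cost, verbosity=0)

# (c) Instantiation of a solver and solution of the problem; the optimizer
# returns the point as its (u, s, vt) factors.
solver = ConjugateGradient()
u_opt, s_opt, vt_opt = solver.solve(problem)

# Reassemble the rank-k approximation from its factors.
X_opt = np.dot(np.dot(u_opt, np.diag(s_opt)), vt_opt)
print('Rank-{} approximation of A:'.format(k))
print(X_opt)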
def setUp(self):
    self.m = m = 10
    self.n = n = 5
    self.k = k = 3
    self.man = FixedRankEmbedded(m, n, k)
import autograd.numpy as np
from autograd import grad
from pymanopt.manifolds import FixedRankEmbedded

m = 4
n = 2
k = 2

X = np.random.rand(m, n)
U, S, Vt = np.linalg.svd(X, full_matrices=False)
U = U[:, :k]
S = S[:k]
Vt = Vt[:k, :]

# f and g are assumed to be defined elsewhere as autograd-differentiable
# functions of the (U, S, Vt) tuple.
grad_f = grad(f)
grad_g = grad(g)

man = FixedRankEmbedded(m, n, k)

# Euclidean gradient of g, converted to a Riemannian gradient in the
# (Up, M, Vp) tangent-space format, then mapped back to ambient coordinates.
dU, dS, dVt = grad_g((U, S, Vt))
Up, M, Vp = man.egrad2rgrad((U, S, Vt), (dU, dS, dVt))
U_, S_, V_ = man.tangent2ambient((U, S, Vt), (Up, M, Vp))
tangent_grad = U_.dot(S_).dot(V_.T)

print()
print()
print('X:')
print(X)
print()
print('U,S,Vt:')
print(U, S, Vt)
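# For the snippet above to run, `f` and `g` must be defined before the
# grad(f) / grad(g) calls. Hypothetical examples, written against the
# (U, S, Vt) tuple so that autograd can differentiate them -- purely
# illustrative, not the functions from the original experiment:
def f(usv):
    u, s, vt = usv
    # Sum of singular values (nuclear norm of the represented matrix).
    return np.sum(s)


def g(usv):
    u, s, vt = usv
    # Squared Frobenius norm of the reconstructed matrix u @ diag(s) @ vt.
    return np.sum(((u * s) @ vt) ** 2)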
#e_correct_prediction = tf.equal(tf.argmax(e_y, 1), tf.argmax(y_, 1))
#e_accuracy = tf.reduce_mean(tf.cast(e_correct_prediction, "float"))
#e_accuracy_summary = tf.summary.scalar("e_accuracy", e_accuracy)
summaries = tf.summary.merge_all()

manifold_b = Euclidean(1, 10)
if use_parameterization:
    manifold_A = Euclidean(784, k)
    manifold_B = Euclidean(k, 10)
    manifold_W = Product([manifold_A, manifold_B])
    arg = [A, B, b]
else:
    manifold_W = FixedRankEmbedded(784, 10, k)
    #manifold_W = Product([Stiefel(784, k), Euclidean(k), Stiefel(k, 10)])
    arg = [A, M, B, b]
manifold = Product([manifold_W, manifold_b])

e_manifold_W = Euclidean(784, 10)
e_arg = [eW, b]
e_manifold = Product([e_manifold_W, manifold_b])

problem = Problem(manifold=manifold, cost=loss, accuracy=accuracy,
                  summary=summaries, arg=arg, data=[x, y_], verbosity=1)
e_problem = Problem(manifold=e_manifold, cost=e_loss, accuracy=accuracy,
                    summary=summaries, arg=e_arg, data=[x, y_], verbosity=1)

solver = SGD(maxiter=10000000, logverbosity=10)
if not os.path.isdir('result'):
    os.makedirs('result')
path = os.path.join('result', experiment_name + '.csv')

m, n, rank = 10, 8, 4
p = 1 / 2  # each entry is observed independently with probability p

for i in range(n_exp):
    matrix = rnd.randn(m, n)
    P_omega = np.zeros_like(matrix)
    for j in range(m):
        for k in range(n):
            P_omega[j][k] = random.choice([0, 1])  # Bernoulli(p) mask

    cost = create_cost(matrix, P_omega)
    manifold = FixedRankEmbedded(m, n, rank)
    problem = pymanopt.Problem(manifold, cost=cost, egrad=None)

    res_list = []
    for beta_type in BetaTypes:
        solver = ConjugateGradient(beta_type=beta_type, maxiter=10000)
        res = solver.solve(problem)
        res_list.append(res[1])
        res_list.append(res[2])

    with open(path, 'a') as f:
        writer = csv.writer(f)
        writer.writerow(res_list)
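# A minimal sketch of the `create_cost` helper used above, assuming an
# autograd-based cost on the (u, s, vt) factors. The masked squared Frobenius
# objective is the standard matrix-completion choice and is an assumption here,
# not necessarily the objective of the original experiment.
import autograd.numpy as np
import pymanopt


def create_cost(matrix, P_omega):
    @pymanopt.function.Autograd
    def cost(u, s, vt):
        X = u @ np.diag(s) @ vt
        # Only the entries selected by the mask P_omega contribute.
        return np.linalg.norm(P_omega * (X - matrix)) ** 2

    return cost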