def test_nucnorm():
    """Check the nuclear-norm proximal operator on a (noisy) rank-one matrix.

    The prox of pen * ||.||_* evaluated at rho soft-thresholds the singular
    values by pen / rho, so for a rank-one input the nuclear norm should drop
    by roughly pen / rho.
    """
    pen = 1.
    rho = 0.1
    tol = 1.
    prox = proxops.nucnorm(pen)

    # rank-one 50x25 matrix, and a noisy observation of it
    X = 2 * np.outer(np.random.randn(50), np.random.randn(25))
    # NOTE(review): V is never used below — the assert operates on X.
    # Possibly the test was meant to apply the prox to V; confirm intent.
    V = X + 0.5 * np.random.randn(50, 25)

    def nn(A):
        # nuclear norm = sum of singular values
        return np.linalg.svd(A, compute_uv=False).sum()

    # prox shrinks the (single nonzero) singular value by pen / rho
    assert np.abs(nn(X) - nn(prox(X, rho)) - pen / rho) <= tol
def test_lowrank_matrix_approx():
    """Test low rank matrix approximation.

    Runs three optimizers (proximal consensus, proximal gradient descent,
    and its accelerated variant) on a low-rank recovery problem and asserts
    each estimate at least halves the error of the raw observation.
    """
    Xobs, Xtrue = generate_lowrank_matrix()

    def assert_relative_error(Xhat):
        # relative Frobenius error of the estimate vs. the naive
        # estimate (just using the noisy observation directly)
        estimate_err = np.linalg.norm(Xhat - Xtrue, 'fro')
        baseline_err = np.linalg.norm(Xobs - Xtrue, 'fro')
        assert estimate_err / baseline_err <= 0.5

    # --- proximal consensus algorithm ---
    opt = ProximalConsensus(Xobs)
    opt.add('squared_error', Xobs)
    opt.add('nucnorm', 0.2)
    opt.display = None
    opt.storage = None
    opt.run(maxiter=100)
    assert_relative_error(opt.theta)

    # --- proximal gradient descent, plain and accelerated ---
    def objective(X):
        # squared-error objective and its gradient
        residual = X - Xtrue
        value = 0.5 * np.linalg.norm(residual.ravel()) ** 2
        return value, residual

    for algorithm in [ProximalGradientDescent, AcceleratedProximalGradient]:
        # nuclear-norm penalty as the proximal operator
        penalty = nucnorm(0.2)

        opt = algorithm(objective, Xobs, penalty, learning_rate=0.005)
        opt.display = None
        opt.storage = None
        opt.run(maxiter=5000)

        assert_relative_error(opt.theta)