def test_sparse():
    """Test the sparse proximal operator against its closed-form solution.

    The prox of the l1 penalty is soft-thresholding: shrink each entry of
    ``v`` toward zero by ``gamma = pen / rho`` and zero out entries whose
    magnitude is below the threshold.
    """
    pen = 0.1
    rho = 0.1

    # sample count must be an int: passing 1e3 (a float) raises TypeError
    # on modern NumPy
    v = np.linspace(-5, 5, 1000)

    # closed-form soft-thresholding solution
    gamma = pen / rho
    x = (v - gamma * np.sign(v)) * (np.abs(v) > gamma)

    op = proxops.sparse(pen)
    assert np.allclose(op(v, rho), x)
def test_sparse_regression():
    """Test sparse regression.

    Solves an l1-penalized least-squares problem with three optimizers
    (consensus, proximal gradient, accelerated proximal gradient) and
    checks each recovers the true sparse solution far better than the
    naive least-squares estimate.
    """
    A, y, x_true = generate_sparse_system()

    # least squares solution; rcond=None opts into the current (non-deprecated)
    # singular-value cutoff and silences the FutureWarning
    xls = np.linalg.lstsq(A, y, rcond=None)[0]

    # helper function to test relative error: the sparse estimate must beat
    # the least-squares baseline by at least 100x
    def test_error(xhat):
        test_err = np.linalg.norm(xhat - x_true, 2)
        naive_err = np.linalg.norm(xls - x_true, 2)
        err_ratio = test_err / naive_err
        assert err_ratio <= 0.01

    # ProximalConsensus
    opt = ProximalConsensus(xls)
    opt.add('linsys', A=A, b=y)
    opt.add('sparse', 1.)
    opt.display = None
    opt.storage = None
    opt.run(maxiter=100)

    # test error
    test_error(opt.theta)

    # Proximal gradient descent and Accelerated proximal gradient descent
    for algorithm in [ProximalGradientDescent, AcceleratedProximalGradient]:

        # objective: 0.5 * ||Ax - y||^2 with its analytic gradient
        def f_df(x):
            err = A.dot(x) - y
            obj = 0.5 * np.linalg.norm(err) ** 2
            grad = A.T.dot(err)
            return obj, grad

        # sparsity penalty
        proxop = sparse(1.)

        # optimizer
        opt = algorithm(f_df, xls, proxop, learning_rate=0.005)
        opt.display = None
        opt.storage = None
        opt.run(maxiter=5000)

        # test
        test_error(opt.theta)