def test_fb_l1_denoising():
    """l1 denoising: forward-backward should recover a very sparse vector.

    NOTE(review): reconstructed from a whitespace-mangled one-liner in which
    everything after the first '#' was swallowed by the comment. Renamed
    ``xRec`` to ``x_rec`` for consistency with the sibling copy of this test.
    """
    n = 1000
    # Use a very sparse vector for the test: all zeros except one big spike.
    x = np.zeros((n, 1))
    x[1, :] = 100
    y = x + 0.06 * np.random.randn(n, 1)
    la = 0.2  # l1 regularization weight
    prox_f = lambda x, tau: soft_thresholding(x, la * tau)
    grad_g = lambda x: x - y  # gradient of the quadratic data-fit term
    for method in methods:
        x_rec = forward_backward(prox_f, grad_g, y, 1, method=method)
        # TODO ugly test to change -- decimal=0 is a very loose tolerance.
        assert_array_almost_equal(x, x_rec, decimal=0)
def test_fb_l1_denoising():
    """Recover a single large spike from noisy data via forward-backward.

    Builds a 1000-sample signal that is zero everywhere except one spike,
    adds small Gaussian noise, and checks each solver method reconstructs
    the signal to within the (loose) decimal=0 tolerance.
    """
    n = 1000
    # Very sparse target: a lone spike of height 100.
    x = np.zeros((n, 1))
    x[1, :] = 100
    y = x + 0.06 * np.random.randn(n, 1)
    la = 0.2

    def prox_f(z, tau):
        # Proximity operator of the scaled l1 norm.
        return soft_thresholding(z, la * tau)

    def grad_g(z):
        # Gradient of 1/2 ||z - y||^2.
        return z - y

    for method in methods:
        x_rec = forward_backward(prox_f, grad_g, y, 1, method=method)
        # TODO ugly test to change
        assert_array_almost_equal(x, x_rec, decimal=0)
def divergence(p):
    """Discrete divergence of a 2-vector field; negative adjoint of gradient.

    NOTE(review): the ``def`` line and the first statements were lost when the
    source was collapsed onto one line; reconstructed from the sibling copies
    of this script -- confirm against the original file.
    """
    px = p[:, :, 0]
    py = p[:, :, 1]
    resx = px - np.roll(px, 1, axis=0)
    resy = py - np.roll(py, 1, axis=1)
    return -(resx + resy)


# Minimization of F(K*x) + G(x)
K = gradient
K.T = divergence  # attach the adjoint so the solver can apply K transpose

# Amplitude (pointwise Euclidean norm) of a vector field of shape (h, w, 2).
amp = lambda u: np.sqrt(np.sum(u ** 2, axis=2))
F = lambda u: alpha * np.sum(amp(u))  # total-variation term
G = lambda x: 1 / 2 * lin.norm(y - x, 'fro') ** 2  # quadratic data-fit term

# Proximity operators
normalize = lambda u: u / np.tile(
    (np.maximum(amp(u), 1e-10))[:, :, np.newaxis], (1, 1, 2))
prox_f = lambda u, tau: np.tile(
    soft_thresholding(amp(u), alpha * tau)[:, :, np.newaxis],
    (1, 1, 2)) * normalize(u)
prox_fs = dual_prox(prox_f)
prox_g = lambda x, tau: (x + tau * y) / (1 + tau)

# context
ctx = Context(full_output=True, maxiter=300)
ctx.callback = lambda x: G(x) + F(K(x))  # record the primal objective

t1 = time.time()
x_rec, cx = admm(prox_fs, prox_g, K, y, context=ctx)
t2 = time.time()
print("Performed 300 iterations in " + str(t2 - t1) + " seconds.")
def divergence(p):
    """Discrete divergence of a 2-vector field; negative adjoint of gradient.

    NOTE(review): the ``def`` line and ``px = p[:, :, 0]`` were lost when the
    source was collapsed onto one line; reconstructed from the sibling copies
    of this script -- confirm against the original file.
    """
    px = p[:, :, 0]
    py = p[:, :, 1]
    resx = px - np.roll(px, 1, axis=0)
    resy = py - np.roll(py, 1, axis=1)
    return -(resx + resy)


# Minimization of F(K*x) + G(x)
K = gradient
K.T = divergence  # attach the adjoint so the solver can apply K transpose

# Amplitude (pointwise Euclidean norm) of a vector field of shape (h, w, 2).
amp = lambda u: np.sqrt(np.sum(u ** 2, axis=2))
F = lambda u: alpha * np.sum(amp(u))  # total-variation term
G = lambda x: 1 / 2 * lin.norm(y - x, "fro") ** 2  # quadratic data-fit term

# Proximity operators
normalize = lambda u: u / np.tile((np.maximum(amp(u), 1e-10))[:, :, np.newaxis], (1, 1, 2))
prox_f = lambda u, tau: np.tile(soft_thresholding(amp(u), alpha * tau)[:, :, np.newaxis], (1, 1, 2)) * normalize(u)
prox_fs = dual_prox(prox_f)
prox_g = lambda x, tau: (x + tau * y) / (1 + tau)

callback = lambda x: G(x) + F(K(x))  # record the primal objective

t1 = time.time()
x_rec, cx = admm(prox_fs, prox_g, K, y, maxiter=300, full_output=1, callback=callback)
t2 = time.time()
# FIX: Python 2 `print` statement -> print() function, consistent with the
# sibling copy of this script.
print("Performed 300 iterations in " + str(t2 - t1) + " seconds.")

pl.subplot(221)
pl.imshow(im, cmap="gray")
pl.title("Original")
pl.axis("off")
def divergence(p):
    """Discrete divergence of a 2-vector field; negative adjoint of gradient.

    NOTE(review): the ``def`` line and the first statements were lost when the
    source was collapsed onto one line; reconstructed from the sibling copies
    of this script -- confirm against the original file.
    """
    px = p[:, :, 0]
    py = p[:, :, 1]
    resx = px - np.roll(px, 1, axis=0)
    resy = py - np.roll(py, 1, axis=1)
    return -(resx + resy)


# Minimization of F(K*x) + G(x)
K = gradient
K.T = divergence  # attach the adjoint so the solver can apply K transpose

# Amplitude (pointwise Euclidean norm) of a vector field of shape (h, w, 2).
amp = lambda u: np.sqrt(np.sum(u**2, axis=2))
F = lambda u: alpha * np.sum(amp(u))  # total-variation term
G = lambda x: 1 / 2 * lin.norm(y - x, 'fro')**2  # quadratic data-fit term

# Proximity operators
normalize = lambda u: u / np.tile(
    (np.maximum(amp(u), 1e-10))[:, :, np.newaxis], (1, 1, 2))
prox_f = lambda u, tau: np.tile(
    soft_thresholding(amp(u), alpha * tau)[:, :, np.newaxis],
    (1, 1, 2)) * normalize(u)
prox_fs = dual_prox(prox_f)
prox_g = lambda x, tau: (x + tau * y) / (1 + tau)

callback = lambda x: G(x) + F(K(x))  # record the primal objective

t1 = time.time()
x_rec, cx = admm(prox_fs, prox_g, K, y, maxiter=300, full_output=1, callback=callback)
t2 = time.time()