m1, m2 = np.meshgrid(m1, m2, indexing='ij')
mgrid = np.vstack((m1.ravel(), m2.ravel()))
J = 0.5 * np.sum(mgrid * np.dot(G, mgrid), axis=0) - np.dot(d, mgrid)
J = J.reshape(nm1, nm2)

###############################################################################
# We can now define the upper and lower bounds of the box and again we create
# a grid to display alongside the solution
lower = 1.5
upper = 3
indic = (mgrid > lower) & (mgrid < upper)
indic = indic[0].reshape(nm1, nm2) & indic[1].reshape(nm1, nm2)

###############################################################################
# We can now define both the quadratic functional and the box. Note that the
# functional must match :math:`J` above, i.e.
# :math:`\frac{1}{2} \mathbf{m}^T \mathbf{G} \mathbf{m} - \mathbf{d}^T \mathbf{m}`,
# hence ``b=-d``
quad = pyproximal.Quadratic(Op=pylops.MatrixMult(G), b=-d, niter=2)
ind = pyproximal.Box(lower, upper)

###############################################################################
# We are now ready to solve our problem. All we need to do is to choose an
# initial guess for the proximal gradient algorithm
def callback(x):
    mhist.append(x)

m0 = np.array([4, 3])
mhist = [m0, ]
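###############################################################################
# A minimal sketch of the solve itself. The step size ``tau`` and the number
# of iterations below are assumed values, not taken from the original: the
# gradient of the quadratic term is Lipschitz with constant equal to the
# largest eigenvalue of :math:`\mathbf{G}` (assuming ``G`` is symmetric), so
# any ``tau`` below its reciprocal is a safe choice
tau = 0.95 / np.linalg.eigvalsh(G).max()
mbox = pyproximal.optimization.primal.ProximalGradient(quad, ind, x0=m0,
                                                       tau=tau, niter=50,
                                                       callback=callback)
print('Box-constrained solution: ', mbox)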
n = np.random.normal(0, sigman, img.shape)
noise_img = img + n

###############################################################################
# We can now define a :class:`pylops.Gradient` operator as well as the
# different proximal operators to be passed to our solvers

# Gradient operator
sampling = 1.
Gop = pylops.Gradient(dims=(ny, nx), sampling=sampling, edge=False,
                      kind='forward', dtype='float64')
L = 8. / sampling ** 2  # maxeig(Gop^H Gop)

# L2 data term
lamda = .04
l2 = pyproximal.L2(b=noise_img.ravel(), sigma=lamda)

# L1 regularization (isotropic TV)
l1iso = pyproximal.L21(ndim=2)

###############################################################################
# To start, we solve our denoising problem with the original Primal-Dual
# algorithm

# Primal-dual
tau = 0.95 / np.sqrt(L)
mu = 0.95 / np.sqrt(L)
cost_fixed = []
err_fixed = []
iml12_fixed = \
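    pyproximal.optimization.primaldual.PrimalDual(
        l2, l1iso, Gop, x0=np.zeros_like(noise_img.ravel()),
        tau=tau, mu=mu, theta=1., niter=100, show=True,
        # hypothetical callback filling the history lists defined above; the
        # cost (data term plus isotropic TV of the gradient) and the error
        # against the clean image are assumptions, not from the original
        callback=lambda x: (cost_fixed.append(l2(x) + l1iso(Gop @ x)),
                            err_fixed.append(np.linalg.norm(x - img.ravel()))))
# NOTE: the call above is a sketch completing the truncated assignment;
# ``x0``, ``niter``, and the callback are assumed values
iml12_fixed = iml12_fixed.reshape(img.shape)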
# Gradient operator
sampling = 1.
Gop = pylops.Gradient(dims=(ny, nx), sampling=sampling, edge=False,
                      kind='forward', dtype='float64')
L = 8. / sampling ** 2  # maxeig(Gop^H Gop)

###############################################################################
# We then consider the first regularization (L2 norm on the gradient). We
# expect to get a smooth image where the noise is suppressed; sharp edges in
# the original image are, however, lost.

# L2 data term
l2 = pyproximal.L2(b=noise_img.ravel())

# L2 regularization
sigma = 2.
thik = pyproximal.L2(sigma=sigma)

# Solve
tau = 1.
mu = 1. / (tau * L)
iml2 = pyproximal.optimization.primal.LinearizedADMM(l2, thik, Gop, tau=tau,
                                                     mu=mu,
                                                     x0=np.zeros_like(
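                                                         noise_img.ravel()),
                                                     niter=100)[0]
# NOTE: sketch completing the truncated call; ``niter`` is an assumed value
# and ``[0]`` keeps only the primal variable of the (x, z) pair returned by
# LinearizedADMM
iml2 = iml2.reshape(ny, nx)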
plt.semilogy(Sx, 'k', label=r'$||X||_*$=%.2f' % np.sum(Sx))
plt.semilogy(Sy, 'r', label=r'$||Y||_*$=%.2f' % np.sum(Sy))
plt.legend()
plt.tight_layout()

###############################################################################
# We observe that removing some samples from the image has led to an overall
# increase in the singular values of :math:`\mathbf{Y}` relative to those of
# :math:`\mathbf{X}`, especially the ones that are originally very small. As
# a consequence the nuclear norm of :math:`\mathbf{Y}` (the masked image) is
# larger than that of :math:`\mathbf{X}`.
#
# Let's now set up the inverse problem using the Proximal gradient algorithm
mu = .8
f = pyproximal.L2(Rop, y)
g = pyproximal.Nuclear((ny, nx), mu)
Xpg = pyproximal.optimization.primal.AcceleratedProximalGradient(f, g, np.zeros(ny*nx),
                                                                 tau=1., niter=100,
                                                                 show=True)
Xpg = Xpg.reshape(ny, nx)

# Recompute SVD and see what the singular values look like
Upg, Spg, Vhpg = np.linalg.svd(Xpg, full_matrices=False)

###############################################################################
# Let's do the same with the constrained version
mu1 = 0.8 * np.sum(Sx)
g = pyproximal.proximal.NuclearBall((ny, nx), mu1)
Xpgc = pyproximal.optimization.primal.AcceleratedProximalGradient(f, g, np.zeros(ny*nx),
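                                                                  tau=1., niter=100,
                                                                  show=True)
# NOTE: sketch completing the truncated call, mirroring the parameters of the
# unconstrained solve above
Xpgc = Xpgc.reshape(ny, nx)

# Recompute SVD of the constrained solution as well
Upgc, Spgc, Vhpgc = np.linalg.svd(Xpgc, full_matrices=False)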
tau = 2
xp = eucl.prox(x, tau)
xdp = eucl.proxdual(x, tau)

plt.figure(figsize=(7, 2))
plt.plot(x, x, 'k', lw=2, label='x')
plt.plot(x, xp, 'r', lw=2, label='prox(x)')
plt.plot(x, xdp, 'b', lw=2, label='dualprox(x)')
plt.xlabel('x')
plt.title(r'$||x||_2$')
plt.legend()
plt.tight_layout()

###############################################################################
# We can do the same for the L2 norm (i.e., the square of the Euclidean norm)
l2 = pyproximal.L2(sigma=2.)

x = np.arange(-1, 1, 0.1)
print('||x||_2^2: ', l2(x))

tau = 2
xp = l2.prox(x, tau)
xdp = l2.proxdual(x, tau)

plt.figure(figsize=(7, 2))
plt.plot(x, x, 'k', lw=2, label='x')
plt.plot(x, xp, 'r', lw=2, label='prox(x)')
plt.plot(x, xdp, 'b', lw=2, label='dualprox(x)')
plt.xlabel('x')
plt.title(r'$||x||_2^2$')
plt.legend()
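###############################################################################
# As a sanity check, ``prox`` and ``proxdual`` should be linked through the
# Moreau decomposition. The snippet below is a sketch that assumes pyproximal
# evaluates the dual proximal as
# :math:`\text{prox}_{\tau f^*}(\mathbf{x}) = \mathbf{x} -
# \tau \, \text{prox}_{f / \tau}(\mathbf{x} / \tau)`
print('Moreau identity satisfied: ',
      np.allclose(l2.proxdual(x, tau), x - tau * l2.prox(x / tau, 1. / tau)))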