Example #1
# sphinx_gallery_thumbnail_number = 5
import matplotlib.pyplot as plt
import numpy as np

import pylops

plt.close("all")
np.random.seed(1)

###############################################################################
# Let's start by creating the model and data
nx = 101
x = np.zeros(nx)
x[:nx // 2] = 10
x[nx // 2:3 * nx // 4] = -5

Iop = pylops.Identity(nx)

n = np.random.normal(0, 1, nx)
y = Iop * (x + n)

plt.figure(figsize=(10, 5))
plt.plot(x, "k", lw=3, label="x")
plt.plot(y, ".k", label="y=x+n")
plt.legend()
plt.title("Model and data")

###############################################################################
# To start, we try a simple L2 regularization that enforces smoothness in
# the solution. We can see that denoising is successfully achieved, but the
# solution is much smoother than we would like.
D2op = pylops.SecondDerivative(nx, edge=True)
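
###############################################################################
# A minimal sketch of how this smooth inversion could be carried out, assuming
# the v1-style :py:func:`pylops.optimization.leastsquares.RegularizedInversion`
# API; the damping ``epsRs`` and the iteration limit are illustrative choices.
xsmooth = pylops.optimization.leastsquares.RegularizedInversion(
    Iop, [D2op], y, epsRs=[np.sqrt(10.0)], **dict(iter_lim=30)
)

plt.figure(figsize=(10, 5))
plt.plot(x, "k", lw=3, label="x")
plt.plot(y, ".k", label="y=x+n")
plt.plot(xsmooth, "r", lw=3, label="x smooth")
plt.legend()
plt.title("L2 regularized denoising")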
Example #2
into data and vice versa.
"""
import matplotlib.gridspec as pltgs
import matplotlib.pyplot as plt
import numpy as np

import pylops

plt.close("all")

###############################################################################
# Let's define an identity operator :math:`\mathbf{Iop}` with the same number
# of elements for data and model (:math:`N=M`).
N, M = 5, 5
x = np.arange(M)
Iop = pylops.Identity(M, dtype="int")

y = Iop * x
xadj = Iop.H * y
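
# Since ``Iop`` is the identity, forward and adjoint simply pass the vector
# through; printing the arrays (an illustrative check, not part of the
# original snippet) shows that ``x``, ``y`` and ``xadj`` coincide.
print("x    =", x)
print("y    =", y)
print("xadj =", xadj)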

gs = pltgs.GridSpec(1, 6)
fig = plt.figure(figsize=(7, 3))
ax = plt.subplot(gs[0, 0:3])
im = ax.imshow(np.eye(N), cmap="rainbow")
ax.set_title("A", size=20, fontweight="bold")
ax.set_xticks(np.arange(N - 1) + 0.5)
ax.set_yticks(np.arange(M - 1) + 0.5)
ax.grid(linewidth=3, color="white")
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax = plt.subplot(gs[0, 3])
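# The snippet is truncated here; a plausible completion, mirroring the first
# panel, displays the model vector ``x`` as a column image.
ax.imshow(x[:, np.newaxis], cmap="rainbow")
ax.set_title("x", size=20, fontweight="bold")
ax.set_xticks([])
ax.set_yticks(np.arange(M - 1) + 0.5)
ax.grid(linewidth=3, color="white")
ax.yaxis.set_ticklabels([])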
Example #3
# Let's now define the sampling operator and create our covariance matrices
# in terms of linear operators. This may not be strictly necessary here, but
# it shows how even a Bayesian type of inversion can easily scale to large
# model and data spaces.

from scipy.sparse.linalg import lsqr  # used in the Bayesian inversion below

# Sampling operator
perc_subsampling = 0.2
ntsub = int(np.round(nt * perc_subsampling))
iava = np.sort(np.random.permutation(np.arange(nt))[:ntsub])
iava[-1] = nt - 1  # include the last sample to avoid instability at the edge
Rop = pylops.Restriction(nt, iava, dtype='float64')

# Covariance operators
Cm_op = pylops.signalprocessing.Convolve1D(nt, diag_ave, offset=N)
Cd_op = sigmad**2 * pylops.Identity(ntsub)

###############################################################################
# We now model our data and add noise that respects our prior definition
n = np.random.normal(0, sigmad, nt)
y = Rop * x
yn = Rop * (x + n)
ymask = Rop.mask(x)
ynmask = Rop.mask(x + n)

###############################################################################
# First we apply the Bayesian inversion equation
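# (the posterior mean),
#
# .. math::
#     \mathbf{x} = \mathbf{x}_0 + \mathbf{C}_m \mathbf{R}^H
#     \left( \mathbf{R} \mathbf{C}_m \mathbf{R}^H + \mathbf{C}_d \right)^{-1}
#     (\mathbf{y}_n - \mathbf{R} \mathbf{x}_0),
#
# where the inner data-space system is solved iteratively with ``lsqr``.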
xbayes = x0 + Cm_op * Rop.H * (lsqr(
    Rop * Cm_op * Rop.H + Cd_op, yn - Rop * x0, iter_lim=400)[0])

# Visualize
Example #4
print("xinv = ", x)

###############################################################################
# We can also use :py:class:`pylops.Kronecker` to do something more
# interesting. Any operator can in fact be applied along a single axis of a
# multi-dimensional input array if combined with an :py:class:`pylops.Identity`
# operator via Kronecker product. Here we apply the
# :py:class:`pylops.FirstDerivative` to the second dimension of the model.
#
# Note that for operators whose implementation allows application to a
# single axis via the ``dir`` parameter, going through the Kronecker product
# leads to slower performance. Nevertheless, the Kronecker product allows
# any other operator to be applied to a single dimension (see the sketch
# after the example below).
Nv, Nh = 11, 21

Iop = pylops.Identity(Nv, dtype="float32")
D2hop = pylops.FirstDerivative(Nh, dtype="float32")

X = np.zeros((Nv, Nh))
X[Nv // 2, Nh // 2] = 1
D2hop = pylops.Kronecker(Iop, D2hop)

Y = D2hop * X.ravel()
Y = Y.reshape(Nv, Nh)
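
# As noted above, the same result can be obtained directly with
# :py:class:`pylops.FirstDerivative`; this is a sketch assuming the v1-style
# ``dims``/``dir`` arguments, and is generally faster than the Kronecker route.
Dhop_direct = pylops.FirstDerivative(Nv * Nh, dims=(Nv, Nh), dir=1, dtype="float32")
Ydirect = (Dhop_direct * X.ravel()).reshape(Nv, Nh)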

fig, axs = plt.subplots(1, 2, figsize=(10, 3))
fig.suptitle("Kronecker", fontsize=14, fontweight="bold", y=0.95)
im = axs[0].imshow(X, interpolation="nearest")
axs[0].axis("tight")
axs[0].set_title(r"$x$")
plt.colorbar(im, ax=axs[0])
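# The snippet is truncated here; the natural completion, mirroring the first
# panel, displays the output of the operator.
im = axs[1].imshow(Y, interpolation="nearest")
axs[1].axis("tight")
axs[1].set_title(r"$y$")
plt.colorbar(im, ax=axs[1])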
Example #5
    def solve(self, f: np.ndarray):
        """
        Description of main primal-dual iteration.
        :return: None
        """

        (primal_n, primal_m) = self.image_size

        v = w = 0
        g = f.ravel()
        p = p_bar = np.zeros(primal_n * primal_m)
        q = q_bar = np.zeros(2 * primal_n * primal_m)
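        # g is the dual variable of the data-fidelity term; v and w are the
        # duals of the first- and second-order regularization terms; p and q
        # are the primal variables, with p_bar and q_bar their extragradient
        # (over-relaxed) copies.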

        if self.reg_mode != 'tik':
            grad = pylops.Gradient(dims=(primal_n, primal_m), dtype='float64', edge=True, kind="backward")
        else:
            grad = pylops.Identity(np.prod(self.image_size))

        # Symmetrized second-order gradient: carrying both dxdy and dydx is
        # not strictly necessary (and more expensive), but simple and functional.
        grad1 = pylops.BlockDiag([grad, grad])

        proj_0 = IndicatorL2((primal_n, primal_m), upper_bound=self.alpha[0])
        proj_1 = IndicatorL2((2 * primal_n, primal_m), upper_bound=self.alpha[1])

        if not self.silent_mode:
            progress = progressbar.ProgressBar(max_value=self.max_iter)

        k = 0

        while (self.tol < self.sens or k == 0) and (k < self.max_iter):

            p_old = p
            q_old = q

            # Dual Update
            g = self.lam / (self.tau + self.lam) * (
                g + self.tau * (self.A * p_bar - f))  # optional Bregman term (disabled): - self.alpha[0] * self.breg_p

            if self.reg_mode != 'tik':
                v = proj_0.prox(v + self.tau * (grad * p_bar - q_bar))
            else:
                v = self.alpha[0] / (self.tau + self.alpha[0]) * (
                    v + self.tau * (grad * p_bar - self.data))

            if self.reg_mode == 'tgv':
                w = proj_1.prox(w + self.tau * grad1 * q_bar)

            # Primal Update
            p = p - self.tau * (-self.alpha[0] * self.breg_p + self.A.H * g + grad.H * v)

            if self.reg_mode == 'tgv':
                q = q + self.tau * (v - grad1.H * w)

            # Extragradient Update
            p_bar = 2 * p - p_old
            q_bar = 2 * q - q_old

            # Sensitivity check every 50 iterations; guard against division
            # by zero on the first pass, where p_old is still all zeros.
            if k % 50 == 0 and np.linalg.norm(p_old) > 0:
                p_gap = p - p_old
                self.sens = np.linalg.norm(p_gap) / np.linalg.norm(p_old)
                print(self.sens)

            if self.gamma:
                raise NotImplementedError("The adjustment of the step size in the "
                                          "primal-dual iteration is not yet fully developed.")
                # Intended acceleration step (currently unreachable):
                theta = 1 / np.sqrt(1 + 2 * self.gamma * self.G.prox_param)
                self.G.prox_param = theta * self.G.prox_param
                self.F_star.prox_param = self.F_star.prox_param / theta

            k += 1
            if not self.silent_mode:
                progress.update(k)

        self.x = p

        if k < self.max_iter:
            print("Early stopping.")

        return self.x