Example #1
###############################################################################
# We now create a signal filled with zeros and a single one at its center, and
# apply the derivative matrix by means of a dot product
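###############################################################################
# Hypothetical setup for this snippet (not shown in the excerpt): the imports,
# the grid size ``nx`` and a dense second-order centered first-derivative
# matrix ``D``, consistent with the default behaviour of
# :py:class:`pylops.FirstDerivative`. Names and values are illustrative only.
import numpy as np
import matplotlib.pyplot as plt
import pylops

nx = 61
D = np.diag(0.5 * np.ones(nx - 1), k=1) - np.diag(0.5 * np.ones(nx - 1), k=-1)
D[0, :] = 0   # edges left untouched, as FirstDerivative does with edge=False
D[-1, :] = 0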
x = np.zeros(nx)
x[int(nx/2)] = 1

y_dir = np.dot(D, x)
xadj_dir = np.dot(D.T, y_dir)

###############################################################################
# Let's now do the same using the :py:class:`pylops.FirstDerivative` operator
# and compare its outputs, after applying the forward and adjoint operators,
# to those from the dense matrix.

D1op = pylops.FirstDerivative(nx, dtype='float32')

y_lop = D1op*x
xadj_lop = D1op.H*y_lop
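###############################################################################
# A quick sanity check (a sketch, assuming ``D`` implements the same centered
# stencil as the operator): forward and adjoint results should match up to
# numerical precision.
print('forward match:', np.allclose(y_dir, y_lop))
print('adjoint match:', np.allclose(xadj_dir, xadj_lop))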

fig, axs = plt.subplots(3, 1, figsize=(13, 8))
axs[0].stem(np.arange(nx), x, linefmt='k', markerfmt='ko')
axs[0].set_title('Input', size=20, fontweight='bold')
axs[1].stem(np.arange(nx), y_dir, linefmt='k', markerfmt='ko', label='direct')
axs[1].stem(np.arange(nx), y_lop, linefmt='--r', markerfmt='ro', label='lop')
axs[1].set_title('Forward', size=20, fontweight='bold')
axs[1].legend()
axs[2].stem(np.arange(nx), xadj_dir, linefmt='k',
            markerfmt='ko', label='direct')
axs[2].stem(np.arange(nx), xadj_lop, linefmt='--r',
            markerfmt='ro', label='lop')
Example #2
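###############################################################################
# Hypothetical setup for this snippet (not shown in the excerpt): a blocky
# signal ``x``, noisy data ``y = x + n`` and an identity modelling operator
# ``Iop``. Values are illustrative only.
np.random.seed(0)
nx = 101
x = np.zeros(nx)
x[:nx // 2] = 10
x[nx // 2:3 * nx // 4] = -5
Iop = pylops.Identity(nx)
n = np.random.normal(0, 1, nx)
y = Iop * (x + n)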
D2op = pylops.SecondDerivative(nx, edge=True)
lamda = 1e2

xinv = pylops.optimization.leastsquares.RegularizedInversion(
    Iop, [D2op], y, epsRs=[np.sqrt(lamda / 2)], **dict(iter_lim=30))

plt.figure(figsize=(10, 5))
plt.plot(x, "k", lw=3, label="x")
plt.plot(y, ".k", label="y=x+n")
plt.plot(xinv, "r", lw=5, label="xinv")
plt.legend()
plt.title("L2 inversion")

###############################################################################
# Now we impose blockiness in the solution using the Split Bregman solver
Dop = pylops.FirstDerivative(nx, edge=True, kind="backward")
mu = 0.01
lamda = 0.3
niter_out = 50
niter_in = 3

xinv, niter = pylops.optimization.sparsity.SplitBregman(Iop, [Dop],
                                                        y,
                                                        niter_out,
                                                        niter_in,
                                                        mu=mu,
                                                        epsRL1s=[lamda],
                                                        tol=1e-4,
                                                        tau=1.0,
                                                        **dict(iter_lim=30,
                                                               damp=1e-10))
Example #3
# We first apply the blurring operator to the sharp image. We then
# try to recover the sharp input image by inverting the convolution operator
# from the blurred image. Note that when we perform the inversion without any
# regularization, the deblurred image shows some ringing due to the
# instability of the inverse process. Using an L1 solver with a DWT
# preconditioner or TV regularization allows us to recover sharper contrasts.
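###############################################################################
# Hypothetical setup for this snippet (not shown in the excerpt): a synthetic
# sharp image ``im`` of size ``Nz x Nx`` and a blurring operator ``Cop`` built
# from a Gaussian kernel with :py:class:`pylops.signalprocessing.Convolve2D`.
# Shapes and values are illustrative only.
Nz, Nx = 60, 60
im = np.zeros((Nz, Nx))
im[20:40, 25:35] = 1.0  # simple blocky "sharp" image
nh = 11
hz = np.exp(-0.1 * np.arange(-(nh // 2), nh // 2 + 1) ** 2)
h = np.outer(hz, hz)
h /= h.sum()
Cop = pylops.signalprocessing.Convolve2D(Nz * Nx, h=h, dims=(Nz, Nx),
                                         offset=(nh // 2, nh // 2))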
imblur = Cop * im.flatten()

imdeblur = \
    pylops.optimization.leastsquares.NormalEquationsInversion(Cop, None,
                                                              imblur,
                                                              maxiter=50)

Wop = pylops.signalprocessing.DWT2D((Nz, Nx), wavelet='haar', level=3)
Dop = [
    pylops.FirstDerivative(Nz * Nx, dims=(Nz, Nx), dir=0, edge=False),
    pylops.FirstDerivative(Nz * Nx, dims=(Nz, Nx), dir=1, edge=False)
]
DWop = Dop + [
    Wop,
]

imdeblurfista = \
    pylops.optimization.sparsity.FISTA(Cop * Wop.H, imblur, eps=1e-1,
                                       niter=100)[0]
imdeblurfista = Wop.H * imdeblurfista

imdeblurtv = \
    pylops.optimization.sparsity.SplitBregman(Cop, Dop, imblur.flatten(),
                                              niter_outer=10, niter_inner=5,
                                              mu=1.5, epsRL1s=[2e0, 2e0],
Example #4
plt.close('all')
np.random.seed(0)

###############################################################################
# Let's start by looking at a simple centered first derivative. We
# chunk the vector into 3 chunks.
nx = 100
nchunks = 3

x = np.zeros(nx)
x[int(nx / 2)] = 1
xd = da.from_array(x, chunks=nx // nchunks + 1)
print('x:', xd)

Dop = pylops.FirstDerivative(nx, dtype='float32')
dDop = pylops_distributed.FirstDerivative(nx,
                                          compute=(True, True),
                                          dtype='float32')

y = Dop * x
xadj = Dop.H * y

yd = dDop * xd
xadjd = dDop.H * yd
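###############################################################################
# A quick sanity check (a sketch, assuming ``compute=(True, True)`` returns
# NumPy arrays): the distributed results should match the serial ones.
print('forward match:', np.allclose(y, yd))
print('adjoint match:', np.allclose(xadj, xadjd))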

fig, axs = plt.subplots(3, 1, figsize=(13, 8))
axs[0].stem(np.arange(nx),
            x,
            linefmt='k',
            markerfmt='ko',
Example #5
xinv = \
    pylops.optimization.leastsquares.RegularizedInversion(Iop, [D2op], y,
                                                          epsRs=[np.sqrt(lamda/2)],
                                                          **dict(iter_lim=30))

plt.figure(figsize=(10, 5))
plt.plot(x, 'k', lw=3, label='x')
plt.plot(y, '.k', label='y=x+n')
plt.plot(xinv, 'r', lw=5, label='xinv')
plt.legend()
plt.title('L2 inversion')

###############################################################################
# Now we impose blockiness in the solution using the Split Bregman solver
Dop = pylops.FirstDerivative(nx, edge=True, kind='backward')
mu = 0.01
lamda = 0.3
niter_out = 50
niter_in = 3

xinv, niter = \
    pylops.optimization.sparsity.SplitBregman(Iop, [Dop], y, niter_out,
                                              niter_in, mu=mu, epsRL1s=[lamda],
                                              tol=1e-4, tau=1.,
                                              **dict(iter_lim=30, damp=1e-10))

plt.figure(figsize=(10, 5))
plt.plot(x, 'k', lw=3, label='x')
plt.plot(y, '.k', label='y=x+n')
plt.plot(xinv, 'r', lw=5, label='xinv')
Example #6
axs[0].axis('tight')
axs[1].imshow(y, cmap='gray')
axs[1].set_title('Data')
axs[1].axis('tight')
axs[2].imshow(xrec, cmap='gray')
axs[2].set_title('Adjoint model')
axs[2].axis('tight')
fig.tight_layout()

###############################################################################
# Finally we take advantage of our different solvers and try to invert the
# modelling operator both in a least-squares sense and using TV-reg.
Dop = [
    pylops.FirstDerivative(ny * nx,
                           dims=(nx, ny),
                           dir=0,
                           edge=True,
                           dtype=np.float64),
    pylops.FirstDerivative(ny * nx,
                           dims=(nx, ny),
                           dir=1,
                           edge=True,
                           dtype=np.float64)
]
D2op = pylops.Laplacian(dims=(nx, ny), edge=True, dtype=np.float64)

# L2
xinv_sm = \
    pylops.optimization.leastsquares.RegularizedInversion(RLop.H,
                                                          [D2op],
                                                          y.T.flatten(),
Example #7
###############################################################################
# Note that, as explained in detail in :py:class:`pylops.CausalIntegration`,
# integration has no unique solution, as any constant :math:`c` can be added
# to the integrated signal :math:`y`; for example, if :math:`x(t)=t^2` then
# :math:`y(t) = \int t^2 dt = \frac{t^3}{3} + c`. We thus subtract the first
# sample from the analytical integral to obtain the same result as the
# numerical one.
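###############################################################################
# Hypothetical setup for this snippet (not shown in the excerpt): a time axis
# and the input signal x(t) = sin(t), whose analytical integral is
# -cos(t) + c, consistent with ``yana`` below. Values are illustrative only.
nt, dt = 101, 0.1
t = np.arange(nt) * dt
x = np.sin(t)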

Cop = pylops.CausalIntegration(nt, sampling=dt, halfcurrent=True)

yana = -np.cos(t) + np.cos(t[0])
y = Cop * x
xinv = Cop / y

# Numerical derivative
Dop = pylops.FirstDerivative(nt, sampling=dt)
xder = Dop * y

# Visualize data and inversion
fig, axs = plt.subplots(1, 2, figsize=(18, 5))
axs[0].plot(t, yana, 'r', lw=5, label='analytic integration')
axs[0].plot(t, y, '--g', lw=3, label='numerical integration')
axs[0].legend()
axs[0].set_title('Causal integration')

axs[1].plot(t, x, 'k', lw=8, label='original')
axs[1].plot(t[1:-1], xder[1:-1], 'r', lw=5, label='numerical')
axs[1].plot(t, xinv, '--g', lw=3, label='inverted')
axs[1].legend()
axs[1].set_title('Inverse causal integration = Derivative')
Example #8
def FirstDerivative(size: int, shape: Optional[tuple] = None, axis: int = 0, step: float = 1.0, edge: bool = True,
                    dtype: str = 'float64', kind: str = 'forward') -> PyLopLinearOperator:
    r"""
    First derivative.

    *This docstring was adapted from ``pylops.FirstDerivative``.*

    Approximates the first derivative of a multi-dimensional array along a specific ``axis`` using finite-differences.

    Parameters
    ----------
    size: int
        Size of the input array.
    shape: tuple
        Shape of the input array.
    axis: int
        Axis along which to differentiate.
    step: float
        Step size.
    edge: bool
        For ``kind='centered'``, use reduced order derivative at edges (``True``) or ignore them (``False``).
    dtype: str
        Type of elements in input array.
    kind: str
        Derivative kind (``forward``, ``centered``, or ``backward``).

    Returns
    -------
    :py:class:`~pycsou.linop.base.PyLopLinearOperator`
        First derivative operator.

    Raises
    ------
    ValueError
        If ``shape`` and ``size`` are not compatible.
    NotImplementedError
        If ``kind`` is not one of: ``forward``, ``centered``, or ``backward``.

    Examples
    --------

    .. testsetup::

       import numpy as np
       from pycsou.linop.diff import FirstDerivative

    .. doctest::

       >>> x = np.repeat([0,2,1,3,0,2,0], 10)
       >>> Dop = FirstDerivative(size=x.size)
       >>> y = Dop * x
       >>> np.sum(np.abs(y) > 0)
       6
       >>> np.allclose(y, np.diff(x, append=0))
       True

    .. plot::

       import numpy as np
       import matplotlib.pyplot as plt
       from pycsou.linop.diff import FirstDerivative

       x = np.repeat([0,2,1,3,0,2,0], 10)
       Dop_bwd = FirstDerivative(size=x.size, kind='backward')
       Dop_fwd = FirstDerivative(size=x.size, kind='forward')
       Dop_cent = FirstDerivative(size=x.size, kind='centered')
       y_bwd = Dop_bwd * x
       y_cent = Dop_cent * x
       y_fwd = Dop_fwd * x
       plt.figure()
       plt.plot(np.arange(x.size), x)
       plt.plot(np.arange(x.size), y_bwd)
       plt.plot(np.arange(x.size), y_cent)
       plt.plot(np.arange(x.size), y_fwd)
       plt.legend(['Signal', 'Backward', 'Centered', 'Forward'])
       plt.title('First derivative')
       plt.show()

    Notes
    -----
    The ``FirstDerivative`` operator applies a first derivative along a given axis
    of a multi-dimensional array using either a *second-order centered stencil* or *first-order forward/backward stencils*.

    For simplicity, given a one-dimensional array, the second-order centered
    first derivative is:

    .. math::
        y[i] = (0.5x[i+1] - 0.5x[i-1]) / \text{step}

    while the first-order forward stencil is:

    .. math::
        y[i] = (x[i+1] - x[i]) / \text{step}

    and the first-order backward stencil is:

    .. math::
        y[i] = (x[i] - x[i-1]) / \text{step}.

    See Also
    --------
    :py:func:`~pycsou.linop.diff.SecondDerivative`, :py:func:`~pycsou.linop.diff.GeneralisedDerivative`

    """
    first_derivative = pylops.FirstDerivative(N=size, dims=shape, dir=axis, sampling=step, edge=edge, dtype=dtype,
                                              kind=kind)
    return PyLopLinearOperator(first_derivative)
Example #9
###############################################################################
# We can also use :py:class:`pylops.Kronecker` to do something more
# interesting. Any operator can in fact be applied along a single axis of a
# multi-dimensional input array if combined with an :py:class:`pylops.Identity`
# operator via a Kronecker product. Here we apply
# :py:class:`pylops.FirstDerivative` to the second dimension of the model.
#
# Note that for operators whose implementation already allows application
# along a single axis via the ``dir`` parameter, the Kronecker product
# approach is slower (see the equivalence check after the forward application
# below). Nevertheless, it allows any other operator to be applied to a
# single dimension.
Nv, Nh = 11, 21

Iop = pylops.Identity(Nv, dtype="float32")
D2hop = pylops.FirstDerivative(Nh, dtype="float32")

X = np.zeros((Nv, Nh))
X[Nv // 2, Nh // 2] = 1
D2hop = pylops.Kronecker(Iop, D2hop)

Y = D2hop * X.ravel()
Y = Y.reshape(Nv, Nh)
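###############################################################################
# A quick equivalence check (a sketch, assuming the same centered stencil):
# applying FirstDerivative directly along the second axis via ``dir=1`` is
# typically faster and should produce the same result as the Kronecker
# construction.
D1dirop = pylops.FirstDerivative(Nv * Nh, dims=(Nv, Nh), dir=1, dtype="float32")
Ydir = (D1dirop * X.ravel()).reshape(Nv, Nh)
print("Kronecker vs dir=1 match:", np.allclose(Y, Ydir))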

fig, axs = plt.subplots(1, 2, figsize=(10, 3))
fig.suptitle("Kronecker", fontsize=14, fontweight="bold", y=0.95)
im = axs[0].imshow(X, interpolation="nearest")
axs[0].axis("tight")
axs[0].set_title(r"$x$")
plt.colorbar(im, ax=axs[0])
im = axs[1].imshow(Y, interpolation="nearest")
Example #10
axs[0].axis("tight")
axs[1].imshow(y.T, cmap="gray")
axs[1].set_title("Data")
axs[1].axis("tight")
axs[2].imshow(xrec.T, cmap="gray")
axs[2].set_title("Adjoint model")
axs[2].axis("tight")
fig.tight_layout()


###############################################################################
# Finally we take advantage of our different solvers and try to invert the
# modelling operator both in a least-squares sense and using TV-reg.
Dop = [
    pylops.FirstDerivative(
        ny * nx, dims=(nx, ny), dir=0, edge=True, kind="backward", dtype=np.float64
    ),
    pylops.FirstDerivative(
        ny * nx, dims=(nx, ny), dir=1, edge=True, kind="backward", dtype=np.float64
    ),
]
D2op = pylops.Laplacian(dims=(nx, ny), edge=True, dtype=np.float64)

# L2
xinv_sm = pylops.optimization.leastsquares.RegularizedInversion(
    RLop.H, [D2op], y.ravel(), epsRs=[1e1], **dict(iter_lim=20)
)
xinv_sm = np.real(xinv_sm.reshape(nx, ny))

# TV
mu = 1.5
Example #11
dt = -1 * np.cumsum(dv) * sampt

base = np.convolve(rick, refl, 'same')

mon = estimateResult(base, dt, t, 'dt', sampt, rickD, halfalpha)

plt.plot(t, base)
plt.plot(t, mon)
plt.legend(['base', 'mon'])
plt.show()
########## finish creating input and output
#########################################################################

# Create regularization operators
D2op = pylops.SecondDerivative(len(base), dims=None, dtype='float64')
Dop = pylops.FirstDerivative(len(base), dims=None, dtype='float64')
eye = sparse.spdiags(np.ones(len(t)), 0, len(t), len(t))

model_type = 'dt'
regs = [D2op, Dop]
# regularization coefficients
reg_coeffs = [1000, 1000]

# run warping
m = warpingInversion(t,
                     base,
                     mon,
                     rick,
                     halfalpha,
                     model_type,
                     Nepochs=51,
Example #12
xinv = \
    pylops.optimization.leastsquares.RegularizedInversion(Iop, [D2op], y,
                                                          epsRs=[np.sqrt(lamda/2)],
                                                          **dict(iter_lim=30))

plt.figure(figsize=(10, 5))
plt.plot(x, 'k', lw=3, label='x')
plt.plot(y, '.k', label='y=x+n')
plt.plot(xinv, 'r', lw=5, label='xinv')
plt.legend()
plt.title('L2 inversion')

###############################################################################
# Now we impose blockiness in the solution using the Split Bregman solver
Dop = pylops.FirstDerivative(nx, edge=True)
mu = 0.01
lamda = 0.3
niter_out = 50
niter_in = 3

xinv, niter = \
    pylops.optimization.sparsity.SplitBregman(Iop, [Dop], y, niter_out,
                                              niter_in, mu=mu, epsRL1s=[lamda],
                                              tol=1e-4, tau=1.,
                                              **dict(iter_lim=30, damp=1e-10))

plt.figure(figsize=(10, 5))
plt.plot(x, 'k', lw=3, label='x')
plt.plot(y, '.k', label='y=x+n')
plt.plot(xinv, 'r', lw=5, label='xinv')