Example #1
axs[0].axis('tight')
axs[0].set_title('x')
plt.colorbar(im, ax=axs[0])
im = axs[1].imshow(B, interpolation='nearest', cmap='rainbow')
axs[1].axis('tight')
axs[1].set_title('y')
plt.colorbar(im, ax=axs[1])
plt.tight_layout()
plt.subplots_adjust(top=0.8)

###############################################################################
# We can now do the same for the second derivative
A = np.zeros((nx, ny))
A[nx//2, ny//2] = 1.

D2op = pylops.SecondDerivative(nx * ny, dims=(nx, ny), dir=0, dtype='float64')
B = np.reshape(D2op * A.flatten(), (nx, ny))

fig, axs = plt.subplots(1, 2, figsize=(10, 3))
fig.suptitle('Second Derivative in 1st direction', fontsize=12,
             fontweight='bold', y=0.95)
im = axs[0].imshow(A, interpolation='nearest', cmap='rainbow')
axs[0].axis('tight')
axs[0].set_title('x')
plt.colorbar(im, ax=axs[0])
im = axs[1].imshow(B, interpolation='nearest', cmap='rainbow')
axs[1].axis('tight')
axs[1].set_title('y')
plt.colorbar(im, ax=axs[1])
plt.tight_layout()
plt.subplots_adjust(top=0.8)
Example #2
Iop = pylops.Identity(nx)

n = np.random.normal(0, 1, nx)
y = Iop * (x + n)

plt.figure(figsize=(10, 5))
plt.plot(x, "k", lw=3, label="x")
plt.plot(y, ".k", label="y=x+n")
plt.legend()
plt.title("Model and data")

###############################################################################
# To start we will try a simple L2 regularization that enforces
# smoothness in the solution. We can see that denoising is successfully
# achieved, but the solution is much smoother than we wish for.
D2op = pylops.SecondDerivative(nx, edge=True)
lamda = 1e2

xinv = pylops.optimization.leastsquares.RegularizedInversion(
    Iop, [D2op], y, epsRs=[np.sqrt(lamda / 2)], **dict(iter_lim=30))

plt.figure(figsize=(10, 5))
plt.plot(x, "k", lw=3, label="x")
plt.plot(y, ".k", label="y=x+n")
plt.plot(xinv, "r", lw=5, label="xinv")
plt.legend()
plt.title("L2 inversion")

###############################################################################
# Now we impose blockiness in the solution using the Split Bregman solver
Dop = pylops.FirstDerivative(nx, edge=True, kind="backward")
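
###############################################################################
# A minimal sketch of what the Split Bregman call could look like, assuming
# the PyLops 1.x :py:func:`pylops.optimization.sparsity.SplitBregman`
# signature; the iteration counts and damping values below are illustrative
# rather than taken from the original example.
niter_out = 50
niter_in = 3
mu = 0.01
lamda = 0.3

# TV-like inversion: an L1 penalty on the first derivative promotes blockiness
xinv_tv = pylops.optimization.sparsity.SplitBregman(
    Iop, [Dop], y, niter_out, niter_in, mu=mu, epsRL1s=[lamda],
    tol=1e-4, tau=1.0, **dict(iter_lim=30, damp=1e-10))[0]

plt.figure(figsize=(10, 5))
plt.plot(x, "k", lw=3, label="x")
plt.plot(y, ".k", label="y=x+n")
plt.plot(xinv_tv, "r", lw=5, label="xinv (TV)")
plt.legend()
plt.title("Split Bregman inversion")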
Example #3
# form of regularization (or preconditioning). This can be done in two
# different ways
#
# * regularization via :py:func:`pylops.optimization.leastsquares.NormalEquationsInversion`
#   or :py:func:`pylops.optimization.leastsquares.RegularizedInversion`)
# * preconditioning via :py:func:`pylops.optimization.leastsquares.PreconditionedInversion`
#
# Let's start by regularizing the normal equations using a second
# derivative operator
#
#   .. math::
#       \mathbf{x} = (\mathbf{R^TR}+\epsilon_\nabla^2\nabla^T\nabla)^{-1}
#                    \mathbf{R^Ty}

# Create regularization operator
D2op = pylops.SecondDerivative(N, dims=None, dtype='float64')

# Regularized inversion
epsR = np.sqrt(0.1)
epsI = np.sqrt(1e-4)

xne = \
    pylops.optimization.leastsquares.NormalEquationsInversion(Rop, [D2op], y,
                                                              epsI=epsI,
                                                              epsRs=[epsR],
                                                              returninfo=False,
                                                              **dict(maxiter=50))

###############################################################################
# Note that, in case we have access to a fast implementation of the chain of
# forward and adjoint passes of the regularization operator, such a chained
# operator can be passed to the solver directly.
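
# A minimal sketch of how such a chained (normal) regularization operator
# could be supplied, assuming the ``NRegs``/``epsNRs`` arguments of
# :py:func:`pylops.optimization.leastsquares.NormalEquationsInversion`; the
# operator composed on the fly below (and the variable names) are illustrative
# stand-ins for a dedicated fast implementation.
ND2op = D2op.H * D2op

xne1 = pylops.optimization.leastsquares.NormalEquationsInversion(
    Rop, None, y, NRegs=[ND2op], epsI=epsI, epsNRs=[epsR],
    returninfo=False, **dict(maxiter=50))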
Example #4
axs[1].legend()
axs[1].set_title('Inverse causal integration = Derivative')

###############################################################################
# As expected we obtain the same result. Let's see what happens if we now
# add some random noise to our data.

# Add noise
yn = y + np.random.normal(0, 4e-1, y.shape)

# Numerical derivative
Dop = pylops.FirstDerivative(nt, sampling=dt)
xder = Dop * yn

# Regularized derivative
Rop = pylops.SecondDerivative(nt)
xreg = pylops.optimization.leastsquares.RegularizedInversion(
    Cop, [Rop], yn, epsRs=[1e0], **dict(iter_lim=100, atol=1e-5))

# Preconditioned derivative
Sop = pylops.Smoothing1D(41, nt)
xp = pylops.optimization.leastsquares.PreconditionedInversion(
    Cop, Sop, yn, **dict(iter_lim=10, atol=1e-3))

# Visualize data and inversion
fig, axs = plt.subplots(1, 2, figsize=(18, 5))
axs[0].plot(t, y, 'k', lw=3, label='data')
axs[0].plot(t, yn, '--g', lw=3, label='noisy data')
axs[0].legend()
Example #5
axs[2].imshow(dn, cmap='gray', extent=(theta[0], theta[-1], t0[-1], t0[0]),
              vmin=-0.1, vmax=0.1)
axs[2].axis('tight')
axs[2].set_title('Noisy Data with zero-phase wavelet', fontsize=10)
axs[2].set_xlabel(r'$\Theta$')

###############################################################################
# We can now invert the data. First we consider noise-free data; subsequently
# we add some noise and include a regularization term in the inversion process
# to obtain a well-behaved wavelet also under noisy conditions.
wav_est = Wavesop / d.T.flatten()
wav_phase_est = Wavesop_phase / d_phase.T.flatten()
wavn_est = Wavesop / dn.T.flatten()

# Create regularization operator
D2op = pylops.SecondDerivative(ntwav, dtype='float64')

# Invert for interpolated signal
wavn_reg_est, istop, itn, r1norm, r2norm = \
    pylops.optimization.leastsquares.RegularizedInversion(Wavesop, [D2op], dn.T.flatten(),
                                                          epsRs=[np.sqrt(0.1)], returninfo=True,
                                                          **dict(damp=np.sqrt(1e-4),
                                                               iter_lim=200, show=0))

###############################################################################
# Finally, we visualize the retrieved wavelets and compare them with the true wavelet.

# sphinx_gallery_thumbnail_number = 3
fig, axs = plt.subplots(2, 1, sharex=True, figsize=(8, 6))
axs[0].plot(wav, 'k', lw=6, label='True')
axs[0].plot(wav_est, '--r', lw=4, label='Estimated (noise-free)')
Example #6
# * regularized inversion with second derivative along the spatial axis
#
#   .. math::
#        J = ||\mathbf{y} - \mathbf{R} \mathbf{x}||_2 +
#        \epsilon_\nabla ^2 ||\nabla \mathbf{x}||_2
#
# * sparsity-promoting inversion with the :py:class:`pylops.signalprocessing.FFT2D`
#   operator used as the sparsifying transform
#
#   .. math::
#        J = ||\mathbf{y} - \mathbf{R} \mathbf{F}^H \mathbf{x}||_2 +
#        \epsilon ||\mathbf{F}^H \mathbf{x}||_1

# smooth inversion
D2op = pylops.SecondDerivative(par['nx'] * par['nt'],
                               dims=(par['nx'], par['nt']),
                               dir=0,
                               dtype='float64')

xsmooth, _, _ = \
    pylops.waveeqprocessing.SeismicInterpolation(y, par['nx'], iava,
                                                 kind='spatial',
                                                 **dict(epsRs=[np.sqrt(0.1)],
                                                        damp=np.sqrt(1e-4),
                                                        iter_lim=50, show=0))

# sparse inversion with FFT2
nfft = 2**8
FFTop = pylops.signalprocessing.FFT2D(dims=[par['nx'], par['nt']],
                                      nffts=[nfft, nfft],
                                      sampling=[par['dx'], par['dt']])
X = FFTop * x.flatten()
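
# A minimal sketch of how the sparsity-promoting inversion could be run with
# the same :py:func:`pylops.waveeqprocessing.SeismicInterpolation` helper,
# assuming its ``kind='fk'`` mode with ``nffts`` and ``sampling`` arguments;
# the solver parameters ``niter`` and ``eps`` are illustrative only.
xl1, xl1fk, _ = pylops.waveeqprocessing.SeismicInterpolation(
    y, par['nx'], iava, kind='fk',
    nffts=(nfft, nfft), sampling=(par['dx'], par['dt']),
    **dict(niter=100, eps=1e-2))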
Example #7
# * regularized inversion with second derivative along the spatial axis
#
#   .. math::
#        J = \|\mathbf{y} - \mathbf{R} \mathbf{x}\|_2 +
#        \epsilon_\nabla ^2 \|\nabla \mathbf{x}\|_2
#
# * sparsity-promoting inversion with the :py:class:`pylops.signalprocessing.FFT2D`
#   operator used as the sparsifying transform
#
#   .. math::
#        J = \|\mathbf{y} - \mathbf{R} \mathbf{F}^H \mathbf{x}\|_2 +
#        \epsilon \|\mathbf{F}^H \mathbf{x}\|_1

# smooth inversion
D2op = pylops.SecondDerivative(par["nx"] * par["nt"],
                               dims=(par["nx"], par["nt"]),
                               dir=0,
                               dtype="float64")

xsmooth, _, _ = pylops.waveeqprocessing.SeismicInterpolation(
    y,
    par["nx"],
    iava,
    kind="spatial",
    **dict(epsRs=[np.sqrt(0.1)], damp=np.sqrt(1e-4), iter_lim=50, show=0))

# sparse inversion with FFT2
nfft = 2**8
FFTop = pylops.signalprocessing.FFT2D(dims=[par["nx"], par["nt"]],
                                      nffts=[nfft, nfft],
                                      sampling=[par["dx"], par["dt"]])
X = FFTop * x.ravel()
Example #8
def SecondDerivative(size: int, shape: Optional[tuple] = None, axis: int = 0, step: float = 1.0, edge: bool = True,
                     dtype: str = 'float64') -> PyLopLinearOperator:
    r"""
    Second derivative.

    *This docstring was adapted from ``pylops.SecondDerivative``.*

    Approximates the second derivative of a multi-dimensional array along a specific ``axis`` using finite-differences.

    Parameters
    ----------
    size: int
        Size of the input array.
    shape: tuple
        Shape of the input array.
    axis: int
        Axis along which to differentiate.
    step: float
        Step size.
    edge: bool
        Use reduced order derivative at edges (``True``) or ignore them (``False``).
    dtype: str
        Type of elements in input array.

    Returns
    -------
    :py:class:`~pycsou.linop.base.PyLopLinearOperator`
        Second derivative operator.

    Raises
    ------
    ValueError
        If ``shape`` and ``size`` are not compatible.

    Examples
    --------

    .. testsetup::

       import numpy as np
       from pycsou.linop.diff import SecondDerivative

    .. doctest::

       >>> x = np.linspace(-2.5, 2.5, 100)
       >>> z = np.piecewise(x, [x < -1, (x >= - 1) * (x<0), x>=0], [lambda x: -x, lambda x: 3 * x + 4, lambda x: -0.5 * x + 4])
       >>> Dop = SecondDerivative(size=x.size)
       >>> y = Dop * z


    .. plot::

       import numpy as np
       import matplotlib.pyplot as plt
       from pycsou.linop.diff import SecondDerivative

       x = np.linspace(-2.5, 2.5, 200)
       z = np.piecewise(x, [x < -1, (x >= - 1) * (x<0), x>=0], [lambda x: -x, lambda x: 3 * x + 4, lambda x: -0.5 * x + 4])
       Dop = SecondDerivative(size=x.size)
       y = Dop * z
       plt.figure()
       plt.plot(np.arange(x.size), z)
       plt.title('Signal')
       plt.show()
       plt.figure()
       plt.plot(np.arange(x.size), y)
       plt.title('Second Derivative')
       plt.show()

    Notes
    -----
    The ``SecondDerivative`` operator applies a second derivative to any chosen
    direction of a multi-dimensional array.

    For simplicity, given a one dimensional array, the second-order centered
    second derivative is given by:

    .. math::
        y[i] = (x[i+1] - 2x[i] + x[i-1]) / \text{step}^2.

    See Also
    --------
    :py:func:`~pycsou.linop.diff.FirstDerivative`, :py:func:`~pycsou.linop.diff.GeneralisedDerivative`

    """
    return PyLopLinearOperator(
        pylops.SecondDerivative(N=size, dims=shape, dir=axis, sampling=step, edge=edge, dtype=dtype))
Example #9
Some of these operators naturally lend themselves to embarrassingly parallel
computations. Within PyLops we leverage the multiprocessing module to run
multiple processes at the same time, each evaluating a subset of the operators
involved in one of the stacking operations (a small sketch of this is shown at
the end of this example).
"""
import matplotlib.pyplot as plt
import numpy as np

import pylops

plt.close("all")

###############################################################################
# Let's start by defining the two :py:class:`pylops.SecondDerivative` operators
# that we will be using in this example.
D2hop = pylops.SecondDerivative(11 * 21, dims=[11, 21], dir=1, dtype="float32")
D2vop = pylops.SecondDerivative(11 * 21, dims=[11, 21], dir=0, dtype="float32")

###############################################################################
# Chaining of operators represents the simplest concatenation that
# can be performed between two or more linear operators.
# This can be easily achieved using the ``*`` symbol
#
#    .. math::
#       \mathbf{D_{cat}}=  \mathbf{D_v} \mathbf{D_h}
Nv, Nh = 11, 21
X = np.zeros((Nv, Nh))
X[int(Nv / 2), int(Nh / 2)] = 1

D2op = D2vop * D2hop
Y = np.reshape(D2op * X.ravel(), (Nv, Nh))
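
###############################################################################
# A minimal sketch of the multiprocessing idea mentioned in the introduction,
# assuming the ``nproc`` argument of :py:class:`pylops.VStack`; the number of
# processes and the choice of stacked operators are illustrative only.
D2stack = pylops.VStack([D2vop, D2hop], nproc=2)

# Each stacked operator is applied in a separate process; the result is the
# vertical concatenation of the two derivative outputs
Ystack = D2stack * X.ravel()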
Example #10
dv[700:750] = 0.01
dt = -1 * np.cumsum(dv) * sampt

base = np.convolve(rick, refl, 'same')

mon = estimateResult(base, dt, t, 'dt', sampt, rickD, halfalpha)

plt.plot(t, base)
plt.plot(t, mon)
plt.legend(['base', 'mon'])
plt.show()
########## finish creating input and output
#########################################################################

# Create regularization operator
D2op = pylops.SecondDerivative(len(base), dims=None, dtype='float64')
Dop = pylops.FirstDerivative(len(base), dims=None, dtype='float64')
eye = sparse.spdiags(np.ones(len(t)), 0, len(t), len(t))

model_type = 'dt'
regs = [D2op, Dop]
# regularization coefficients
reg_coeffs = [1000, 1000]

# run warping
m = warpingInversion(t,
                     base,
                     mon,
                     rick,
                     halfalpha,
                     model_type,