###############################################################################
# Example #1
axs[2].imshow(dn, cmap='gray', extent=(theta[0], theta[-1], t0[-1], t0[0]),
              vmin=-0.1, vmax=0.1)
axs[2].axis('tight')
axs[2].set_title('Noisy Data with zero-phase wavelet', fontsize=10)
axs[2].set_xlabel(r'$\Theta$')

###############################################################################
# We can now invert the data for the wavelet. First we consider noise-free
# data; subsequently we add noise and include a regularization term in the
# inversion so that a well-behaved wavelet is obtained also under noisy
# conditions.
wav_est = Wavesop / d.T.flatten()
wav_phase_est = Wavesop_phase / d_phase.T.flatten()
wavn_est = Wavesop / dn.T.flatten()
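
###############################################################################
# Note that the ``/`` operation above performs a least-squares inversion of
# the wavelet operator. As a rough sketch of what happens under the hood
# (the exact backend and iteration count used by ``/`` are an assumption
# here), the noise-free estimate is comparable to an explicit call to
# scipy's ``lsqr``:
from scipy.sparse.linalg import lsqr

wav_est_lsqr = lsqr(Wavesop, d.T.flatten(), iter_lim=100)[0]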

# Create regularization operator
D2op = lops.SecondDerivative(ntwav, dtype='float64')

# Invert for the wavelet, adding a second-derivative regularization term
wavn_reg_est, istop, itn, r1norm, r2norm = \
    lops.optimization.leastsquares.RegularizedInversion(Wavesop, [D2op], dn.T.flatten(),
                                                        epsRs=[np.sqrt(0.1)], returninfo=True,
                                                        **dict(damp=np.sqrt(1e-4),
                                                               iter_lim=200, show=0))

###############################################################################
# Finally, we visualize the retrieved wavelets and compare them with the true wavelet.

# sphinx_gallery_thumbnail_number = 3
fig, axs = plt.subplots(2, 1, sharex=True, figsize=(8, 6))
axs[0].plot(wav, 'k', lw=6, label='True')
axs[0].plot(wav_est, '--r', lw=4, label='Estimated (noise-free)')
###############################################################################
# Example #2
These operators allow multiple linear operators to be combined into a single
operator. Such functionality is used within PyLops as the building block for
creating complex operators, as well as for defining various types of
optimization problems with regularization or preconditioning.
"""
import numpy as np
import matplotlib.pyplot as plt

import lops

plt.close('all')

###############################################################################
# Let's start by defining the two second derivative operators
# (:py:class:`lops.SecondDerivative`) that we will use in this example.
D2hop = lops.SecondDerivative(11 * 21, dims=[11, 21], dir=1, dtype='float32')
D2vop = lops.SecondDerivative(11 * 21, dims=[11, 21], dir=0, dtype='float32')

###############################################################################
# Chaining of operators represents the simplest combination of two or more
# linear operators: the operators are applied one after the other (i.e.,
# their product). This can be easily achieved using the ``*`` symbol
#
#    .. math::
#       \mathbf{D_{cat}}=  \mathbf{D_v} \mathbf{D_h}
Nv, Nh = 11, 21
X = np.zeros((Nv, Nh))
X[int(Nv / 2), int(Nh / 2)] = 1

D2op = D2vop * D2hop
Y = np.reshape(D2op * X.flatten(), (Nv, Nh))
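
###############################################################################
# As a quick sanity check (a minimal sketch, not part of the original
# example), the chained operator should give the same result as applying the
# horizontal second derivative first and the vertical second derivative after.
Y_seq = np.reshape(D2vop * (D2hop * X.flatten()), (Nv, Nh))
print('Chained == sequential:', np.allclose(Y, Y_seq))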
###############################################################################
# Example #3
axs[0].axis('tight')
axs[0].set_title('x')
plt.colorbar(im, ax=axs[0])
im = axs[1].imshow(B, interpolation='nearest', cmap='rainbow')
axs[1].axis('tight')
axs[1].set_title('y')
plt.colorbar(im, ax=axs[1])
fig.tight_layout()

###############################################################################
# We can now do the same for the second derivative

A = np.zeros((11, 21))
A[5, 10] = 1

D2op = lops.SecondDerivative(nx * ny, dims=(nx, ny), dir=0, dtype='float64')
B = np.reshape(D2op * A.flatten(), (nx, ny))

fig, axs = plt.subplots(1, 2, figsize=(10, 3))
fig.suptitle('Second Derivative in 1st direction',
             fontsize=12,
             fontweight='bold')
im = axs[0].imshow(A, interpolation='nearest', cmap='rainbow')
axs[0].axis('tight')
axs[0].set_title('x')
plt.colorbar(im, ax=axs[0])
im = axs[1].imshow(B, interpolation='nearest', cmap='rainbow')
axs[1].axis('tight')
axs[1].set_title('y')
plt.colorbar(im, ax=axs[1])
###############################################################################
# Example #4
# solve is highly ill-posed and requires some prior knowledge from the user.
#
# We will now see how to add prior information to the inverse process in the
# form of regularization (or preconditioning). This can be done in two different ways
#
# * regularization via :py:func:`lops.optimization.leastsquares.NormalEquationsInversion` or
#   :py:func:`lops.optimization.leastsquares.RegularizedInversion`
# * preconditioning via :py:func:`lops.optimization.leastsquares.PreconditionedInversion`
#
# Let's start by regularizing the normal equations using a second derivative operator
#
#   .. math::
#       \mathbf{x} = (\mathbf{R^TR}+\epsilon_\nabla^2\nabla^T\nabla)^{-1} \mathbf{R^Ty}

# Create regularization operator
D2op = lops.SecondDerivative(N, dims=None, dtype='float64')

# Regularized inversion
epsR = np.sqrt(0.1)
epsI = np.sqrt(1e-4)

xne = lops.optimization.leastsquares.NormalEquationsInversion(Rop, [D2op], y, epsI=epsI,
                                                              epsRs=[epsR], returninfo=False,
                                                              **dict(maxiter=50))
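
###############################################################################
# For a small ``N``, the formula above can be checked directly with dense
# matrices (a sketch, not part of the original example; it assumes the
# operators can be densified by applying them to an identity matrix and that
# ``epsI`` enters the normal equations as a damping term).
R_dense = Rop * np.eye(N)    # dense restriction matrix
D2_dense = D2op * np.eye(N)  # dense second-derivative matrix
x_dense = np.linalg.solve(R_dense.T @ R_dense + (epsI ** 2) * np.eye(N) +
                          (epsR ** 2) * (D2_dense.T @ D2_dense),
                          R_dense.T @ y)
# expected to be small once the iterative solver has converged
print('Max difference from xne:', np.max(np.abs(xne - x_dense)))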

###############################################################################
# We can do the same while using :py:func:`lops.optimization.leastsquares.RegularizedInversion`
# which solves the following augmented problem
#
#   .. math::
#       \begin{bmatrix}