Example #1
def PrestackInversion(data,
                      theta,
                      wav,
                      m0=None,
                      linearization='akirich',
                      explicit=False,
                      simultaneous=False,
                      epsI=None,
                      epsR=None,
                      dottest=False,
                      returnres=False,
                      epsRL1=None,
                      kind='centered',
                      **kwargs_solver):
    r"""Pre-stack linearized seismic inversion.

    Invert pre-stack seismic operator to retrieve a set of elastic property
    profiles from band-limited seismic pre-stack data (i.e., angle gathers).
    Depending on the choice of input parameters, inversion can be
    trace-by-trace with explicit operator or global with either
    explicit or linear operator.

    Parameters
    ----------
    data : :obj:`np.ndarray`
        Band-limited seismic pre-stack data of size
        :math:`[(n_{lins} \times) n_{t0} \times n_{\theta} (\times n_x \times n_y)]`
    theta : :obj:`np.ndarray`
        Incident angles in degrees
    wav : :obj:`np.ndarray`
        Wavelet in time domain (must have an odd number of elements
        and be centered to zero)
    m0 : :obj:`np.ndarray`, optional
        Background model of size :math:`[n_{t0} \times n_{m}
        (\times n_x \times n_y)]`
    linearization : :obj:`str` or :obj:`list`, optional
        Choice of linearization, ``akirich``: Aki-Richards, ``fatti``: Fatti,
        ``PS``: PS, or a combination of them (required only when ``m0``
        is ``None``).
    explicit : :obj:`bool`, optional
        Create a chained linear operator (``False``, preferred for large data)
        or a ``MatrixMult`` linear operator with dense matrix
        (``True``, preferred for small data)
    simultaneous : :obj:`bool`, optional
        Simultaneously invert entire data (``True``) or invert
        trace-by-trace (``False``) when using ``explicit`` operator
        (note that the entire data is always inverted when working
        with linear operator)
    epsI : :obj:`float` or :obj:`list`, optional
        Damping factor(s) for Tikhonov regularization term. If a list of
        :math:`n_{m}` elements is provided, the regularization term will have
        different strength for each elastic property
    epsR : :obj:`float`, optional
        Damping factor for additional Laplacian regularization term
    dottest : :obj:`bool`, optional
        Apply dot-test
    returnres : :obj:`bool`, optional
        Return residuals
    epsRL1 : :obj:`float`, optional
        Damping factor for additional blockiness regularization term
    kind : :obj:`str`, optional
        Derivative kind (``forward`` or ``centered``).
    **kwargs_solver
        Arbitrary keyword arguments for :py:func:`scipy.linalg.lstsq`
        solver (if ``explicit=True`` and ``epsR=None``)
        or :py:func:`scipy.sparse.linalg.lsqr` solver (if ``explicit=False``
        and/or ``epsR`` is not ``None``)

    Returns
    -------
    minv : :obj:`np.ndarray`
        Inverted model of size :math:`[n_{t0} \times n_{m}
        (\times n_x \times n_y)]`
    datar : :obj:`np.ndarray`
        Residual data (i.e., data - background data) of
        size :math:`[n_{t0} \times n_{\theta} (\times n_x \times n_y)]`

    Notes
    -----
    The different choices of cost functions and solvers used in the
    seismic pre-stack inversion module follow the same convention of the
    seismic post-stack inversion module.

    Refer to :py:func:`pylops.avo.poststack.PoststackInversion` for
    more details.
    """
    ncp = get_array_module(data)

    # find out dimensions
    if m0 is None and linearization is None:
        raise NotImplementedError('either m0 or linearization '
                                  'must be provided')
    elif m0 is None:
        if isinstance(linearization, str):
            nm = _linearizations[linearization]
        else:
            nm = _linearizations[linearization[0]]
    else:
        nm = m0.shape[1]

    data_shape = data.shape
    data_ndim = data.ndim
    n_lins = 1
    multi = 0
    if not isinstance(linearization, str):
        n_lins = data_shape[0]
        data_shape = data_shape[1:]
        data_ndim -= 1
        multi = 1

    if data_ndim == 2:
        dims = 1
        nt0, ntheta = data_shape
        nspat = None
        nspatprod = nx = 1
    elif data_ndim == 3:
        dims = 2
        nt0, ntheta, nx = data_shape
        nspat = (nx, )
        nspatprod = nx
    else:
        dims = 3
        nt0, ntheta, nx, ny = data_shape
        nspat = (nx, ny)
        nspatprod = nx * ny
        data = data.reshape(nt0, ntheta, nspatprod)

    # check if background model and data have same shape
    if m0 is not None:
        if nt0 != m0.shape[0] or\
        (dims >= 2 and nx != m0.shape[2]) or\
        (dims == 3 and ny != m0.shape[3]):
            raise ValueError('data and m0 must have same time and space axes')

    # create operator
    if isinstance(linearization, str):
        # single operator
        PPop = PrestackLinearModelling(wav,
                                       theta,
                                       nt0=nt0,
                                       spatdims=nspat,
                                       linearization=linearization,
                                       explicit=explicit,
                                       kind=kind)
    else:
        # multiple operators
        if not isinstance(wav, (list, tuple)):
            wav = [
                wav,
            ] * n_lins
        PPop = [
            PrestackLinearModelling(w,
                                    theta,
                                    nt0=nt0,
                                    spatdims=nspat,
                                    linearization=lin,
                                    explicit=explicit)
            for w, lin in zip(wav, linearization)
        ]
        if explicit:
            PPop = MatrixMult(np.vstack([Op.A for Op in PPop]),
                              dims=nspat,
                              dtype=PPop[0].A.dtype)
        else:
            PPop = VStack(PPop)

    if dottest:
        Dottest(PPop,
                n_lins * nt0 * ntheta * nspatprod,
                nt0 * nm * nspatprod,
                raiseerror=True,
                verb=True,
                backend=get_module_name(ncp))

    # swap axes for explicit operator
    if explicit:
        data = data.swapaxes(0 + multi, 1 + multi)
        if m0 is not None:
            m0 = m0.swapaxes(0, 1)

    # invert model
    if epsR is None:
        # create and remove background data from original data
        datar = data.flatten() if m0 is None else \
            data.flatten() - PPop * m0.flatten()
        # inversion without spatial regularization
        if explicit:
            if epsI is None and not simultaneous:
                # solve unregularized equations independently trace-by-trace
                minv = \
                    get_lstsq(data)(PPop.A,
                                    datar.reshape(n_lins*nt0*ntheta, nspatprod).squeeze(),
                                    **kwargs_solver)[0]
            elif epsI is None and simultaneous:
                # solve unregularized equations simultaneously
                if ncp == np:
                    minv = lsqr(PPop, datar, **kwargs_solver)[0]
                else:
                    minv = cgls(PPop,
                                datar,
                                x0=ncp.zeros(int(PPop.shape[1]), PPop.dtype),
                                **kwargs_solver)[0]
            elif epsI is not None:
                # create regularized normal equations
                PP = ncp.dot(PPop.A.T, PPop.A) + \
                     epsI * ncp.eye(nt0*nm, dtype=PPop.A.dtype)
                datarn = np.dot(PPop.A.T,
                                datar.reshape(nt0 * ntheta, nspatprod))
                if not simultaneous:
                    # solve regularized normal eqs. trace-by-trace
                    minv = get_lstsq(data)(PP, datarn, **kwargs_solver)[0]
                else:
                    # solve regularized normal equations simultaneously
                    PPop_reg = MatrixMult(PP, dims=nspatprod)
                    if ncp == np:
                        minv = lsqr(PPop_reg, datarn.ravel(),
                                    **kwargs_solver)[0]
                    else:
                        minv = cgls(PPop_reg,
                                    datarn.ravel(),
                                    x0=ncp.zeros(int(PPop_reg.shape[1]),
                                                 PPop_reg.dtype),
                                    **kwargs_solver)[0]
            #else:
            #    # create regularized normal eqs. and solve them simultaneously
            #    PP = np.dot(PPop.A.T, PPop.A) + epsI * np.eye(nt0*nm)
            #    datarn = PPop.A.T * datar.reshape(nt0*ntheta, nspatprod)
            #    PPop_reg = MatrixMult(PP, dims=ntheta*nspatprod)
            #    minv = lstsq(PPop_reg, datarn.flatten(), **kwargs_solver)[0]
        else:
            # solve unregularized normal equations simultaneously with lop
            if ncp == np:
                minv = lsqr(PPop, datar, **kwargs_solver)[0]
            else:
                minv = cgls(PPop,
                            datar,
                            x0=ncp.zeros(int(PPop.shape[1]), PPop.dtype),
                            **kwargs_solver)[0]
    else:
        # Create Tikhonov regularization
        if epsI is not None:
            if isinstance(epsI, (list, tuple)):
                if len(epsI) != nm:
                    raise ValueError('epsI must be a scalar or a list of '
                                     'size nm')
                RegI = Diagonal(np.array(epsI),
                                dims=(nt0, nm, nspatprod),
                                dir=1)
            else:
                RegI = epsI * Identity(nt0 * nm * nspatprod)

        if epsRL1 is None:
            # L2 inversion with spatial regularization
            if dims == 1:
                Regop = SecondDerivative(nt0 * nm,
                                         dtype=PPop.dtype,
                                         dims=(nt0, nm))
            elif dims == 2:
                Regop = Laplacian((nt0, nm, nx), dirs=(0, 2), dtype=PPop.dtype)
            else:
                Regop = Laplacian((nt0, nm, nx, ny),
                                  dirs=(2, 3),
                                  dtype=PPop.dtype)
            if epsI is None:
                Regop = (Regop, )
                epsR = (epsR, )
            else:
                Regop = (Regop, RegI)
                epsR = (epsR, 1)
            minv = \
                RegularizedInversion(PPop, Regop, data.ravel(),
                                     x0=m0.flatten() if m0 is not None
                                     else None, epsRs=epsR,
                                     returninfo=False, **kwargs_solver)
        else:
            # Blockiness-promoting inversion with spatial regularization
            if dims == 1:
                RegL1op = FirstDerivative(nt0 * nm, dtype=PPop.dtype)
                RegL2op = None
            elif dims == 2:
                RegL1op = FirstDerivative(nt0 * nx * nm,
                                          dims=(nt0, nm, nx),
                                          dir=0,
                                          dtype=PPop.dtype)
                RegL2op = SecondDerivative(nt0 * nx * nm,
                                           dims=(nt0, nm, nx),
                                           dir=2,
                                           dtype=PPop.dtype)
            else:
                RegL1op = FirstDerivative(nt0 * nx * ny * nm,
                                          dims=(nt0, nm, nx, ny),
                                          dir=0,
                                          dtype=PPop.dtype)
                RegL2op = Laplacian((nt0, nm, nx, ny),
                                    dirs=(2, 3),
                                    dtype=PPop.dtype)
            if dims == 1:
                if epsI is not None:
                    RegL2op = (RegI, )
                    epsR = (1, )
            else:
                if epsI is None:
                    RegL2op = (RegL2op, )
                    epsR = (epsR, )
                else:
                    RegL2op = (RegL2op, RegI)
                    epsR = (epsR, 1)
            epsRL1 = (epsRL1, )
            if 'mu' in kwargs_solver.keys():
                mu = kwargs_solver['mu']
                kwargs_solver.pop('mu')
            else:
                mu = 1.
            if 'niter_outer' in kwargs_solver.keys():
                niter_outer = kwargs_solver['niter_outer']
                kwargs_solver.pop('niter_outer')
            else:
                niter_outer = 3
            if 'niter_inner' in kwargs_solver.keys():
                niter_inner = kwargs_solver['niter_inner']
                kwargs_solver.pop('niter_inner')
            else:
                niter_inner = 5
            minv = SplitBregman(PPop, (RegL1op, ),
                                data.ravel(),
                                RegsL2=RegL2op,
                                epsRL1s=epsRL1,
                                epsRL2s=epsR,
                                mu=mu,
                                niter_outer=niter_outer,
                                niter_inner=niter_inner,
                                x0=None if m0 is None else m0.flatten(),
                                **kwargs_solver)[0]

    # compute residual
    if returnres:
        if epsR is None:
            datar -= PPop * minv.ravel()
        else:
            datar = data.ravel() - PPop * minv.ravel()

    # re-swap axes for explicit operator
    if explicit:
        if m0 is not None:
            m0 = m0.swapaxes(0, 1)

    # reshape inverted model and residual data
    if dims == 1:
        if explicit:
            minv = minv.reshape(nm, nt0).swapaxes(0, 1)
            if returnres:
                datar = datar.reshape(n_lins, ntheta, nt0).squeeze().swapaxes(
                    0 + multi, 1 + multi)
        else:
            minv = minv.reshape(nt0, nm)
            if returnres:
                datar = datar.reshape(n_lins, nt0, ntheta).squeeze()
    elif dims == 2:
        if explicit:
            minv = minv.reshape(nm, nt0, nx).swapaxes(0, 1)
            if returnres:
                datar = datar.reshape(n_lins, ntheta, nt0,
                                      nx).squeeze().swapaxes(
                                          0 + multi, 1 + multi)
        else:
            minv = minv.reshape(nt0, nm, nx)
            if returnres:
                datar = datar.reshape(n_lins, nt0, ntheta, nx).squeeze()
    else:
        if explicit:
            minv = minv.reshape(nm, nt0, nx, ny).swapaxes(0, 1)
            if returnres:
                datar = datar.reshape(n_lins, ntheta, nt0, nx,
                                      ny).squeeze().swapaxes(
                                          0 + multi, 1 + multi)
        else:
            minv = minv.reshape(nt0, nm, nx, ny)
            if returnres:
                datar = datar.reshape(n_lins, nt0, ntheta, nx, ny).squeeze()

    if m0 is not None and epsR is None:
        minv = minv + m0

    if returnres:
        return minv, datar
    else:
        return minv
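# Hedged usage sketch (illustrative only, not part of the library source):
# model a tiny synthetic angle gather with PrestackLinearModelling and invert
# it back with PrestackInversion. It assumes pylops is installed and that
# pylops.avo.prestack and pylops.utils.wavelets.ricker are importable; sizes
# and values are arbitrary placeholders.
import numpy as np
from pylops.utils.wavelets import ricker
from pylops.avo.prestack import PrestackLinearModelling, PrestackInversion

nt0, nm = 301, 3
theta = np.linspace(0, 40, 21)                    # incident angles in degrees
wav = ricker(np.arange(41) * 0.004, f0=20)[0]     # odd-length, zero-centered
m = np.random.normal(0, 0.05, (nt0, nm))          # placeholder elastic model
PPop = PrestackLinearModelling(wav, theta, nt0=nt0, linearization='akirich')
data = (PPop * m.ravel()).reshape(nt0, len(theta))
# invert with a zero background model and a few lsqr iterations
minv = PrestackInversion(data, theta, wav, m0=np.zeros((nt0, nm)),
                         explicit=False, iter_lim=20)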
Example #2
def FISTA(Op, data, niter, eps=0.1, alpha=None, eigsiter=None, eigstol=0,
          tol=1e-10, returninfo=False, show=False, threshkind='soft',
          perc=None, callback=None):
    r"""Fast Iterative Shrinkage-Thresholding Algorithm (FISTA).

    Solve an optimization problem with :math:`L_p, \quad p=0, 1/2, 1`
    regularization, given the operator ``Op`` and data ``y``. The operator
    can be real or complex, and should ideally be either square :math:`N=M`
    or underdetermined :math:`N<M`.

    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        Operator to invert
    data : :obj:`numpy.ndarray`
        Data
    niter : :obj:`int`
        Number of iterations
    eps : :obj:`float`, optional
        Sparsity damping
    alpha : :obj:`float`, optional
        Step size (:math:`\alpha \le 1/\lambda_{max}(\mathbf{Op}^H\mathbf{Op})`
        guarantees convergence. If ``None``, the maximum eigenvalue is
        estimated and the optimal step size is chosen. If provided, the
        condition will not be checked internally).
    eigsiter : :obj:`int`, optional
        Number of iterations for eigenvalue estimation if ``alpha=None``
    eigstol : :obj:`float`, optional
        Tolerance for eigenvalue estimation if ``alpha=None``
    tol : :obj:`float`, optional
        Tolerance. Stop iterations if difference between inverted model
        at subsequent iterations is smaller than ``tol``
    returninfo : :obj:`bool`, optional
        Return info of FISTA solver
    show : :obj:`bool`, optional
        Display iterations log
    threshkind : :obj:`str`, optional
        Kind of thresholding ('hard', 'soft', 'half', 'hard-percentile',
        'soft-percentile', or 'half-percentile' - 'soft' used as default)
    perc : :obj:`float`, optional
        Percentile, as percentage of values to be kept by thresholding (to be
        provided when thresholding is hard-percentile, soft-percentile, or
        half-percentile)
    callback : :obj:`callable`, optional
        Function with signature (``callback(x)``) to call after each iteration
        where ``x`` is the current model vector

    Returns
    -------
    xinv : :obj:`numpy.ndarray`
        Inverted model
    niter : :obj:`int`
        Number of effective iterations
    cost : :obj:`numpy.ndarray`, optional
        History of cost function

    Raises
    ------
    NotImplementedError
        If ``threshkind`` is different from hard, soft, half, hard-percentile,
        soft-percentile, or half-percentile
    ValueError
        If ``perc=None`` when ``threshkind`` is hard-percentile,
        soft-percentile, or half-percentile

    See Also
    --------
    OMP: Orthogonal Matching Pursuit (OMP).
    ISTA: Iterative Shrinkage-Thresholding Algorithm (ISTA).
    SPGL1: Spectral Projected-Gradient for L1 norm (SPGL1).
    SplitBregman: Split Bregman for mixed L2-L1 norms.

    Notes
    -----
    Solves the following optimization problem for the operator
    :math:`\mathbf{Op}` and the data :math:`\mathbf{d}`:

    .. math::
        J = ||\mathbf{d} - \mathbf{Op} \mathbf{x}||_2^2 +
            \epsilon ||\mathbf{x}||_p

    using the Fast Iterative Shrinkage-Thresholding Algorithm (FISTA) [1]_,
    where :math:`p=0, 1, 1/2`. This is a modified version of ISTA solver with
    improved convergence properties and limited additional computational cost.
    Similarly to the ISTA solver, the choice of the thresholding algorithm to
    apply at every iteration is based on the choice of :math:`p`.

    .. [1] Beck, A., and Teboulle, M., “A Fast Iterative Shrinkage-Thresholding
       Algorithm for Linear Inverse Problems”, SIAM Journal on
       Imaging Sciences, vol. 2, pp. 183-202. 2009.

    """
    if threshkind not in ['hard', 'soft', 'half', 'hard-percentile',
                          'soft-percentile', 'half-percentile']:
        raise NotImplementedError('threshkind should be hard, soft, half, '
                                  'hard-percentile, soft-percentile, '
                                  'or half-percentile')
    if threshkind in ['hard-percentile',
                      'soft-percentile',
                      'half-percentile'] and perc is None:
        raise ValueError('Provide a percentile when choosing hard-percentile, '
                         'soft-percentile, or half-percentile thresholding')

    # choose thresholding function
    if threshkind == 'soft':
        threshf = _softthreshold
    elif threshkind == 'hard':
        threshf = _hardthreshold
    elif threshkind == 'half':
        threshf = _halfthreshold
    elif threshkind == 'hard-percentile':
        threshf = _hardthreshold_percentile
    elif threshkind == 'soft-percentile':
        threshf = _softthreshold_percentile
    else:
        threshf = _halfthreshold_percentile

    # identify backend to use
    ncp = get_array_module(data)

    if show:
        tstart = time.time()
        print('FISTA optimization (%s thresholding)\n'
              '-----------------------------------------------------------\n'
              'The Operator Op has %d rows and %d cols\n'
              'eps = %10e\ttol = %10e\tniter = %d' % (threshkind,
                                                      Op.shape[0],
                                                      Op.shape[1],
                                                      eps, tol, niter))
    # step size
    if alpha is None:
        if not isinstance(Op, LinearOperator):
            Op = LinearOperator(Op, explicit=False)
        # compute largest eigenvalues of Op^H * Op
        Op1 = LinearOperator(Op.H * Op, explicit=False)
        if get_module_name(ncp) == 'numpy':
            maxeig = np.abs(Op1.eigs(neigs=1, symmetric=True, niter=eigsiter,
                                     **dict(tol=eigstol, which='LM')))[0]
        else:
            maxeig = np.abs(power_iteration(Op1, niter=eigsiter,
                                            tol=eigstol, dtype=Op1.dtype,
                                            backend='cupy')[0])
        alpha = 1. / maxeig

    # define threshold
    thresh = eps * alpha * 0.5

    if show:
        if perc is None:
            print('alpha = %10e\tthresh = %10e' % (alpha, thresh))
        else:
            print('alpha = %10e\tperc = %.1f' % (alpha, perc))
        print('-----------------------------------------------------------\n')
        head1 = '   Itn       x[0]        r2norm     r12norm     xupdate'
        print(head1)

    # initialize model and cost function
    xinv = ncp.zeros(int(Op.shape[1]), dtype=Op.dtype)
    zinv = xinv.copy()
    t = 1
    if returninfo:
        cost = np.zeros(niter + 1)

    # iterate
    for iiter in range(niter):
        xinvold = xinv.copy()

        # compute residual
        resz = data - Op.matvec(zinv)

        # compute gradient
        grad = alpha * Op.rmatvec(resz)

        # update inverted model
        xinv_unthesh = zinv + grad
        if perc is None:
            xinv = threshf(xinv_unthesh, thresh)
        else:
            xinv = threshf(xinv_unthesh, 100 - perc)

        # update auxiliary coefficients
        told = t
        t = (1. + np.sqrt(1. + 4. * t ** 2)) / 2.
        zinv = xinv + ((told - 1.) / t) * (xinv - xinvold)

        # model update
        xupdate = np.linalg.norm(xinv - xinvold)

        if returninfo or show:
            costdata = 0.5 * np.linalg.norm(data - Op.matvec(xinv)) ** 2
            costreg = eps * np.linalg.norm(xinv, ord=1)
        if returninfo:
            cost[iiter] = costdata + costreg

        # run callback
        if callback is not None:
            callback(xinv)

        if show:
            if iiter < 10 or niter - iiter < 10 or iiter % 10 == 0:
                msg = '%6g  %12.5e  %10.3e   %9.3e  %10.3e' % \
                      (iiter + 1, to_numpy(xinv[:2])[0], costdata,
                       costdata + costreg, xupdate)
                print(msg)

        # check tolerance
        if xupdate < tol:
            niter = iiter
            break

    # get values pre-threshold  at locations where xinv is different from zero
    # xinv = np.where(xinv != 0, xinv_unthesh, xinv)

    if show:
        print('\nIterations = %d        Total time (s) = %.2f'
              % (niter, time.time() - tstart))
        print('---------------------------------------------------------\n')
    if returninfo:
        return xinv, niter, cost[:niter]
    else:
        return xinv, niter
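# Hedged usage sketch (illustrative only): recover a sparse vector from a
# random projection with FISTA, assuming pylops is installed and that
# pylops.MatrixMult and pylops.optimization.sparsity.FISTA are importable.
import numpy as np
import pylops

np.random.seed(0)
n, m = 60, 100
A = np.random.randn(n, m)
x = np.zeros(m)
x[np.random.choice(m, 5, replace=False)] = np.random.randn(5)  # 5-sparse model
Aop = pylops.MatrixMult(A)
y = Aop * x
# soft thresholding (p=1) with a small sparsity damping
xinv, niter_eff = pylops.optimization.sparsity.FISTA(Aop, y, niter=300,
                                                     eps=0.1)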
Example #3
def MDD(
    G,
    d,
    dt=0.004,
    dr=1.0,
    nfmax=None,
    wav=None,
    twosided=True,
    causality_precond=False,
    adjoint=False,
    psf=False,
    dtype="float64",
    dottest=False,
    saveGt=True,
    add_negative=True,
    smooth_precond=0,
    fftengine="numpy",
    **kwargs_solver
):
    r"""Multi-dimensional deconvolution.

    Solve multi-dimensional deconvolution problem using
    :py:func:`scipy.sparse.linalg.lsqr` iterative solver.

    Parameters
    ----------
    G : :obj:`numpy.ndarray`
        Multi-dimensional convolution kernel in time domain of size
        :math:`[n_s \times n_r \times n_t]` for ``twosided=False`` or
        ``twosided=True`` and ``add_negative=True``
        (with only positive times) or size
        :math:`[n_s \times n_r \times 2n_t-1]` for ``twosided=True`` and
        ``add_negative=False``
        (with both positive and negative times)
    d : :obj:`numpy.ndarray`
        Data in time domain :math:`[n_s \,(\times n_{vs}) \times n_t]` if
        ``twosided=False`` or ``twosided=True`` and ``add_negative=True``
        (with only positive times) or size
        :math:`[n_s \,(\times n_{vs}) \times 2n_t-1]` if ``twosided=True``
        and ``add_negative=False`` (with both positive and negative times)
    dt : :obj:`float`, optional
        Sampling of time integration axis
    dr : :obj:`float`, optional
        Sampling of receiver integration axis
    nfmax : :obj:`int`, optional
        Index of max frequency to include in deconvolution process
    wav : :obj:`numpy.ndarray`, optional
        Wavelet to convolve to the inverted model and psf
        (must be centered around its index in the middle of the array).
        If ``None``, the outputs of the inversion are returned directly.
    twosided : :obj:`bool`, optional
        MDC operator and data have both negative and positive time axes
        (``True``) or only positive times (``False``)
    add_negative : :obj:`bool`, optional
        Add negative side to MDC operator and data (``True``) or not
        (``False``); in the latter case, the operator and data are assumed to
        be already provided with both positive and negative sides. To be used
        only with ``twosided=True``.
    causality_precond : :obj:`bool`, optional
        Apply causality mask (``True``) or not (``False``)
    smooth_precond : :obj:`int`, optional
        Length of smoothing filter to apply to the causality preconditioner
    adjoint : :obj:`bool`, optional
        Compute and return adjoint(s)
    psf : :obj:`bool`, optional
        Compute and return Point Spread Function (PSF) and its inverse
    dtype : :obj:`str`, optional
        Type of elements in input array.
    dottest : :obj:`bool`, optional
        Apply dot-test
    saveGt : :obj:`bool`, optional
        Save ``G`` and ``G.H`` to speed up the computation of adjoint of
        :class:`pylops.signalprocessing.Fredholm1` (``True``) or create
        ``G.H`` on-the-fly (``False``). Note that ``saveGt=True`` will be
        faster but doubles the amount of required memory
    fftengine : :obj:`str`, optional
        Engine used for fft computation (``numpy``, ``scipy`` or ``fftw``)
    **kwargs_solver
        Arbitrary keyword arguments for chosen solver
        (:py:func:`scipy.sparse.linalg.cg` and
        :py:func:`pylops.optimization.solver.cg` are used as default for numpy
        and cupy `data`, respectively)

    Returns
    -------
    minv : :obj:`numpy.ndarray`
        Inverted model of size :math:`[n_r \,(\times n_{vs}) \times n_t]`
        for ``twosided=False`` or
        :math:`[n_r \,(\times n_{vs}) \times 2n_t-1]` for ``twosided=True``
    madj : :obj:`numpy.ndarray`
        Adjoint model of size :math:`[n_r \,(\times n_{vs}) \times n_t]`
        for ``twosided=False`` or
        :math:`[n_r \,(\times n_{vs}) \times 2n_t-1]` for ``twosided=True``
    psfinv : :obj:`numpy.ndarray`
        Inverted psf of size :math:`[n_r \times n_r \times n_t]`
        for ``twosided=False`` or
        :math:`[n_r \times n_r \times 2n_t-1]` for ``twosided=True``
    psfadj : :obj:`numpy.ndarray`
        Adjoint psf of size :math:`[n_r \times n_r \times n_t]`
        for ``twosided=False`` or
        :math:`[n_r \times n_r \times 2n_t-1]` for ``twosided=True``

    See Also
    --------
    MDC : Multi-dimensional convolution

    Notes
    -----
    Multi-dimensional deconvolution (MDD) is an ill-posed inverse problem,
    well-known in the image processing and geophysical communities [1]_.

    MDD aims at removing the effects of a Multi-dimensional Convolution
    (MDC) kernel or the so-called blurring operator or point-spread
    function (PSF) from given data. It can be written as

    .. math::
        \mathbf{d}= \mathbf{D} \mathbf{m}

    or, equivalently, by means of its normal equation

    .. math::
        \mathbf{m}= (\mathbf{D}^H\mathbf{D})^{-1} \mathbf{D}^H\mathbf{d}

    where :math:`\mathbf{D}^H\mathbf{D}` is the PSF.

    .. [1] Wapenaar, K., van der Neut, J., Ruigrok, E., Draganov, D., Hunziker,
       J., Slob, E., Thorbecke, J., and Snieder, R., "Seismic interferometry
       by crosscorrelation and by multi-dimensional deconvolution: a
       systematic comparison", Geophysical Journal International, vol. 185,
       pp. 1335-1364. 2011.

    """
    ncp = get_array_module(d)

    ns, nr, nt = G.shape
    if len(d.shape) == 2:
        nv = 1
    else:
        nv = d.shape[1]
    if twosided:
        if add_negative:
            nt2 = 2 * nt - 1
        else:
            nt2 = nt
            nt = (nt2 + 1) // 2
        nfmax_allowed = int(np.ceil((nt2 + 1) / 2))
    else:
        nt2 = nt
        nfmax_allowed = nt

    # Fix nfmax to be at maximum equal to half of the size of fft samples
    if nfmax is None or nfmax > nfmax_allowed:
        nfmax = nfmax_allowed
        logging.warning("nfmax set equal to ceil[(nt+1)/2=%d]" % nfmax)

    # Add negative part to data and model
    if twosided and add_negative:
        G = np.concatenate((ncp.zeros((ns, nr, nt - 1)), G), axis=-1)
        d = np.concatenate((np.squeeze(ncp.zeros((ns, nv, nt - 1))), d), axis=-1)
    # Bring kernel to frequency domain
    Gfft = np.fft.rfft(G, nt2, axis=-1)
    Gfft = Gfft[..., :nfmax]

    # Bring frequency/time to first dimension
    Gfft = np.moveaxis(Gfft, -1, 0)
    d = np.moveaxis(d, -1, 0)
    if psf:
        G = np.moveaxis(G, -1, 0)

    # Define MDC linear operator
    MDCop = MDC(
        Gfft,
        nt2,
        nv=nv,
        dt=dt,
        dr=dr,
        twosided=twosided,
        transpose=False,
        saveGt=saveGt,
        fftengine=fftengine,
    )
    if psf:
        PSFop = MDC(
            Gfft,
            nt2,
            nv=nr,
            dt=dt,
            dr=dr,
            twosided=twosided,
            transpose=False,
            saveGt=saveGt,
            fftengine=fftengine,
        )
    if dottest:
        Dottest(
            MDCop, nt2 * ns * nv, nt2 * nr * nv, verb=True, backend=get_module_name(ncp)
        )
        if psf:
            Dottest(
                PSFop,
                nt2 * ns * nr,
                nt2 * nr * nr,
                verb=True,
                backend=get_module_name(ncp),
            )

    # Adjoint
    if adjoint:
        madj = MDCop.H * d.ravel()
        madj = np.squeeze(madj.reshape(nt2, nr, nv))
        madj = np.moveaxis(madj, 0, -1)
        if psf:
            psfadj = PSFop.H * G.ravel()
            psfadj = np.squeeze(psfadj.reshape(nt2, nr, nr))
            psfadj = np.moveaxis(psfadj, 0, -1)

    # Inverse
    if twosided and causality_precond:
        P = np.ones((nt2, nr, nv))
        P[: nt - 1] = 0
        if smooth_precond > 0:
            P = filtfilt(np.ones(smooth_precond) / smooth_precond, 1, P, axis=0)
        P = to_cupy_conditional(d, P)
        Pop = Diagonal(P)
        minv = PreconditionedInversion(
            MDCop, Pop, d.ravel(), returninfo=False, **kwargs_solver
        )
    else:
        if ncp == np and "callback" not in kwargs_solver:
            minv = lsqr(MDCop, d.ravel(), **kwargs_solver)[0]
        else:
            minv = cgls(
                MDCop,
                d.ravel(),
                ncp.zeros(int(MDCop.shape[1]), dtype=MDCop.dtype),
                **kwargs_solver
            )[0]
    minv = np.squeeze(minv.reshape(nt2, nr, nv))
    minv = np.moveaxis(minv, 0, -1)

    if wav is not None:
        wav1 = wav.copy()
        for _ in range(minv.ndim - 1):
            wav1 = wav1[ncp.newaxis]
        minv = get_fftconvolve(d)(minv, wav1, mode="same")

    if psf:
        if ncp == np:
            psfinv = lsqr(PSFop, G.ravel(), **kwargs_solver)[0]
        else:
            psfinv = cgls(
                PSFop,
                G.ravel(),
                ncp.zeros(int(PSFop.shape[1]), dtype=PSFop.dtype),
                **kwargs_solver
            )[0]
        psfinv = np.squeeze(psfinv.reshape(nt2, nr, nr))
        psfinv = np.moveaxis(psfinv, 0, -1)
        if wav is not None:
            wav1 = wav.copy()
            for _ in range(psfinv.ndim - 1):
                wav1 = wav1[np.newaxis]
            psfinv = get_fftconvolve(d)(psfinv, wav1, mode="same")
    if adjoint and psf:
        return minv, madj, psfinv, psfadj
    elif adjoint:
        return minv, madj
    elif psf:
        return minv, psfinv
    else:
        return minv
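# Hedged usage sketch (illustrative only): run MDD on a small random kernel and
# a single virtual-source gather, assuming pylops is installed and that
# pylops.waveeqprocessing.MDD is importable. The random arrays are placeholders
# chosen purely to show the expected shapes ([ns x nr x nt] kernel,
# [ns x nt] data).
import numpy as np
from pylops.waveeqprocessing import MDD

np.random.seed(0)
ns, nr, nt = 10, 10, 100
G = np.random.randn(ns, nr, nt)     # convolution kernel (positive times only)
d = np.random.randn(ns, nt)         # data for one virtual source
minv = MDD(G, d, dt=0.004, dr=1.0, twosided=False,
           damp=1e-4, iter_lim=10)  # extra kwargs are passed to lsqr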
Example #4
def ISTA(Op, data, niter, eps=0.1, alpha=None, eigsiter=None, eigstol=0,
         tol=1e-10, monitorres=False, returninfo=False, show=False,
         threshkind='soft', perc=None, callback=None):
    r"""Iterative Shrinkage-Thresholding Algorithm (ISTA).

    Solve an optimization problem with :math:`L_p, \quad p=0, 1/2, 1`
    regularization, given the operator ``Op`` and data ``y``. The operator
    can be real or complex, and should ideally be either square :math:`N=M`
    or underdetermined :math:`N<M`.

    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        Operator to invert
    data : :obj:`numpy.ndarray`
        Data
    niter : :obj:`int`
        Number of iterations
    eps : :obj:`float`, optional
        Sparsity damping
    alpha : :obj:`float`, optional
        Step size (:math:`\alpha \le 1/\lambda_{max}(\mathbf{Op}^H\mathbf{Op})`
        guarantees convergence. If ``None``, the maximum eigenvalue is
        estimated and the optimal step size is chosen. If provided, the
        condition will not be checked internally).
    eigsiter : :obj:`int`, optional
        Number of iterations for eigenvalue estimation if ``alpha=None``
    eigstol : :obj:`float`, optional
        Tolerance for eigenvalue estimation if ``alpha=None``
    tol : :obj:`float`, optional
        Tolerance. Stop iterations if difference between inverted model
        at subsequent iterations is smaller than ``tol``
    monitorres : :obj:`bool`, optional
        Monitor that residual is decreasing
    returninfo : :obj:`bool`, optional
        Return info of ISTA solver
    show : :obj:`bool`, optional
        Display iterations log
    threshkind : :obj:`str`, optional
        Kind of thresholding ('hard', 'soft', 'half', 'hard-percentile',
        'soft-percentile', or 'half-percentile' - 'soft' used as default)
    perc : :obj:`float`, optional
        Percentile, as percentage of values to be kept by thresholding (to be
        provided when thresholding is hard-percentile, soft-percentile, or
        half-percentile)
    callback : :obj:`callable`, optional
        Function with signature (``callback(x)``) to call after each iteration
        where ``x`` is the current model vector

    Returns
    -------
    xinv : :obj:`numpy.ndarray`
        Inverted model
    niter : :obj:`int`
        Number of effective iterations
    cost : :obj:`numpy.ndarray`, optional
        History of cost function

    Raises
    ------
    NotImplementedError
        If ``threshkind`` is different from hard, soft, half, hard-percentile,
        soft-percentile, or half-percentile
    ValueError
        If ``perc=None`` when ``threshkind`` is hard-percentile,
        soft-percentile, or half-percentile
    ValueError
        If ``monitorres=True`` and residual increases

    See Also
    --------
    OMP: Orthogonal Matching Pursuit (OMP).
    FISTA: Fast Iterative Shrinkage-Thresholding Algorithm (FISTA).
    SPGL1: Spectral Projected-Gradient for L1 norm (SPGL1).
    SplitBregman: Split Bregman for mixed L2-L1 norms.

    Notes
    -----
    Solves the following optimization problem for the operator
    :math:`\mathbf{Op}` and the data :math:`\mathbf{d}`:

    .. math::
        J = ||\mathbf{d} - \mathbf{Op} \mathbf{x}||_2^2 +
            \epsilon ||\mathbf{x}||_p

    using the Iterative Shrinkage-Thresholding Algorithms (ISTA) [1]_, where
    :math:`p=0, 1, 1/2`. This is a very simple iterative algorithm which
    applies the following step:

    .. math::
        \mathbf{x}^{(i+1)} = T_{(\epsilon \alpha /2, p)} (\mathbf{x}^{(i)} +
        \alpha \mathbf{Op}^H (\mathbf{d} - \mathbf{Op} \mathbf{x}^{(i)}))

    where :math:`\epsilon \alpha /2` is the threshold and :math:`T_{(\tau, p)}`
    is the thresholding rule. The most common variant of ISTA uses the
    so-called soft-thresholding rule :math:`T(\tau, p=1)`. Alternatively, a
    hard-thresholding rule is used in the case of :math:`p=0` and a
    half-thresholding rule in the case of :math:`p=1/2`. Finally,
    percentile-based thresholds are also implemented: the damping factor is
    no longer used and the threshold changes at every iteration based on the
    computed percentile.

    .. [1] Daubechies, I., Defrise, M., and De Mol, C., “An iterative
       thresholding algorithm for linear inverse problems with a sparsity
       constraint”, Communications on pure and applied mathematics, vol. 57,
       pp. 1413-1457. 2004.

    """
    if threshkind not in ['hard', 'soft', 'half', 'hard-percentile',
                          'soft-percentile', 'half-percentile']:
        raise NotImplementedError('threshkind should be hard, soft, half, '
                                  'hard-percentile, soft-percentile, '
                                  'or half-percentile')
    if threshkind in ['hard-percentile',
                      'soft-percentile',
                      'half-percentile'] and perc is None:
        raise ValueError('Provide a percentile when choosing hard-percentile, '
                         'soft-percentile, or half-percentile thresholding')

    # choose thresholding function
    if threshkind == 'soft':
        threshf = _softthreshold
    elif threshkind == 'hard':
        threshf = _hardthreshold
    elif threshkind == 'half':
        threshf = _halfthreshold
    elif threshkind == 'hard-percentile':
        threshf = _hardthreshold_percentile
    elif threshkind == 'soft-percentile':
        threshf = _softthreshold_percentile
    else:
        threshf = _halfthreshold_percentile

    # identify backend to use
    ncp = get_array_module(data)

    if show:
        tstart = time.time()
        print('ISTA optimization (%s thresholding)\n'
              '-----------------------------------------------------------\n'
              'The Operator Op has %d rows and %d cols\n'
              'eps = %10e\ttol = %10e\tniter = %d' % (threshkind,
                                                      Op.shape[0],
                                                      Op.shape[1],
                                                      eps, tol, niter))
    # step size
    if alpha is None:
        if not isinstance(Op, LinearOperator):
            Op = LinearOperator(Op, explicit=False)
        # compute largest eigenvalues of Op^H * Op
        Op1 = LinearOperator(Op.H * Op, explicit=False)
        if get_module_name(ncp) == 'numpy':
            maxeig = np.abs(Op1.eigs(neigs=1, symmetric=True, niter=eigsiter,
                                     **dict(tol=eigstol, which='LM'))[0])
        else:
            maxeig = np.abs(power_iteration(Op1, niter=eigsiter,
                                            tol=eigstol, dtype=Op1.dtype,
                                            backend='cupy')[0])
        alpha = 1./maxeig

    # define threshold
    thresh = eps * alpha * 0.5

    if show:
        if perc is None:
            print('alpha = %10e\tthresh = %10e' % (alpha, thresh))
        else:
            print('alpha = %10e\tperc = %.1f' % (alpha, perc))
        print('-----------------------------------------------------------\n')
        head1 = '   Itn       x[0]        r2norm     r12norm     xupdate'
        print(head1)

    # initialize model and cost function
    xinv = ncp.zeros(int(Op.shape[1]), dtype=Op.dtype)
    if monitorres:
        normresold = np.inf
    if returninfo:
        cost = np.zeros(niter+1)

    # iterate
    for iiter in range(niter):
        xinvold = xinv.copy()

        # compute residual
        res = data - Op.matvec(xinv)
        if monitorres:
            normres = np.linalg.norm(res)
            if normres > normresold:
                raise ValueError('ISTA stopped at iteration %d due to '
                                 'residual increasing, consider modifying '
                                 'eps and/or alpha...' % iiter)
            else:
                normresold = normres

        # compute gradient
        grad = alpha * Op.rmatvec(res)

        # update inverted model
        xinv_unthesh = xinv + grad
        if perc is None:
            xinv = threshf(xinv_unthesh, thresh)
        else:
            xinv = threshf(xinv_unthesh, 100 - perc)

        # model update
        xupdate = np.linalg.norm(xinv - xinvold)

        if returninfo or show:
            costdata = 0.5 * np.linalg.norm(res) ** 2
            costreg = eps * np.linalg.norm(xinv, ord=1)
        if returninfo:
            cost[iiter] = costdata + costreg

        # run callback
        if callback is not None:
            callback(xinv)

        if show:
            if iiter < 10 or niter - iiter < 10 or iiter % 10 == 0:
                msg = '%6g  %12.5e  %10.3e   %9.3e  %10.3e' % \
                      (iiter+1, to_numpy(xinv[:2])[0], costdata,
                       costdata + costreg, xupdate)
                print(msg)

        # check tolerance
        if xupdate < tol:
            logging.warning('update smaller than tolerance for '
                            'iteration %d' % iiter)
            niter = iiter
            break

    # get values pre-threshold at locations where xinv is different from zero
    # xinv = np.where(xinv != 0, xinv_unthesh, xinv)

    if show:
        print('\nIterations = %d        Total time (s) = %.2f'
              % (niter, time.time() - tstart))
        print('---------------------------------------------------------\n')
    if returninfo:
        return xinv, niter, cost[:niter]
    else:
        return xinv, niter
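# Hedged usage sketch (illustrative only): same kind of sparse-recovery toy
# problem solved with ISTA and soft-percentile thresholding (the `perc`
# argument must then be provided), assuming pylops is installed.
import numpy as np
import pylops

np.random.seed(1)
n, m = 60, 100
A = np.random.randn(n, m)
x = np.zeros(m)
x[np.random.choice(m, 5, replace=False)] = 1.0   # 5-sparse model
Aop = pylops.MatrixMult(A)
y = Aop * x
# keep only the largest ~10% of coefficients at every iteration
xinv, niter_eff = pylops.optimization.sparsity.ISTA(Aop, y, niter=400,
                                                    threshkind='soft-percentile',
                                                    perc=10)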
Example #5
    def apply_multiplepoints(self,
                             trav,
                             G0=None,
                             nfft=None,
                             rtm=False,
                             greens=False,
                             dottest=False,
                             usematmul=False,
                             **kwargs_solver):
        r"""Marchenko redatuming for multiple points

        Solve the Marchenko redatuming inverse problem for multiple
        points given their direct arrival traveltime curves (``trav``)
        and waveforms (``G0``).

        Parameters
        ----------
        trav : :obj:`numpy.ndarray`
            Traveltime of first arrival from subsurface points to
            surface receivers of size :math:`[n_r \times n_{vs}]`
        G0 : :obj:`numpy.ndarray`, optional
            Direct arrival in time domain of size
            :math:`[n_r \times n_{vs} \times n_t]` (if None, create arrival
            using ``trav``)
        nfft : :obj:`int`, optional
            Number of samples in fft when creating the analytical direct wave
        rtm : :obj:`bool`, optional
            Compute and return rtm redatuming
        greens : :obj:`bool`, optional
            Compute and return Green's functions
        dottest : :obj:`bool`, optional
            Apply dot-test
        usematmul : :obj:`bool`, optional
            Use :func:`numpy.matmul` (``True``) or for-loop with :func:`numpy.dot`
            (``False``) in :py:class:`pylops.signalprocessing.Fredholm1` operator.
            Refer to Fredholm1 documentation for details.
        **kwargs_solver
            Arbitrary keyword arguments for chosen solver
            (:py:func:`scipy.sparse.linalg.lsqr` and
            :py:func:`pylops.optimization.solver.cgls` are used as default
            for numpy and cupy `data`, respectively)

        Returns
        -------
        f1_inv_minus : :obj:`numpy.ndarray`
            Inverted upgoing focusing function of size
            :math:`[n_r \times n_{vs} \times n_t]`
        f1_inv_plus : :obj:`numpy.ndarray`
            Inverted downgoing focusing function of size
            :math:`[n_r \times n_{vs} \times n_t]`
        p0_minus : :obj:`numpy.ndarray`
            Single-scattering standard redatuming upgoing Green's function
            of size :math:`[n_r \times n_{vs} \times n_t]`
        g_inv_minus : :obj:`numpy.ndarray`
            Inverted upgoing Green's function of size
            :math:`[n_r \times n_{vs} \times n_t]`
        g_inv_plus : :obj:`numpy.ndarray`
            Inverted downgoing Green's function of size
            :math:`[n_r \times n_{vs} \times n_t]`

        """
        nvs = trav.shape[1]

        # Create window
        trav_off = trav - self.toff
        trav_off = np.round(trav_off / self.dt).astype(int)

        w = np.zeros((self.nr, nvs, self.nt), dtype=self.dtype)
        for ir in range(self.nr):
            for ivs in range(nvs):
                w[ir, ivs, :trav_off[ir, ivs]] = 1
        w = np.concatenate((np.flip(w, axis=-1), w[:, :, 1:]), axis=-1)
        if self.nsmooth > 0:
            smooth = np.ones(self.nsmooth, dtype=self.dtype) / self.nsmooth
            w = filtfilt(smooth, 1, w)
        w = to_cupy_conditional(self.Rtwosided_fft, w)

        # Create operators
        Rop = MDC(
            self.Rtwosided_fft,
            self.nt2,
            nv=nvs,
            dt=self.dt,
            dr=self.dr,
            twosided=True,
            conj=False,
            fftengine=self.fftengine,
            transpose=False,
            prescaled=self.prescaled,
            usematmul=usematmul,
            dtype=self.dtype,
        )
        R1op = MDC(
            self.Rtwosided_fft,
            self.nt2,
            nv=nvs,
            dt=self.dt,
            dr=self.dr,
            twosided=True,
            conj=True,
            fftengine=self.fftengine,
            transpose=False,
            prescaled=self.prescaled,
            usematmul=usematmul,
            dtype=self.dtype,
        )
        Rollop = Roll(
            self.ns * nvs * self.nt2,
            dims=(self.nt2, self.ns, nvs),
            dir=0,
            shift=-1,
            dtype=self.dtype,
        )
        Wop = Diagonal(w.transpose(2, 0, 1).ravel())
        Iop = Identity(self.nr * nvs * self.nt2)
        Mop = Block([[Iop, -1 * Wop * Rop], [-1 * Wop * Rollop * R1op, Iop]
                     ]) * BlockDiag([Wop, Wop])
        Gop = Block([[Iop, -1 * Rop], [-1 * Rollop * R1op, Iop]])

        if dottest:
            Dottest(
                Gop,
                2 * self.nr * nvs * self.nt2,
                2 * self.nr * nvs * self.nt2,
                raiseerror=True,
                verb=True,
                backend=get_module_name(self.ncp),
            )
        if dottest:
            Dottest(
                Mop,
                2 * self.ns * nvs * self.nt2,
                2 * self.nr * nvs * self.nt2,
                raiseerror=True,
                verb=True,
                backend=get_module_name(self.ncp),
            )

        # Create input focusing function
        if G0 is None:
            if self.wav is not None and nfft is not None:
                G0 = np.zeros((self.nr, nvs, self.nt), dtype=self.dtype)
                for ivs in range(nvs):
                    G0[:, ivs] = (directwave(
                        self.wav,
                        trav[:, ivs],
                        self.nt,
                        self.dt,
                        nfft=nfft,
                        derivative=True,
                    )).T
                G0 = to_cupy_conditional(self.Rtwosided_fft, G0)
            else:
                logging.error("wav and/or nfft are not provided. "
                              "Provide either G0 or wav and nfft...")
                raise ValueError("wav and/or nfft are not provided. "
                                 "Provide either G0 or wav and nfft...")
        fd_plus = np.concatenate((
            np.flip(G0, axis=-1).transpose(2, 0, 1),
            self.ncp.zeros((self.nt - 1, self.nr, nvs), dtype=self.dtype),
        ))

        # Run standard redatuming as benchmark
        if rtm:
            p0_minus = Rop * fd_plus.ravel()
            p0_minus = p0_minus.reshape(self.nt2, self.ns,
                                        nvs).transpose(1, 2, 0)

        # Create data and inverse focusing functions
        d = Wop * Rop * fd_plus.ravel()
        d = np.concatenate((
            d.reshape(self.nt2, self.ns, nvs),
            self.ncp.zeros((self.nt2, self.ns, nvs), dtype=self.dtype),
        ))

        # Invert for focusing functions
        if self.ncp == np:
            f1_inv = lsqr(Mop, d.ravel(), **kwargs_solver)[0]
        else:
            f1_inv = cgls(Mop,
                          d.ravel(),
                          x0=self.ncp.zeros(2 * (2 * self.nt - 1) * self.nr *
                                            nvs,
                                            dtype=self.dtype),
                          **kwargs_solver)[0]

        f1_inv = f1_inv.reshape(2 * self.nt2, self.nr, nvs)
        f1_inv_tot = f1_inv + np.concatenate((self.ncp.zeros(
            (self.nt2, self.nr, nvs), dtype=self.dtype), fd_plus))
        f1_inv_minus = f1_inv_tot[:self.nt2].transpose(1, 2, 0)
        f1_inv_plus = f1_inv_tot[self.nt2:].transpose(1, 2, 0)

        if greens:
            # Create Green's functions
            g_inv = Gop * f1_inv_tot.ravel()
            g_inv = g_inv.reshape(2 * self.nt2, self.ns, nvs)
            g_inv_minus = -g_inv[:self.nt2].transpose(1, 2, 0)
            g_inv_plus = np.flip(g_inv[self.nt2:], axis=0).transpose(1, 2, 0)

        if rtm and greens:
            return f1_inv_minus, f1_inv_plus, p0_minus, g_inv_minus, g_inv_plus
        elif rtm:
            return f1_inv_minus, f1_inv_plus, p0_minus
        elif greens:
            return f1_inv_minus, f1_inv_plus, g_inv_minus, g_inv_plus
        else:
            return f1_inv_minus, f1_inv_plus
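# Hedged usage sketch (illustrative only, written as a separate driver script
# rather than part of the class above): build a Marchenko object from a
# placeholder reflection response and run apply_multiplepoints for two virtual
# points. It assumes pylops is installed (pylops.waveeqprocessing.Marchenko,
# pylops.utils.wavelets.ricker); all arrays and values are arbitrary stand-ins
# for real reflection data, a wavelet, and traveltimes.
import numpy as np
from pylops.utils.wavelets import ricker
from pylops.waveeqprocessing import Marchenko

np.random.seed(0)
ns = nr = 20
nt, dt, dr = 200, 0.004, 10.0
R = np.random.normal(0, 1e-2, (ns, nr, nt))      # placeholder reflection response
wav = ricker(np.arange(41) * dt, f0=15)[0]       # zero-phase wavelet
trav = 0.2 + 1e-3 * np.random.rand(nr, 2)        # traveltimes for 2 virtual points

MarchenkoWM = Marchenko(R, dt=dt, dr=dr, nfmax=100, wav=wav,
                        toff=0.045, nsmooth=10)
f1_minus, f1_plus = MarchenkoWM.apply_multiplepoints(trav, nfft=2**9,
                                                     iter_lim=5)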
Example #6
def PoststackInversion(data,
                       wav,
                       m0=None,
                       explicit=False,
                       simultaneous=False,
                       epsI=None,
                       epsR=None,
                       dottest=False,
                       epsRL1=None,
                       **kwargs_solver):
    r"""Post-stack linearized seismic inversion.

    Invert post-stack seismic operator to retrieve an elastic parameter of
    choice from band-limited seismic post-stack data.
    Depending on the choice of input parameters, inversion can be
    trace-by-trace with explicit operator or global with either
    explicit or linear operator.

    Parameters
    ----------
    data : :obj:`np.ndarray`
        Band-limited seismic post-stack data of size
        :math:`[n_{t0} (\times n_x \times n_y)]`
    wav : :obj:`np.ndarray`
        Wavelet in time domain (must have odd number of elements
        and centered to zero). If 1d, assume stationary wavelet for the entire
        time axis. If 2d of size :math:`[n_{t0} \times n_h]` use as
        non-stationary wavelet
    m0 : :obj:`np.ndarray`, optional
        Background model of size :math:`[n_{t0} (\times n_x \times n_y)]`
    explicit : :obj:`bool`, optional
        Create a chained linear operator (``False``, preferred for large data)
        or a ``MatrixMult`` linear operator with dense matrix
        (``True``, preferred for small data)
    simultaneous : :obj:`bool`, optional
        Simultaneously invert entire data (``True``) or invert
        trace-by-trace (``False``) when using ``explicit`` operator
        (note that the entire data is always inverted when working
        with linear operator)
    epsI : :obj:`float`, optional
        Damping factor for Tikhonov regularization term
    epsR : :obj:`float`, optional
        Damping factor for additional Laplacian regularization term
    dottest : :obj:`bool`, optional
        Apply dot-test
    epsRL1 : :obj:`float`, optional
        Damping factor for additional blockiness regularization term
    **kwargs_solver
        Arbitrary keyword arguments for :py:func:`scipy.linalg.lstsq`
        solver (if ``explicit=True`` and ``epsR=None``)
        or :py:func:`scipy.sparse.linalg.lsqr` solver (if ``explicit=False``
        and/or ``epsR`` is not ``None``)

    Returns
    -------
    minv : :obj:`np.ndarray`
        Inverted model of size :math:`[n_{t0} (\times n_x \times n_y)]`
    datar : :obj:`np.ndarray`
        Residual data (i.e., data - background data) of
        size :math:`[n_{t0} (\times n_x \times n_y)]`

    Notes
    -----
    The cost function and solver used in the seismic post-stack inversion
    module depends on the choice of ``explicit``, ``simultaneous``, ``epsI``,
    and ``epsR`` parameters:

    * ``explicit=True``, ``epsI=None`` and ``epsR=None``: the explicit
      solver :py:func:`scipy.linalg.lstsq` is used if ``simultaneous=False``
      (or the iterative solver :py:func:`scipy.sparse.linalg.lsqr` is used
      if ``simultaneous=True``)
    * ``explicit=True`` with ``epsI`` and ``epsR=None``: the regularized
      normal equations :math:`\mathbf{W}^T\mathbf{d} = (\mathbf{W}^T
      \mathbf{W} + \epsilon_I^2 \mathbf{I}) \mathbf{AI}` are instead fed
      into the :py:func:`scipy.linalg.lstsq` solver if ``simultaneous=False``
      (or the iterative solver :py:func:`scipy.sparse.linalg.lsqr`
      if ``simultaneous=True``)
    * ``explicit=False`` and ``epsR=None``: the iterative solver
      :py:func:`scipy.sparse.linalg.lsqr` is used
    * ``explicit=False`` with ``epsR`` and ``epsRL1=None``: the iterative
      solver :py:func:`pylops.optimization.leastsquares.RegularizedInversion`
      is used to solve the spatially regularized problem.
    * ``explicit=False`` with ``epsR`` and ``epsRL1``: the iterative
      solver :py:func:`pylops.optimization.sparsity.SplitBregman`
      is used to solve the blockiness-promoting (in vertical direction)
      and spatially regularized (in additional horizontal directions) problem.

    Note that the convergence of iterative solvers such as
    :py:func:`scipy.sparse.linalg.lsqr` can be very slow for this type of
    operator. It is suggested to take a two-step approach: first, a
    trace-by-trace inversion using the explicit operator, followed by a
    regularized global inversion using the outcome of the previous
    inversion as the initial guess.
    """
    ncp = get_array_module(wav)

    # check if background model and data have same shape
    if m0 is not None and data.shape != m0.shape:
        raise ValueError('data and m0 must have same shape')

    # find out dimensions
    if data.ndim == 1:
        dims = 1
        nt0 = data.size
        nspat = None
        nspatprod = nx = 1
    elif data.ndim == 2:
        dims = 2
        nt0, nx = data.shape
        nspat = (nx, )
        nspatprod = nx
    else:
        dims = 3
        nt0, nx, ny = data.shape
        nspat = (nx, ny)
        nspatprod = nx * ny
        data = data.reshape(nt0, nspatprod)

    # create operator
    PPop = PoststackLinearModelling(wav,
                                    nt0=nt0,
                                    spatdims=nspat,
                                    explicit=explicit)
    if dottest:
        Dottest(PPop,
                nt0 * nspatprod,
                nt0 * nspatprod,
                raiseerror=True,
                backend=get_module_name(ncp),
                verb=True)

    # create and remove background data from original data
    datar = data.flatten() if m0 is None else \
        data.flatten() - PPop * m0.flatten()
    # invert model
    if epsR is None:
        # inversion without spatial regularization
        if explicit:
            if epsI is None and not simultaneous:
                # solve unregularized equations independently trace-by-trace
                minv = \
                get_lstsq(data)(PPop.A, datar.reshape(nt0, nspatprod).squeeze(),
                                **kwargs_solver)[0]
            elif epsI is None and simultaneous:
                # solve unregularized equations simultaneously
                if ncp == np:
                    minv = lsqr(PPop, datar, **kwargs_solver)[0]
                else:
                    minv = \
                        cgls(PPop, datar,
                             x0=ncp.zeros(int(PPop.shape[1]), PPop.dtype),
                             **kwargs_solver)[0]
            elif epsI is not None:
                # create regularized normal equations
                PP = ncp.dot(PPop.A.T, PPop.A) + \
                     epsI * ncp.eye(nt0, dtype=PPop.A.dtype)
                datarn = ncp.dot(PPop.A.T, datar.reshape(nt0, nspatprod))
                if not simultaneous:
                    # solve regularized normal eqs. trace-by-trace
                    minv = get_lstsq(data)(PP, datarn, **kwargs_solver)[0]
                else:
                    # solve regularized normal equations simultaneously
                    PPop_reg = MatrixMult(PP, dims=nspatprod)
                    if ncp == np:
                        minv = lsqr(PPop_reg, datar.ravel(),
                                    **kwargs_solver)[0]
                    else:
                        minv = cgls(PPop_reg,
                                    datar.ravel(),
                                    x0=ncp.zeros(int(PPop_reg.shape[1]),
                                                 PPop_reg.dtype),
                                    **kwargs_solver)[0]
            else:
                # create regularized normal eqs. and solve them simultaneously
                PP = ncp.dot(PPop.A.T, PPop.A) + \
                     epsI * ncp.eye(nt0, dtype=PPop.A.dtype)
                datarn = ncp.dot(PPop.A.T, datar.reshape(nt0, nspatprod))
                PPop_reg = MatrixMult(PP, dims=nspatprod)
                minv = \
                    get_lstsq(data)(PPop_reg.A, datarn.flatten(),
                                    **kwargs_solver)[0]
        else:
            # solve unregularized normal equations simultaneously with lop
            if ncp == np:
                minv = lsqr(PPop, datar, **kwargs_solver)[0]
            else:
                minv = \
                    cgls(PPop, datar,
                         x0=ncp.zeros(int(PPop.shape[1]), PPop.dtype),
                         **kwargs_solver)[0]
    else:
        if epsRL1 is None:
            # L2 inversion with spatial regularization
            if dims == 1:
                Regop = SecondDerivative(nt0, dtype=PPop.dtype)
            elif dims == 2:
                Regop = Laplacian((nt0, nx), dtype=PPop.dtype)
            else:
                Regop = Laplacian((nt0, nx, ny), dirs=(1, 2), dtype=PPop.dtype)

            minv = RegularizedInversion(
                PPop, [Regop],
                data.flatten(),
                x0=None if m0 is None else m0.flatten(),
                epsRs=[epsR],
                returninfo=False,
                **kwargs_solver)
        else:
            # Blockiness-promoting inversion with spatial regularization
            if dims == 1:
                RegL1op = FirstDerivative(nt0,
                                          kind='forward',
                                          dtype=PPop.dtype)
                RegL2op = None
            elif dims == 2:
                RegL1op = FirstDerivative(nt0 * nx,
                                          dims=(nt0, nx),
                                          dir=0,
                                          kind='forward',
                                          dtype=PPop.dtype)
                RegL2op = SecondDerivative(nt0 * nx,
                                           dims=(nt0, nx),
                                           dir=1,
                                           dtype=PPop.dtype)
            else:
                RegL1op = FirstDerivative(nt0 * nx * ny,
                                          dims=(nt0, nx, ny),
                                          dir=0,
                                          kind='forward',
                                          dtype=PPop.dtype)
                RegL2op = Laplacian((nt0, nx, ny),
                                    dirs=(1, 2),
                                    dtype=PPop.dtype)

            mu = kwargs_solver.pop('mu', 1.)
            niter_outer = kwargs_solver.pop('niter_outer', 3)
            niter_inner = kwargs_solver.pop('niter_inner', 5)
            if not isinstance(epsRL1, (list, tuple)):
                epsRL1 = [epsRL1]
            if not isinstance(epsR, (list, tuple)):
                epsR = [epsR]
            minv = SplitBregman(PPop, [RegL1op],
                                data.ravel(),
                                RegsL2=[RegL2op],
                                epsRL1s=epsRL1,
                                epsRL2s=epsR,
                                mu=mu,
                                niter_outer=niter_outer,
                                niter_inner=niter_inner,
                                x0=None if m0 is None else m0.flatten(),
                                **kwargs_solver)[0]

    # compute residual
    if epsR is None:
        datar -= PPop * minv.ravel()
    else:
        datar = data.ravel() - PPop * minv.ravel()

    # reshape inverted model and residual data
    if dims == 1:
        minv = minv.squeeze()
        datar = datar.squeeze()
    elif dims == 2:
        minv = minv.reshape(nt0, nx)
        datar = datar.reshape(nt0, nx)
    else:
        minv = minv.reshape(nt0, nx, ny)
        datar = datar.reshape(nt0, nx, ny)

    if m0 is not None and epsR is None:
        minv = minv + m0

    return minv, datar
Example #7
    def apply_onepoint(self,
                       trav,
                       G0=None,
                       nfft=None,
                       rtm=False,
                       greens=False,
                       dottest=False,
                       fast=None,
                       **kwargs_solver):
        r"""Marchenko redatuming for one point

        Solve the Marchenko redatuming inverse problem for a single point
        given its direct arrival traveltime curve (``trav``)
        and waveform (``G0``).

        Parameters
        ----------
        trav : :obj:`numpy.ndarray`
            Traveltime of first arrival from subsurface point to
            surface receivers of size :math:`[n_r \times 1]`
        G0 : :obj:`numpy.ndarray`, optional
            Direct arrival in time domain of size :math:`[n_r \times n_t]`
            (if None, create arrival using ``trav``)
        nfft : :obj:`int`, optional
            Number of samples in fft when creating the analytical direct wave
        rtm : :obj:`bool`, optional
            Compute and return rtm redatuming
        greens : :obj:`bool`, optional
            Compute and return Green's functions
        dottest : :obj:`bool`, optional
            Apply dot-test
        fast : :obj:`bool`
            *Deprecated*, will be removed in v2.0.0
        **kwargs_solver
            Arbitrary keyword arguments for chosen solver
            (:py:func:`scipy.sparse.linalg.lsqr` and
            :py:func:`pylops.optimization.solver.cgls` are used as default
            for numpy and cupy `data`, respectively)

        Returns
        -------
        f1_inv_minus : :obj:`numpy.ndarray`
            Inverted upgoing focusing function of size :math:`[n_r \times n_t]`
        f1_inv_plus : :obj:`numpy.ndarray`
            Inverted downgoing focusing function
            of size :math:`[n_r \times n_t]`
        p0_minus : :obj:`numpy.ndarray`
            Single-scattering standard redatuming upgoing Green's function of
            size :math:`[n_r \times n_t]`
        g_inv_minus : :obj:`numpy.ndarray`
            Inverted upgoing Green's function of size :math:`[n_r \times n_t]`
        g_inv_plus : :obj:`numpy.ndarray`
            Inverted downgoing Green's function
            of size :math:`[n_r \times n_t]`
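
        Examples
        --------
        A hedged usage sketch: the reflection response ``R`` (of size
        :math:`[n_s \times n_r \times n_t]`), the arrays ``trav`` and ``G0``,
        and the values of ``dt``, ``dr``, ``nfmax`` and ``wav`` are assumed
        to be available in the calling scope; all numbers are illustrative.

        >>> from pylops.waveeqprocessing import Marchenko
        >>> MarchenkoWM = Marchenko(R, dt=dt, dr=dr, nfmax=nfmax, wav=wav,
        ...                         toff=0.045, nsmooth=10)
        >>> f1_m, f1_p, p0_m, g_m, g_p = MarchenkoWM.apply_onepoint(
        ...     trav, G0=G0, rtm=True, greens=True, dottest=False,
        ...     **dict(iter_lim=10, show=False))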

        """
        # Create window
        trav_off = trav - self.toff
        trav_off = np.round(trav_off / self.dt).astype(int)

        w = np.zeros((self.nr, self.nt), dtype=self.dtype)
        for ir in range(self.nr):
            w[ir, :trav_off[ir]] = 1
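        # mirror the window onto a two-sided time axis and smooth its edges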
        w = np.hstack((np.fliplr(w), w[:, 1:]))
        if self.nsmooth > 0:
            smooth = np.ones(self.nsmooth, dtype=self.dtype) / self.nsmooth
            w = filtfilt(smooth, 1, w)
        w = to_cupy_conditional(self.Rtwosided_fft, w)

        # Create operators
        Rop = MDC(self.Rtwosided_fft,
                  self.nt2,
                  nv=1,
                  dt=self.dt,
                  dr=self.dr,
                  twosided=True,
                  conj=False,
                  transpose=False,
                  saveGt=self.saveRt,
                  prescaled=self.prescaled,
                  dtype=self.dtype)
        R1op = MDC(self.Rtwosided_fft,
                   self.nt2,
                   nv=1,
                   dt=self.dt,
                   dr=self.dr,
                   twosided=True,
                   conj=True,
                   transpose=False,
                   saveGt=self.saveRt,
                   prescaled=self.prescaled,
                   dtype=self.dtype)
        Rollop = Roll(self.nt2 * self.ns,
                      dims=(self.nt2, self.ns),
                      dir=0,
                      shift=-1,
                      dtype=self.dtype)
        Wop = Diagonal(w.T.flatten())
        Iop = Identity(self.nr * self.nt2)
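        # Mop is the windowed Marchenko system inverted below for the focusing
        # functions; Gop is its unwindowed counterpart, used to evaluate the
        # Green's functions from the estimated focusing functions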
        Mop = Block([[Iop, -1 * Wop * Rop],
                     [-1 * Wop * Rollop * R1op, Iop]]) * BlockDiag([Wop, Wop])
        Gop = Block([[Iop, -1 * Rop], [-1 * Rollop * R1op, Iop]])

        if dottest:
            Dottest(Gop,
                    2 * self.ns * self.nt2,
                    2 * self.nr * self.nt2,
                    raiseerror=True,
                    verb=True,
                    backend=get_module_name(self.ncp))
            Dottest(Mop,
                    2 * self.ns * self.nt2,
                    2 * self.nr * self.nt2,
                    raiseerror=True,
                    verb=True,
                    backend=get_module_name(self.ncp))

        # Create input focusing function
        if G0 is None:
            if self.wav is not None and nfft is not None:
                G0 = (directwave(self.wav,
                                 trav,
                                 self.nt,
                                 self.dt,
                                 nfft=nfft,
                                 derivative=True)).T
                G0 = to_cupy_conditional(self.Rtwosided_fft, G0)
            else:
                logging.error('wav and/or nfft are not provided. '
                              'Provide either G0 or wav and nfft...')
                raise ValueError('wav and/or nfft are not provided. '
                                 'Provide either G0 or wav and nfft...')
        fd_plus = np.concatenate((np.fliplr(G0).T,
                                  self.ncp.zeros((self.nt - 1, self.nr),
                                                 dtype=self.dtype)))

        # Run standard redatuming as benchmark
        if rtm:
            p0_minus = Rop * fd_plus.flatten()
            p0_minus = p0_minus.reshape(self.nt2, self.ns).T

        # Create data and inverse focusing functions
        d = Wop * Rop * fd_plus.flatten()
        d = np.concatenate((d.reshape(self.nt2, self.ns),
                            self.ncp.zeros((self.nt2, self.ns), self.dtype)))

        # Invert for focusing functions
        if self.ncp == np:
            f1_inv = lsqr(Mop, d.flatten(), **kwargs_solver)[0]
        else:
            f1_inv = cgls(Mop,
                          d.flatten(),
                          x0=self.ncp.zeros(2 * (2 * self.nt - 1) * self.nr,
                                            dtype=self.dtype),
                          **kwargs_solver)[0]

        f1_inv = f1_inv.reshape(2 * self.nt2, self.nr)
        f1_inv_tot = f1_inv + np.concatenate((self.ncp.zeros(
            (self.nt2, self.nr), dtype=self.dtype), fd_plus))
        f1_inv_minus = f1_inv_tot[:self.nt2].T
        f1_inv_plus = f1_inv_tot[self.nt2:].T
        if greens:
            # Create Green's functions
            g_inv = Gop * f1_inv_tot.flatten()
            g_inv = g_inv.reshape(2 * self.nt2, self.ns)
            g_inv_minus, g_inv_plus = -g_inv[:self.nt2].T, \
                                      np.fliplr(g_inv[self.nt2:].T)
        if rtm and greens:
            return f1_inv_minus, f1_inv_plus, p0_minus, g_inv_minus, g_inv_plus
        elif rtm:
            return f1_inv_minus, f1_inv_plus, p0_minus
        elif greens:
            return f1_inv_minus, f1_inv_plus, g_inv_minus, g_inv_plus
        else:
            return f1_inv_minus, f1_inv_plus
Example #8
def WavefieldDecomposition(p,
                           vz,
                           nt,
                           nr,
                           dt,
                           dr,
                           rho,
                           vel,
                           nffts=(None, None, None),
                           critical=100.0,
                           ntaper=10,
                           scaling=1.0,
                           kind="inverse",
                           restriction=None,
                           sptransf=None,
                           solver=lsqr,
                           dottest=False,
                           dtype="complex128",
                           **kwargs_solver):
    r"""Up-down wavefield decomposition.

    Apply seismic wavefield decomposition from multi-component (pressure
    and vertical particle velocity) data. This process is also generally
    referred to as data-based deghosting.

    Parameters
    ----------
    p : :obj:`np.ndarray`
        Pressure data of size :math:`\lbrack n_{r_x} \,(\times n_{r_y})
        \times n_t \rbrack` (or :math:`\lbrack n_{r_{x,\text{sub}}}
        \,(\times n_{r_{y,\text{sub}}}) \times n_t \rbrack`
        when a ``restriction`` operator is provided; in that case
        :math:`n_{r_{x,\text{sub}}}` (and :math:`n_{r_{y,\text{sub}}}`)
        must agree with the size of the output of the restriction operator)
    vz : :obj:`np.ndarray`
        Vertical particle velocity data of same size as pressure data
    nt : :obj:`int`
        Number of samples along the time axis
    nr : :obj:`int` or :obj:`tuple`
        Number of samples along the receiver axis (or axes)
    dt : :obj:`float`
        Sampling along the time axis
    dr : :obj:`float` or :obj:`tuple`
        Sampling along the receiver array (or axes)
    rho : :obj:`float`
        Density :math:`\rho` along the receiver array (must be constant)
    vel : :obj:`float`
        Velocity :math:`c` along the receiver array (must be constant)
    nffts : :obj:`tuple`, optional
        Number of samples along the wavenumber and frequency axes
    critical : :obj:`float`, optional
        Percentage of angles to retain in the obliquity factor. For example,
        if ``critical=100``, only wavenumbers satisfying :math:`|k_x| \leq
        \frac{f}{c}` (i.e., angles up to the critical angle) are retained
    ntaper : :obj:`int`, optional
        Number of samples of the taper applied to the obliquity factor around
        the critical angle
    kind : :obj:`str`, optional
        Type of separation: ``inverse`` (default) or ``analytical``
    scaling : :obj:`float`, optional
        Scaling to apply to the operator (see Notes of
        :func:`pylops.waveeqprocessing.wavedecomposition.UpDownComposition2D`
        for more details)
    restriction : :obj:`pylops.LinearOperator`, optional
        Restriction operator
    sptransf : :obj:`pylops.LinearOperator`, optional
        Sparsifying operator
    solver : :obj:`callable`, optional
        Function handle of solver to be used if ``kind='inverse'``
    dottest : :obj:`bool`, optional
        Apply dot-test
    dtype : :obj:`str`, optional
        Type of elements in input array.
    **kwargs_solver
        Arbitrary keyword arguments for chosen ``solver``

    Returns
    -------
    pup : :obj:`np.ndarray`
        Up-going wavefield
    pdown : :obj:`np.ndarray`
        Down-going wavefield

    Raises
    ------
    KeyError
        If ``kind`` is neither ``analytical`` nor ``inverse``

    Notes
    -----
    Up- and down-going components of seismic data :math:`p^-(x, t)`
    and :math:`p^+(x, t)` can be estimated from multi-component data
    :math:`p(x, t)` and :math:`v_z(x, t)` by computing the following
    expression [1]_:

    .. math::
        \begin{bmatrix}
            \hat{p}^+  \\
            \hat{p}^-
        \end{bmatrix}(k_x, \omega) = \frac{1}{2}
        \begin{bmatrix}
            1  & \frac{\omega \rho}{k_z} \\
            1  & - \frac{\omega \rho}{k_z}  \\
        \end{bmatrix}
        \begin{bmatrix}
            \hat{p}  \\
            \hat{v}_z
        \end{bmatrix}(k_x, \omega)

    if ``kind='analytical'`` or alternatively by solving the equation in
    :func:`ptcpy.waveeqprocessing.UpDownComposition2D` as an inverse problem,
    if ``kind='inverse'``.

    The latter approach has several advantages, as data regularization
    can be included as part of the separation process, allowing the input
    data to be spatially aliased. This is obtained by solving the
    following problem:

    .. math::
        \begin{bmatrix}
            \mathbf{p}  \\
            s\mathbf{v_z}
        \end{bmatrix} =
        \begin{bmatrix}
            \mathbf{R}\mathbf{F} & 0 \\
            0 & s\mathbf{R}\mathbf{F}
        \end{bmatrix} \mathbf{W} \begin{bmatrix}
            \mathbf{F}^H \mathbf{S} & 0 \\
            0 & \mathbf{F}^H \mathbf{S}
        \end{bmatrix}  \mathbf{p^{\pm}}

    where :math:`\mathbf{R}` is a :class:`ptcpy.basicoperators.Restriction`
    operator and :math:`\mathbf{S}` is a sparsifying transform operator (e.g.,
    :class:`ptcpy.signalprocessing.Radon2D`).

    .. [1] Wapenaar, K. "Reciprocity properties of one-way propagators",
       Geophysics, vol. 63, pp. 1795-1798. 1998.
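
    Examples
    --------
    A minimal sketch, assuming 2D pressure ``p`` and vertical particle
    velocity ``vz`` arrays of size :math:`\lbrack n_{r_x} \times n_t \rbrack`
    with samplings ``dr`` and ``dt`` are already available; all values below
    are illustrative:

    >>> from scipy.sparse.linalg import lsqr
    >>> from pylops.waveeqprocessing import WavefieldDecomposition
    >>> pup, pdown = WavefieldDecomposition(
    ...     p, vz, nt, nr, dt, dr, rho=1000.0, vel=1500.0,
    ...     critical=90.0, ntaper=10, kind="inverse", solver=lsqr,
    ...     **dict(damp=1e-10, iter_lim=20))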

    """
    ncp = get_array_module(p)
    backend = get_module_name(ncp)

    ndims = p.ndim
    if ndims == 2:
        dims = (nr, nt)
        dims2 = (2 * nr, nt)
        nr2 = nr
        decomposition = _obliquity2D
        composition = UpDownComposition2D
    else:
        dims = (nr[0], nr[1], nt)
        dims2 = (2 * nr[0], nr[1], nt)
        nr2 = nr[0]
        decomposition = _obliquity3D
        composition = UpDownComposition3D
    if kind == "analytical":
        FFTop, OBLop = decomposition(
            nt,
            nr,
            dt,
            dr,
            rho,
            vel,
            nffts=nffts,
            critical=critical,
            ntaper=ntaper,
            composition=False,
            backend=backend,
            dtype=dtype,
        )
        VZ = FFTop * vz.ravel()

        # scaled Vz
        VZ_obl = OBLop * VZ
        vz_obl = FFTop.H * VZ_obl
        vz_obl = ncp.real(vz_obl.reshape(dims))

        #  separation
        pup = (p - vz_obl) / 2
        pdown = (p + vz_obl) / 2

    elif kind == "inverse":
        d = ncp.concatenate((p.ravel(), scaling * vz.ravel()))
        UDop = composition(
            nt,
            nr,
            dt,
            dr,
            rho,
            vel,
            nffts=nffts,
            critical=critical,
            ntaper=ntaper,
            scaling=scaling,
            backend=backend,
            dtype=dtype,
        )
        if restriction is not None:
            UDop = restriction * UDop
        if sptransf is not None:
            UDop = UDop * BlockDiag([sptransf, sptransf])
            UDop.dtype = ncp.real(ncp.ones(1, UDop.dtype)).dtype
        if dottest:
            Dottest(
                UDop,
                UDop.shape[0],
                UDop.shape[1],
                complexflag=2,
                backend=backend,
                verb=True,
            )

        # separation by inversion
        dud = solver(UDop, d.ravel(), **kwargs_solver)[0]
        if sptransf is None:
            dud = ncp.real(dud)
        else:
            dud = BlockDiag([sptransf, sptransf]) * ncp.real(dud)
        dud = dud.reshape(dims2)
        pdown, pup = dud[:nr2], dud[nr2:]
    else:
        raise KeyError("kind must be analytical or inverse")

    return pup, pdown