Example #1
def test_Sliding3D(par):
    """Dot-test and inverse for Sliding3D operator"""
    Op = MatrixMult(
        np.ones((par["nwiny"] * par["nwinx"] * par["nt"],
                 par["ny"] * par["nx"] * par["nt"])))

    Slid = Sliding3D(
        Op,
        dims=(par["ny"] * par["winsy"], par["nx"] * par["winsx"], par["nt"]),
        dimsd=(par["npy"], par["npx"], par["nt"]),
        nwin=(par["nwiny"], par["nwinx"]),
        nover=(par["novery"], par["noverx"]),
        nop=(par["ny"], par["nx"]),
        tapertype=par["tapertype"],
    )
    assert dottest(
        Slid,
        par["npy"] * par["npx"] * par["nt"],
        par["ny"] * par["nx"] * par["nt"] * par["winsy"] * par["winsx"],
    )
    x = np.ones(
        (par["ny"] * par["nx"] * par["winsy"] * par["winsx"], par["nt"]))
    y = Slid * x.ravel()

    xinv = LinearOperator(Slid) / y
    assert_array_almost_equal(x.ravel(), xinv)
Example #2
def FirstDirectionalDerivative(dims, v, sampling=1, edge=False,
                               dtype='float64'):
    r"""First Directional derivative.

    Apply directional derivative operator to a multi-dimensional
    array (at least 2 dimensions are required) along either a single common
    direction or different directions for each point of the array.

    Parameters
    ----------
    dims : :obj:`tuple`
        Number of samples for each dimension.
    v : :obj:`np.ndarray`, optional
        Single direction (array of size :math:`n_\text{dims}`) or group of directions
        (array of size :math:`[n_\text{dims} \times n_{d_0} \times ... \times n_{d_{n_\text{dims}}}]`)
    sampling : :obj:`tuple`, optional
        Sampling steps for each direction.
    edge : :obj:`bool`, optional
        Use reduced order derivative at edges (``True``) or
        ignore them (``False``).
    dtype : :obj:`str`, optional
        Type of elements in input array.

    Returns
    -------
    ddop : :obj:`pylops.LinearOperator`
        First directional derivative linear operator

    Notes
    -----
    The FirstDirectionalDerivative applies a first-order derivative
    to a multi-dimensional array along the direction defined by the unitary
    vector :math:`\mathbf{v}`:

    .. math::
        df_\mathbf{v} =
            \nabla f \mathbf{v}

    or along the directions defined by the unitary vectors
    :math:`\mathbf{v}(x, y)`:

    .. math::
        df_\mathbf{v}(x,y) =
            \nabla f(x,y) \mathbf{v}(x,y)

    where we have here considered the 2-dimensional case.

    This operator can be easily implemented as the chain of the
    :py:class:`pylops.Gradient` operator, the :py:class:`pylops.Diagonal`
    operator with :math:`\mathbf{v}` along the main diagonal, and the
    :py:class:`pylops.Sum` operator.

    """
    Gop = Gradient(dims, sampling=sampling, edge=edge, dtype=dtype)
    if v.ndim == 1:
        Dop = Diagonal(v, dims=[len(dims)]+list(dims), dir=0, dtype=dtype)
    else:
        Dop = Diagonal(v.ravel(), dtype=dtype)
    Sop = Sum(dims=[len(dims)]+list(dims), dir=0, dtype=dtype)
    ddop = Sop * Dop * Gop
    return LinearOperator(ddop)
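
A minimal usage sketch of the operator above, assuming the standard pylops top-level namespace; the direction vector and array sizes are illustrative only:

import numpy as np
import pylops

# derivative of a 2D array along the 45-degree direction
nx, ny = 10, 10
v = np.ones(2) / np.sqrt(2)  # unitary direction vector

Dop = pylops.FirstDirectionalDerivative((nx, ny), v, sampling=(1, 1))
x = np.outer(np.arange(nx), np.ones(ny))  # ramp along the first axis
y = Dop * x.ravel()  # directional derivative of the ramp, flattened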
Example #3
def SecondDirectionalDerivative(dims,
                                v,
                                sampling=1,
                                edge=False,
                                dtype="float64"):
    r"""Second Directional derivative.

    Apply a second directional derivative operator to a multi-dimensional array
    along either a single common direction or different directions for each
    point of the array.

    .. note:: At least 2 dimensions are required, consider using
      :py:func:`pylops.SecondDerivative` for 1d arrays.

    Parameters
    ----------
    dims : :obj:`tuple`
        Number of samples for each dimension.
    v : :obj:`np.ndarray`, optional
        Single direction (array of size :math:`n_\text{dims}`) or group of directions
        (array of size :math:`[n_\text{dims} \times n_{d_0} \times ... \times n_{d_{n_\text{dims}}}]`)
    sampling : :obj:`tuple`, optional
        Sampling steps for each direction.
    edge : :obj:`bool`, optional
        Use reduced order derivative at edges (``True``) or
        ignore them (``False``).
    dtype : :obj:`str`, optional
        Type of elements in input array.

    Returns
    -------
    ddop : :obj:`pylops.LinearOperator`
        Second directional derivative linear operator

    Notes
    -----
    The SecondDirectionalDerivative applies a second-order derivative
    to a multi-dimensional array along the direction defined by the unitary
    vector :math:`\mathbf{v}`:

    .. math::
        d^2f_\mathbf{v} =
            - D_\mathbf{v}^T [D_\mathbf{v} f]

    where :math:`D_\mathbf{v}` is the first-order directional derivative
    implemented by :func:`pylops.FirstDirectionalDerivative`.

    This operator is sometimes also referred to as directional Laplacian
    in the literature.
    """
    Dop = FirstDirectionalDerivative(dims,
                                     v,
                                     sampling=sampling,
                                     edge=edge,
                                     dtype=dtype)
    ddop = -Dop.H * Dop
    return LinearOperator(ddop)
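
As a quick check of the relation d2 = -Dv^H (Dv x) stated in the Notes, a sketch comparing the two constructions (array sizes and direction are illustrative):

import numpy as np
import pylops

dims = (12, 12)
v = np.array([1.0, 0.0])  # derivative along the first axis only

D2op = pylops.SecondDirectionalDerivative(dims, v)
Dop = pylops.FirstDirectionalDerivative(dims, v)

x = np.random.randn(*dims).ravel()
# both sides implement d2 = -Dv^H (Dv x)
np.testing.assert_array_almost_equal(D2op * x, -(Dop.H * (Dop * x)))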
Example #4
def test_Sliding1D(par):
    """Dot-test and inverse for Sliding1D operator
    """
    Op = MatrixMult(np.ones((par['nwiny'], par['ny'])))

    Slid = Sliding1D(Op,
                     dim=par['ny'] * par['winsy'],
                     dimd=par['npy'],
                     nwin=par['nwiny'],
                     nover=par['novery'],
                     tapertype=par['tapertype'])
    assert dottest(Slid, par['npy'], par['ny'] * par['winsy'])
    x = np.ones(par['ny'] * par['winsy'])
    y = Slid * x.flatten()

    xinv = LinearOperator(Slid) / y
    assert_array_almost_equal(x.flatten(), xinv)
Example #5
def test_Patch2D(par):
    """Dot-test and inverse for Patch2D operator
    """
    Op = MatrixMult(
        np.ones((par['nwiny'] * par['nwint'], par['ny'] * par['nt'])))

    Pop = Patch2D(Op,
                  dims=(par['ny'] * par['winsy'], par['nt'] * par['winst']),
                  dimsd=(par['npy'], par['npt']),
                  nwin=(par['nwiny'], par['nwint']),
                  nover=(par['novery'], par['novert']),
                  nop=(par['ny'], par['nt']),
                  tapertype=par['tapertype'])
    assert dottest(Pop, par['npy'] * par['npt'],
                   par['ny'] * par['nt'] * par['winsy'] * par['winst'])
    x = np.ones((par['ny'] * par['winsy'], par['nt'] * par['winst']))
    y = Pop * x.flatten()

    xinv = LinearOperator(Pop) / y
    assert_array_almost_equal(x.flatten(), xinv)
Example #6
def test_Patch2D(par):
    """Dot-test and inverse for Patch2D operator"""
    Op = MatrixMult(
        np.ones((par["nwiny"] * par["nwint"], par["ny"] * par["nt"])))

    Pop = Patch2D(
        Op,
        dims=(par["ny"] * par["winsy"], par["nt"] * par["winst"]),
        dimsd=(par["npy"], par["npt"]),
        nwin=(par["nwiny"], par["nwint"]),
        nover=(par["novery"], par["novert"]),
        nop=(par["ny"], par["nt"]),
        tapertype=par["tapertype"],
    )
    assert dottest(Pop, par["npy"] * par["nt"],
                   par["ny"] * par["nt"] * par["winsy"] * par["winst"])
    x = np.ones((par["ny"] * par["winsy"], par["nt"] * par["winst"]))
    y = Pop * x.ravel()

    xinv = LinearOperator(Pop) / y
    assert_array_almost_equal(x.ravel(), xinv)
Example #7
def FISTA(Op,
          data,
          niter,
          eps=0.1,
          alpha=None,
          eigsiter=None,
          eigstol=0,
          tol=1e-10,
          returninfo=False,
          show=False,
          threshkind='soft',
          perc=None,
          callback=None):
    r"""Fast Iterative Shrinkage-Thresholding Algorithm (FISTA).

    Solve an optimization problem with :math:`L1` regularization function given
    the operator ``Op`` and data ``y``. The operator can be real or complex,
    and should ideally be either square :math:`N=M` or underdetermined
    :math:`N<M`.

    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        Operator to invert
    data : :obj:`numpy.ndarray`
        Data
    niter : :obj:`int`
        Number of iterations
    eps : :obj:`float`, optional
        Sparsity damping
    alpha : :obj:`float`, optional
        Step size. :math:`\alpha \le 1/\lambda_{max}(\mathbf{Op}^H\mathbf{Op})`
        guarantees convergence. If ``None``, the maximum eigenvalue is
        estimated and the optimal step size is chosen; if provided, the
        condition is not checked internally.
    eigsiter : :obj:`int`, optional
        Number of iterations for eigenvalue estimation if ``alpha=None``
    eigstol : :obj:`float`, optional
        Tolerance for eigenvalue estimation if ``alpha=None``
    tol : :obj:`float`, optional
        Tolerance. Stop iterations if difference between inverted model
        at subsequent iterations is smaller than ``tol``
    returninfo : :obj:`bool`, optional
        Return info of FISTA solver
    show : :obj:`bool`, optional
        Display iterations log
    threshkind : :obj:`str`, optional
        Kind of thresholding ('hard', 'soft', 'half', 'soft-percentile', or
        'half-percentile' - 'soft' used as default)
    perc : :obj:`float`, optional
        Percentile, as percentage of values to be kept by thresholding (to be
        provided when thresholding is soft-percentile or half-percentile)
    callback : :obj:`callable`, optional
        Function with signature (``callback(x)``) to call after each iteration
        where ``x`` is the current model vector

    Returns
    -------
    xinv : :obj:`numpy.ndarray`
        Inverted model
    niter : :obj:`int`
        Number of effective iterations
    cost : :obj:`numpy.ndarray`, optional
        History of cost function

    Raises
    ------
    NotImplementedError
        If ``threshkind`` is different from hard, soft, half, hard-percentile,
        soft-percentile, or half-percentile
    ValueError
        If ``perc=None`` when ``threshkind`` is hard-percentile,
        soft-percentile, or half-percentile

    See Also
    --------
    OMP: Orthogonal Matching Pursuit (OMP).
    ISTA: Iterative Shrinkage-Thresholding Algorithm (ISTA).
    SPGL1: Spectral Projected-Gradient for L1 norm (SPGL1).
    SplitBregman: Split Bregman for mixed L2-L1 norms.

    Notes
    -----
    Solves the following optimization problem for the operator
    :math:`\mathbf{Op}` and the data :math:`\mathbf{d}`:

    .. math::
        J = ||\mathbf{d} - \mathbf{Op} \mathbf{x}||_2^2 +
            \epsilon ||\mathbf{x}||_p

    using the Fast Iterative Shrinkage-Thresholding Algorithm (FISTA) [1]_,
    where :math:`p=0, 1, 1/2`. This is a modified version of the ISTA solver
    with improved convergence properties and limited additional computational
    cost.
    Similarly to the ISTA solver, the choice of the thresholding algorithm to
    apply at every iteration is based on the choice of :math:`p`.

    .. [1] Beck, A., and Teboulle, M., “A Fast Iterative Shrinkage-Thresholding
       Algorithm for Linear Inverse Problems”, SIAM Journal on
       Imaging Sciences, vol. 2, pp. 183-202. 2009.

    """
    if threshkind not in [
            'hard', 'soft', 'half', 'hard-percentile', 'soft-percentile',
            'half-percentile'
    ]:
        raise NotImplementedError('threshkind should be hard, soft, half, '
                                  'hard-percentile, soft-percentile, '
                                  'or half-percentile')
    if threshkind in ['hard-percentile', 'soft-percentile',
                      'half-percentile'] and perc is None:
        raise ValueError('Provide a percentile when choosing hard-percentile, '
                         'soft-percentile, or half-percentile thresholding')

    # choose thresholding function
    if threshkind == 'soft':
        threshf = _softthreshold
    elif threshkind == 'hard':
        threshf = _hardthreshold
    elif threshkind == 'half':
        threshf = _halfthreshold
    elif threshkind == 'hard-percentile':
        threshf = _hardthreshold_percentile
    elif threshkind == 'soft-percentile':
        threshf = _softthreshold_percentile
    else:
        threshf = _halfthreshold_percentile

    if show:
        tstart = time.time()
        print('FISTA optimization (%s thresholding)\n'
              '-----------------------------------------------------------\n'
              'The Operator Op has %d rows and %d cols\n'
              'eps = %10e\ttol = %10e\tniter = %d' %
              (threshkind, Op.shape[0], Op.shape[1], eps, tol, niter))
    # step size
    if alpha is None:
        if not isinstance(Op, LinearOperator):
            Op = LinearOperator(Op, explicit=False)
        # compute largest eigenvalues of Op^H * Op
        Op1 = LinearOperator(Op.H * Op, explicit=False)
        maxeig = np.abs(
            Op1.eigs(neigs=1,
                     symmetric=True,
                     niter=eigsiter,
                     **dict(tol=eigstol, which='LM')))[0]
        alpha = 1. / maxeig

    # define threshold
    thresh = eps * alpha * 0.5

    if show:
        if perc is None:
            print('alpha = %10e\tthresh = %10e' % (alpha, thresh))
        else:
            print('alpha = %10e\tperc = %.1f' % (alpha, perc))
        print('-----------------------------------------------------------\n')
        head1 = '   Itn       x[0]        r2norm     r12norm     xupdate'
        print(head1)

    # initialize model and cost function
    xinv = np.zeros(Op.shape[1], dtype=Op.dtype)
    zinv = xinv.copy()
    t = 1
    if returninfo:
        cost = np.zeros(niter + 1)

    # iterate
    for iiter in range(niter):
        xinvold = xinv.copy()

        # compute residual
        resz = data - Op.matvec(zinv)

        # compute gradient
        grad = alpha * Op.rmatvec(resz)

        # update inverted model
        xinv_unthesh = zinv + grad
        if perc is None:
            xinv = threshf(xinv_unthesh, thresh)
        else:
            xinv = threshf(xinv_unthesh, 100 - perc)

        # update auxiliary coefficients
        told = t
        t = (1. + np.sqrt(1. + 4. * t**2)) / 2.
        zinv = xinv + ((told - 1.) / t) * (xinv - xinvold)

        # model update
        xupdate = np.linalg.norm(xinv - xinvold)

        if returninfo or show:
            costdata = 0.5 * np.linalg.norm(data - Op.matvec(xinv))**2
            costreg = eps * np.linalg.norm(xinv, ord=1)
        if returninfo:
            cost[iiter] = costdata + costreg

        # run callback
        if callback is not None:
            callback(xinv)

        if show:
            if iiter < 10 or niter - iiter < 10 or iiter % 10 == 0:
                msg = '%6g  %12.5e  %10.3e   %9.3e  %10.3e' % \
                      (iiter+1, xinv[0], costdata, costdata+costreg, xupdate)
                print(msg)

        # check tolerance
        if xupdate < tol:
            niter = iiter
            break

    # get values pre-threshold  at locations where xinv is different from zero
    # xinv = np.where(xinv != 0, xinv_unthesh, xinv)

    if show:
        print('\nIterations = %d        Total time (s) = %.2f' %
              (niter, time.time() - tstart))
        print('---------------------------------------------------------\n')
    if returninfo:
        return xinv, niter, cost[:niter]
    else:
        return xinv, niter
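
A hedged end-to-end sketch of calling the solver above on a small sparse recovery problem; the module path pylops.optimization.sparsity, the sizes, and the seed are illustrative assumptions:

import numpy as np
import pylops
from pylops.optimization.sparsity import FISTA  # assumed module path

np.random.seed(0)
N, M = 50, 100  # underdetermined system, N < M
Aop = pylops.MatrixMult(np.random.randn(N, M))

x = np.zeros(M)
x[np.random.permutation(M)[:5]] = 1.0  # 5-sparse model
y = Aop * x

# soft thresholding (the default); returninfo=True also returns the cost history
xinv, niter_eff, cost = FISTA(Aop, y, niter=200, eps=1e-1, returninfo=True)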
Example #8
def ISTA(Op,
         data,
         niter,
         eps=0.1,
         alpha=None,
         eigsiter=None,
         eigstol=0,
         tol=1e-10,
         monitorres=False,
         returninfo=False,
         show=False,
         threshkind='soft',
         perc=None,
         callback=None):
    r"""Iterative Shrinkage-Thresholding Algorithm (ISTA).

    Solve an optimization problem with :math:`Lp, \quad p=0, 1/2, 1`
    regularization, given the operator ``Op`` and data ``y``. The operator
    can be real or complex, and should ideally be either square :math:`N=M`
    or underdetermined :math:`N<M`.

    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        Operator to invert
    data : :obj:`numpy.ndarray`
        Data
    niter : :obj:`int`
        Number of iterations
    eps : :obj:`float`, optional
        Sparsity damping
    alpha : :obj:`float`, optional
        Step size. :math:`\alpha \le 1/\lambda_{max}(\mathbf{Op}^H\mathbf{Op})`
        guarantees convergence. If ``None``, the maximum eigenvalue is
        estimated and the optimal step size is chosen; if provided, the
        condition is not checked internally.
    eigsiter : :obj:`float`, optional
        Number of iterations for eigenvalue estimation if ``alpha=None``
    eigstol : :obj:`float`, optional
        Tolerance for eigenvalue estimation if ``alpha=None``
    tol : :obj:`float`, optional
        Tolerance. Stop iterations if difference between inverted model
        at subsequent iterations is smaller than ``tol``
    monitorres : :obj:`bool`, optional
        Monitor that residual is decreasing
    returninfo : :obj:`bool`, optional
        Return info of CG solver
    show : :obj:`bool`, optional
        Display iterations log
    threshkind : :obj:`str`, optional
        Kind of thresholding ('hard', 'soft', 'half', 'hard-percentile',
        'soft-percentile', or 'half-percentile' - 'soft' used as default)
    perc : :obj:`float`, optional
        Percentile, as percentage of values to be kept by thresholding (to be
        provided when thresholding is soft-percentile or half-percentile)
    callback : :obj:`callable`, optional
        Function with signature (``callback(x)``) to call after each iteration
        where ``x`` is the current model vector

    Returns
    -------
    xinv : :obj:`numpy.ndarray`
        Inverted model
    niter : :obj:`int`
        Number of effective iterations
    cost : :obj:`numpy.ndarray`, optional
        History of cost function

    Raises
    ------
    NotImplementedError
        If ``threshkind`` is different from hard, soft, half, hard-percentile,
        soft-percentile, or half-percentile
    ValueError
        If ``perc=None`` when ``threshkind`` is hard-percentile,
        soft-percentile, or half-percentile
    ValueError
        If ``monitorres=True`` and residual increases

    See Also
    --------
    OMP: Orthogonal Matching Pursuit (OMP).
    FISTA: Fast Iterative Shrinkage-Thresholding Algorithm (FISTA).
    SPGL1: Spectral Projected-Gradient for L1 norm (SPGL1).
    SplitBregman: Split Bregman for mixed L2-L1 norms.

    Notes
    -----
    Solves the following optimization problem for the operator
    :math:`\mathbf{Op}` and the data :math:`\mathbf{d}`:

    .. math::
        J = ||\mathbf{d} - \mathbf{Op} \mathbf{x}||_2^2 +
            \epsilon ||\mathbf{x}||_p

    using the Iterative Shrinkage-Thresholding Algorithms (ISTA) [1]_, where
    :math:`p=0, 1, 1/2`. This is a very simple iterative algorithm which
    applies the following step:

    .. math::
        \mathbf{x}^{(i+1)} = T_{(\epsilon \alpha /2, p)} (\mathbf{x}^{(i)} +
        \alpha \mathbf{Op}^H (\mathbf{d} - \mathbf{Op} \mathbf{x}^{(i)}))

    where :math:`\epsilon \alpha /2` is the threshold and :math:`T_{(\tau, p)}`
    is the thresholding rule. The most common variant of ISTA uses the
    so-called soft-thresholding rule :math:`T(\tau, p=1)`. Alternatively, a
    hard-thresholding rule is used in the case of :math:`p=0` and a
    half-thresholding rule in the case of :math:`p=1/2`. Finally,
    percentile-based thresholds are also implemented: the damping factor is no
    longer used and the threshold changes at every iteration based on the
    computed percentile.

    .. [1] Daubechies, I., Defrise, M., and De Mol, C., “An iterative
       thresholding algorithm for linear inverse problems with a sparsity
       constraint”, Communications on pure and applied mathematics, vol. 57,
       pp. 1413-1457. 2004.

    """
    if threshkind not in [
            'hard', 'soft', 'half', 'hard-percentile', 'soft-percentile',
            'half-percentile'
    ]:
        raise NotImplementedError('threshkind should be hard, soft, half, '
                                  'hard-percentile, soft-percentile, '
                                  'or half-percentile')
    if threshkind in ['hard-percentile', 'soft-percentile',
                      'half-percentile'] and perc is None:
        raise ValueError('Provide a percentile when choosing hard-percentile, '
                         'soft-percentile, or half-percentile thresholding')

    # choose thresholding function
    if threshkind == 'soft':
        threshf = _softthreshold
    elif threshkind == 'hard':
        threshf = _hardthreshold
    elif threshkind == 'half':
        threshf = _halfthreshold
    elif threshkind == 'hard-percentile':
        threshf = _hardthreshold_percentile
    elif threshkind == 'soft-percentile':
        threshf = _softthreshold_percentile
    else:
        threshf = _halfthreshold_percentile

    if show:
        tstart = time.time()
        print('ISTA optimization (%s thresholding)\n'
              '-----------------------------------------------------------\n'
              'The Operator Op has %d rows and %d cols\n'
              'eps = %10e\ttol = %10e\tniter = %d' %
              (threshkind, Op.shape[0], Op.shape[1], eps, tol, niter))
    # step size
    if alpha is None:
        if not isinstance(Op, LinearOperator):
            Op = LinearOperator(Op, explicit=False)
        # compute largest eigenvalues of Op^H * Op
        Op1 = LinearOperator(Op.H * Op, explicit=False)
        maxeig = np.abs(
            Op1.eigs(neigs=1,
                     symmetric=True,
                     niter=eigsiter,
                     **dict(tol=eigstol, which='LM')))[0]
        alpha = 1. / maxeig

    # define threshold
    thresh = eps * alpha * 0.5

    if show:
        if perc is None:
            print('alpha = %10e\tthresh = %10e' % (alpha, thresh))
        else:
            print('alpha = %10e\tperc = %.1f' % (alpha, perc))
        print('-----------------------------------------------------------\n')
        head1 = '   Itn       x[0]        r2norm     r12norm     xupdate'
        print(head1)

    # initialize model and cost function
    xinv = np.zeros(Op.shape[1], dtype=Op.dtype)
    if monitorres:
        normresold = np.inf
    if returninfo:
        cost = np.zeros(niter + 1)

    # iterate
    for iiter in range(niter):
        xinvold = xinv.copy()

        # compute residual
        res = data - Op.matvec(xinv)
        if monitorres:
            normres = np.linalg.norm(res)
            if normres > normresold:
                raise ValueError('ISTA stopped at iteration %d due to '
                                 'residual increasing, consider modifying '
                                 'eps and/or alpha...' % iiter)
            else:
                normresold = normres

        # compute gradient
        grad = alpha * Op.rmatvec(res)

        # update inverted model
        xinv_unthesh = xinv + grad
        if perc is None:
            xinv = threshf(xinv_unthesh, thresh)
        else:
            xinv = threshf(xinv_unthesh, 100 - perc)

        # model update
        xupdate = np.linalg.norm(xinv - xinvold)

        if returninfo or show:
            costdata = 0.5 * np.linalg.norm(res)**2
            costreg = eps * np.linalg.norm(xinv, ord=1)
        if returninfo:
            cost[iiter] = costdata + costreg

        # run callback
        if callback is not None:
            callback(xinv)

        if show:
            if iiter < 10 or niter - iiter < 10 or iiter % 10 == 0:
                msg = '%6g  %12.5e  %10.3e   %9.3e  %10.3e' % \
                      (iiter+1, xinv[0], costdata, costdata+costreg, xupdate)
                print(msg)

        # check tolerance
        if xupdate < tol:
            logging.warning('update smaller than tolerance at '
                            'iteration %d' % iiter)
            niter = iiter
            break

    # get values pre-threshold at locations where xinv is different from zero
    # xinv = np.where(xinv != 0, xinv_unthesh, xinv)

    if show:
        print('\nIterations = %d        Total time (s) = %.2f' %
              (niter, time.time() - tstart))
        print('---------------------------------------------------------\n')
    if returninfo:
        return xinv, niter, cost[:niter]
    else:
        return xinv, niter
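
The private helpers _softthreshold and _hardthreshold used above are not shown in this listing; the following are plain-NumPy sketches of the classical rules described in the Notes (the library's internal helpers may differ in details such as the exact threshold scaling):

import numpy as np

def soft_threshold(x, tau):
    # T(tau, p=1): shrink every entry towards zero by tau
    return np.sign(x) * np.maximum(np.abs(x) - tau, 0.0)

def hard_threshold(x, tau):
    # T(tau, p=0): zero out entries whose magnitude is below tau
    return x * (np.abs(x) > tau)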
Example #9
def OMP(Op,
        data,
        niter_outer=10,
        niter_inner=40,
        sigma=1e-4,
        normalizecols=False,
        show=False):
    r"""Orthogonal Matching Pursuit (OMP).

    Solve an optimization problem with :math:`L0` regularization function given
    the operator ``Op`` and data ``y``. The operator can be real or complex,
    and should ideally be either square :math:`N=M` or underdetermined
    :math:`N<M`.

    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        Operator to invert
    data : :obj:`numpy.ndarray`
        Data
    niter_outer : :obj:`int`
        Number of iterations of outer loop
    niter_inner : :obj:`int`
        Number of iterations of inner loop
    sigma : :obj:`float`
        Maximum :math:`L_2` norm of residual. Iterations stop when the
        residual norm falls below this value.
    normalizecols : :obj:`bool`
        Normalize columns (``True``) or not (``False``). Note that this can be
        expensive as it requires applying the forward operator
        :math:`n_{cols}` times to unit vectors (i.e., containing 1 at
        position j and zero otherwise); use only when the columns of the
        operator are expected to have highly varying norms.
    show : :obj:`bool`, optional
        Display iterations log

    Returns
    -------
    xinv : :obj:`numpy.ndarray`
        Inverted model
    iiter : :obj:`int`
        Number of effective outer iterations
    cost : :obj:`numpy.ndarray`, optional
        History of cost function

    See Also
    --------
    ISTA: Iterative Shrinkage-Thresholding Algorithm (ISTA).
    FISTA: Fast Iterative Shrinkage-Thresholding Algorithm (FISTA).
    SPGL1: Spectral Projected-Gradient for L1 norm (SPGL1).
    SplitBregman: Split Bregman for mixed L2-L1 norms.

    Notes
    -----
    Solves the following optimization problem for the operator
    :math:`\mathbf{Op}` and the data :math:`\mathbf{d}`:

    .. math::
            ||\mathbf{x}||_0 \quad \text{subj. to} \quad
            ||\mathbf{Op}\mathbf{x}-\mathbf{b}||_2 \leq \sigma,

    using Orthogonal Matching Pursuit (OMP). This is a very
    simple iterative algorithm which applies the following step:

    .. math::
        \Lambda_k = \Lambda_{k-1} \cup \left\{ \operatorname*{arg\,max}_j
        \left| \mathbf{Op}_j^H \mathbf{r}_k \right| \right\} \\
        \mathbf{x}_k = \operatorname*{arg\,min}_{\mathbf{x}}
        ||\mathbf{Op}_{\Lambda_k} \mathbf{x} - \mathbf{b}||_2

    """
    Op = LinearOperator(Op)
    if show:
        tstart = time.time()
        print(
            'OMP optimization\n'
            '-----------------------------------------------------------------\n'
            'The Operator Op has %d rows and %d cols\n'
            'sigma = %.2e\tniter_outer = %d\tniter_inner = %d\n'
            'normalization=%s' % (Op.shape[0], Op.shape[1], sigma, niter_outer,
                                  niter_inner, normalizecols))
    # find normalization factor for each column
    if normalizecols:
        ncols = Op.shape[1]
        norms = np.zeros(ncols)
        for icol in range(ncols):
            unit = np.zeros(ncols)
            unit[icol] = 1
            norms[icol] = np.linalg.norm(Op.matvec(unit))
    if show:
        print(
            '-----------------------------------------------------------------'
        )
        head1 = '    Itn           r2norm'
        print(head1)

    cols = []
    res = data.copy()
    cost = np.zeros(niter_outer + 1)
    cost[0] = np.linalg.norm(data)
    iiter = 0
    while iiter < niter_outer and cost[iiter] > sigma:
        cres = np.abs(Op.rmatvec(res))
        if normalizecols:
            cres = cres / norms
        # exclude columns already chosen by putting them negative
        if iiter > 0:
            cres[cols] = -1
        # choose column with max cres
        imax = np.argwhere(cres == np.max(cres)).ravel()
        nimax = len(imax)
        if nimax > 0:
            imax = imax[np.random.permutation(nimax)[0]]
        else:
            imax = imax[0]
        cols.append(imax)

        # estimate model for current set of columns
        Opcol = Op.apply_columns(cols)
        x = lsqr(Opcol, data, iter_lim=niter_inner)[0]
        res = data - Opcol.matvec(x)
        iiter += 1
        cost[iiter] = np.linalg.norm(res)
        if show:
            if iiter < 10 or niter_outer - iiter < 10 or iiter % 10 == 0:
                msg = '%6g        %12.5e' % (iiter + 1, cost[iiter])
                print(msg)
    xinv = np.zeros(Op.shape[1], dtype=Op.dtype)
    xinv[cols] = x
    if show:
        print('\nIterations = %d        Total time (s) = %.2f' %
              (iiter, time.time() - tstart))
        print(
            '-----------------------------------------------------------------\n'
        )
    return xinv, iiter, cost
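
A usage sketch for the solver above on a toy dense problem; the sizes, seed, and module path are illustrative assumptions:

import numpy as np
import pylops
from pylops.optimization.sparsity import OMP  # assumed module path

np.random.seed(42)
N, M = 40, 80
Aop = pylops.MatrixMult(np.random.randn(N, M))

x = np.zeros(M)
x[[3, 20, 65]] = [1.0, -2.0, 0.5]  # 3-sparse model
y = Aop * x

xinv, nouter, cost = OMP(Aop, y, niter_outer=10, niter_inner=40, sigma=1e-8)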
Example #10
def PhaseShift(vel, dz, nt, freq, kx, ky=None, dtype="float64"):
    r"""Phase shift operator

    Apply positive (forward) phase shift with constant velocity in
    forward mode, and negative (backward) phase shift with constant velocity in
    adjoint mode. Input model and data should be 2- or 3-dimensional arrays
    in time-space domain of size :math:`[n_t \times n_x \;(\times n_y)]`.

    Parameters
    ----------
    vel : :obj:`float`
        Constant propagation velocity
    dz : :obj:`float`
        Depth step
    nt : :obj:`int`
        Number of time samples of model and data
    freq : :obj:`numpy.ndarray`
        Positive frequency axis
    kx : :obj:`numpy.ndarray`
        Horizontal wavenumber axis (centered around 0) of size
        :math:`[n_x \times 1]`.
    ky : :obj:`numpy.ndarray`, optional
        Second horizontal wavenumber axis for 3d phase shift
        (centered around 0) of size :math:`[n_y \times 1]`.
    dtype : :obj:`str`, optional
        Type of elements in input array

    Returns
    -------
    Pop : :obj:`pylops.LinearOperator`
        Phase shift operator

    Notes
    -----
    The phase shift operator implements a one-way wave equation forward
    propagation in frequency-wavenumber domain by applying the following
    transformation to the input model:

    .. math::
        d(f, k_x, k_y) = m(f, k_x, k_y)
        e^{-j \Delta z \sqrt{\omega^2/v^2 - k_x^2 - k_y^2}}

    where :math:`v` is the constant propagation velocity and
    :math:`\Delta z` is the propagation depth. In adjoint mode, the data is
    propagated backward using the following transformation:

    .. math::
        m(f, k_x, k_y) = d(f, k_x, k_y)
        e^{j \Delta z \sqrt{\omega^2/v^2 - k_x^2 - k_y^2}}

    Effectively, the input model and data are assumed to be in time-space
    domain and forward Fourier transform is applied to both dimensions, leading
    to the following operator:

    .. math::
        \mathbf{d} = \mathbf{F}^H_t \mathbf{F}^H_x  \mathbf{P}
            \mathbf{F}_x \mathbf{F}_t \mathbf{m}

    where :math:`\mathbf{P}` performs the phase-shift as discussed above.

    """
    dtypefft = (np.ones(1, dtype=dtype) + 1j * np.ones(1, dtype=dtype)).dtype
    if ky is None:
        dims = (nt, kx.size)
        dimsfft = (freq.size, kx.size)
    else:
        dims = (nt, kx.size, ky.size)
        dimsfft = (freq.size, kx.size, ky.size)
    Fop = FFT(dims, dir=0, nfft=nt, real=True, dtype=dtype)
    Kxop = FFT(
        dimsfft, dir=1, nfft=kx.size, real=False, fftshift_after=True, dtype=dtypefft
    )
    if ky is not None:
        Kyop = FFT(
            dimsfft,
            dir=2,
            nfft=ky.size,
            real=False,
            fftshift_after=True,
            dtype=dtypefft,
        )
    Pop = _PhaseShift(vel, dz, freq, kx, ky, dtypefft)
    if ky is None:
        Pop = Fop.H * Kxop * Pop * Kxop.H * Fop
    else:
        Pop = Fop.H * Kxop * Kyop * Pop * Kyop.H * Kxop.H * Fop
    # Recasting of type is required to avoid FFT operators to cast to complex.
    # We know this is correct because forward and inverse FFTs are applied at
    # the beginning and end of this combined operator
    Pop.dtype = dtype
    return LinearOperator(Pop)
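
A construction sketch for the operator above; the axis conventions shown (NumPy FFT helpers, kx centered around 0) and all sizes are assumptions chosen to match the docstring, not a verified recipe:

import numpy as np
import pylops

nt, nx = 128, 64
dt, dx = 0.004, 10.0
freq = np.fft.rfftfreq(nt, dt)                 # positive frequency axis
kx = np.fft.fftshift(np.fft.fftfreq(nx, dx))   # wavenumber axis centered around 0

Pop = pylops.waveeqprocessing.PhaseShift(vel=1500.0, dz=20.0, nt=nt,
                                         freq=freq, kx=kx)
d = np.random.randn(nt, nx)
dshift = (Pop * d.ravel()).reshape(nt, nx)     # data propagated down by dz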
Example #11
def Interp(M, iava, dims=None, dir=0, kind='linear', dtype='float64'):
    r"""Interpolation operator.

    Apply interpolation along direction ``dir``
    from a regularly sampled input vector onto fractional positions ``iava``
    using one of the following algorithms:

    - *Nearest neighbour* interpolation
      is a thin wrapper around :obj:`pylops.Restriction` at ``np.round(iava)``
      locations.

    - *Linear interpolation* extracts values from input vector
      at locations ``np.floor(iava)`` and ``np.floor(iava)+1`` and linearly
      combines them in forward mode, places weighted versions of the
      interpolated values at locations ``np.floor(iava)`` and
      ``np.floor(iava)+1`` in an otherwise zero vector in adjoint mode.

    - *Sinc interpolation* performs sinc interpolation at locations ``iava``.
      Note that this is the most accurate method but it has higher computational
      cost as it involves multiplying the input data by a matrix of size
      :math:`N \times M`.

    .. note:: The vector ``iava`` should contain unique values. If the same
      index is repeated twice an error will be raised. This also applies to
      values beyond the last element of the input array for
      *linear interpolation*, as those values are forced to be just before
      this element.

    Parameters
    ----------
    M : :obj:`int`
        Number of samples in model.
    iava : :obj:`list` or :obj:`numpy.ndarray`
         Floating indices of locations of available samples for interpolation.
    dims : :obj:`list`, optional
        Number of samples for each dimension
        (``None`` if only one dimension is available)
    dir : :obj:`int`, optional
        Direction along which restriction is applied.
    kind : :obj:`str`, optional
        Kind of interpolation (``nearest``, ``linear``, and ``sinc`` are
        currently supported)
    dtype : :obj:`str`, optional
        Type of elements in input array.

    Returns
    -------
    op : :obj:`pylops.LinearOperator`
        Linear interpolation operator
    iava : :obj:`list` or :obj:`numpy.ndarray`
        Corrected indices of locations of available samples
        (samples at ``M-1`` or beyond are forced to be at ``M-1-eps``)

    Raises
    ------
    ValueError
        If the vector ``iava`` contains repeated values.
    NotImplementedError
        If ``kind`` is not ``nearest``, ``linear`` or ``sinc``

    See Also
    --------
    pylops.Restriction : Restriction operator

    Notes
    -----
    *Linear interpolation* of a subset of :math:`N` values at locations
    ``iava`` from an input (or model) vector :math:`\mathbf{x}` of size
    :math:`M` can be expressed as:

    .. math::

        y_i = (1-w_i) x_{l^{l}_i} + w_i x_{l^{r}_i}
        \quad \forall i=1,2,...,N

    where :math:`\mathbf{l^l}=[\lfloor l_1 \rfloor, \lfloor l_2 \rfloor,...,
    \lfloor l_N \rfloor]` and :math:`\mathbf{l^r}=[\lfloor l_1 \rfloor +1,
    \lfloor l_2 \rfloor +1,...,
    \lfloor l_N \rfloor +1]` are vectors containing the indices
    of the original array at which samples are taken, and
    :math:`\mathbf{w}=[l_1 - \lfloor l_1 \rfloor, l_2 - \lfloor l_2 \rfloor,
    ..., l_N - \lfloor l_N \rfloor]` are the linear interpolation weights.
    This operator can be implemented by simply summing two
    :class:`pylops.Restriction` operators which are weighted
    using :class:`pylops.basicoperators.Diagonal` operators.

    *Sinc interpolation* of a subset of :math:`N` values at locations
    ``iava`` from an input (or model) vector :math:`\mathbf{x}` of size
    :math:`M` can be expressed as:

    .. math::

        y_i = \sum_{j=0}^{M} x_j \operatorname{sinc}(i-j) \quad \forall i=1,2,...,N

    This operator can be implemented using the :class:`pylops.MatrixMult`
    operator with a matrix containing the values of the sinc function at all
    :math:`i,j` possible combinations.

    """
    if kind == 'nearest':
        interpop, iava = _nearestinterp(M, iava, dims=dims, dir=dir, dtype=dtype)
    elif kind == 'linear':
        interpop, iava = _linearinterp(M, iava, dims=dims, dir=dir, dtype=dtype)
    elif kind == 'sinc':
        interpop = _sincinterp(M, iava, dims=dims, dir=dir, dtype=dtype)
    else:
        raise NotImplementedError('kind must be nearest, linear or sinc')
    return LinearOperator(interpop), iava
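
A small sketch of linear interpolation with the operator above; the module path pylops.signalprocessing and the toy values are assumptions:

import numpy as np
import pylops

M = 10
x = np.arange(M, dtype=float)        # model on a regular grid: x_i = i
iava = np.array([0.5, 2.25, 7.75])   # fractional sampling locations

Iop, iava = pylops.signalprocessing.Interp(M, iava, kind='linear')
y = Iop * x   # for this linear ramp: [0.5, 2.25, 7.75]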
Example #12
def OMP(Op, data, niter_outer=10, niter_inner=40, sigma=1e-4,
        normalizecols=False, show=False):
    r"""Orthogonal Matching Pursuit (OMP).

    Solve an optimization problem with :math:`L0` regularization function given
    the operator ``Op`` and data ``y``. The operator can be real or complex,
    and should ideally be either square :math:`N=M` or underdetermined
    :math:`N<M`.

    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        Operator to invert
    data : :obj:`numpy.ndarray`
        Data
    niter_outer : :obj:`int`, optional
        Number of iterations of outer loop
    niter_inner : :obj:`int`, optional
        Number of iterations of inner loop. By choosing ``niter_inner=0``, the
        Matching Pursuit (MP) algorithm is implemented.
    sigma : :obj:`float`
        Maximum :math:`L_2` norm of residual. Iterations stop when the
        residual norm falls below this value.
    normalizecols : :obj:`bool`, optional
        Normalize columns (``True``) or not (``False``). Note that this can be
        expensive as it requires applying the forward operator
        :math:`n_{cols}` times to unit vectors (i.e., containing 1 at
        position j and zero otherwise); use only when the columns of the
        operator are expected to have highly varying norms.
    show : :obj:`bool`, optional
        Display iterations log

    Returns
    -------
    xinv : :obj:`numpy.ndarray`
        Inverted model
    iiter : :obj:`int`
        Number of effective outer iterations
    cost : :obj:`numpy.ndarray`, optional
        History of cost function

    See Also
    --------
    ISTA: Iterative Shrinkage-Thresholding Algorithm (ISTA).
    FISTA: Fast Iterative Shrinkage-Thresholding Algorithm (FISTA).
    SPGL1: Spectral Projected-Gradient for L1 norm (SPGL1).
    SplitBregman: Split Bregman for mixed L2-L1 norms.

    Notes
    -----
    Solves the following optimization problem for the operator
    :math:`\mathbf{Op}` and the data :math:`\mathbf{d}`:

    .. math::
            ||\mathbf{x}||_0 \quad \text{subj. to} \quad
            ||\mathbf{Op}\mathbf{x}-\mathbf{b}||_2^2 \leq \sigma,

    using Orthogonal Matching Pursuit (OMP). This is a very
    simple iterative algorithm which applies the following step:

    .. math::
        \Lambda_k = \Lambda_{k-1} \cup \left\{ \operatorname*{arg\,max}_j
        \left| \mathbf{Op}_j^H \mathbf{r}_k \right| \right\} \\
        \mathbf{x}_k = \operatorname*{arg\,min}_{\mathbf{x}}
        ||\mathbf{Op}_{\Lambda_k} \mathbf{x} - \mathbf{b}||_2^2

    Note that by choosing ``niter_inner=0`` the basic Matching Pursuit (MP)
    algorithm is implemented instead. In other words, instead of solving an
    optimization at each iteration to find the best :math:`\mathbf{x}` for the
    currently selected basis functions, the vector :math:`\mathbf{x}` is simply
    updated at the new basis function by directly taking the value of
    the inner product :math:`\mathbf{Op}_j^H \mathbf{r}_k`.

    In this case it is highly recommended to provide normalized basis
    functions. If different basis functions have different norms, the solver
    is likely to diverge. Similar observations apply to OMP, even though mild
    unbalancing between the basis functions is generally handled properly.

    """
    ncp = get_array_module(data)

    Op = LinearOperator(Op)
    if show:
        tstart = time.time()
        algname = 'OMP optimization\n' if niter_inner > 0 else 'MP optimization\n'
        print(algname +
              '-----------------------------------------------------------------\n'
              'The Operator Op has %d rows and %d cols\n'
              'sigma = %.2e\tniter_outer = %d\tniter_inner = %d\n'
              'normalization=%s' %
              (Op.shape[0], Op.shape[1], sigma, niter_outer,
               niter_inner, normalizecols))
    # find normalization factor for each column
    if normalizecols:
        ncols = Op.shape[1]
        norms = ncp.zeros(ncols)
        for icol in range(ncols):
            unit = ncp.zeros(ncols, dtype=Op.dtype)
            unit[icol] = 1
            norms[icol] = np.linalg.norm(Op.matvec(unit))
    if show:
        print('-----------------------------------------------------------------')
        head1 = '    Itn           r2norm'
        print(head1)

    if niter_inner == 0:
        x = []
    cols = []
    res = data.copy()
    cost = ncp.zeros(niter_outer + 1)
    cost[0] = np.linalg.norm(data)
    iiter = 0
    while iiter < niter_outer and cost[iiter] > sigma:
        # compute inner products
        cres = Op.rmatvec(res)
        cres_abs = np.abs(cres)
        if normalizecols:
            cres_abs = cres_abs / norms
        # choose column with max cres
        cres_max = np.max(cres_abs)
        imax = np.argwhere(cres_abs == cres_max).ravel()
        nimax = len(imax)
        if nimax > 0:
            imax = imax[np.random.permutation(nimax)[0]]
        else:
            imax = imax[0]
        # update active set
        if imax not in cols:
            addnew = True
            cols.append(int(imax))
        else:
            addnew = False
            imax_in_cols = cols.index(imax)

        # estimate model for current set of columns
        if niter_inner == 0:
            # MP update
            Opcol = Op.apply_columns([int(imax), ])
            res -= Opcol.matvec(cres[imax] * ncp.ones(1))
            if addnew:
                x.append(cres[imax])
            else:
                x[imax_in_cols] += cres[imax]
        else:
            # OMP update
            Opcol = Op.apply_columns(cols)
            if ncp == np:
                x = lsqr(Opcol, data, iter_lim=niter_inner)[0]
            else:
                x = cgls(Opcol, data, ncp.zeros(int(Opcol.shape[1]),
                                                dtype=Opcol.dtype),
                         niter=niter_inner)[0]
            res = data - Opcol.matvec(x)
        iiter += 1
        cost[iiter] = np.linalg.norm(res)
        if show:
            if iiter < 10 or niter_outer - iiter < 10 or iiter % 10 == 0:
                msg = '%6g        %12.5e' % (iiter + 1, cost[iiter])
                print(msg)
    xinv = ncp.zeros(int(Op.shape[1]), dtype=Op.dtype)
    xinv[cols] = ncp.array(x)
    if show:
        print('\nIterations = %d        Total time (s) = %.2f'
              % (iiter, time.time() - tstart))
        print(
            '-----------------------------------------------------------------\n')
    return xinv, iiter, cost
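
A sketch of the Matching Pursuit mode described in the Notes above (niter_inner=0), with columns normalized up front as the Notes recommend; the sizes, seed, and module path are illustrative assumptions:

import numpy as np
import pylops
from pylops.optimization.sparsity import OMP  # assumed module path

np.random.seed(1)
N, M = 40, 80
A = np.random.randn(N, M)
A /= np.linalg.norm(A, axis=0)  # normalized basis functions keep MP stable
Aop = pylops.MatrixMult(A)

x = np.zeros(M)
x[[5, 30]] = [1.0, -1.5]
y = Aop * x

# niter_inner=0 switches the solver from OMP to basic Matching Pursuit
xinv, nouter, cost = OMP(Aop, y, niter_outer=50, niter_inner=0, sigma=1e-6)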
Example #13
def FISTA(Op,
          data,
          niter,
          eps=0.1,
          alpha=None,
          eigsiter=None,
          eigstol=0,
          tol=1e-10,
          returninfo=False,
          show=False):
    r"""Fast Iterative Soft Thresholding Algorithm (FISTA).

    Solve an optimization problem with :math:`L1` regularization function given
    the operator ``Op`` and data ``y``. The operator can be real or complex,
    and should ideally be either square :math:`N=M` or underdetermined
    :math:`N<M`.

    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        Operator to invert
    data : :obj:`numpy.ndarray`
        Data
    niter : :obj:`int`
        Number of iterations
    eps : :obj:`float`, optional
        Sparsity damping
    alpha : :obj:`float`, optional
        Step size. :math:`\alpha \le 1/\lambda_{max}(\mathbf{Op}^H\mathbf{Op})`
        guarantees convergence. If ``None``, the step size is estimated to
        satisfy the condition; otherwise the condition is not checked.
    eigsiter : :obj:`int`, optional
        Number of iterations for eigenvalue estimation if ``alpha=None``
    eigstol : :obj:`float`, optional
        Tolerance for eigenvalue estimation if ``alpha=None``
    tol : :obj:`float`, optional
        Tolerance. Stop iterations if difference between inverted model
        at subsequent iterations is smaller than ``tol``
    returninfo : :obj:`bool`, optional
        Return info of FISTA solver
    show : :obj:`bool`, optional
        Display iterations log

    Returns
    -------
    xinv : :obj:`numpy.ndarray`
        Inverted model
    niter : :obj:`int`
        Number of effective iterations
    cost : :obj:`numpy.ndarray`, optional
        History of cost function

    See Also
    --------
    OMP: Orthogonal Matching Pursuit (OMP).
    ISTA: Iterative Soft Thresholding Algorithm (ISTA).
    SPGL1: Spectral Projected-Gradient for L1 norm (SPGL1).
    SplitBregman: Split Bregman for mixed L2-L1 norms.

    Notes
    -----
    Solves the following optimization problem for the operator
    :math:`\mathbf{Op}` and the data :math:`\mathbf{d}`:

    .. math::
        J = ||\mathbf{d} - \mathbf{Op} \mathbf{x}||_2^2 +
            \epsilon ||\mathbf{x}||_1

    using the Fast Iterative Soft Thresholding Algorithm (FISTA) [1]_. This is
    a modified version of the ISTA solver with improved convergence properties
    and limited additional computational cost.

    .. [1] Beck, A., and Teboulle, M., “A Fast Iterative Shrinkage-Thresholding
       Algorithm for Linear Inverse Problems”, SIAM Journal on
       Imaging Sciences, vol. 2, pp. 183-202. 2009.

    """
    if show:
        tstart = time.time()
        print('FISTA optimization\n'
              '-----------------------------------------------------------\n'
              'The Operator Op has %d rows and %d cols\n'
              'eps = %10e\ttol = %10e\tniter = %d' %
              (Op.shape[0], Op.shape[1], eps, tol, niter))
    # step size
    if alpha is None:
        if not isinstance(Op, LinearOperator):
            Op = LinearOperator(Op, explicit=False)
        # compute largest eigenvalues of Op^H * Op
        Op1 = LinearOperator(Op.H * Op, explicit=False)
        maxeig = np.abs(
            Op1.eigs(neigs=1,
                     symmetric=True,
                     niter=eigsiter,
                     **dict(tol=eigstol, which='LM')))[0]
        alpha = 1. / maxeig

    # define threshold
    thresh = eps * alpha * 0.5

    if show:
        print('alpha = %10e\tthresh = %10e' % (alpha, thresh))
        print('-----------------------------------------------------------\n')
        head1 = '   Itn       x[0]        r2norm     r12norm     xupdate'
        print(head1)

    # initialize model and cost function
    xinv = np.zeros(Op.shape[1], dtype=Op.dtype)
    zinv = xinv.copy()
    t = 1
    if returninfo:
        cost = np.zeros(niter + 1)

    # iterate
    for iiter in range(niter):
        xinvold = xinv.copy()

        # compute residual
        resz = data - Op.matvec(zinv)

        # compute gradient
        grad = alpha * Op.rmatvec(resz)

        # update inverted model
        xinv_unthesh = zinv + grad
        xinv = _softthreshold(xinv_unthesh, thresh)

        # update auxiliary coefficients
        told = t
        t = (1. + np.sqrt(1. + 4. * t**2)) / 2.
        zinv = xinv + ((told - 1.) / t) * (xinv - xinvold)

        # model update
        xupdate = np.linalg.norm(xinv - xinvold)

        if returninfo or show:
            costdata = 0.5 * np.linalg.norm(data - Op.matvec(xinv))**2
            costreg = eps * np.linalg.norm(xinv, ord=1)
        if returninfo:
            cost[iiter] = costdata + costreg

        if show:
            if iiter < 10 or niter - iiter < 10 or iiter % 10 == 0:
                msg = '%6g  %12.5e  %10.3e   %9.3e  %10.3e' % \
                      (iiter+1, xinv[0], costdata, costdata+costreg, xupdate)
                print(msg)

        # check tolerance
        if xupdate < tol:
            niter = iiter
            break

    # get values pre-threshold  at locations where xinv is different from zero
    #xinv = np.where(xinv != 0, xinv_unthesh, xinv)
    if show:
        print('\nIterations = %d        Total time (s) = %.2f' %
              (niter, time.time() - tstart))
        print('---------------------------------------------------------\n')
    if returninfo:
        return xinv, niter, cost[:niter]
    else:
        return xinv, niter
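
When alpha=None the solver above bounds the largest eigenvalue of Op^H Op via Op1.eigs; the following is a minimal power-iteration sketch of the same idea, for illustration only:

import numpy as np

def estimate_alpha(Op, niter=10):
    # power iteration on Op^H Op to approximate its largest eigenvalue
    x = np.random.randn(Op.shape[1])
    x /= np.linalg.norm(x)
    for _ in range(niter):
        x = Op.rmatvec(Op.matvec(x))
        x /= np.linalg.norm(x)
    maxeig = np.linalg.norm(Op.rmatvec(Op.matvec(x)))  # Rayleigh-type estimate
    return 1.0 / maxeig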
Example #14
def ISTA(Op, data, niter, eps=0.1, alpha=None, eigsiter=None, eigstol=0,
         tol=1e-10, monitorres=False, returninfo=False, show=False):
    r"""Iterative Soft Thresholding Algorithm (ISTA).

    Solve an optimization problem with :math:`L1` regularization function given
    the operator ``Op`` and data ``y``. The operator can be real or complex,
    and should ideally be either square :math:`N=M` or underdetermined
    :math:`N<M`.

    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        Operator to invert
    data : :obj:`numpy.ndarray`
        Data
    niter : :obj:`int`
        Number of iterations
    eps : :obj:`float`, optional
        Sparsity damping
    alpha : :obj:`float`, optional
        Step size. :math:`\alpha \le 1/\lambda_{max}(\mathbf{Op}^H\mathbf{Op})`
        guarantees convergence. If ``None``, the step size is estimated to
        satisfy the condition; otherwise the condition is not checked.
    eigsiter : :obj:`float`, optional
        Number of iterations for eigenvalue estimation if ``alpha=None``
    eigstol : :obj:`float`, optional
        Tolerance for eigenvalue estimation if ``alpha=None``
    tol : :obj:`float`, optional
        Tolerance. Stop iterations if difference between inverted model
        at subsequent iterations is smaller than ``tol``
    monitorres : :obj:`bool`, optional
        Monitor that residual is decreasing
    returninfo : :obj:`bool`, optional
        Return info of CG solver
    show : :obj:`bool`, optional
        Display iterations log

    Returns
    -------
    xinv : :obj:`numpy.ndarray`
        Inverted model
    niter : :obj:`int`
        Number of effective iterations
    cost : :obj:`numpy.ndarray`, optional
        History of cost function

    Raises
    ------
    ValueError
        If ``monitorres=True`` and residual increases

    See Also
    --------
    FISTA: Fast Iterative Soft Thresholding Algorithm (FISTA).

    Notes
    -----
    Solves the following optimization problem for the operator
    :math:`\mathbf{Op}` and the data :math:`\mathbf{d}`:

    .. math::
        J = ||\mathbf{d} - \mathbf{Op} \mathbf{x}||_2^2 +
            \epsilon ||\mathbf{x}||_1

    using the Iterative Soft Thresholding Algorithm (ISTA) [1]_. This is a very
    simple iterative algorithm which applies the following step:

    .. math::
        \mathbf{x}^{(i+1)} = \operatorname{soft} (\mathbf{x}^{(i)} + \alpha \mathbf{Op}^H
        (\mathbf{d} - \mathbf{Op} \mathbf{x}^{(i)}), \epsilon \alpha /2)

    where :math:`\epsilon \alpha /2` is the
    threshold and :math:`\operatorname{soft}(\cdot)` is the so-called
    soft-thresholding rule.

    .. [1] Beck, A., and Teboulle, M., “A Fast Iterative Shrinkage-Thresholding
       Algorithm for Linear Inverse Problems”, SIAM Journal on
       Imaging Sciences, vol. 2, pp. 183-202. 2009.

    """
    if show:
        tstart = time.time()
        print('ISTA optimization\n'
              '-----------------------------------------------------------\n'
              'The Operator Op has %d rows and %d cols\n'
              'eps = %10e\ttol = %10e\tniter = %d' % (Op.shape[0],
                                                      Op.shape[1],
                                                      eps, tol, niter))
    # step size
    if alpha is None:
        if not isinstance(Op, LinearOperator):
            Op = LinearOperator(Op, explicit=False)
        # compute largest eigenvalues of Op^H * Op
        Op1 = LinearOperator(Op.H * Op, explicit=False)
        maxeig = np.abs(Op1.eigs(neigs=1, symmetric=True, niter=eigsiter,
                                 **dict(tol=eigstol, which='LM')))[0]
        alpha = 1./maxeig

    # define threshold
    thresh = eps*alpha*0.5

    if show:
        print('alpha = %10e\tthresh = %10e' % (alpha, thresh))
        print('-----------------------------------------------------------\n')
        head1 = '   Itn       x[0]        r2norm     r12norm     xupdate'
        print(head1)

    # initialize model and cost function
    xinv = np.zeros(Op.shape[1], dtype=Op.dtype)
    if monitorres:
        normresold = np.inf
    if returninfo:
        cost = np.zeros(niter+1)

    # iterate
    for iiter in range(niter):
        xinvold = xinv.copy()

        # compute residual
        res = data - Op.matvec(xinv)
        if monitorres:
            normres = np.linalg.norm(res)
            if normres > normresold:
                raise ValueError('ISTA stopped at iteration %d due to '
                                 'residual increasing, consider modifying '
                                 'eps and/or alpha...' % iiter)
            else:
                normresold = normres

        # compute gradient
        grad = alpha*Op.rmatvec(res)

        # update inverted model
        xinv_unthesh = xinv + grad
        xinv = _softthreshold(xinv_unthesh, thresh)

        # model update
        xupdate = np.linalg.norm(xinv - xinvold)

        if returninfo or show:
            costdata = 0.5 * np.linalg.norm(res) ** 2
            costreg = eps * np.linalg.norm(xinv, ord=1)
        if returninfo:
            cost[iiter] = costdata + costreg

        if show:
            if iiter < 10 or niter - iiter < 10 or iiter % 10 == 0:
                msg = '%6g  %12.5e  %10.3e   %9.3e  %10.3e' % \
                      (iiter+1, xinv[0], costdata, costdata+costreg, xupdate)
                print(msg)

        # check tolerance
        if xupdate < tol:
            niter = iiter
            break

    # get values pre-threshold at locations where xinv is different from zero
    #xinv = np.where(xinv != 0, xinv_unthesh, xinv)
    if show:
        print('\nIterations = %d        Total time (s) = %.2f'
              % (niter, time.time() - tstart))
        print('---------------------------------------------------------\n')
    if returninfo:
        return xinv, niter, cost[:niter]
    else:
        return xinv, niter
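
A bare-bones dense-matrix sketch of the ISTA update rule written directly from the equations in the Notes above; an illustration, not the library implementation:

import numpy as np

def ista_dense(A, d, niter=100, eps=0.1):
    # alpha = 1 / lambda_max(A^H A) guarantees convergence
    alpha = 1.0 / np.linalg.norm(A, ord=2) ** 2
    thresh = eps * alpha * 0.5
    x = np.zeros(A.shape[1])
    for _ in range(niter):
        x = x + alpha * (A.conj().T @ (d - A @ x))             # gradient step
        x = np.sign(x) * np.maximum(np.abs(x) - thresh, 0.0)   # soft threshold
    return x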