Example #1
import numpy as np
from numpy.testing import assert_array_almost_equal

from pylops.basicoperators import Diagonal, MatrixMult
from pylops.optimization.leastsquares import NormalEquationsInversion


def test_NormalEquationsInversion(par):
    """Solve normal equations in least squares sense
    """
    np.random.seed(10)
    G = np.random.normal(0, 10, (par['ny'], par['nx'])).astype('float32') + \
        par['imag']*np.random.normal(0, 10,
                                     (par['ny'], par['nx'])).astype('float32')
    Gop = MatrixMult(G, dtype=par['dtype'])

    Reg = MatrixMult(np.eye(par['nx']), dtype=par['dtype'])
    Weight = Diagonal(np.ones(par['ny']), dtype=par['dtype'])
    x = np.ones(par['nx']) + par['imag']*np.ones(par['nx'])
    x0 = np.random.normal(0, 10, par['nx']) + \
         par['imag']*np.random.normal(0, 10, par['nx']) if par['x0'] else None
    y = Gop*x

    # normal equations with regularization
    xinv = NormalEquationsInversion(Gop, [Reg], y, epsI=0,
                                    epsRs=[1e-8], x0=x0,
                                    returninfo=False,
                                    **dict(maxiter=200, tol=1e-10))
    assert_array_almost_equal(x, xinv, decimal=3)
    # normal equations with weight
    xinv = NormalEquationsInversion(Gop, None, y, Weight=Weight, epsI=0,
                                    x0=x0, returninfo=False,
                                    **dict(maxiter=200, tol=1e-10))
    assert_array_almost_equal(x, xinv, decimal=3)
    # normal equations with weight and small regularization
    xinv = NormalEquationsInversion(Gop, [Reg], y, Weight=Weight, epsI=0,
                                    epsRs=[1e-8], x0=x0, returninfo=False,
                                    **dict(maxiter=200, tol=1e-10))
    assert_array_almost_equal(x, xinv, decimal=3)
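The test above is driven by a parametrized ``par`` dict. A minimal sketch of values that would exercise the real-valued path standalone (these fixture values are assumptions, not the project's actual parametrization):

# Assumed fixture values: real-valued operator, no initial guess
par = {'ny': 21, 'nx': 11, 'imag': 0, 'x0': False, 'dtype': 'float64'}
test_NormalEquationsInversion(par)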
Example #2
File: sparsity.py Project: fpicetti/pylops
def _IRLS_data(Op, data, nouter, threshR=False, epsR=1e-10,
               epsI=1e-10, x0=None, tolIRLS=1e-10,
               returnhistory=False, **kwargs_solver):
    r"""Iteratively reweighted least squares with L1 data term
    """
    ncp = get_array_module(data)  # numpy or cupy, matching where data lives

    if x0 is not None:
        data = data - Op * x0
    if returnhistory:
        xinv_hist = ncp.zeros((nouter + 1, int(Op.shape[1])))
        rw_hist = ncp.zeros((nouter + 1, int(Op.shape[0])))

    # first iteration (unweighted least-squares)
    xinv = NormalEquationsInversion(Op, None, data, epsI=epsI,
                                    returninfo=False,
                                    **kwargs_solver)
    r = data - Op * xinv
    if returnhistory:
        xinv_hist[0] = xinv
    for iiter in range(nouter):
        # other iterations (weighted least-squares)
        xinvold = xinv.copy()
        if threshR:
            rw = 1. / ncp.maximum(ncp.abs(r), epsR)
        else:
            rw = 1. / (ncp.abs(r) + epsR)
        rw = rw / rw.max()
        R = Diagonal(rw)
        xinv = NormalEquationsInversion(Op, [], data, Weight=R,
                                        epsI=epsI,
                                        returninfo=False,
                                        **kwargs_solver)
        r = data - Op * xinv
        # save history
        if returnhistory:
            rw_hist[iiter] = rw
            xinv_hist[iiter + 1] = xinv
        # check tolerance
        if ncp.linalg.norm(xinv - xinvold) < tolIRLS:
            nouter = iiter
            break

    # adding initial guess
    if x0 is not None:
        xinv = x0 + xinv
        if returnhistory:
            xinv_hist = x0 + xinv_hist

    if returnhistory:
        return xinv, nouter, xinv_hist[:nouter + 1], rw_hist[:nouter + 1]
    else:
        return xinv, nouter
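``_IRLS_data`` is module-private, but it can be driven directly. A minimal sketch, assuming a toy problem with one gross outlier (the setup values below are illustrative assumptions); the L1 data term progressively down-weights the corrupted sample:

import numpy as np
from pylops.basicoperators import MatrixMult

np.random.seed(0)
A = np.random.normal(0., 1., (40, 10))
Op = MatrixMult(A)
y = Op * np.ones(10)
y[5] += 50.  # single corrupted sample
xinv, niter = _IRLS_data(Op, y, nouter=20, epsR=1e-4,
                         **dict(maxiter=100, tol=1e-10))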
Example #3
    def __call__(self, x: Union[Number,
                                np.ndarray]) -> Union[Number, np.ndarray]:
        return NormalEquationsInversion(Op=self.LinOp.PyLop,
                                        Regs=None,
                                        data=x,
                                        epsI=self.eps,
                                        returninfo=False)
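This ``__call__`` applies a Tikhonov-damped pseudo-inverse of the wrapped operator to ``x``. A standalone sketch of the equivalent direct call, where ``A`` and the damping value stand in for ``self.LinOp.PyLop`` and ``self.eps``:

import numpy as np
from pylops.basicoperators import MatrixMult
from pylops.optimization.leastsquares import NormalEquationsInversion

A = MatrixMult(np.random.normal(0., 1., (20, 10)))
y = A * np.ones(10)
xhat = NormalEquationsInversion(Op=A, Regs=None, data=y,
                                epsI=1e-4, returninfo=False)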
Example #4
    def pinv(self,
             y: Union[Number, np.ndarray],
             eps: Number = 0,
             **kwargs) -> Union[Number, np.ndarray]:
        r"""
        Evaluate the pseudo-inverse of the operator at ``y``.

        Parameters
        ----------
        y: Union[Number, np.ndarray]
            Point at which the pseudo-inverse is evaluated.
        eps: Number
            Tikhonov damping.
        kwargs:
            Arbitrary keyword arguments accepted by
            :py:func:`pylops.optimization.leastsquares.NormalEquationsInversion`.

        Returns
        -------
        numpy.ndarray
            Evaluation of the pseudo-inverse of the operator at ``y``.

        Notes
        -----
        This is a thin wrapper around
        :py:func:`pylops.optimization.leastsquares.NormalEquationsInversion`;
        see that function's documentation for additional information.
        """
        return NormalEquationsInversion(Op=self.PyLop,
                                        Regs=None,
                                        data=y,
                                        epsI=eps,
                                        **kwargs,
                                        returninfo=False)
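A usage sketch (hypothetical: ``op`` stands for an instance of the owning class, which is not shown here; extra keyword arguments are forwarded to the underlying conjugate-gradient solver):

# `eps` trades data fit against stability of the pseudo-inverse
y_hat = op.pinv(y, eps=1e-3, **dict(maxiter=100, tol=1e-10))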
Example #5
def test_WeightedInversion(par):
    """Compare results for normal equations and regularized inversion
    when used to solve weighted least square inversion
    """
    np.random.seed(10)
    G = np.random.normal(0, 10, (par['ny'], par['nx'])).astype('float32') + \
        par['imag'] * np.random.normal(0, 10, (par['ny'], par['nx'])).astype(
            'float32')
    Gop = MatrixMult(G, dtype=par['dtype'])
    w = np.arange(par['ny'])
    # RegularizedInversion applies the weight to the residual directly,
    # so it takes the square root of the normal-equations weight
    w1 = np.sqrt(w)
    Weight = Diagonal(w, dtype=par['dtype'])
    Weight1 = Diagonal(w1, dtype=par['dtype'])
    x = np.ones(par['nx']) + par['imag'] * np.ones(par['nx'])
    y = Gop * x

    xne = NormalEquationsInversion(Gop, None, y, Weight=Weight,
                                   returninfo=False,
                                   **dict(maxiter=5, tol=1e-10))
    xreg = RegularizedInversion(Gop, None, y, Weight=Weight1,
                                returninfo=False,
                                **dict(damp=0, iter_lim=5, show=0))
    assert_array_almost_equal(xne, xreg, decimal=3)
Example #6
def test_skinnyregularization(par):
    """Solve inversion with a skinny regularization (rows are smaller than
    the number of elements in the model vector)
    """
    np.random.seed(10)
    d = np.arange(par['nx'] - 1).astype(par['dtype']) + 1.
    Dop = Diagonal(d, dtype=par['dtype'])
    Regop = HStack([Identity(par['nx'] // 2), Identity(par['nx'] // 2)])

    x = np.arange(par['nx'] - 1)
    y = Dop * x

    xinv = NormalEquationsInversion(Dop, [Regop], y, epsRs=[1e-4])
    assert_array_almost_equal(x, xinv, decimal=2)

    xinv = RegularizedInversion(Dop, [Regop], y, epsRs=[1e-4])
    assert_array_almost_equal(x, xinv, decimal=2)
Example #7
def IRLS(Op,
         data,
         nouter,
         threshR=False,
         epsR=1e-10,
         epsI=1e-10,
         x0=None,
         tolIRLS=1e-10,
         returnhistory=False,
         **kwargs_cg):
    r"""Iteratively reweighted least squares.

    Solve an optimization problem with :math:`L1` cost function given the
    operator ``Op`` and the data vector ``data``. The cost function is
    minimized by iteratively solving a weighted least-squares problem, with
    the weights at iteration :math:`i` computed from the data residual at
    iteration :math:`i-1`.

    The IRLS solver is robust to *outliers*, since the L1 norm gives less
    weight to large residuals than the L2 norm does.

    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        Operator to invert
    data : :obj:`numpy.ndarray`
        Data
    nouter : :obj:`int`
        Number of outer iterations
    threshR : :obj:`bool`, optional
        Apply thresholding in creation of weight (``True``)
        or damping (``False``)
    epsR : :obj:`float`, optional
        Damping to be applied to residuals for weighting term
    epsI : :obj:`float`, optional
        Tikhonov damping
    x0 : :obj:`numpy.ndarray`, optional
        Initial guess
    tolIRLS : :obj:`float`, optional
        Tolerance. Stop outer iterations if difference between inverted model
        at subsequent iterations is smaller than ``tolIRLS``
    returnhistory : :obj:`bool`, optional
        Return history of inverted model for each outer iteration of IRLS
    **kwargs_cg
        Arbitrary keyword arguments for
        :py:func:`scipy.sparse.linalg.cg` solver

    Returns
    -------
    xinv : :obj:`numpy.ndarray`
        Inverted model
    nouter : :obj:`int`
        Number of effective outer iterations
    xinv_hist : :obj:`numpy.ndarray`, optional
        History of inverted model
    rw_hist : :obj:`numpy.ndarray`, optional
        History of weights

    Notes
    -----
    Solves the following optimization problem for the operator
    :math:`\mathbf{Op}` and the data :math:`\mathbf{d}`:

    .. math::
        J = ||\mathbf{d} - \mathbf{Op} \mathbf{x}||_1

    via a set of outer iterations, each of which solves a
    weighted least-squares problem of the form:

    .. math::
        \mathbf{x}^{(i+1)} = \operatorname*{arg\,min}_\mathbf{x} ||\mathbf{d} -
        \mathbf{Op} \mathbf{x}||_{2, \mathbf{R}^{(i)}} +
        \epsilon_I^2 ||\mathbf{x}||

    where :math:`\mathbf{R}^{(i)}` is a diagonal weight matrix whose
    diagonal elements at iteration :math:`i` are the inverses of the
    absolute values of the residual vector
    :math:`\mathbf{r}^{(i)} = \mathbf{d} - \mathbf{Op}\,\mathbf{x}^{(i)}`.
    More specifically, the j-th element of the diagonal of
    :math:`\mathbf{R}^{(i)}` is

    .. math::
        R^{(i)}_{j,j} = \frac{1}{|r^{(i)}_j|+\epsilon_R}

    or

    .. math::
        R^{(i)}_{j,j} = \frac{1}{\max(|r^{(i)}_j|, \epsilon_R)}

    depending on the value of ``threshR``. In either case,
    :math:`\epsilon_R` is the user-defined stabilization/thresholding
    factor [1]_.

    .. [1] https://en.wikipedia.org/wiki/Iteratively_reweighted_least_squares

    """
    if x0 is not None:
        data = data - Op * x0
    if returnhistory:
        xinv_hist = np.zeros((nouter + 1, Op.shape[1]))
        rw_hist = np.zeros((nouter + 1, Op.shape[0]))

    # first iteration (unweighted least-squares)
    xinv = NormalEquationsInversion(Op,
                                    None,
                                    data,
                                    epsI=epsI,
                                    returninfo=False,
                                    **kwargs_cg)
    r = data - Op * xinv
    if returnhistory:
        xinv_hist[0] = xinv
    for iiter in range(nouter):
        # other iterations (weighted least-squares)
        xinvold = xinv.copy()
        if threshR:
            rw = 1. / np.maximum(np.abs(r), epsR)
        else:
            rw = 1. / (np.abs(r) + epsR)
        rw = rw / rw.max()
        R = Diagonal(rw)
        xinv = NormalEquationsInversion(Op, [],
                                        data,
                                        Weight=R,
                                        epsI=epsI,
                                        returninfo=False,
                                        **kwargs_cg)
        r = data - Op * xinv
        # save history
        if returnhistory:
            rw_hist[iiter] = rw
            xinv_hist[iiter + 1] = xinv
        # check tolerance
        if np.linalg.norm(xinv - xinvold) < tolIRLS:
            nouter = iiter
            break

    # adding initial guess
    if x0 is not None:
        xinv = x0 + xinv
        if returnhistory:
            xinv_hist = x0 + xinv_hist

    if returnhistory:
        return xinv, nouter, xinv_hist[:nouter + 1], rw_hist[:nouter + 1]
    else:
        return xinv, nouter
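A minimal usage sketch with assumed toy data: corrupt one sample and track how its weight decays across outer iterations via ``returnhistory``:

import numpy as np
from pylops.basicoperators import MatrixMult

np.random.seed(1)
A = np.random.normal(0., 1., (30, 8))
Op = MatrixMult(A)
y = Op * np.ones(8)
y[3] -= 20.  # one gross outlier
xinv, nouter, xhist, rwhist = IRLS(Op, y, nouter=15, epsR=1e-4,
                                   returnhistory=True,
                                   **dict(maxiter=100, tol=1e-10))
# each row of rwhist assigns the corrupted sample a progressively
# smaller weight, so it barely influences the final model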