Exemplo n.º 1
0
def FirstDirectionalDerivative(dims, v, sampling=1, edge=False,
                               dtype='float64'):
    r"""First Directional derivative.

    Apply a first-order directional derivative to a multi-dimensional array
    (at least 2 dimensions are required), either along one common direction
    for every point or along a per-point direction field.

    Parameters
    ----------
    dims : :obj:`tuple`
        Number of samples for each dimension.
    v : :obj:`np.ndarray`
        Single direction (array of size :math:`n_{dims}`) or group of
        directions (array of size
        :math:`[n_{dims} \times n_{d0} \times ... \times n_{n_{dims}}]`)
    sampling : :obj:`tuple`, optional
        Sampling steps for each direction.
    edge : :obj:`bool`, optional
        Use reduced order derivative at edges (``True``) or
        ignore them (``False``).
    dtype : :obj:`str`, optional
        Type of elements in input array.

    Returns
    -------
    ddop : :obj:`pylops.LinearOperator`
        First directional derivative linear operator

    Notes
    -----
    The FirstDirectionalDerivative applies a first-order derivative
    along the direction defined by the unitary vector :math:`\mathbf{v}`:

    .. math::
        df_\mathbf{v} = \nabla f \mathbf{v}

    or along the directions defined by the unitary vectors
    :math:`\mathbf{v}(x, y)`:

    .. math::
        df_\mathbf{v}(x,y) = \nabla f(x,y) \mathbf{v}(x,y)

    where the 2-dimensional case has been considered here.

    This operator is implemented as the chain of the
    :py:class:`pylops.Gradient` operator, a :py:class:`pylops.Diagonal`
    operator carrying :math:`\mathbf{v}`, and a sum over the gradient
    components.

    """
    # gradient stacks the derivatives along a new leading axis of
    # length len(dims)
    Gop = Gradient(dims, sampling=sampling, edge=edge, dtype=dtype)
    grad_dims = [len(dims)] + list(dims)
    if v.ndim == 1:
        # one common direction: broadcast it over the leading axis
        Dop = Diagonal(v, dims=grad_dims, dir=0, dtype=dtype)
    else:
        # per-point directions: element-wise scaling of the full gradient
        Dop = Diagonal(v.ravel(), dtype=dtype)
    # collapse the component axis to obtain the scalar directional derivative
    Sop = Sum(dims=grad_dims, dir=0, dtype=dtype)
    return Sop * Dop * Gop
Exemplo n.º 2
0
def _linearinterp(M, iava, dims=None, dir=0, dtype='float64'):
    """Linear interpolation.

    Build a linear-interpolation operator as a weighted sum of two
    :py:class:`pylops.Restriction` operators picking the neighbouring
    integer samples of each (fractional) index in ``iava``.

    Returns the operator and the (possibly clipped) ``iava`` array.
    """
    # ensure that samples are not beyond the last sample, in that case set to
    # penultimate sample and raise a warning
    if np.issubdtype(iava.dtype, np.integer):
        # np.float was removed from numpy (>=1.24); the builtin float is the
        # equivalent dtype alias
        iava = iava.astype(float)
    if dims is None:
        lastsample = M
        dimsd = None
    else:
        lastsample = dims[dir]
        dimsd = list(dims)
        dimsd[dir] = len(iava)
        dimsd = tuple(dimsd)

    outside = (iava >= lastsample - 1)
    if sum(outside) > 0:
        logging.warning('at least one value is beyond penultimate sample, '
                        'forced to be at penultimate sample')
    iava[outside] = lastsample - 1 - 1e-10
    _checkunique(iava)

    # find indices and weights of left/right neighbours
    # (np.int was removed from numpy; builtin int is the same alias)
    iva_l = np.floor(iava).astype(int)
    iva_r = iva_l + 1
    weights = iava - iva_l

    # create operators: out = (1-w) * x[floor] + w * x[floor+1]
    op = Diagonal(1 - weights, dims=dimsd, dir=dir, dtype=dtype) * \
         Restriction(M, iva_l, dims=dims, dir=dir, dtype=dtype) + \
         Diagonal(weights, dims=dimsd, dir=dir, dtype=dtype) * \
         Restriction(M, iva_r, dims=dims, dir=dir, dtype=dtype)
    return op, iava
Exemplo n.º 3
0
def test_WeightedInversion(par):
    """Compare results for normal equations and regularized inversion
    when used to solve weighted least square inversion
    """
    np.random.seed(10)
    G = np.random.normal(0, 10, (par['ny'], par['nx'])).astype('float32') + \
        par['imag'] * np.random.normal(0, 10, (par['ny'], par['nx'])).astype(
            'float32')
    Gop = MatrixMult(G, dtype=par['dtype'])
    # normal equations take the weight W, regularized inversion takes
    # sqrt(W) so that both minimize the same weighted objective
    w = np.arange(par['ny'])
    Weight = Diagonal(w, dtype=par['dtype'])
    Weightsqrt = Diagonal(np.sqrt(w), dtype=par['dtype'])
    x = np.ones(par['nx']) + par['imag'] * np.ones(par['nx'])
    y = Gop * x

    xne = NormalEquationsInversion(Gop, None, y, Weight=Weight,
                                   returninfo=False,
                                   **dict(maxiter=5, tol=1e-10))
    xreg = RegularizedInversion(Gop, None, y, Weight=Weightsqrt,
                                returninfo=False,
                                **dict(damp=0, iter_lim=5, show=0))
    # debug prints removed; the assertion below is the actual check
    assert_array_almost_equal(xne, xreg, decimal=3)
Exemplo n.º 4
0
def test_describe():
    """Smoke-test the describe method.

    The textual output is hard to validate automatically, so this test only
    checks that no error arises when describing a variety of operator
    compositions (including operators with duplicated names).
    """
    A = MatrixMult(np.ones((10, 5)))
    A.name = "A"
    B = Diagonal(np.ones(5))
    B.name = "A"
    C = MatrixMult(np.ones((10, 5)))
    C.name = "C"

    transposed = A.T
    adjoint = A.H
    scaled = 3 * A
    summed = A + C
    chained = summed * B
    combined = (A + C) * B + A
    hstacked = HStack((A * B, C * B))
    blockdiag = BlockDiag((combined, hstacked))

    for op in (A, transposed, adjoint, scaled, summed, chained,
               combined, hstacked, blockdiag):
        describe(op)
Exemplo n.º 5
0
def test_dense(par):
    """Dense matrix representation of square matrix
    """
    vals = np.arange(par['nx']) + par['imag'] * np.arange(par['nx'])
    Dop = Diagonal(vals, dtype=par['dtype'])
    # the operator's dense form must match the explicit diagonal matrix
    assert_array_equal(Dop.todense(), np.diag(vals))
Exemplo n.º 6
0
def test_sparse(par):
    """Sparse matrix representation"""
    diag = np.arange(par["nx"]) + par["imag"] * np.arange(par["nx"])
    D = np.diag(diag)
    Dop = Diagonal(diag, dtype=par["dtype"])
    S = Dop.tosparse()
    # use toarray() instead of the deprecated .A shorthand (removed for
    # sparse containers in recent SciPy releases)
    assert_array_equal(S.toarray(), D)
Exemplo n.º 7
0
def focusing_wrapper(direct, toff, g0VS, iava, Rop, R1op, Restrop, t):
    """Estimate focusing functions and up/down Green's functions.

    Builds time windows from the direct-arrival times, sets up the
    windowed Marchenko-style system with the supplied operators and solves
    it with LSQR.

    Parameters
    ----------
    direct : np.ndarray
        Direct arrival traveltimes, one per receiver (size ``nr``)
    toff : float
        Time offset subtracted from the direct arrival to build the window
    g0VS : np.ndarray
        Input wavefield used to build the initial focusing function
    iava : np.ndarray
        Indices of available receivers
    Rop, R1op, Restrop : LinearOperator
        Reflection, time-reversed reflection and restriction operators
    t : np.ndarray
        Time axis

    Returns
    -------
    f1_minus, f1_plus, g_minus, g_plus, p0_minus : np.ndarray
        Focusing functions, Green's functions and initial projected data
    """
    nr = direct.shape[0]
    nsava = iava.shape[0]

    nt = t.shape[0]
    dt = t[1] - t[0]

    # window: 1 before the (shifted) direct arrival, 0 after
    directVS_off = direct - toff
    # np.int was removed from numpy (>=1.24); builtin int is the same alias
    idirectVS_off = np.round(directVS_off / dt).astype(int)
    w = np.zeros((nr, nt))
    wi = np.ones((nr, nt))
    # NOTE(review): range(nr-1) leaves the window of the last receiver
    # all-zero — looks unintended but is preserved here; confirm
    for ir in range(nr - 1):
        w[ir, :idirectVS_off[ir]] = 1
    wi = wi - w  # complementary window

    # mirror to two-sided time axis (length 2*nt-1)
    w = np.hstack((np.fliplr(w), w[:, 1:]))
    wi = np.hstack((np.fliplr(wi), wi[:, 1:]))

    # smoothing of the window edges to avoid ringing
    nsmooth = 10
    if nsmooth > 0:
        smooth = np.ones(nsmooth) / nsmooth
        w = filtfilt(smooth, 1, w)
        wi = filtfilt(smooth, 1, wi)

    # Input focusing function (time-reversed input wavefield, zero-padded)
    fd_plus = np.concatenate((np.fliplr(g0VS.T), np.zeros((nr, nt - 1))),
                             axis=-1)

    # operators
    Wop = Diagonal(w.flatten())
    WSop = Diagonal(w[iava].flatten())
    WiSop = Diagonal(wi[iava].flatten())

    Mop = VStack([HStack([Restrop, -1 * WSop * Rop]),
                  HStack([-1 * WSop * R1op, Restrop])]) * \
          BlockDiag([Wop, Wop])

    Gop = VStack([HStack([Restrop, -1 * Rop]),
                  HStack([-1 * R1op, Restrop])])

    p0_minus = Rop * fd_plus.flatten()
    d = WSop * p0_minus

    p0_minus = p0_minus.reshape(nsava, 2 * nt - 1)
    d = np.concatenate((d.reshape(nsava, 2 * nt - 1),
                        np.zeros((nsava, 2 * nt - 1))))

    # solve for the focusing functions
    f1 = lsqr(Mop, d.flatten(), iter_lim=10, show=False)[0]
    f1 = f1.reshape(2 * nr, (2 * nt - 1))
    f1_tot = f1 + np.concatenate((np.zeros((nr, 2 * nt - 1)), fd_plus))

    # Green's functions from the total focusing functions
    g = BlockDiag([WiSop, WiSop]) * Gop * f1_tot.flatten()
    g = g.reshape(2 * nsava, (2 * nt - 1))

    f1_minus, f1_plus = f1_tot[:nr], f1_tot[nr:]
    g_minus, g_plus = -g[:nsava], np.fliplr(g[nsava:])

    return f1_minus, f1_plus, g_minus, g_plus, p0_minus
Exemplo n.º 8
0
def test_sparse(par):
    """Sparse matrix representation
    """
    diag = np.arange(par['nx']) +\
           par['imag'] * np.arange(par['nx'])
    D = np.diag(diag)
    Dop = Diagonal(diag, dtype=par['dtype'])
    S = Dop.tosparse()
    # use toarray() instead of the deprecated .A shorthand (removed for
    # sparse containers in recent SciPy releases)
    assert_array_equal(S.toarray(), D)
def test_skinnyregularization(par):
    """Solve inversion with a skinny regularization (rows are smaller than
    the number of elements in the model vector)
    """
    np.random.seed(10)
    nmod = par['nx'] - 1
    d = np.arange(nmod).astype(par['dtype']) + 1.
    Dop = Diagonal(d, dtype=par['dtype'])
    # regularization has nx//2 rows acting on a model of nx-1 elements
    half = par['nx'] // 2
    Regop = HStack([Identity(half), Identity(half)])

    x = np.arange(nmod)
    y = Dop * x

    # both solvers must recover the model despite the skinny regularization
    for solver in (NormalEquationsInversion, RegularizedInversion):
        xinv = solver(Dop, [Regop], y, epsRs=[1e-4])
        assert_array_almost_equal(x, xinv, decimal=2)
def test_Diagonal_3dsignal(par):
    """Dot-test and comparison with pylops for Diagonal operator on a
    3d signal (dask backend)
    """
    shape3d = (par['ny'], par['nx'], par['nt'])
    nelems = par['ny'] * par['nx'] * par['nt']
    cflag = 0 if par['imag'] == 0 else 3
    for axis, ddim in enumerate(shape3d):
        d = da.arange(ddim, chunks=ddim // 2) + 1. + \
            par['imag'] * (da.arange(ddim, chunks=ddim // 2) + 1.)

        dDop = dDiagonal(d,
                         dims=shape3d,
                         dir=axis,
                         compute=(True, True),
                         dtype=par['dtype'])
        assert dottest(dDop, nelems, nelems,
                       chunks=(nelems // 4, nelems // 4),
                       complexflag=cflag)

        x = da.ones(shape3d, chunks=(nelems // 4)) + \
            par['imag'] * da.ones(shape3d, chunks=(nelems // 4))
        Dop = Diagonal(d.compute(),
                       dims=shape3d,
                       dir=axis,
                       dtype=par['dtype'])
        # dask and in-memory versions must agree
        dy = dDop * x.flatten()
        y = Dop * x.compute().flatten()
        assert_array_almost_equal(dy, y, decimal=5)
Exemplo n.º 11
0
def test_NormalEquationsInversion(par):
    """Solve normal equations in least squares sense
    """
    np.random.seed(10)
    shape = (par['ny'], par['nx'])
    G = np.random.normal(0, 10, shape).astype('float32') + \
        par['imag'] * np.random.normal(0, 10, shape).astype('float32')
    Gop = MatrixMult(G, dtype=par['dtype'])

    Reg = MatrixMult(np.eye(par['nx']), dtype=par['dtype'])
    Wop = Diagonal(np.ones(par['ny']), dtype=par['dtype'])
    x = np.ones(par['nx']) + par['imag'] * np.ones(par['nx'])
    if par['x0']:
        x0 = np.random.normal(0, 10, par['nx']) + \
             par['imag'] * np.random.normal(0, 10, par['nx'])
    else:
        x0 = None
    y = Gop * x

    # three configurations: regularization only, weight only, and
    # weight plus small regularization
    configs = (
        dict(Regs=[Reg], epsRs=[1e-8]),
        dict(Regs=None, Weight=Wop),
        dict(Regs=[Reg], epsRs=[1e-8], Weight=Wop),
    )
    for cfg in configs:
        Regs = cfg.pop('Regs')
        xinv = NormalEquationsInversion(Gop, Regs, y, epsI=0, x0=x0,
                                        returninfo=False, maxiter=200,
                                        tol=1e-10, **cfg)
        assert_array_almost_equal(x, xinv, decimal=3)
Exemplo n.º 12
0
def test_eigs(par):
    """Eigenvalues and condition number estimate with ARPACK"""
    # explicit=True
    # decreasing diagonal nx..1 (optionally complex), padded with zero rows:
    # largest eigenvalue is nx and condition number is nx/1 = nx
    diag = np.arange(par["nx"], 0,
                     -1) + par["imag"] * np.arange(par["nx"], 0, -1)
    Op = MatrixMult(
        np.vstack((np.diag(diag), np.zeros(
            (par["ny"] - par["nx"], par["nx"])))))
    eigs = Op.eigs()
    assert_array_almost_equal(diag[:eigs.size], eigs, decimal=3)

    cond = Op.cond()
    assert_array_almost_equal(np.real(cond), par["nx"], decimal=3)

    # explicit=False: same matrix rebuilt from operator compositions
    Op = Diagonal(diag, dtype=par["dtype"])
    if par["ny"] > par["nx"]:
        Op = VStack([Op, Zero(par["ny"] - par["nx"], par["nx"])])
    eigs = Op.eigs()
    assert_array_almost_equal(diag[:eigs.size], eigs, decimal=3)

    # uselobpcg cannot be used for square non-symmetric complex matrices
    # NOTE(review): np.iscomplex(Op) on a LinearOperator object is always
    # False, so the two lobpcg branches below never execute — the intent
    # was presumably a dtype check (e.g. np.iscomplexobj(diag)); confirm
    # before changing, as enabling them would alter what this test covers
    if np.iscomplex(Op):
        eigs1 = Op.eigs(uselobpcg=True)
        assert_array_almost_equal(eigs, eigs1, decimal=3)

    cond = Op.cond()
    assert_array_almost_equal(np.real(cond), par["nx"], decimal=3)

    if np.iscomplex(Op):
        cond1 = Op.cond(uselobpcg=True, niter=100)
        assert_array_almost_equal(np.real(cond), np.real(cond1), decimal=3)
Exemplo n.º 13
0
def test_RegularizedInversion(par):
    """Solve regularized inversion in least squares sense
    """
    np.random.seed(10)
    shape = (par['ny'], par['nx'])
    G = np.random.normal(0, 10, shape).astype('float32') + \
        par['imag'] * np.random.normal(0, 10, shape).astype('float32')
    Gop = MatrixMult(G, dtype=par['dtype'])
    Reg = MatrixMult(np.eye(par['nx']), dtype=par['dtype'])
    Wop = Diagonal(np.ones(par['ny']), dtype=par['dtype'])
    x = np.ones(par['nx']) + par['imag'] * np.ones(par['nx'])
    if par['x0']:
        x0 = np.random.normal(0, 10, par['nx']) + \
             par['imag'] * np.random.normal(0, 10, par['nx'])
    else:
        x0 = None
    y = Gop * x

    # three configurations: regularization only, weight only, both
    for Regs, Weight in (([Reg], None), (None, Wop), ([Reg], Wop)):
        extra = dict(epsRs=[1e-8]) if Regs is not None else {}
        xinv = RegularizedInversion(Gop, Regs, y, Weight=Weight, x0=x0,
                                    returninfo=False, damp=0, iter_lim=200,
                                    show=0, **extra)
        assert_array_almost_equal(x, xinv, decimal=3)
Exemplo n.º 14
0
def _IRLS_model(Op, data, nouter, threshR=False, epsR=1e-10,
                epsI=1e-10, x0=None, tolIRLS=1e-10,
                returnhistory=False, **kwargs_solver):
    r"""Iteratively reweighted least squares with L1 model term

    Repeatedly solves a Tikhonov-damped system in the data domain; the
    model-space weights at each outer iteration are the normalized
    absolute values of the previous model estimate.

    Note: ``threshR`` and ``epsR`` are accepted here for signature
    compatibility with the data-term variant but are not used.
    """
    # pick numpy or cupy to match the input array's backend
    ncp = get_array_module(data)

    # work on the residual data if an initial guess is given; x0 is
    # added back at the end
    if x0 is not None:
        data = data - Op * x0
    if returnhistory:
        xinv_hist = ncp.zeros((nouter + 1, int(Op.shape[1])))
        rw_hist = ncp.zeros((nouter + 1, int(Op.shape[0])))

    Iop = Identity(data.size, dtype=data.dtype)
    # first iteration (unweighted least-squares) solved in the data domain:
    # x = Op^H (Op Op^H + epsI^2 I)^{-1} data
    if ncp == np:
        xinv = Op.H @ \
               lsqr(Op @ Op.H + (epsI ** 2) * Iop, data, **kwargs_solver)[0]
    else:
        # non-numpy backend: fall back to cgls with an explicit zero guess
        xinv = Op.H @ cgls(Op @ Op.H + (epsI ** 2) * Iop, data,
                           ncp.zeros(int(Op.shape[0]), dtype=Op.dtype),
                           **kwargs_solver)[0]
    if returnhistory:
        xinv_hist[0] = xinv
    for iiter in range(nouter):
        # other iterations (weighted least-squares)
        xinvold = xinv.copy()
        # model-space weights proportional to |x|, normalized to [0, 1]
        rw = np.abs(xinv)
        rw = rw / rw.max()
        R = Diagonal(rw, dtype=rw.dtype)
        if ncp == np:
            xinv = R @ Op.H @ lsqr(Op @ R @ Op.H + epsI ** 2 * Iop,
                                   data, **kwargs_solver)[0]
        else:
            xinv = R @ Op.H @ cgls(Op @ R @ Op.H + epsI ** 2 * Iop,
                                   data,
                                   ncp.zeros(int(Op.shape[0]), dtype=Op.dtype),
                                   **kwargs_solver)[0]
        # save history
        if returnhistory:
            rw_hist[iiter] = rw
            xinv_hist[iiter + 1] = xinv
        # check tolerance: stop outer loop once the model stops changing
        if np.linalg.norm(xinv - xinvold) < tolIRLS:
            nouter = iiter
            break

    # adding initial guess
    if x0 is not None:
        xinv = x0 + xinv
        if returnhistory:
            xinv_hist = x0 + xinv_hist

    if returnhistory:
        return xinv, nouter, xinv_hist[:nouter + 1], rw_hist[:nouter + 1]
    else:
        return xinv, nouter
Exemplo n.º 15
0
def test_dense_skinny(par):
    """Dense matrix representation of skinny matrix"""
    vals = np.arange(par["nx"]) + par["imag"] * np.arange(par["nx"])
    Dop = Diagonal(vals, dtype=par["dtype"])
    Zop = Zero(par["nx"], 3, dtype=par["dtype"])
    Op = HStack([Dop, Zop])
    # expected dense form: diagonal block followed by three zero columns
    expected = np.hstack((np.diag(vals), np.zeros((par["nx"], 3))))
    assert_array_equal(Op.todense(), expected)
Exemplo n.º 16
0
def _IRLS_data(Op, data, nouter, threshR=False, epsR=1e-10,
               epsI=1e-10, x0=None, tolIRLS=1e-10,
               returnhistory=False, **kwargs_solver):
    r"""Iteratively reweighted least squares with L1 data term

    Repeatedly solves a weighted normal-equations problem; the data-space
    weights at each outer iteration are the (damped or thresholded,
    depending on ``threshR``) inverse absolute residuals of the previous
    iteration, normalized to [0, 1].
    """
    # pick numpy or cupy to match the input array's backend
    ncp = get_array_module(data)

    # work on the residual data if an initial guess is given; x0 is
    # added back at the end
    if x0 is not None:
        data = data - Op * x0
    if returnhistory:
        xinv_hist = ncp.zeros((nouter + 1, int(Op.shape[1])))
        rw_hist = ncp.zeros((nouter + 1, int(Op.shape[0])))

    # first iteration (unweighted least-squares)
    xinv = NormalEquationsInversion(Op, None, data, epsI=epsI,
                                    returninfo=False,
                                    **kwargs_solver)
    r = data - Op * xinv
    if returnhistory:
        xinv_hist[0] = xinv
    for iiter in range(nouter):
        # other iterations (weighted least-squares)
        xinvold = xinv.copy()
        if threshR:
            # thresholded inverse residual: caps weights at 1/epsR
            rw = 1. / ncp.maximum(ncp.abs(r), epsR)
        else:
            # damped inverse residual
            rw = 1. / (ncp.abs(r) + epsR)
        rw = rw / rw.max()
        R = Diagonal(rw)
        xinv = NormalEquationsInversion(Op, [], data, Weight=R,
                                        epsI=epsI,
                                        returninfo=False,
                                        **kwargs_solver)
        r = data - Op * xinv
        # save history
        if returnhistory:
            rw_hist[iiter] = rw
            xinv_hist[iiter + 1] = xinv
        # check tolerance: stop outer loop once the model stops changing
        if ncp.linalg.norm(xinv - xinvold) < tolIRLS:
            nouter = iiter
            break

    # adding initial guess
    if x0 is not None:
        xinv = x0 + xinv
        if returnhistory:
            xinv_hist = x0 + xinv_hist

    if returnhistory:
        return xinv, nouter, xinv_hist[:nouter + 1], rw_hist[:nouter + 1]
    else:
        return xinv, nouter
Exemplo n.º 17
0
def test_Diagonal(par):
    """Dot-test and inversion for Diagonal operator
    """
    d = 1. + np.arange(par['nx'])

    Dop = Diagonal(d, dtype=par['dtype'])
    cflag = 0 if par['imag'] == 0 else 3
    assert dottest(Dop, par['nx'], par['nx'], complexflag=cflag)

    # invert the operator applied to a constant (possibly complex) model
    x = np.ones(par['nx']) + par['imag'] * np.ones(par['nx'])
    xlsqr = lsqr(Dop, Dop * x, damp=1e-20, iter_lim=300, show=0)[0]
    assert_array_almost_equal(x, xlsqr, decimal=4)
Exemplo n.º 18
0
def test_Diagonal_1dsignal(par):
    """Dot-test and inversion for Diagonal operator for 1d signal"""
    cflag = 0 if par["imag"] == 0 else 3
    for ddim in (par["nx"], par["nt"]):
        ramp = np.arange(ddim) + 1.0
        d = ramp + par["imag"] * ramp

        Dop = Diagonal(d, dtype=par["dtype"])
        assert dottest(Dop, ddim, ddim, complexflag=cflag)

        # invert the operator applied to a constant (possibly complex) model
        x = np.ones(ddim) + par["imag"] * np.ones(ddim)
        xlsqr = lsqr(Dop, Dop * x, damp=1e-20, iter_lim=300, show=0)[0]
        assert_array_almost_equal(x, xlsqr, decimal=4)
Exemplo n.º 19
0
def test_scaled(par):
    """Verify that _ScaledLinearOperator produces the correct type based
    on its inputs types
    """
    for dtype in (np.float32, np.float64):
        diag = np.arange(par['nx'], dtype=dtype) + \
               par['imag'] * np.arange(par['nx'], dtype=dtype)
        Dop = Diagonal(diag, dtype=dtype)
        # scale from either side, with positive and negative factors
        for scaled in (3. * Dop, -3. * Dop, Dop * 3., Dop * -3.):
            assert scaled.dtype == dtype
Exemplo n.º 20
0
def test_L2_op(par):
    """L2 norm of Op*x and proximal/dual proximal
    """
    b = np.zeros(par['nx'], dtype=par['dtype'])
    d = np.random.normal(0., 1., par['nx']).astype(par['dtype'])
    Dop = Diagonal(d, dtype=par['dtype'])
    l2 = L2(Op=Dop, b=b, sigma=par['sigma'], niter=500)

    # norm
    x = np.random.normal(0., 1., par['nx']).astype(par['dtype'])
    expected = (par['sigma'] / 2.) * np.linalg.norm(d * x) ** 2
    assert l2(x) == expected

    # prox: since Op is a Diagonal operator the denominator becomes
    # 1 + sigma*tau*d[i] for every i
    tau = 2.
    denom = 1. + par['sigma'] * tau * d ** 2
    assert_array_almost_equal(l2.prox(x, tau), x / denom, decimal=4)
def test_Diagonal_1dsignal(par):
    """Dot-test and comparison with Pylops for Diagonal operator for 1d signal
    """
    for ddim in (par['nx'], par['nt']):
        half = ddim // 2
        d = da.arange(ddim, chunks=half) + 1. + \
            par['imag'] * (da.arange(ddim, chunks=half) + 1.)
        dDop = dDiagonal(d, compute=(True, True), dtype=par['dtype'])
        assert dottest(dDop, ddim, ddim, chunks=(half, half),
                       complexflag=0 if par['imag'] == 0 else 3)

        # dask and in-memory versions must produce the same output
        x = da.ones(ddim, chunks=half) + \
            par['imag'] * da.ones(ddim, chunks=half)
        Dop = Diagonal(d.compute(), dtype=par['dtype'])
        dy = dDop * x
        y = Dop * x.compute()
        assert_array_almost_equal(dy, y, decimal=5)
Exemplo n.º 22
0
def test_Diagonal_2dsignal(par):
    """Dot-test and inversion for Diagonal operator for 2d signal"""
    shape2d = (par["nx"], par["nt"])
    nelems = par["nx"] * par["nt"]
    cflag = 0 if par["imag"] == 0 else 3
    for axis, ddim in enumerate(shape2d):
        ramp = np.arange(ddim) + 1.0
        d = ramp + par["imag"] * ramp

        Dop = Diagonal(d, dims=shape2d, dir=axis, dtype=par["dtype"])
        assert dottest(Dop, nelems, nelems, complexflag=cflag)

        # invert the operator applied to a constant (possibly complex) model
        x = np.ones(shape2d) + par["imag"] * np.ones(shape2d)
        xlsqr = lsqr(Dop, Dop * x.ravel(), damp=1e-20, iter_lim=300,
                     show=0)[0]
        assert_array_almost_equal(x.ravel(), xlsqr.ravel(), decimal=4)
Exemplo n.º 23
0
def test_Diagonal_3dsignal(par):
    """Dot-test and inversion for Diagonal operator for 3d signal
    """
    shape3d = (par['ny'], par['nx'], par['nt'])
    nelems = par['ny'] * par['nx'] * par['nt']
    cflag = 0 if par['imag'] == 0 else 3
    for axis, ddim in enumerate(shape3d):
        ramp = np.arange(ddim) + 1.
        d = ramp + par['imag'] * ramp

        Dop = Diagonal(d, dims=shape3d, dir=axis, dtype=par['dtype'])
        assert dottest(Dop, nelems, nelems, complexflag=cflag)

        # invert the operator applied to a constant (possibly complex) model
        x = np.ones(shape3d) + par['imag'] * np.ones(shape3d)
        xlsqr = lsqr(Dop, Dop * x.ravel(), damp=1e-20, iter_lim=300,
                     show=0)[0]
        assert_array_almost_equal(x.ravel(), xlsqr.ravel(), decimal=4)
Exemplo n.º 24
0
def test_overloads(par):
    """Apply various overloaded operators (.H, -, +, *) and ensure that the
    returned operator is still of pylops LinearOperator type
    """
    diag = np.arange(par["nx"]) + par["imag"] * np.arange(par["nx"])
    Dop = Diagonal(diag, dtype=par["dtype"])

    derived = (
        Dop.H,          # adjoint
        -Dop,           # negation
        2 * Dop,        # scalar multiplication
        Dop + Dop,      # addition
        Dop - 2 * Dop,  # subtraction
        Dop * Dop,      # composition
        Dop**2,         # power
    )
    for op in derived:
        assert isinstance(op, LinearOperator)
Exemplo n.º 25
0
def test_eigs(par):
    """Eigenvalues and condition number estimate with ARPACK
    """
    nextra = par['ny'] - par['nx']
    # decreasing diagonal nx..1 (optionally complex): largest eigenvalue
    # is nx and condition number is nx/1 = nx
    ramp = np.arange(par['nx'], 0, -1)
    diag = ramp + par['imag'] * ramp

    # explicit=True: dense matrix, diagonal padded with zero rows
    Mop = MatrixMult(np.vstack((np.diag(diag),
                                np.zeros((nextra, par['nx'])))))
    # explicit=False: equivalent built from Diagonal (+ Zero padding)
    Dop = Diagonal(diag, dtype=par['dtype'])
    if par['ny'] > par['nx']:
        Dop = VStack([Dop, Zero(nextra, par['nx'])])

    for Op in (Mop, Dop):
        eigs = Op.eigs()
        assert_array_almost_equal(diag[:eigs.size], eigs, decimal=3)

        cond = Op.cond()
        assert_array_almost_equal(np.real(cond), par['nx'], decimal=3)
Exemplo n.º 26
0
def IRLS(Op,
         data,
         nouter,
         threshR=False,
         epsR=1e-10,
         epsI=1e-10,
         x0=None,
         tolIRLS=1e-10,
         returnhistory=False,
         **kwargs_cg):
    r"""Iteratively reweighted least squares.

    Solve an optimization problem with :math:`L1` cost function given the
    operator ``Op`` and data ``y``. The cost function is minimized by
    iteratively solving a weighted least squares problem with the weight at
    iteration :math:`i` being based on the data residual at iteration
    :math:`i+1`.

    The IRLS solver is robust to *outliers* since the L1 norm gives less
    weight to large residuals than L2 norm does.

    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        Operator to invert
    data : :obj:`numpy.ndarray`
        Data
    nouter : :obj:`int`
        Number of outer iterations
    threshR : :obj:`bool`, optional
        Apply thresholding in creation of weight (``True``)
        or damping (``False``)
    epsR : :obj:`float`, optional
        Damping to be applied to residuals for weighting term
    epsI : :obj:`float`, optional
        Tikhonov damping
    x0 : :obj:`numpy.ndarray`, optional
        Initial guess
    tolIRLS : :obj:`float`, optional
        Tolerance. Stop outer iterations if difference between inverted model
        at subsequent iterations is smaller than ``tolIRLS``
    returnhistory : :obj:`bool`, optional
        Return history of inverted model for each outer iteration of IRLS
    **kwargs_cg
        Arbitrary keyword arguments for
        :py:func:`scipy.sparse.linalg.cg` solver

    Returns
    -------
    xinv : :obj:`numpy.ndarray`
        Inverted model
    nouter : :obj:`int`
        Number of effective outer iterations
    xinv_hist : :obj:`numpy.ndarray`, optional
        History of inverted model
    rw_hist : :obj:`numpy.ndarray`, optional
        History of weights

    Notes
    -----
    Solves the following optimization problem for the operator
    :math:`\mathbf{Op}` and the data :math:`\mathbf{d}`:

    .. math::
        J = ||\mathbf{d} - \mathbf{Op} \mathbf{x}||_1

    by a set of outer iterations which require to repeatedly solve a
    weighted least squares problem of the form:

    .. math::
        \mathbf{x}^{(i+1)} = \operatorname*{arg\,min}_\mathbf{x} ||\mathbf{d} -
        \mathbf{Op} \mathbf{x}||_{2, \mathbf{R}^{(i)}} +
        \epsilon_I^2 ||\mathbf{x}||

    where :math:`\mathbf{R}^{(i)}` is a diagonal weight matrix
    whose diagonal elements at iteration :math:`i` are equal to the absolute
    inverses of the residual vector :math:`\mathbf{r}^{(i)} =
    \mathbf{y} - \mathbf{Op} \mathbf{x}^{(i)}` at iteration :math:`i`.
    More specifically the j-th element of the diagonal of
    :math:`\mathbf{R}^{(i)}` is

    .. math::
        R^{(i)}_{j,j} = \frac{1}{|r^{(i)}_j|+\epsilon_R}

    or

    .. math::
        R^{(i)}_{j,j} = \frac{1}{max(|r^{(i)}_j|, \epsilon_R)}

    depending on the choice ``threshR``. In either case,
    :math:`\epsilon_R` is the user-defined stabilization/thresholding
    factor [1]_.

    .. [1] https://en.wikipedia.org/wiki/Iteratively_reweighted_least_squares

    """
    # work on the residual data if an initial guess is given; x0 is
    # added back at the end
    if x0 is not None:
        data = data - Op * x0
    if returnhistory:
        xinv_hist = np.zeros((nouter + 1, Op.shape[1]))
        rw_hist = np.zeros((nouter + 1, Op.shape[0]))

    # first iteration (unweighted least-squares)
    xinv = NormalEquationsInversion(Op,
                                    None,
                                    data,
                                    epsI=epsI,
                                    returninfo=False,
                                    **kwargs_cg)
    r = data - Op * xinv
    if returnhistory:
        xinv_hist[0] = xinv
    for iiter in range(nouter):
        # other iterations (weighted least-squares)
        xinvold = xinv.copy()
        if threshR:
            # thresholded inverse residual: caps weights at 1/epsR
            rw = 1. / np.maximum(np.abs(r), epsR)
        else:
            # damped inverse residual
            rw = 1. / (np.abs(r) + epsR)
        rw = rw / rw.max()
        R = Diagonal(rw)
        xinv = NormalEquationsInversion(Op, [],
                                        data,
                                        Weight=R,
                                        epsI=epsI,
                                        returninfo=False,
                                        **kwargs_cg)
        r = data - Op * xinv
        # save history
        if returnhistory:
            rw_hist[iiter] = rw
            xinv_hist[iiter + 1] = xinv
        # check tolerance: stop outer loop once the model stops changing
        if np.linalg.norm(xinv - xinvold) < tolIRLS:
            nouter = iiter
            break

    # adding initial guess
    if x0 is not None:
        xinv = x0 + xinv
        if returnhistory:
            xinv_hist = x0 + xinv_hist

    if returnhistory:
        return xinv, nouter, xinv_hist[:nouter + 1], rw_hist[:nouter + 1]
    else:
        return xinv, nouter
Exemplo n.º 27
0
def Sliding3D(Op, dims, dimsd, nwin, nover, nop,
              tapertype='hanning', design=False, nproc=1):
    """3D Sliding transform operator.

    Apply a transform operator ``Op`` repeatedly to patches of the model
    vector in forward mode and patches of the data vector in adjoint mode.
    More specifically, in forward mode the model vector is divided into patches
    each patch is transformed, and patches are then recombined in a sliding
    window fashion. Both model and data should be 3-dimensional
    arrays in nature as they are internally reshaped and interpreted as
    3-dimensional arrays. Each patch contains in fact a portion of the
    array in the first and second dimensions (and the entire third dimension).

    This operator can be used to perform local, overlapping transforms (e.g.,
    :obj:`pylops.signalprocessing.FFTND`
    or :obj:`pylops.signalprocessing.Radon3D`) of 3-dimensional arrays.

    .. note:: The shape of the model has to be consistent with
       the number of windows for this operator not to return an error. As the
       number of windows depends directly on the choice of ``nwin`` and
       ``nover``, it is recommended to use ``design=True`` if unsure about the
       choice ``dims`` and use the number of windows printed on screen to
       define such input parameter.

    .. warning:: Depending on the choice of `nwin` and `nover` as well as the
       size of the data, sliding windows may not cover the entire first and/or
       second dimensions. The start and end indeces of each window can be
       displayed using ``design=True`` while defining the best sliding window
       approach.

    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        Transform operator
    dims : :obj:`tuple`
        Shape of 3-dimensional model. Note that ``dims[0]`` and ``dims[1]``
        should be multiple of the model sizes of the transform in the
        first and second dimensions
    dimsd : :obj:`tuple`
        Shape of 3-dimensional data
    nwin : :obj:`tuple`
        Number of samples of window
    nover : :obj:`tuple`
        Number of samples of overlapping part of window
    nop : :obj:`tuple`
        Number of samples in axes of transformed domain associated
        to spatial axes in the data
    tapertype : :obj:`str`, optional
        Type of taper (``hanning``, ``cosine``, ``cosinesquare`` or ``None``)
    design : :obj:`bool`, optional
        Print number sliding window (``True``) or not (``False``)
    nproc : :obj:`int`, optional
        Number of processes forwarded to the internal
        :obj:`pylops.BlockDiag` operators (``1`` for serial evaluation)

    Returns
    -------
    Sop : :obj:`pylops.LinearOperator`
        Sliding operator

    Raises
    ------
    ValueError
        Identified number of windows is not consistent with provided model
        shape (``dims``).

    """
    # model windows (start/end indices along the first two model axes)
    mwin0_ins, mwin0_ends = _slidingsteps(dims[0],
                                          Op.shape[1]//(nop[1]*dims[2]), 0)
    mwin1_ins, mwin1_ends = _slidingsteps(dims[1],
                                          Op.shape[1]//(nop[0]*dims[2]), 0)

    # data windows (overlapping by nover samples)
    dwin0_ins, dwin0_ends = _slidingsteps(dimsd[0], nwin[0], nover[0])
    dwin1_ins, dwin1_ends = _slidingsteps(dimsd[1], nwin[1], nover[1])
    nwins0 = len(dwin0_ins)
    nwins1 = len(dwin1_ins)
    nwins = nwins0*nwins1

    # create tapers
    if tapertype is not None:
        tap = taper3d(dimsd[2], nwin, nover, tapertype=tapertype)

    # check that identified number of windows agrees with mode size
    if design:
        logging.warning('(%d,%d) windows required...', nwins0, nwins1)
        logging.warning('model wins - start0:%s, end0:%s, start1:%s, end1:%s',
                        str(mwin0_ins), str(mwin0_ends),
                        str(mwin1_ins), str(mwin1_ends))
        logging.warning('data wins - start0:%s, end0:%s, start1:%s, end1:%s',
                        str(dwin0_ins), str(dwin0_ends),
                        str(dwin1_ins), str(dwin1_ends))

    if nwins*Op.shape[1]//dims[2] != dims[0]*dims[1]:
        # note trailing space after 'the ' so the joined message reads
        # "find out the optimal" rather than "find out theoptimal"
        raise ValueError('Model shape (dims=%s) is not consistent with chosen '
                         'number of windows. Choose dims[0]=%d and '
                         'dims[1]=%d for the operator to work with '
                         'estimated number of windows, or create '
                         'the operator with design=True to find out the '
                         'optimal number of windows for the current '
                         'model size...'
                         % (str(dims), nwins0*Op.shape[1]//(nop[1]*dims[2]),
                            nwins1 * Op.shape[1]//(nop[0]*dims[2])))
    # transform to apply to each patch (tapered when tapertype is given)
    if tapertype is None:
        OOp = BlockDiag([Op for _ in range(nwins)], nproc=nproc)
    else:
        OOp = BlockDiag([Diagonal(tap.flatten()) * Op
                         for _ in range(nwins)], nproc=nproc)

    # recombine patches along the second axis...
    hstack = HStack([Restriction(dimsd[1] * dimsd[2] * nwin[0],
                                 range(win_in, win_end),
                                 dims=(nwin[0], dimsd[1], dimsd[2]),
                                 dir=1).H
                     for win_in, win_end in zip(dwin1_ins,
                                                dwin1_ends)])

    # ...and then along the first axis
    combining1 = BlockDiag([hstack]*nwins0)
    combining0 = HStack([Restriction(np.prod(dimsd),
                                     range(win_in, win_end),
                                     dims=dimsd, dir=0).H
                         for win_in, win_end in zip(dwin0_ins, dwin0_ends)])
    Sop = combining0 * combining1 * OOp
    return Sop
Exemplo n.º 28
0
def Sliding1D(Op, dim, dimd, nwin, nover, tapertype="hanning", design=False):
    r"""1D Sliding transform operator.

    Apply a transform operator ``Op`` repeatedly to slices of the model
    vector in forward mode and slices of the data vector in adjoint mode.
    More specifically, in forward mode the model vector is divided into
    slices, each slice is transformed, and slices are then recombined in a
    sliding window fashion.

    This operator can be used to perform local, overlapping transforms (e.g.,
    :obj:`pylops.signalprocessing.FFT`) on 1-dimensional arrays.

    .. note:: The shape of the model has to be consistent with
       the number of windows for this operator not to return an error. As the
       number of windows depends directly on the choice of ``nwin`` and
       ``nover``, it is recommended to use ``design=True`` if unsure about the
       choice ``dims`` and use the number of windows printed on screen to
       define such input parameter.

    .. warning:: Depending on the choice of `nwin` and `nover` as well as the
       size of the data, sliding windows may not cover the entire data.
       The start and end indices of each window can be displayed using
       ``design=True`` while defining the best sliding window approach.

    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        Transform operator
    dim : :obj:`tuple`
        Shape of 1-dimensional model.
    dimd : :obj:`tuple`
        Shape of 1-dimensional data
    nwin : :obj:`int`
        Number of samples of window
    nover : :obj:`int`
        Number of samples of overlapping part of window
    tapertype : :obj:`str`, optional
        Type of taper (``hanning``, ``cosine``, ``cosinesquare`` or ``None``)
    design : :obj:`bool`, optional
        Print number of sliding window (``True``) or not (``False``)

    Returns
    -------
    Sop : :obj:`pylops.LinearOperator`
        Sliding operator

    Raises
    ------
    ValueError
        Identified number of windows is not consistent with provided model
        shape (``dims``).

    """
    # model windows
    mwin_ins, mwin_ends = _slidingsteps(dim, Op.shape[1], 0)
    # data windows
    dwin_ins, dwin_ends = _slidingsteps(dimd, nwin, nover)
    nwins = len(dwin_ins)

    # create tapers: first/last windows keep full amplitude on their
    # outward-facing edge, interior windows use the symmetric taper
    if tapertype is not None:
        tap = taper(nwin, nover, tapertype=tapertype)
        tapin = tap.copy()
        tapend = tap.copy()
        if nover > 0:
            # guard against nover == 0: tapend[-0:] would select the whole
            # array and wipe out the taper of the last window entirely
            tapin[:nover] = 1
            tapend[-nover:] = 1
        taps = {}
        taps[0] = tapin
        for i in range(1, nwins - 1):
            taps[i] = tap
        # NOTE(review): when nwins == 1 this overwrites taps[0] with tapend,
        # so a single window is tapered only at its start — confirm intended
        taps[nwins - 1] = tapend

    # check that identified number of windows agrees with mode size
    if design:
        logging.warning("%d windows required...", nwins)
        logging.warning("model wins - start:%s, end:%s", str(mwin_ins), str(mwin_ends))
        logging.warning("data wins - start:%s, end:%s", str(dwin_ins), str(dwin_ends))
    if nwins * Op.shape[1] != dim:
        raise ValueError(
            "Model shape (dim=%d) is not consistent with chosen "
            "number of windows. Choose dim=%d for the "
            "operator to work with estimated number of windows, "
            "or create the operator with design=True to find "
            "out the optimal number of windows for the current "
            "model size..." % (dim, nwins * Op.shape[1])
        )
    # transform to apply to each slice (tapered when tapertype is given)
    if tapertype is None:
        OOp = BlockDiag([Op for _ in range(nwins)])
    else:
        OOp = BlockDiag([Diagonal(taps[itap].ravel()) * Op for itap in range(nwins)])

    # recombine the transformed slices in a sliding-window fashion
    combining = HStack(
        [
            Restriction(dimd, np.arange(win_in, win_end), dtype=Op.dtype).H
            for win_in, win_end in zip(dwin_ins, dwin_ends)
        ]
    )
    Sop = combining * OOp
    return Sop
Exemplo n.º 29
0
def NormalEquationsInversion(Op,
                             Regs,
                             data,
                             Weight=None,
                             dataregs=None,
                             epsI=0,
                             epsRs=None,
                             x0=None,
                             returninfo=False,
                             NRegs=None,
                             epsNRs=None,
                             **kwargs_solver):
    r"""Inversion of normal equations.

    Form and solve the regularized normal equations associated with the
    operator ``Op``, an optional data weighting operator ``Weight``, and
    optional lists of regularization terms ``Regs`` and/or ``NRegs``.

    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        Operator to invert
    Regs : :obj:`list`
        Regularization operators (``None`` to avoid adding regularization)
    data : :obj:`numpy.ndarray`
        Data
    Weight : :obj:`pylops.LinearOperator`, optional
        Weight operator
    dataregs : :obj:`list`, optional
        Regularization data (must have the same number of elements
        as ``Regs``); defaults to zero vectors when not provided
    epsI : :obj:`float`, optional
        Tikhonov damping
    epsRs : :obj:`list`, optional
         Regularization dampings (must have the same number of elements
         as ``Regs``); defaults to ones when not provided
    x0 : :obj:`numpy.ndarray`, optional
        Initial guess
    returninfo : :obj:`bool`, optional
        Return info of CG solver
    NRegs : :obj:`list`
        Normal regularization operators (``None`` to avoid adding
        regularization). Such operators must apply the chain of the
        forward and the adjoint in one go; their data term is implicitly
        assumed to be zero.
    epsNRs : :obj:`list`, optional
         Regularization dampings for normal operators (must have the same
         number of elements as ``NRegs``)
    **kwargs_solver
        Arbitrary keyword arguments for chosen solver
        (:py:func:`scipy.sparse.linalg.cg` and
        :py:func:`pylops.optimization.solver.cg` are used as default for numpy
        and cupy `data`, respectively)

    Returns
    -------
    xinv : :obj:`numpy.ndarray`
        Inverted model.
    istop : :obj:`int`
        Convergence information (only returned when ``returninfo=True``):

        ``0``: successful exit

        ``>0``: convergence to tolerance not achieved, number of iterations

        ``<0``: illegal input or breakdown

    See Also
    --------
    RegularizedInversion: Regularized inversion
    PreconditionedInversion: Preconditioned inversion

    Notes
    -----
    The system solved is

    .. math::
        ( \mathbf{Op}^T \mathbf{W} \mathbf{Op} +
        \sum_i \epsilon_{{R}_i}^2 \mathbf{R}_i^T \mathbf{R}_i +
        \sum_i \epsilon_{{N}_i}^2 \mathbf{N}_i +
        \epsilon_I^2 \mathbf{I} )  \mathbf{x}
        = \mathbf{Op}^T \mathbf{W} \mathbf{d} +  \sum_i \epsilon_{{R}_i}^2
        \mathbf{R}_i^T \mathbf{d}_{R_i}

    where :math:`\mathbf{W}` is the weighting operator, :math:`\mathbf{R_i}`
    / :math:`\mathbf{N_i}` the regularization terms, :math:`\mathbf{d}` the
    data, :math:`\mathbf{d}_{R_i}` the regularization data, and
    :math:`\epsilon_I`, :math:`\epsilon_{{R}_i}`, :math:`\epsilon_{{N}_i}`
    the damping factors.

    """
    ncp = get_array_module(data)

    # adjoint operator, reused for both sides of the normal equations
    OpH = Op.H

    # fill in default regularization data (zeros) and dampings (ones)
    if Regs is not None:
        if dataregs is None:
            dataregs = [
                ncp.zeros(int(Reg.shape[0]), dtype=Reg.dtype) for Reg in Regs
            ]
        if epsRs is None:
            epsRs = [1] * len(Regs)

    # assemble the (optionally weighted) normal operator and right-hand side
    if Weight is None:
        y_normal = OpH * data
        Op_normal = OpH * Op
    else:
        y_normal = OpH * Weight * data
        Op_normal = OpH * Weight * Op

    # Tikhonov damping term
    if epsI > 0:
        Op_normal += epsI**2 * Diagonal(
            ncp.ones(int(Op.shape[1]), dtype=Op.dtype), dtype=Op.dtype)

    # standard regularization terms (contribute to both sides)
    if Regs is not None:
        for epsR, Reg, datareg in zip(epsRs, Regs, dataregs):
            RegH = Reg.H
            y_normal += epsR**2 * RegH * datareg
            Op_normal += epsR**2 * RegH * Reg

    # normal regularization terms (operator side only, zero data assumed)
    if NRegs is not None:
        for epsNR, NReg in zip(epsNRs, NRegs):
            Op_normal += epsNR**2 * NReg

    # shift by initial guess, solve, then shift back
    if x0 is not None:
        y_normal = y_normal - Op_normal * x0
    if ncp == np:
        xinv, istop = sp_cg(Op_normal, y_normal, **kwargs_solver)
    else:
        # cupy data: use pylops' cg, which exposes no convergence flag
        xinv = cg(Op_normal, y_normal,
                  ncp.zeros(int(Op_normal.shape[1]), dtype=Op_normal.dtype),
                  **kwargs_solver)[0]
        istop = None
    if x0 is not None:
        xinv = x0 + xinv

    return (xinv, istop) if returninfo else xinv
def focusing_wrapper(direct, toff, g0VS, iava1, Rop1, R1op1, Restrop1, iava2,
                     Rop2, R1op2, Restrop2, t):
    """Estimate focusing functions and up/down Green's functions.

    Builds time windows from the direct-arrival times, sets up the
    Marchenko-style system of operators for two receiver decimations
    (``iava1``/``iava2``), solves it jointly with ``lsqr`` and returns the
    separated focusing functions and Green's functions.

    Parameters
    ----------
    direct : :obj:`numpy.ndarray`
        Direct-arrival traveltimes, one per receiver (``nr`` elements)
    toff : :obj:`float`
        Time offset subtracted from the direct arrival to build the window
    g0VS : :obj:`numpy.ndarray`
        Initial focusing wavefield; its transpose is time-flipped to build
        ``fd_plus``
    iava1, iava2 : :obj:`numpy.ndarray`
        Indices of available receivers for the two decimations
    Rop1, R1op1, Restrop1, Rop2, R1op2, Restrop2 : :obj:`pylops.LinearOperator`
        Reflection, time-reversed reflection and restriction operators for
        the two decimations
    t : :obj:`numpy.ndarray`
        Time axis (regularly sampled)

    Returns
    -------
    f1_1_minus, f1_1_plus, f1_2_minus, f1_2_plus : :obj:`numpy.ndarray`
        Up-/down-going focusing functions for the two decimations
    g_1_minus, g_1_plus, g_2_minus, g_2_plus : :obj:`numpy.ndarray`
        Up-/down-going Green's functions for the two decimations

    """
    nsmooth = 10
    nr = direct.shape[0]
    nsava1 = iava1.shape[0]
    nsava2 = iava2.shape[0]

    nt = t.shape[0]
    dt = t[1] - t[0]

    # window: 1 before the (offset) direct arrival, 0 after; wi is complement
    directVS_off = direct - toff
    # use builtin int: np.int was removed in NumPy 1.24
    idirectVS_off = np.round(directVS_off / dt).astype(int)
    w = np.zeros((nr, nt))
    wi = np.ones((nr, nt))
    # NOTE(review): last receiver (ir = nr-1) is left unwindowed — confirm
    for ir in range(nr - 1):
        w[ir, :idirectVS_off[ir]] = 1
    wi = wi - w

    # extend windows to two-sided time axis (negative and positive times)
    w = np.hstack((np.fliplr(w), w[:, 1:]))
    wi = np.hstack((np.fliplr(wi), wi[:, 1:]))

    if nsmooth > 0:
        smooth = np.ones(nsmooth) / nsmooth
        w = filtfilt(smooth, 1, w)
        wi = filtfilt(smooth, 1, wi)

    # Input focusing function
    fd_plus = np.concatenate((np.fliplr(g0VS.T), np.zeros((nr, nt - 1))),
                             axis=-1)

    # windowing operators (full and per-decimation, plus complements)
    Wop = Diagonal(w.flatten())
    WSop1 = Diagonal(w[iava1].flatten())
    WSop2 = Diagonal(w[iava2].flatten())
    WiSop1 = Diagonal(wi[iava1].flatten())
    WiSop2 = Diagonal(wi[iava2].flatten())

    # Marchenko system operators for each decimation, combined in one block
    Mop1 = VStack([
        HStack([Restrop1, -1 * WSop1 * Rop1]),
        HStack([-1 * WSop1 * R1op1, Restrop1])
    ]) * BlockDiag([Wop, Wop])
    Mop2 = VStack([
        HStack([Restrop2, -1 * WSop2 * Rop2]),
        HStack([-1 * WSop2 * R1op2, Restrop2])
    ]) * BlockDiag([Wop, Wop])
    Mop = VStack([
        HStack([Mop1, Mop1, Zero(Mop1.shape[0], Mop1.shape[1])]),
        HStack([Mop2, Zero(Mop2.shape[0], Mop2.shape[1]), Mop2])
    ])

    # unwindowed Green's function operators
    Gop1 = VStack(
        [HStack([Restrop1, -1 * Rop1]),
         HStack([-1 * R1op1, Restrop1])])
    Gop2 = VStack(
        [HStack([Restrop2, -1 * Rop2]),
         HStack([-1 * R1op2, Restrop2])])

    # right-hand sides from the downgoing focusing function
    d1 = WSop1 * Rop1 * fd_plus.flatten()
    d1 = np.concatenate(
        (d1.reshape(nsava1, 2 * nt - 1), np.zeros((nsava1, 2 * nt - 1))))
    d2 = WSop2 * Rop2 * fd_plus.flatten()
    d2 = np.concatenate(
        (d2.reshape(nsava2, 2 * nt - 1), np.zeros((nsava2, 2 * nt - 1))))

    d = np.concatenate((d1, d2))

    # solve the joint system with a few LSQR iterations
    comb_f = lsqr(Mop, d.flatten(), iter_lim=10, show=False)[0]
    comb_f = comb_f.reshape(6 * nr, (2 * nt - 1))
    # add back the known downgoing component
    comb_f_tot = comb_f + np.concatenate((np.zeros(
        (nr, 2 * nt - 1)), fd_plus, np.zeros((4 * nr, 2 * nt - 1))))

    f1_1 = comb_f_tot[:2 * nr] + comb_f_tot[2 * nr:4 * nr]
    f1_2 = comb_f_tot[:2 * nr] + comb_f_tot[4 * nr:]

    # Green's functions: apply unwindowed operators, mask with complement
    g_1 = BlockDiag([WiSop1, WiSop1]) * Gop1 * f1_1.flatten()
    g_1 = g_1.reshape(2 * nsava1, (2 * nt - 1))
    g_2 = BlockDiag([WiSop2, WiSop2]) * Gop2 * f1_2.flatten()
    g_2 = g_2.reshape(2 * nsava2, (2 * nt - 1))

    # split into up-/down-going parts (downgoing is time-flipped back)
    f1_1_minus, f1_1_plus = f1_1[:nr], f1_1[nr:]
    f1_2_minus, f1_2_plus = f1_2[:nr], f1_2[nr:]
    g_1_minus, g_1_plus = -g_1[:nsava1], np.fliplr(g_1[nsava1:])
    g_2_minus, g_2_plus = -g_2[:nsava2], np.fliplr(g_2[nsava2:])

    return f1_1_minus, f1_1_plus, f1_2_minus, f1_2_plus, g_1_minus, g_1_plus, g_2_minus, g_2_plus