Example #1
def SR3(Op,
        Reg,
        data,
        kappa,
        eps,
        x0=None,
        adaptive=True,
        iter_lim_outer=int(1e2),
        iter_lim_inner=int(1e2)):
    r"""Sparse Relaxed Regularized Regression

    Applies the Sparse Relaxed Regularized Regression (SR3) algorithm to
    an inverse problem with a sparsity constraint of the form

    .. math::

        \min_x \dfrac{1}{2}\Vert \mathbf{Ax} - \mathbf{b}\Vert_2^2 +
        \lambda\Vert \mathbf{Lx}\Vert_1

    SR3 introduces an auxiliary variable :math:`\mathbf{z} = \mathbf{Lx}`,
    and instead solves

    .. math::

        \min_{\mathbf{x},\mathbf{z}} \dfrac{1}{2}\Vert \mathbf{Ax} -
        \mathbf{b}\Vert_2^2 + \lambda\Vert \mathbf{z}\Vert_1 +
        \dfrac{\kappa}{2}\Vert \mathbf{Lx} - \mathbf{z}\Vert_2^2

    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        Forward operator
    Reg : :obj:`numpy.ndarray`
        Regularization operator
    data : :obj:`numpy.ndarray`
        Data
    kappa : :obj:`float`
        Parameter controlling the difference between :math:`\mathbf{z}`
        and :math:`\mathbf{Lx}`
    eps : :obj:`float`
        Regularization parameter
    x0 : :obj:`numpy.ndarray`, optional
        Initial guess
    adaptive : :obj:`bool`, optional
        Whether to use adaptive SR3 with a stopping criterion for the inner
        iterations
    iter_lim_outer : :obj:`int`, optional
        Maximum number of iterations for the outer iteration
    iter_lim_inner : :obj:`int`, optional
        Maximum number of iterations for the inner iteration

    Returns
    -------
    x : :obj:`numpy.ndarray`
        Approximate solution.

    Notes
    -----
    SR3 uses the following algorithm:

        .. math::
            \mathbf{x}_{k+1} = (\mathbf{A}^T\mathbf{A} + \kappa
            \mathbf{L}^T\mathbf{L})^{-1}(\mathbf{A}^T\mathbf{b} +
            \kappa \mathbf{L}^T\mathbf{y}_k) \\
            \mathbf{y}_{k+1} = \text{prox}_{\lambda/\kappa\mathcal{R}}
            (\mathbf{Lx}_{k+1})

    """
    (m, n) = Op.shape
    if x0 is None:
        x0 = np.zeros(n)
    x = x0.copy()
    p = Reg.shape[0]
    v = np.zeros(p)
    w = v
    eta = 1 / kappa
    theta = 1
    AL = pylops.VStack([Op, np.sqrt(kappa) * Reg])
    for _ in range(iter_lim_outer):
        # Compute the inner iteration
        if adaptive:
            x = _lsqr(AL, np.concatenate((data, np.sqrt(kappa) * v)),
                      iter_lim_inner, v, x, kappa, eps, Reg)
        else:
            x = sp_lsqr(AL,
                        np.concatenate((data, np.sqrt(kappa) * v)),
                        iter_lim=iter_lim_inner,
                        x0=x)[0]
        # Compute the outer iteration
        w_old = w
        temp = Reg.matvec(x)
        w = np.sign(temp) * np.maximum(abs(temp) - eta * eps, 0)
        err1 = np.linalg.norm(v - w) / max(1, np.linalg.norm(w))
        if err1 < 1e-6:
            return x
        theta = 2 / (1 + np.sqrt(1 + 4 / (theta**2)))
        v = w + (1 - theta) * (w - w_old)
    return x
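

###############################################################################
# A minimal usage sketch for SR3 (assuming ``numpy as np``, ``pylops`` and the
# helpers used above, e.g. ``sp_lsqr``, are imported as in the source module);
# the operator sizes and parameter values below are purely illustrative.
nx = 50
Aop = pylops.MatrixMult(np.random.normal(0.0, 1.0, (nx, nx)))
Lop = pylops.FirstDerivative(nx)        # sparsifying operator L
xtrue = np.zeros(nx)
xtrue[nx // 4:nx // 2] = 1.0            # piecewise-constant model
b = Aop * xtrue
xest = SR3(Aop, Lop, b, kappa=1.0, eps=1e-1, adaptive=False)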
Example #2
# To start we create the forward problem

n = 5
x = np.arange(n) + 1.

# make A
Ar = np.random.normal(0, 1, (n, n))
Ai = np.random.normal(0, 1, (n, n))
A = Ar + 1j * Ai
Aop = pylops.MatrixMult(A, dtype=np.complex128)
y = Aop @ x

###############################################################################
# Let's check we can solve this problem using the first formulation
A1op = Aop.toreal(forw=False, adj=True)
xinv = A1op.div(y)

print('xinv=%s\n' % xinv)

###############################################################################
# Let's now see how we formulate the second problem
Amop = pylops.MemoizeOperator(Aop, max_neval=10)
Arop = Amop.toreal()
Aiop = Amop.toimag()

A1op = pylops.VStack([Arop, Aiop])
y1 = np.concatenate([np.real(y), np.imag(y)])
xinv1 = np.real(A1op.div(y1))

print('xinv1=%s\n' % xinv1)
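
###############################################################################
# A minimal sanity-check sketch (reusing the variables above): the stacked
# real/imaginary operator can be validated with the dot test shipped in pylops.
from pylops.utils import dottest

dottest(A1op, 2 * n, n)  # adjoint consistency of the stacked operator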
Example #3
    vmax=0.1,
)
axs[3].set_title("Noisy Lop")
axs[3].set_xlabel(r"$\theta$")
axs[3].axis("tight")
plt.tight_layout()
plt.subplots_adjust(top=0.85)

###############################################################################
# Finally, before moving to the 2D example, we consider the case in which both
# PP and PS data are available. A joint PP-PS inversion can be easily solved
# as follows.
PSop = pylops.avo.prestack.PrestackLinearModelling(
    2 * wav, theta, vsvp=vsvp, nt0=nt0, linearization="ps"
)
PPPSop = pylops.VStack((PPop, PSop))

# data
dPPPS = PPPSop * m.ravel()
dPPPS = dPPPS.reshape(2, nt0, ntheta)

dPPPSn = dPPPS + np.random.normal(0, 1e-2, dPPPS.shape)

# Invert
minvPPSP, dPPPS_res = pylops.avo.prestack.PrestackInversion(
    dPPPS,
    theta,
    [wav, 2 * wav],
    m0=mback,
    linearization=["fatti", "ps"],
    epsR=5e-1,
Example #4
#
#    .. math::
#       \mathbf{D_{Vstack}} =
#        \begin{bmatrix}
#          \mathbf{D_v}    \\
#          \mathbf{D_h}
#        \end{bmatrix}, \qquad
#       \mathbf{y} =
#        \begin{bmatrix}
#          \mathbf{D_v}\mathbf{x}    \\
#          \mathbf{D_h}\mathbf{x}
#        \end{bmatrix}
Nv, Nh = 11, 21
X = np.zeros((Nv, Nh))
X[int(Nv / 2), int(Nh / 2)] = 1
Dstack = pylops.VStack([D2vop, D2hop])

Y = np.reshape(Dstack * X.ravel(), (Nv * 2, Nh))

fig, axs = plt.subplots(1, 2, figsize=(10, 3))
fig.suptitle("Vertical stacking", fontsize=14, fontweight="bold", y=0.95)
im = axs[0].imshow(X, interpolation="nearest")
axs[0].axis("tight")
axs[0].set_title(r"$x$")
plt.colorbar(im, ax=axs[0])
im = axs[1].imshow(Y, interpolation="nearest")
axs[1].axis("tight")
axs[1].set_title(r"$y$")
plt.colorbar(im, ax=axs[1])
plt.tight_layout()
plt.subplots_adjust(top=0.8)
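
###############################################################################
# A short complementary sketch (reusing the variables above): the adjoint of a
# vertical stack applies the adjoint of each block to the corresponding
# segment of y and sums the results.
Xadj = (Dstack.H * Y.ravel()).reshape(Nv, Nh)  # = D2vop.H @ y_v + D2hop.H @ y_h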
Example #5
D_dec = R * ND.flatten()
D_adj = (R.H * D_dec).reshape(nt, nr, ns)
D1_r_dec = np.real(R * (D1_r.flatten()))
D1_s_dec = np.real(R * (D1_s.flatten()))
D2_r_dec = np.real(R * (D2_r.flatten()))
D2_s_dec = np.real(R * (D2_s.flatten()))
D1_rs_dec = np.real(R * (D1_rs.flatten()))
D1sD2r_dec = np.real(R * (D1sD2r.flatten()))
D2sD1r_dec = np.real(R * (D2sD1r.flatten()))
D2sD2r_dec = np.real(R * (D2sD2r.flatten()))

Forward = pylops.VStack([
    R * mask_tt * F.H * mask_fre, R * mask_tt * F.H * D1op_r * mask_fre,
    R * mask_tt * F.H * D1op_s * mask_fre,
    R * mask_tt * F.H * D1op_r * D1op_s * mask_fre,
    R * mask_tt * F.H * D2op_r * mask_fre,
    R * mask_tt * F.H * D2op_s * mask_fre,
    R * mask_tt * F.H * D1op_s * D2op_r * mask_fre,
    R * mask_tt * F.H * D2op_s * D1op_r * mask_fre,
    R * mask_tt * F.H * D2op_s * D2op_r * mask_fre
])

rhs = np.concatenate((D_dec, D1_r_dec, D1_s_dec, D1_rs_dec, D2_r_dec, D2_s_dec,
                      D1sD2r_dec, D2sD1r_dec, D2sD2r_dec),
                     axis=0)

it = 1000
xinv_ = pylops.optimization.leastsquares.RegularizedInversion(
    Forward, [], rhs, damp=0, iter_lim=it, show=0
)

xinv = scail * mask_tt * F.H * mask_fre * xinv_
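
###############################################################################
# A quick fit-quality sketch (reusing the names above; purely illustrative):
# compare the data predicted by the estimated model with the stacked rhs.
resid = np.linalg.norm(Forward * xinv_ - rhs) / np.linalg.norm(rhs)
print('relative data residual = %.3e' % resid)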
Example #6
def SR3(Op,
        Reg,
        data,
        kappa,
        eps,
        x0=None,
        adaptive=True,
        iter_lim_outer=int(1e2),
        iter_lim_inner=int(1e2)):
    r"""Implementation of SR3

    This function applies SR3 to an inverse problem with a sparsity
    constraint of the form

    .. math::

        \min_x \dfrac{1}{2}\Vert Ax - b\Vert_2^2 + \lambda\Vert Lx\Vert_1

    SR3 introduces an auxiliary variable :math:`z = Lx`, and instead solves

    .. math::

        \min_{x,z} \dfrac{1}{2}\Vert Ax - b\Vert_2^2 + \lambda\Vert z\Vert_1 + \dfrac{\kappa}{2}\Vert Lx - z\Vert_2^2

    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        Forward operator
    Reg : :obj:`numpy.ndarray`
        Regularization operator
    data : :obj:`numpy.ndarray`
        Data
    kappa : :obj:`float`
        Parameter controlling the difference between :math:`z` and :math:`Lx`
    eps : :obj:`float`
        Regularization parameter
    x0 : :obj:`numpy.ndarray`, optional
        Initial guess
    adaptive : :obj:`bool`, optional
        Whether to use adaptive SR3 with a stopping criterion for the inner
        iterations
    iter_lim_outer : :obj:`int`, optional
        Maximum number of iterations for the outer iteration
    iter_lim_inner : :obj:`int`, optional
        Maximum number of iterations for the inner iteration

    Returns
    -------
    x : :obj:`numpy.ndarray`
        Approximate solution.

    Notes
    -----

    """
    (m, n) = Op.shape
    if x0 is None:
        x0 = np.zeros(n)
    x = x0.copy()
    p = Reg.shape[0]
    v = np.zeros(p)
    w = v
    eta = 1 / kappa
    theta = 1
    AL = pylops.VStack([Op, np.sqrt(kappa) * Reg])
    for i in range(iter_lim_outer):
        # Compute the inner iteration
        if adaptive:
            x = _lsqr(AL, np.concatenate((data, np.sqrt(kappa) * v)),
                      iter_lim_inner, v, x, kappa, eps, Reg)
        else:
            x = sp_lsqr(AL,
                        np.concatenate((data, np.sqrt(kappa) * v)),
                        iter_lim=iter_lim_inner,
                        x0=x)[0]
        # Compute the outer iteration
        w_old = w
        temp = Reg.matvec(x)
        w = np.sign(temp) * np.maximum(abs(temp) - eta * eps, 0)
        err1 = np.linalg.norm(v - w) / max(1, np.linalg.norm(w))
        if err1 < 1e-6:
            return x
        theta = 2 / (1 + np.sqrt(1 + 4 / (theta**2)))
        v = w + (1 - theta) * (w - w_old)
    return x
Example #7
D1op_hand = pylops.Diagonal(coeff1)
D2op_hand = pylops.Diagonal(coeff2)

D1_hand_fre = D1op_hand * fre_sqz
D2_hand_fre = D2op_hand * fre_sqz

D1_hand = F.H * D1_hand_fre
D2_hand = F.H * D2_hand_fre

# solve the linear equations
D2_dec = np.real(R * (D2_hand))
D1_dec = np.real(R * (D1_hand))
D_dec = R * ND.flatten()

Forward2 = pylops.VStack([
    R * mask_2 * Slid, R * mask_2 * F.H * D1op_hand * F * Slid,
    R * mask_2 * F.H * D2op_hand * F * Slid
])
rhs2 = np.concatenate((D_dec, D1_dec, D2_dec), axis=0)

####################
### LSQR solver ####
####################
xinv_ = pylops.optimization.leastsquares.RegularizedInversion(
    Forward2, [], rhs2, damp=0, iter_lim=400, show=0
)
# xista, niteri, costi = \
#     pylops.optimization.sparsity.FISTA(Forward2, rhs2, niter=200, eps=1e-4,
#                                       tol=1e-5, returninfo=True)
####################
### SPGL1 solver ###
####################
Example #8
R = pylops.Restriction(N, idx)

D_dec = R * ND.flatten()
D_adj = (R.H * D_dec).reshape(nt, nr, ns)
D1_r_dec = np.real(R * (D1_r.flatten()))
D1_s_dec = np.real(R * (D1_s.flatten()))
D2_r_dec = np.real(R * (D2_r.flatten()))
D2_s_dec = np.real(R * (D2_s.flatten()))
D1_rs_dec = np.real(R * (D1_rs.flatten()))
D1sD2r_dec = np.real(R * (D1sD2r.flatten()))
D2sD1r_dec = np.real(R * (D2sD1r.flatten()))
D2sD2r_dec = np.real(R * (D2sD2r.flatten()))

Forward = pylops.VStack([
    R * F.H, R * F.H * D1op_r, R * F.H * D1op_s, R * F.H * D1op_r * D1op_s,
    R * F.H * D2op_r, R * F.H * D2op_s, R * F.H * D1op_s * D2op_r,
    R * F.H * D2op_s * D1op_r, R * F.H * D2op_s * D2op_r
])

rhs = np.concatenate((D_dec, D1_r_dec, D1_s_dec, D1_rs_dec, D2_r_dec, D2_s_dec,
                      D1sD2r_dec, D2sD1r_dec, D2sD2r_dec),
                     axis=0)

it = 1000
xinv_ = pylops.optimization.leastsquares.RegularizedInversion(
    Forward, [], rhs, damp=0, iter_lim=it, show=0
)

xinv = scail * F.H * xinv_
xinv = np.real(xinv.reshape(nt, nr, ns))
xinv_fre = np.fft.fftshift(np.fft.fftn(xinv))
Example #9
def trap_phase_2(image_vecs_medsub, model_vecs, temporal_basis,
                 trap_params: TrapParams):
    xp = core.get_array_module(image_vecs_medsub)
    was_gpu_array = xp is cp
    timers = {}
    flat_model_vecs = model_vecs.ravel()
    if trap_params.scale_model_std:
        model_coeff_scale = xp.std(flat_model_vecs)
        flat_model_vecs /= model_coeff_scale
    else:
        model_coeff_scale = 1
    if trap_params.force_gpu_fit:
        temporal_basis = cp.asarray(temporal_basis)
        flat_model_vecs = cp.asarray(flat_model_vecs)
    operator_block_diag = [temporal_basis.T] * image_vecs_medsub.shape[0]
    opstack = [
        pylops.BlockDiag(operator_block_diag),
    ]
    if trap_params.incorporate_offset:
        if trap_params.background_split_mask is not None:
            left_mask_vec = trap_params.background_split_mask
            left_mask_megavec = np.repeat(left_mask_vec[:, np.newaxis],
                                          model_vecs.shape[1]).ravel()
            assert len(left_mask_megavec) == len(flat_model_vecs)
            left_mask_megavec = left_mask_megavec[np.newaxis, :].astype(
                flat_model_vecs.dtype)
            left_mask_megavec = left_mask_megavec - left_mask_megavec.mean()
            left_mask_megavec /= np.linalg.norm(left_mask_megavec)
            # "ones" for left side pixels -> fit constant offset for left psf
            opstack.append(xp.asarray(left_mask_megavec))
            # "ones" for right side pixels -> fit constant offset for right psf
            right_mask_megavec = -1 * left_mask_megavec
            opstack.append(xp.asarray(right_mask_megavec))
        else:
            background_megavec = np.ones_like(flat_model_vecs[xp.newaxis, :])
            background_megavec /= np.linalg.norm(background_megavec)
            opstack.append(xp.asarray(background_megavec))
    opstack.append(flat_model_vecs[xp.newaxis, :])
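    # Each opstack entry acts as a row block; stacking them vertically and
    # transposing yields a design matrix whose columns correspond to the
    # per-pixel temporal-basis terms, the optional background/offset terms,
    # and the planet model amplitude.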
    op = pylops.VStack(opstack).transpose()
    log.debug(f"TRAP operator: {op}")

    image_megavec = image_vecs_medsub.ravel()
    log.debug(
        f"Performing inversion on A.shape={op.shape} and b={image_megavec.shape}"
    )
    timers['invert'] = time.perf_counter()
    if trap_params.use_cgls:
        solver = pylops.optimization.solver.cgls
        log.debug(f"{solver=}")
        solver_kwargs = dict(damp=trap_params.damp, tol=trap_params.tol)
        cgls_result = solver(op, image_megavec, xp.zeros(int(op.shape[1])),
                             **solver_kwargs)
        xinv = cgls_result[0]
    else:
        soln = lsqr.lsqr(
            op,
            image_megavec,
            x0=None,
            atol=trap_params.tol,
            damp=trap_params.damp,
            # show=True,
            # calc_var=True,
        )
        xinv = soln[0]
        # import matplotlib.pyplot as plt
        # plt.plot(soln[-1])
        # plt.yscale('log')
    timers['invert'] = time.perf_counter() - timers['invert']
    log.debug(f"Finished inversion in {timers['invert']} sec")
    if core.get_array_module(xinv) is cp:
        model_coeff = float(xinv.get()[-1])
    else:
        model_coeff = float(xinv[-1])
    model_coeff = model_coeff / model_coeff_scale
    # return model_coeff, timers
    if trap_params.compute_residuals:
        solnvec = xinv
        solnvec[-1] = 0  # zero planet model contribution
        log.debug(f"Constructing starlight estimate from fit vector")
        timers['subtract'] = time.perf_counter()
        estimate_vecs = op.dot(solnvec).reshape(image_vecs_medsub.shape)
        if core.get_array_module(
                image_vecs_medsub) is not core.get_array_module(estimate_vecs):
            image_vecs_medsub = core.get_array_module(estimate_vecs).asarray(
                image_vecs_medsub)
        resid_vecs = image_vecs_medsub - estimate_vecs
        if core.get_array_module(resid_vecs) is cp and not was_gpu_array:
            resid_vecs = resid_vecs.get()
        timers['subtract'] = time.perf_counter() - timers['subtract']
        log.debug(f"Starlight subtracted in {timers['subtract']} sec")
        return model_coeff, timers, resid_vecs
    else:
        return model_coeff, timers, None