        # Calculate complex reordering
        idx_bu = bulk_up(prior, sparsify, unsparsify, k)
        idx_bu = idx_bu + 1j * idx_bu
        pbar.update()
        reorder_bu = lambda x: idx_bu

        # Calculate real/imag reordering
        idx_r = whittle_down(prior.real, sparsify, unsparsify, 1 - k)
        pbar.update()
        idx_i = whittle_down(prior.imag, sparsify, unsparsify, 1 - k)
        pbar.update()
        idx_wd = idx_r + 1j * idx_i
        reorder_wd = lambda x: idx_wd

        # Do 2d reordering
        _, reordering_r = sort2d(prior.real)
        _, reordering_i = sort2d(prior.imag)
        idx_ro = reordering_r + 1j * reordering_i
        reorder_ro = lambda x: idx_ro

        ## Col/row-wise seem to do the best in general.
        # Column wise reordering
        idx_r = colwise(prior.real)
        idx_i = colwise(prior.imag)
        idx_cw = idx_r + 1j * idx_i
        reorder_cw = lambda x: idx_cw

        # Row wise reordering
        idx_r = rowwise(prior.real)
        idx_i = rowwise(prior.imag)
        idx_rw = idx_r + 1j * idx_i
        reorder_rw = lambda x: idx_rw
Example #2
        inverse_fun=lambda x0: np.fft.ifft2(x0),  #uft.inverse,
        sparsify=sparsify,
        unsparsify=unsparsify,
        reorder_fun=None,
        alpha=.05,
        thresh_sep=thresh_sep,
        x=im,
        ignore_residual=ignore_residual,
        ignore_mse=ignore_mse,
        ignore_ssim=ignore_ssim,
        disp=disp,
        maxiter=maxiter,
        strikes=strikes)

    # Do recon doing 2d monotonic sort
    idx_mono = sort2d(recon.real)[1] + 1j * (sort2d(recon.imag)[1])
    # from mr_utils.utils import avg_patch_vals as avp
    # idx_mono_r = sort2d(avp(recon.real))[1]
    # idx_mono_i = sort2d(avp(recon.imag))[1]
    # idx_mono = idx_mono_r + 1j*idx_mono_i
    recon_mono = proximal_GD(
        kspace_u.copy(),
        forward_fun=lambda x0: np.fft.fft2(x0) * samp,  #uft.forward,
        inverse_fun=lambda x0: np.fft.ifft2(x0),  #uft.inverse,
        sparsify=sparsify,
        unsparsify=unsparsify,
        reorder_fun=lambda x0: idx_mono,
        alpha=.03,
        thresh_sep=thresh_sep,
        x=im,
        ignore_residual=ignore_residual,
        ignore_mse=ignore_mse,
        ignore_ssim=ignore_ssim,
        disp=disp,
        maxiter=maxiter,
        strikes=strikes)
Example #3
    print(res)
    # assert not np.allclose(res['x'], cs[idx])
    H3 = make_hist(res['x'], idx)

    # We can get a bit closer with clever choice of histogram metric
    plt.plot(H1, '--', label='Target')
    plt.plot(H2, '.', label='Truncated')
    plt.plot(H3, '.', label='Fit')
    plt.legend()
    plt.show()

    # Could we pick better indices?  Try sorting in 2-dimensions, then
    # truncating.  Obviously, the histograms between sorted and unsorted x will
    # be the same
    from mr_utils.utils.sort2d import sort2d
    x_2d, idx_2d = sort2d(x)
    wvlt_2d = T(x_2d).flatten()
    assert np.count_nonzero(wvlt_2d) > k, \
        'We are trying to do better than sort2d!'
    idx = np.argsort(-np.abs(wvlt_2d))[:k]
    cs = wvlt_2d[idx]
    H4 = make_hist(cs, idx)
    plt.plot(H1)
    plt.plot(H4)
    plt.show()
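
    # Quick check of the claim above: a permutation such as the 2D sort
    # changes only the ordering of x, so the histogram of x itself is
    # unchanged (only the histogram of its sparse coefficients differs).
    H_orig, edges = np.histogram(np.real(x).flatten(), bins=32)
    H_sort, _ = np.histogram(np.real(x_2d).flatten(), bins=edges)
    assert np.array_equal(H_orig, H_sort)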

    # Try the same tuning technique
    x0 = cs.copy()
    res = least_squares(
        lambda y: H_metric(H1, make_hist(y, idx), H_metrics[h_met]), x0)
    print(res)
Example #4
def reorder_every_iter(x_hat):
    '''Update reordering every iteration based on current estimate.'''
    _, reordering_r = sort2d(x_hat.real)
    _, reordering_i = sort2d(x_hat.imag)
    return reordering_r + 1j * reordering_i
Example #5
def reorder_once(_x_hat):
    '''Compute the reordering once from the inverse transform of the measured data.'''
    _, reordering_r = sort2d(uft.inverse_ortho(y).real)
    _, reordering_i = sort2d(uft.inverse_ortho(y).imag)
    reordering = reordering_r + 1j * reordering_i
    return reordering
Example #6
def true_reorder(_x_hat):
    '''True reordering.'''
    _, reordering_r = sort2d(x.real)
    _, reordering_i = sort2d(x.imag)
    reordering = reordering_r + 1j * reordering_i
    return reordering
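
# A minimal sketch of the complex "reordering" convention these callbacks
# return (inferred from how GD_TV applies such indices further down; treat
# it as an assumption, not a documented contract): the real part of the
# index array permutes the flattened real channel, the imaginary part
# permutes the flattened imaginary channel, and inverse_permutation undoes
# each one.
import numpy as np
from mr_utils.utils.sort2d import sort2d
from mr_utils.utils.orderings import inverse_permutation

x0 = np.random.randn(8, 8) + 1j*np.random.randn(8, 8)
_, ridx = sort2d(x0.real)
_, iidx = sort2d(x0.imag)
reordering = ridx + 1j*iidx  # same packing as the callbacks above

# Unpack and apply the permutations channel-by-channel
ridx = reordering.real.astype(int)
iidx = reordering.imag.astype(int)
xr = x0.real.flatten()[ridx].reshape(x0.shape)
xi = x0.imag.flatten()[iidx].reshape(x0.shape)

# Undo the reordering with the inverse permutations
x_back = xr.flatten()[inverse_permutation(ridx)].reshape(x0.shape) \
    + 1j*xi.flatten()[inverse_permutation(iidx)].reshape(x0.shape)
assert np.allclose(x_back, x0)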
Example #7
                             unsparsify,
                             reorder_fun=None,
                             mode='soft',
                             alpha=alpha0,
                             thresh_sep=True,
                             selective=None,
                             x=x,
                             ignore_residual=False,
                             disp=True,
                             maxiter=500)
    view(recon_none)

    if 'monosort' in run:
        # We need to find the best alpha for the monotonically sorted
        # recon, use the recon_none as the CS reconstruction prior
        _, reordering_r = sort2d(recon_none.real)
        _, reordering_i = sort2d(recon_none.imag)
        idx_ro = reordering_r + 1j * reordering_i
        monosort = lambda x: idx_ro
        # alpha = 0.05
        # pGD = partial(
        #     proximal_GD, y=kspace_u, forward_fun=uft.forward_ortho,
        #     inverse_fun=uft.inverse_ortho, sparsify=sparsify,
        #     unsparsify=unsparsify, reorder_fun=monosort,
        #     mode='soft', thresh_sep=True, selective=None, x=x,
        #     ignore_residual=False, disp=False, maxiter=200)
        # obj = lambda alpha0: compare_mse(
        #     np.abs(x), np.abs(pGD(alpha=alpha0)))
        # res = minimize(obj, alpha0)
        # print(res)
        # # Best alpha0 = 0.09299786 for 500 iterations, N=64
Example #8
def GD_TV(y,
          forward_fun,
          inverse_fun,
          alpha=.5,
          lam=.01,
          do_reordering=False,
          x=None,
          ignore_residual=False,
          disp=False,
          maxiter=200):
    r'''Gradient descent for a generic encoding model and TV constraint.

    Parameters
    ==========
    y : array_like
        Measured data (i.e., y = Ax).
    forward_fun : callable
        A, the forward transformation function.
    inverse_fun : callable
        A^H, the inverse transformation function.
    alpha : float, optional
        Step size.
    lam : float, optional
        TV constraint weight.
    do_reordering : bool, optional
        Whether or not to reorder for sparsity constraint.
    x : array_like, optional
        The true image we are trying to reconstruct.
    ignore_residual : bool, optional
        Whether or not to break out of the loop if the residual increases.
    disp : bool, optional
        Whether or not to display iteration info.
    maxiter : int, optional
        Maximum number of iterations.

    Returns
    =======
    x_hat : array_like
        Estimate of x.

    Notes
    =====
    Solves the problem:

    .. math::

        \min_x || y - Ax ||^2_2  + \lambda \text{TV}(x)

    If `x=None`, then MSE will not be calculated.
    '''

    # Make sure compare_mse is defined
    if x is None:
        compare_mse = lambda xx, yy: 0
        logging.info('No true x provided, MSE will not be calculated.')
        xabs = 0
    else:
        from skimage.measure import compare_mse
        xabs = np.abs(x)  # Precompute absolute value of true image

        # Get the reordering indices ready, both for real and imag parts
        if do_reordering:
            from mr_utils.utils.sort2d import sort2d
            from mr_utils.utils.orderings import inverse_permutation
            _, reordering_r = sort2d(x.real)
            _, reordering_i = sort2d(x.imag)
            inverse_reordering_r = inverse_permutation(reordering_r)
            inverse_reordering_i = inverse_permutation(reordering_i)

    # Get some display stuff happening
    if disp:
        from mr_utils.utils.printtable import Table
        table = Table(['iter', 'norm', 'MSE'], [len(repr(maxiter)), 8, 8],
                      ['d', 'e', 'e'])
        hdr = table.header()
        for line in hdr.split('\n'):
            logging.info(line)

    # Initialize
    x_hat = np.zeros(y.shape, dtype=y.dtype)
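    # x_hat starts at zero, so the initial residual A(x_hat) - y is just -y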
    r = -y.copy()
    prev_stop_criteria = np.inf
    norm_y = np.linalg.norm(y)

    # Do the thing
    for ii in range(int(maxiter)):

        # Fidelity term
        fidelity = inverse_fun(r)

        # Let's reorder if we said that was going to be a thing
        if do_reordering:
            # real part
            xr = x_hat.real.flatten()[reordering_r].reshape(x.shape)
            second_term_r = dTV(xr).flatten()[inverse_reordering_r] \
                .reshape(x.shape)

            # imag part
            xi = x_hat.imag.flatten()[reordering_i].reshape(x.shape)
            second_term_i = dTV(xi).flatten()[inverse_reordering_i] \
                .reshape(x.shape)

            # put it all together...
            second_term = second_term_r + 1j * second_term_i
        else:
            # Sparsity term
            second_term = dTV(x_hat)

        # Compute stop criteria
        stop_criteria = np.linalg.norm(r) / norm_y
        if not ignore_residual and stop_criteria > prev_stop_criteria:
            logging.warning(('Breaking out of loop after %d iterations. '
                             'Norm of residual increased!'), ii)
            break
        prev_stop_criteria = stop_criteria

        # Take the step
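        # i.e., x_hat <- x_hat - alpha*(A^H(A x_hat - y) + lam*second_term)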
        x_hat -= alpha * (fidelity + lam * second_term)

        # Tell the user what happened
        if disp:
            logging.info(
                table.row(
                    [ii, stop_criteria,
                     compare_mse(np.abs(x_hat), xabs)]))

        # Compute residual
        r = forward_fun(x_hat) - y

    return x_hat
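
# Minimal usage sketch for GD_TV (illustrative only).  The disc phantom,
# random sampling mask, FFT encoding model, and parameter values are
# assumptions chosen for demonstration; dTV (the TV gradient) and numpy
# are expected to be available in GD_TV's module, as above.
import numpy as np

N = 64
xx, yy = np.meshgrid(np.linspace(-1, 1, N), np.linspace(-1, 1, N))
im = (xx**2 + yy**2 < .5).astype(complex)  # simple disc phantom

np.random.seed(0)
samp = np.random.random((N, N)) > .5  # keep roughly half of k-space
forward = lambda x0: np.fft.fft2(x0)*samp
inverse = lambda x0: np.fft.ifft2(x0)

kspace_u = forward(im)
x_hat = GD_TV(kspace_u, forward_fun=forward, inverse_fun=inverse,
              alpha=.5, lam=.01, do_reordering=False, x=None,
              ignore_residual=False, disp=False, maxiter=200)
# Pass x=im instead of x=None to track MSE against the true image.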