Example #1
def test_sigma_mad(self):
    """Test sigma_mad."""
    npt.assert_almost_equal(
        stats.sigma_mad(self.data1),
        2.9651999999999998,
        err_msg='Incorrect sigma from MAD',
    )
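For reference, the asserted value is consistent with the standard MAD-based estimator of the noise standard deviation. A minimal sketch, assuming the usual Gaussian consistency constant k = 1.4826 (about 1/Phi^-1(3/4)); the actual stats.sigma_mad implementation may differ in detail:

import numpy as np

def sigma_mad_sketch(data):
    # Median absolute deviation, rescaled so the result is a consistent
    # estimate of the standard deviation under Gaussian noise.
    return 1.4826 * np.median(np.abs(data - np.median(data)))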
Example #2
    def reweight(self, x_new):
        """ Updat the weights.

        Parameters
        ----------
        x_new: ndarray
            the current primal solution.

        Returns
        -------
        sigma_est: list of float
            the standard deviation estimate at each scale.
        """
        # Decompose the new primal solution; the coefficients are stored
        # on the transform object attached to the linear operator.
        self.linear_op.op(x_new)
        weights = np.empty((0, ), dtype=self.weights.dtype)
        sigma_est = []
        for scale in range(self.linear_op.transform.nb_scale):
            bands_array, _ = flatten(self.linear_op.transform[scale])
            if scale == (self.linear_op.transform.nb_scale - 1):
                # No thresholding on the coarse approximation scale.
                std_at_scale_i = 0.
            else:
                std_at_scale_i = sigma_mad(bands_array)
            sigma_est.append(std_at_scale_i)
            thr = np.ones(bands_array.shape, dtype=weights.dtype)
            thr *= self.thresh_factor * std_at_scale_i
            weights = np.concatenate((weights, thr))
        self.weights = weights
        return sigma_est
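A hedged sketch of how a reweight method like this is typically driven from an outer loop, mirroring the reweighting loop in Example #6 below (opt, reweight_op, dual_regularizer and the loop bounds are assumed to exist in the caller's scope):

# Hypothetical driver loop (names assumed from the caller's scope):
for _ in range(nb_of_reweights):
    # Refresh the weights from the current primal iterate; the per-scale
    # std estimates are returned as a by-product.
    sigma_est = reweight_op.reweight(opt._x_new)
    # Push the new weights into the dual proximity operator and resume.
    dual_regularizer.weights = reweight_op.weights
    opt.iterate(max_iter=max_nb_of_iter)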
Example #3
def get_weights(data, psf, filters, wave_thresh_factor=np.array([3, 3, 4])):
    """Get Sparsity Weights

    Parameters
    ----------
    data : np.ndarray
        Input data, 2D image
    psf : np.ndarray
        Input PSF, 2D image
    filters : np.ndarray
        Wavelet filters
    wave_thresh_factor : np.ndarray, optional
        Threshold factors for each wavelet scale (default is
        np.array([3, 3, 4]))

    Returns
    -------
    np.ndarray
        Sparsity weights

    """

    noise_est = sigma_mad(data)

    filter_conv = filter_convolve(np.rot90(psf, 2), filters)

    filter_norm = np.array([
        np.linalg.norm(a) * b * np.ones(data.shape)
        for a, b in zip(filter_conv, wave_thresh_factor)
    ])

    return noise_est * filter_norm
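To make the shapes concrete, a small self-contained sketch of the weight construction above, with toy stand-ins for the filter responses and the noise estimate (all values hypothetical):

import numpy as np

data_shape = (8, 8)
# Stand-ins for filter_convolve(np.rot90(psf, 2), filters): one response
# per wavelet scale, each with the shape of the image.
filter_conv = [np.ones(data_shape), 2.0 * np.ones(data_shape)]
wave_thresh_factor = np.array([3, 3])
# One scalar l2 norm per scale, scaled by its threshold factor and
# broadcast back to the image shape.
filter_norm = np.array([
    np.linalg.norm(a) * b * np.ones(data_shape)
    for a, b in zip(filter_conv, wave_thresh_factor)
])
noise_est = 1.5   # stand-in for sigma_mad(data)
weights = noise_est * filter_norm   # shape: (n_scales, *data_shape)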
Example #4
def set_noise(data, **kwargs):
    """Set the noise level

    This method calculates the noise standard deviation using the median
    absolute deviation (MAD) of the input data and adds it to the keyword
    arguments.

    Parameters
    ----------
    data : np.ndarray
        Input noisy data (3D array)

    Returns
    -------
    dict
        Updated keyword arguments

    """

    # If the noise estimate is not already provided, calculate it using the MAD
    if kwargs['noise_est'] is None:
        kwargs['noise_est'] = sigma_mad(data)

    print(' - Noise Estimate:', kwargs['noise_est'])
    if 'log' in kwargs:
        kwargs['log'].info(' - Noise Estimate: ' + str(kwargs['noise_est']))

    return kwargs
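A hedged usage sketch: the 'noise_est' key must already be present in the keyword arguments (possibly as None), otherwise the lookup raises a KeyError. Assuming set_noise and sigma_mad are importable:

import numpy as np

rng = np.random.default_rng(0)
cube = rng.normal(scale=2.0, size=(4, 32, 32))   # 3D noisy data

# Pass 'noise_est' explicitly as None so that set_noise fills it in.
kwargs = set_noise(cube, noise_est=None)
# For pure Gaussian noise, kwargs['noise_est'] should be close to 2.0.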
Example #5
def sigma_mad_sparse(grad_op, linear_op):
    """ Estimate the std from the mad routine on each approximation scale.

    Parameters
    ----------
    grad_op: instance
        gradient operator.
    linear_op: instance
        linear operator.

    Returns
    -------
    sigma: list of float
        a list of std estimates, one for each scale.
    """
    linear_op.op(grad_op.grad)
    return [
        sigma_mad(flatten(linear_op.transform[scale])[0])
        for scale in range(linear_op.transform.nb_scale)
    ]
Example #6
def condatvu(gradient_op,
             linear_op,
             dual_regularizer,
             cost_op,
             max_nb_of_iter=150,
             tau=None,
             sigma=None,
             relaxation_factor=1.0,
             x_init=None,
             std_est=None,
             std_est_method=None,
             std_thr=2.,
             nb_of_reweights=1,
             metric_call_period=5,
             metrics={},
             verbose=0):
    """ The Condat-Vu sparse reconstruction with reweightings.

    Parameters
    ----------
    gradient_op: instance of class GradBase
        the gradient operator.
    linear_op: instance of LinearBase
        the linear operator: seek the sparsity, ie. a wavelet transform.
    dual_regularizer: instance of ProximityParent
        the dual regularization operator.
    cost_op: instance of costObj
        the cost function used to check for convergence during the
        optimization.
    max_nb_of_iter: int, default 150
        the maximum number of iterations in the Condat-Vu proximal-dual
        splitting algorithm.
    tau, sigma: float, default None
        parameters of the Condat-Vu proximal-dual splitting algorithm.
        If None, these parameters are estimated automatically.
    relaxation_factor: float, default 1.0
        parameter of the Condat-Vu proximal-dual splitting algorithm.
        If 1, no relaxation.
    x_init: np.ndarray (optional, default None)
        the initial guess of image
    std_est: float, default None
        the noise std estimate.
        If None use the MAD as a consistent estimator for the std.
    std_est_method: str, default None
        if the standard deviation is not set, estimate this parameter using
        the mad routine in the image ('primal') or in the sparse wavelet
        decomposition ('dual') domain.
    std_thr: float, default 2.
        use this threshold, expressed as a number of sigmas, in the residual
        proximity operator during the thresholding.
    nb_of_reweights: int, default 1
        the number of reweightings.
    metric_call_period: int (default 5)
        the period at which the metrics are computed.
    metrics: dict (optional, default {})
        the list of desired convergence metrics: {'metric_name':
        [@metric, metric_parameter]}. See modopt for the metrics API.
    verbose: int, default 0
        the verbosity level.

    Returns
    -------
    x_final: ndarray
        the estimated CONDAT-VU solution.
    costs: list of float
        the cost function values.
    metrics: dict
        the requested metrics values during the optimization.
    y_final: ndarray
        the estimated dual CONDAT-VU solution.
    """
    # Check inputs
    start = time.perf_counter()
    if std_est_method not in (None, "primal", "dual"):
        raise ValueError(
            "Unrecognize std estimation method '{0}'.".format(std_est_method))

    # Define the initial primal and dual solutions
    if x_init is None:
        x_init = np.squeeze(
            np.zeros((linear_op.n_coils, *gradient_op.fourier_op.shape),
                     dtype=np.complex128))
    primal = x_init
    dual = linear_op.op(primal)
    weights = dual  # note: shares memory with the dual variable

    # Define the weights used during the thresholding in the dual domain,
    # the reweighting strategy, and the prox dual operator

    # Case1: estimate the noise std in the image domain
    if std_est_method == "primal":
        if std_est is None:
            std_est = sigma_mad(gradient_op.MtX(gradient_op.obs_data))
        weights[...] = std_thr * std_est
        reweight_op = cwbReweight(weights)
        dual_regularizer.weights = reweight_op.weights

    # Case2: estimate the noise std in the sparse wavelet domain
    elif std_est_method == "dual":
        if std_est is None:
            std_est = 0.0
        weights[...] = std_thr * std_est
        reweight_op = mReweight(weights, linear_op, thresh_factor=std_thr)
        dual_regularizer.weights = reweight_op.weights

    # Case3: manual regularization mode, no reweighting
    else:
        reweight_op = None
        nb_of_reweights = 0

    # Define the Condat Vu optimizer: define the tau and sigma in the
    # Condat-Vu proximal-dual splitting algorithm if not already provided.
    # Check also that the combination of values will lead to convergence.
    norm = linear_op.l2norm(x_init.shape)
    lipschitz_cst = gradient_op.spec_rad
    if sigma is None:
        sigma = 0.5
    if tau is None:
        # to avoid numerics troubles with the convergence bound
        eps = 1.0e-8
        # due to the convergence bound
        tau = 1.0 / (lipschitz_cst / 2 + sigma * norm**2 + eps)
    convergence_test = (1.0 / tau - sigma * norm**2 >= lipschitz_cst / 2.0)

    # Welcome message
    if verbose > 0:
        print(" - mu: ", dual_regularizer.weights)
        print(" - lipschitz constant: ", gradient_op.spec_rad)
        print(" - tau: ", tau)
        print(" - sigma: ", sigma)
        print(" - rho: ", relaxation_factor)
        print(" - std: ", std_est)
        print(" - 1/tau - sigma||L||^2 >= beta/2: ", convergence_test)
        print(" - data: ", gradient_op.fourier_op.shape)
        if hasattr(linear_op, "nb_scale"):
            print(" - wavelet: ", linear_op, "-", linear_op.nb_scale)
        print(" - max iterations: ", max_nb_of_iter)
        print(" - number of reweights: ", nb_of_reweights)
        print(" - primal variable shape: ", primal.shape)
        print(" - dual variable shape: ", dual.shape)
        print("-" * 40)

    prox_op = Identity()

    # Define the optimizer
    opt = Condat(x=primal,
                 y=dual,
                 grad=gradient_op,
                 prox=prox_op,
                 prox_dual=dual_regularizer,
                 linear=linear_op,
                 cost=cost_op,
                 rho=relaxation_factor,
                 sigma=sigma,
                 tau=tau,
                 rho_update=None,
                 sigma_update=None,
                 tau_update=None,
                 auto_iterate=False,
                 metric_call_period=metric_call_period,
                 metrics=metrics)
    cost_op = opt._cost_func

    # Perform the first reconstruction
    if verbose > 0:
        print("Starting optimization...")
    opt.iterate(max_iter=max_nb_of_iter)

    # Loop through the number of reweightings
    for reweight_index in range(nb_of_reweights):

        # Generate the new weights following reweighting prescription
        if std_est_method == "primal":
            reweight_op.reweight(linear_op.op(opt._x_new))
        else:
            std_est = reweight_op.reweight(opt._x_new)

        # Progress message
        if verbose > 0:
            print(" - reweight: ", reweight_index + 1)
            print(" - std: ", std_est)

        # Update the weights in the dual proximity operator
        dual_regularizer.weights = reweight_op.weights

        # Perform optimisation with new weights
        opt.iterate(max_iter=max_nb_of_iter)

    # Goodbye message
    end = time.perf_counter()
    if verbose > 0:
        if hasattr(cost_op, "cost"):
            print(" - final iteration number: ", cost_op._iteration)
            print(" - final cost value: ", cost_op.cost)
        print(" - converged: ", opt.converge)
        print("Done.")
        print("Execution time: ", end - start, " seconds")
        print("-" * 40)

    # Get the final solution
    x_final = opt.x_final
    y_final = opt.y_final
    if hasattr(cost_op, "cost"):
        costs = cost_op._cost_list
    else:
        costs = None

    return x_final, costs, opt.metrics, y_final
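The automatic choice of tau above is constructed to satisfy the Condat-Vu convergence condition 1/tau - sigma * ||L||^2 >= beta/2, where beta is the Lipschitz constant of the gradient. A self-contained numerical check with toy values (purely illustrative):

# Toy values standing in for gradient_op.spec_rad and linear_op.l2norm(...).
lipschitz_cst = 1.0
norm = 1.0
sigma = 0.5
eps = 1.0e-8   # guard against numerical trouble right at the bound
tau = 1.0 / (lipschitz_cst / 2 + sigma * norm ** 2 + eps)
# 1/tau - sigma * ||L||^2 equals beta/2 + eps, so the bound holds.
assert 1.0 / tau - sigma * norm ** 2 >= lipschitz_cst / 2.0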
Example #7
def sparse_rec_condatvu(gradient_op,
                        linear_op,
                        std_est=None,
                        std_est_method=None,
                        std_thr=2.,
                        mu=1e-6,
                        tau=None,
                        sigma=None,
                        relaxation_factor=1.0,
                        nb_of_reweights=1,
                        max_nb_of_iter=150,
                        add_positivity=False,
                        atol=1e-4,
                        verbose=0):
    """ The Condat-Vu sparse reconstruction with reweightings.

    .. note:: At the moment, supports only 2D data.

    Parameters
    ----------
    gradient_op: instance of class GradBase
        the gradient operator.
    linear_op: instance of LinearBase
        the linear operator: seek the sparsity, ie. a wavelet transform.
    std_est: float, default None
        the noise std estimate.
        If None use the MAD as a consistent estimator for the std.
    std_est_method: str, default None
        if the standard deviation is not set, estimate this parameter using
        the mad routine in the image ('primal') or in the sparse wavelet
        decomposition ('dual') domain.
    std_thr: float, default 2.
        use this threshold, expressed as a number of sigmas, in the residual
        proximity operator during the thresholding.
    mu: float, default 1e-6
        regularization hyperparameter.
    tau, sigma: float, default None
        parameters of the Condat-Vu proximal-dual splitting algorithm.
        If None, these parameters are estimated automatically.
    relaxation_factor: float, default 1.0
        parameter of the Condat-Vu proximal-dual splitting algorithm.
        If 1, no relaxation.
    nb_of_reweights: int, default 1
        the number of reweightings.
    max_nb_of_iter: int, default 150
        the maximum number of iterations in the Condat-Vu proximal-dual
        splitting algorithm.
    add_positivity: bool, default False
        by setting this option, set the proximity operator to identity or
        positive.
    atol: float, default 1e-4
        tolerance threshold for convergence.
    verbose: int, default 0
        the verbosity level.

    Returns
    -------
    x_final: ndarray
        the estimated CONDAT-VU solution.
    transform: a WaveletTransformBase derived instance
        the wavelet transformation instance.
    """
    # Check inputs
    # analysis = True
    # if hasattr(gradient_op, 'linear_op'):
    #     analysis = False

    start = time.perf_counter()

    if std_est_method not in (None, "primal", "dual"):
        raise ValueError(
            "Unrecognize std estimation method '{0}'.".format(std_est_method))

    # Define the initial primal and dual solutions
    x_init = np.zeros(gradient_op.fourier_op.shape, dtype=np.complex128)
    weights = linear_op.op(x_init)

    # Define the weights used during the thresholding in the dual domain,
    # the reweighting strategy, and the prox dual operator

    # Case1: estimate the noise std in the image domain
    if std_est_method == "primal":
        if std_est is None:
            std_est = sigma_mad(gradient_op.MtX(gradient_op.y))
        weights[...] = std_thr * std_est
        reweight_op = cwbReweight(weights)
        prox_dual_op = Threshold(reweight_op.weights)

    # Case2: estimate the noise std in the sparse wavelet domain
    elif std_est_method == "dual":
        if std_est is None:
            std_est = 0.0
        weights[...] = std_thr * std_est
        reweight_op = mReweight(weights, linear_op, thresh_factor=std_thr)
        prox_dual_op = Threshold(reweight_op.weights)

    # Case3: manual regularization mode, no reweighting
    else:
        weights[...] = mu
        reweight_op = None
        prox_dual_op = Threshold(weights)
        nb_of_reweights = 0

    # Define the Condat Vu optimizer: define the tau and sigma in the
    # Condat-Vu proximal-dual splitting algorithm if not already provided.
    # Check also that the combination of values will lead to convergence.
    norm = linear_op.l2norm(gradient_op.fourier_op.shape)
    lipschitz_cst = gradient_op.spec_rad
    if sigma is None:
        sigma = 0.5
    if tau is None:
        # to avoid numerics troubles with the convergence bound
        eps = 1.0e-8
        # due to the convergence bound
        tau = 1.0 / (lipschitz_cst / 2 + sigma * norm**2 + eps)
    convergence_test = (1.0 / tau - sigma * norm**2 >= lipschitz_cst / 2.0)

    # Define initial primal and dual solutions
    primal = np.zeros(gradient_op.fourier_op.shape, dtype=np.complex128)
    dual = linear_op.op(primal)
    dual[...] = 0.0

    # Welcome message
    if verbose > 0:
        print(condatvu_logo())
        print(" - mu: ", mu)
        print(" - lipschitz constant: ", gradient_op.spec_rad)
        print(" - tau: ", tau)
        print(" - sigma: ", sigma)
        print(" - rho: ", relaxation_factor)
        print(" - std: ", std_est)
        print(" - 1/tau - sigma||L||^2 >= beta/2: ", convergence_test)
        print(" - data: ", gradient_op.obs_data.shape)
        print(" - max iterations: ", max_nb_of_iter)
        print(" - number of reweights: ", nb_of_reweights)
        print(" - primal variable shape: ", primal.shape)
        print(" - dual variable shape: ", dual.shape)
        print("-" * 40)

    # Define the proximity operator
    if add_positivity:
        prox_op = Positivity()
    else:
        prox_op = Identity()

    # Define the cost function
    cost_op = DualGapCost(linear_op=linear_op,
                          initial_cost=1e6,
                          tolerance=1e-4,
                          cost_interval=1,
                          test_range=4,
                          verbose=0,
                          plot_output=None)

    # Define the optimizer
    opt = Condat(x=primal,
                 y=dual,
                 grad=gradient_op,
                 prox=prox_op,
                 prox_dual=prox_dual_op,
                 linear=linear_op,
                 cost=cost_op,
                 rho=relaxation_factor,
                 sigma=sigma,
                 tau=tau,
                 rho_update=None,
                 sigma_update=None,
                 tau_update=None,
                 auto_iterate=False)

    # Perform the first reconstruction
    if verbose > 0:
        print("Starting optimization...")

    for _ in range(max_nb_of_iter):
        opt._update()

    opt.x_final = opt._x_new
    opt.y_final = opt._y_new

    # Loop through the number of reweightings
    for reweight_index in range(nb_of_reweights):

        # Generate the new weights following reweighting prescription
        if std_est_method == "primal":
            reweight_op.reweight(linear_op.op(opt._x_new))
        else:
            std_est = reweight_op.reweight(opt._x_new)

        # Progress message
        if verbose > 0:
            print(" - reweight: ", reweight_index + 1)
            print(" - std: ", std_est)

        # Update the weights in the dual proximity operator
        prox_dual_op.weights = reweight_op.weights

        # Perform optimisation with new weights
        opt.iterate(max_iter=max_nb_of_iter)

    # Goodbye message
    end = time.perf_counter()
    if verbose > 0:
        print(" - final iteration number: ", cost_op._iteration)
        print(" - final cost value: ", cost_op.cost)
        print(" - converged: ", opt.converge)
        print("Done.")
        print("Execution time: ", end - start, " seconds")
        print("-" * 40)

    # Get the final solution
    x_final = opt.x_final
    linear_op.transform.analysis_data = unflatten(opt.y_final,
                                                  linear_op.coeffs_shape)

    return x_final, linear_op.transform
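Finally, a hedged call sketch (gradient_op and linear_op are operator instances built elsewhere in the package; this shows the call pattern, not a runnable standalone snippet):

# Hypothetical call; keyword values are illustrative only.
x_final, transform = sparse_rec_condatvu(
    gradient_op=gradient_op,    # exposes MtX, spec_rad, fourier_op, y
    linear_op=linear_op,        # wavelet transform operator
    std_est_method="dual",      # per-scale MAD noise estimation
    nb_of_reweights=2,
    max_nb_of_iter=100,
    add_positivity=False,
    verbose=1,
)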