def sparse_rec_condatvu(gradient_op, linear_op, std_est=None,
                        std_est_method=None, std_thr=2., mu=1e-6, tau=None,
                        sigma=None, relaxation_factor=1.0, nb_of_reweights=1,
                        max_nb_of_iter=150, add_positivity=False, atol=1e-4,
                        verbose=0):
    """ The Condat-Vu sparse reconstruction with reweightings.

    .. note:: At the moment, supports only 2D data.

    Parameters
    ----------
    gradient_op: instance of a gradient operator
        the gradient operator; the observations are expected in Fourier
        space (must expose 'fourier_op', 'MtX', 'y' and 'spec_rad').
    linear_op: instance of a linear operator
        the linear operator: seek the sparsity, ie. a wavelet transform.
    std_est: float, default None
        the noise std estimate.
        If None use the MAD as a consistent estimator for the std.
    std_est_method: str, default None
        if the standard deviation is not set, estimate this parameter using
        the mad routine in the image ('primal') or in the sparse wavelet
        decomposition ('dual') domain.
    std_thr: float, default 2.
        use this treshold expressed as a number of sigma in the residual
        proximity operator during the thresholding.
    mu: float, default 1e-6
        regularization hyperparameter.
    tau, sigma: float, default None
        parameters of the Condat-Vu proximal-dual splitting algorithm.
        If None estimates these parameters.
    relaxation_factor: float, default 1.0
        parameter of the Condat-Vu proximal-dual splitting algorithm.
        If 1, no relaxation.
    nb_of_reweights: int, default 1
        the number of reweightings.
    max_nb_of_iter: int, default 150
        the maximum number of iterations in the Condat-Vu proximal-dual
        splitting algorithm.
    add_positivity: bool, default False
        by setting this option, set the proximity operator to identity or
        positive.
    atol: float, default 1e-4
        tolerance threshold for convergence.
    verbose: int, default 0
        the verbosity level.

    Returns
    -------
    x_final: ndarray
        the estimated CONDAT-VU solution.
    transform: a WaveletTransformBase derived instance
        the wavelet transformation instance.
    """
    # Check inputs
    # time.clock() was removed in Python 3.8; perf_counter gives wall time.
    start = time.perf_counter()
    if std_est_method not in (None, "primal", "dual"):
        raise ValueError(
            "Unrecognize std estimation method '{0}'.".format(std_est_method))

    # Define the initial primal and dual solutions
    # (np.complex alias was removed in NumPy 1.24; builtin complex is the
    # documented replacement and maps to complex128.)
    x_init = np.zeros(gradient_op.fourier_op.shape, dtype=complex)
    weights = linear_op.op(x_init)

    # Define the weights used during the thresholding in the dual domain,
    # the reweighting strategy, and the prox dual operator

    # Case1: estimate the noise std in the image domain
    if std_est_method == "primal":
        if std_est is None:
            std_est = sigma_mad(gradient_op.MtX(gradient_op.y))
        weights[...] = std_thr * std_est
        reweight_op = cwbReweight(weights)
        prox_dual_op = Threshold(reweight_op.weights)

    # Case2: estimate the noise std in the sparse wavelet domain
    elif std_est_method == "dual":
        if std_est is None:
            std_est = 0.0
        weights[...] = std_thr * std_est
        reweight_op = mReweight(weights, linear_op, thresh_factor=std_thr)
        prox_dual_op = Threshold(reweight_op.weights)

    # Case3: manual regularization mode, no reweighting
    else:
        weights[...] = mu
        reweight_op = None
        prox_dual_op = Threshold(weights)
        nb_of_reweights = 0

    # Define the Condat Vu optimizer: define the tau and sigma in the
    # Condat-Vu proximal-dual splitting algorithm if not already provided.
    # Check also that the combination of values will lead to convergence.
    norm = linear_op.l2norm(gradient_op.fourier_op.shape)
    lipschitz_cst = gradient_op.spec_rad
    if sigma is None:
        sigma = 0.5
    if tau is None:
        # to avoid numerics troubles with the convergence bound
        eps = 1.0e-8
        # due to the convergence bound
        tau = 1.0 / (lipschitz_cst / 2 + sigma * norm**2 + eps)
    convergence_test = (
        1.0 / tau - sigma * norm**2 >= lipschitz_cst / 2.0)

    # Define initial primal and dual solutions
    primal = np.zeros(gradient_op.fourier_op.shape, dtype=complex)
    dual = linear_op.op(primal)
    dual[...] = 0.0

    # Welcome message
    if verbose > 0:
        print(condatvu_logo())
        print(" - mu: ", mu)
        print(" - lipschitz constant: ", gradient_op.spec_rad)
        print(" - tau: ", tau)
        print(" - sigma: ", sigma)
        print(" - rho: ", relaxation_factor)
        print(" - std: ", std_est)
        print(" - 1/tau - sigma||L||^2 >= beta/2: ", convergence_test)
        print(" - data: ", gradient_op.obs_data.shape)
        print(" - max iterations: ", max_nb_of_iter)
        print(" - number of reweights: ", nb_of_reweights)
        print(" - primal variable shape: ", primal.shape)
        print(" - dual variable shape: ", dual.shape)
        print("-" * 40)

    # Define the proximity operator
    if add_positivity:
        prox_op = Positivity()
    else:
        prox_op = Identity()

    # Define the cost function
    # Pass the user-supplied convergence tolerance through: 'atol' was
    # previously accepted but ignored (the tolerance was hard-coded to the
    # same default 1e-4, so behavior is unchanged at the default).
    cost_op = DualGapCost(
        linear_op=linear_op,
        initial_cost=1e6,
        tolerance=atol,
        cost_interval=1,
        test_range=4,
        verbose=0,
        plot_output=None)

    # Define the optimizer
    opt = Condat(
        x=primal,
        y=dual,
        grad=gradient_op,
        prox=prox_op,
        prox_dual=prox_dual_op,
        linear=linear_op,
        cost=cost_op,
        rho=relaxation_factor,
        sigma=sigma,
        tau=tau,
        rho_update=None,
        sigma_update=None,
        tau_update=None,
        auto_iterate=False)

    # Perform the first reconstruction
    if verbose > 0:
        print("Starting optimization...")
    for i in range(max_nb_of_iter):
        opt._update()
    opt.x_final = opt._x_new
    opt.y_final = opt._y_new

    # Loop through the number of reweightings
    for reweight_index in range(nb_of_reweights):

        # Generate the new weights following reweighting prescription
        if std_est_method == "primal":
            reweight_op.reweight(linear_op.op(opt._x_new))
        else:
            std_est = reweight_op.reweight(opt._x_new)

        # Welcome message
        if verbose > 0:
            print(" - reweight: ", reweight_index + 1)
            print(" - std: ", std_est)

        # Update the weights in the dual proximity operator
        prox_dual_op.weights = reweight_op.weights

        # Perform optimisation with new weights
        opt.iterate(max_iter=max_nb_of_iter)

    # Goodbye message
    end = time.perf_counter()
    if verbose > 0:
        print(" - final iteration number: ", cost_op._iteration)
        print(" - final cost value: ", cost_op.cost)
        print(" - converged: ", opt.converge)
        print("Done.")
        print("Execution time: ", end - start, " seconds")
        print("-" * 40)

    # Get the final solution
    x_final = opt.x_final
    linear_op.transform.analysis_data = unflatten(
        opt.y_final, linear_op.coeffs_shape)

    return x_final, linear_op.transform
def sparse_rec_condatvu(gradient_op, linear_op, prox_dual_op, cost_op,
                        std_est=None, std_est_method=None, std_thr=2.,
                        mu=1e-6, tau=None, sigma=None, relaxation_factor=1.0,
                        nb_of_reweights=1, max_nb_of_iter=150,
                        add_positivity=False, atol=1e-4, metric_call_period=5,
                        metrics=None, verbose=0):
    """ The Condat-Vu sparse reconstruction with reweightings.

    .. note:: At the moment, tested only with 2D data.

    Parameters
    ----------
    gradient_op: instance of class GradBase
        the gradient operator.
    linear_op: instance of LinearBase
        the linear operator: seek the sparsity, ie. a wavelet transform.
    prox_dual_op: instance of ProximityParent
        the proximal dual operator.
    cost_op: instance of costObj
        the cost function used to check for convergence during the
        optimization.
    std_est: float, default None
        the noise std estimate.
        If None use the MAD as a consistent estimator for the std.
    std_est_method: str, default None
        if the standard deviation is not set, estimate this parameter using
        the mad routine in the image ('primal') or in the sparse wavelet
        decomposition ('dual') domain.
    std_thr: float, default 2.
        use this treshold expressed as a number of sigma in the residual
        proximity operator during the thresholding.
    mu: float, default 1e-6
        regularization hyperparameter.
    tau, sigma: float, default None
        parameters of the Condat-Vu proximal-dual splitting algorithm.
        If None estimates these parameters.
    relaxation_factor: float, default 1.0
        parameter of the Condat-Vu proximal-dual splitting algorithm.
        If 1, no relaxation.
    nb_of_reweights: int, default 1
        the number of reweightings.
    max_nb_of_iter: int, default 150
        the maximum number of iterations in the Condat-Vu proximal-dual
        splitting algorithm.
    add_positivity: bool, default False
        by setting this option, set the proximity operator to identity or
        positive.
    atol: float, default 1e-4
        tolerance threshold for convergence (note: convergence is checked by
        the supplied 'cost_op'; this value is not used by the body itself).
    metric_call_period: int (default 5)
        the period on which the metrics are compute.
    metrics: dict (optional, default None)
        the list of desired convergence metrics:
        {'metric_name': [@metric, metric_parameter]}.
        See modopt for the metrics API.
    verbose: int, default 0
        the verbosity level.

    Returns
    -------
    x_final: ndarray
        the estimated CONDAT-VU solution.
    transform_output: a WaveletTransformBase derived instance or an array
        the wavelet transformation instance or the transformation
        coefficients.
    costs: list of float
        the cost function values.
    metrics: dict
        the requested metrics values during the optimization.
    """
    # Check inputs
    # time.clock() was removed in Python 3.8; perf_counter gives wall time.
    start = time.perf_counter()
    if std_est_method not in (None, "primal", "dual"):
        raise ValueError(
            "Unrecognize std estimation method '{0}'.".format(std_est_method))

    # Define the initial primal and dual solutions
    # (np.complex alias was removed in NumPy 1.24; builtin complex is the
    # documented replacement and maps to complex128.)
    x_init = np.zeros(gradient_op.fourier_op.shape, dtype=complex)
    weights = linear_op.op(x_init)

    # Define the weights used during the thresholding in the dual domain,
    # the reweighting strategy, and the prox dual operator

    # Case1: estimate the noise std in the image domain
    if std_est_method == "primal":
        if std_est is None:
            # BUGFIX: the original called sigma_mad(gradient_op.MtX(data))
            # but 'data' is not defined in this function (NameError); use
            # the observed data held by the gradient operator instead.
            # NOTE(review): assumes the gradient operator exposes
            # 'obs_data' (modopt GradBasic convention) -- confirm against
            # the operator actually passed in.
            std_est = sigma_mad(gradient_op.MtX(gradient_op.obs_data))
        weights[...] = std_thr * std_est
        reweight_op = cwbReweight(weights)
        prox_dual_op.weights = reweight_op.weights

    # Case2: estimate the noise std in the sparse wavelet domain
    elif std_est_method == "dual":
        if std_est is None:
            std_est = 0.0
        weights[...] = std_thr * std_est
        reweight_op = mReweight(weights, linear_op, thresh_factor=std_thr)
        prox_dual_op.weights = reweight_op.weights

    # Case3: manual regularization mode, no reweighting
    else:
        weights[...] = mu
        reweight_op = None
        prox_dual_op.weights = weights
        nb_of_reweights = 0

    # Define the Condat Vu optimizer: define the tau and sigma in the
    # Condat-Vu proximal-dual splitting algorithm if not already provided.
    # Check also that the combination of values will lead to convergence.
    norm = linear_op.l2norm(gradient_op.fourier_op.shape)
    lipschitz_cst = gradient_op.spec_rad
    if sigma is None:
        sigma = 0.5
    if tau is None:
        # to avoid numerics troubles with the convergence bound
        eps = 1.0e-8
        # due to the convergence bound
        tau = 1.0 / (lipschitz_cst / 2 + sigma * norm**2 + eps)
    convergence_test = (
        1.0 / tau - sigma * norm**2 >= lipschitz_cst / 2.0)

    # Define initial primal and dual solutions
    primal = np.zeros(gradient_op.fourier_op.shape, dtype=complex)
    dual = linear_op.op(primal)
    dual[...] = 0.0

    # Welcome message
    if verbose > 0:
        print(condatvu_logo())
        print(" - mu: ", mu)
        print(" - lipschitz constant: ", gradient_op.spec_rad)
        print(" - tau: ", tau)
        print(" - sigma: ", sigma)
        print(" - rho: ", relaxation_factor)
        print(" - std: ", std_est)
        print(" - 1/tau - sigma||L||^2 >= beta/2: ", convergence_test)
        print(" - data: ", gradient_op.fourier_op.shape)
        if hasattr(linear_op, "nb_scale"):
            print(" - wavelet: ", linear_op, "-", linear_op.nb_scale)
        print(" - max iterations: ", max_nb_of_iter)
        print(" - number of reweights: ", nb_of_reweights)
        print(" - primal variable shape: ", primal.shape)
        print(" - dual variable shape: ", dual.shape)
        print("-" * 40)

    # Define the proximity operator
    if add_positivity:
        prox_op = Positivity()
    else:
        prox_op = Identity()

    # Define the optimizer
    opt = Condat(
        x=primal,
        y=dual,
        grad=gradient_op,
        prox=prox_op,
        prox_dual=prox_dual_op,
        linear=linear_op,
        cost=cost_op,
        rho=relaxation_factor,
        sigma=sigma,
        tau=tau,
        rho_update=None,
        sigma_update=None,
        tau_update=None,
        auto_iterate=False,
        metric_call_period=metric_call_period,
        metrics=metrics or {})
    # The optimizer may wrap the cost function; report on its own copy.
    cost_op = opt._cost_func

    # Perform the first reconstruction
    if verbose > 0:
        print("Starting optimization...")
    opt.iterate(max_iter=max_nb_of_iter)

    # Loop through the number of reweightings
    for reweight_index in range(nb_of_reweights):

        # Generate the new weights following reweighting prescription
        if std_est_method == "primal":
            reweight_op.reweight(linear_op.op(opt._x_new))
        else:
            std_est = reweight_op.reweight(opt._x_new)

        # Welcome message
        if verbose > 0:
            print(" - reweight: ", reweight_index + 1)
            print(" - std: ", std_est)

        # Update the weights in the dual proximity operator
        prox_dual_op.weights = reweight_op.weights

        # Perform optimisation with new weights
        opt.iterate(max_iter=max_nb_of_iter)

    # Goodbye message
    end = time.perf_counter()
    if verbose > 0:
        if hasattr(cost_op, "cost"):
            print(" - final iteration number: ", cost_op._iteration)
            print(" - final cost value: ", cost_op.cost)
        print(" - converged: ", opt.converge)
        print("Done.")
        print("Execution time: ", end - start, " seconds")
        print("-" * 40)

    # Get the final solution
    x_final = opt.x_final
    if hasattr(linear_op, "transform"):
        linear_op.transform.analysis_data = unflatten(
            opt.y_final, linear_op.coeffs_shape)
        transform_output = linear_op.transform
    else:
        linear_op.coeff = opt.y_final
        transform_output = linear_op.coeff
    if hasattr(cost_op, "cost"):
        costs = cost_op._cost_list
    else:
        costs = None

    return x_final, transform_output, costs, opt.metrics
def sparse_deconv_condatvu(data, psf, n_iter=300, n_reweights=1):
    """Sparse Deconvolution with Condat-Vu

    Deconvolve a 2D image given its PSF, using a wavelet-sparsity prior
    with a positivity constraint and candes-wakin-boyd reweighting.

    Parameters
    ----------
    data : np.ndarray
        Input data, 2D image
    psf : np.ndarray
        Input PSF, 2D image
    n_iter : int, optional
        Maximum number of iterations
    n_reweights : int, optional
        Number of reweightings

    Returns
    -------
    np.ndarray
        deconvolved image
    """
    # Print the algorithm set-up
    print(condatvu_logo())

    # Wavelet filter bank sized to the input image
    wavelet_filters = get_cospy_filters(
        data.shape,
        transform_name='LinearWaveletTransformATrousAlgorithm')

    # Reweighting scheme seeded with the data-driven weights
    reweighter = cwbReweight(get_weights(data, psf, wavelet_filters))

    # Starting points for the primal (image) and dual (coefficients)
    x_primal = np.ones(data.shape)
    y_dual = np.ones(wavelet_filters.shape)

    # Forward model H and its adjoint H^T as PSF convolutions
    def _convolve(image):
        return psf_convolve(image, psf)

    def _convolve_adjoint(image):
        return psf_convolve(image, psf, psf_rot=True)

    grad = GradBasic(data, _convolve, _convolve_adjoint)

    # Sparsifying linear operator
    wavelet_op = WaveletConvolve2(wavelet_filters)

    # Proximity operators: positivity on the primal, soft threshold on the
    # dual (shares the reweighter's weight array, presumably updated
    # in place by reweight())
    positivity = Positivity()
    sparse_thresh = SparseThreshold(wavelet_op, reweighter.weights)

    # Cost function monitoring all three terms
    cost = costObj(
        [grad, positivity, sparse_thresh],
        tolerance=1e-6,
        cost_interval=1,
        plot_output=True,
        verbose=False)

    # Condat-Vu primal-dual solver
    optimizer = Condat(
        x_primal,
        y_dual,
        grad,
        positivity,
        sparse_thresh,
        wavelet_op,
        cost,
        rho=0.8,
        sigma=0.5,
        tau=0.5,
        auto_iterate=False)

    # First solve, then one extra solve per reweighting pass
    optimizer.iterate(max_iter=n_iter)
    for pass_idx in range(n_reweights):
        print(' - Reweighting: {}'.format(pass_idx + 1))
        reweighter.reweight(wavelet_op.op(optimizer.x_final))
        optimizer.iterate(max_iter=n_iter)

    # Deconvolved image
    return optimizer.x_final