Example 1
    def __init__(self,
                 u,
                 x,
                 y,
                 z,
                 grad,
                 prox,
                 cost='auto',
                 linear=None,
                 beta_param=1.0,
                 sigma_bar=1.0,
                 auto_iterate=True,
                 metric_call_period=5,
                 metrics=None,
                 **kwargs):

        # Avoid a mutable default argument for metrics
        if metrics is None:
            metrics = {}

        # Set default algorithm properties
        super(POGM, self).__init__(metric_call_period=metric_call_period,
                                   metrics=metrics,
                                   linear=linear,
                                   **kwargs)

        # Set the initial variable values
        for input_data in (u, x, y, z):
            self._check_input_data(input_data)
        self._u_old = np.copy(u)
        self._x_old = np.copy(x)
        self._y_old = np.copy(y)
        self._z = np.copy(z)

        # Set the algorithm operators
        for operator in (grad, prox, cost):
            self._check_operator(operator)
        self._grad = grad
        self._prox = prox
        self._linear = linear
        if cost == 'auto':
            self._cost_func = costObj([self._grad, self._prox])
        else:
            self._cost_func = cost
        # If linear is None, make it Identity for call of metrics
        if self._linear is None:
            self._linear = Identity()
        # Set the algorithm parameters
        for param_val in (beta_param, sigma_bar):
            self._check_param(param_val)
        if not (0 <= sigma_bar <= 1):
            raise ValueError('The sigma bar parameter needs to be in [0, 1]')
        self._beta = self.step_size or beta_param
        self._sigma_bar = sigma_bar
        self._xi = self._sigma = self._t_old = 1.0
        self._grad.get_grad(self._x_old)
        self._g_old = self._grad.grad

        # Automatically run the algorithm
        if auto_iterate:
            self.iterate()
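A minimal usage sketch for the constructor above, assuming it is modopt.opt.algorithms.POGM and borrowing the operator setup from Example 4; the identity forward/adjoint pair is illustrative only.

import numpy as np
from modopt.opt.algorithms import POGM
from modopt.opt.gradient import GradBasic
from modopt.opt.proximity import Positivity

data = np.arange(9).reshape(3, 3).astype(float)
grad_op = GradBasic(data, lambda x: x, lambda x: x)  # identity op/trans_op
prox_op = Positivity()

# All four starting points share the same value; cost=None disables cost
# tracking and auto_iterate=False defers the run to an explicit iterate().
pogm = POGM(u=data, x=data, y=data, z=data, grad=grad_op, prox=prox_op,
            cost=None, auto_iterate=False)
pogm.iterate(max_iter=20)
result = pogm.x_final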
Example 2
    def setUp(self):
        """Set test parameter values."""
        dummy_inst1 = Dummy()
        dummy_inst1.cost = func_sq
        dummy_inst2 = Dummy()
        dummy_inst2.cost = func_cube

        self.inst1 = cost.costObj([dummy_inst1, dummy_inst2])
        self.inst2 = cost.costObj([dummy_inst1, dummy_inst2], cost_interval=2)
        # Check that get_cost returns False by default when cost_interval is None
        self.inst_none = cost.costObj(
            [dummy_inst1, dummy_inst2],
            cost_interval=None,
        )
        for _ in range(2):
            self.inst1.get_cost(2)
        for _ in range(6):
            self.inst2.get_cost(2)
            self.inst_none.get_cost(2)
        self.dummy = Dummy()
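A small sketch of the cost_interval behaviour exercised above, assuming modopt.opt.cost and an operator exposing the cost method that costObj requires:

import numpy as np
from modopt.opt import cost

class SquareCost:
    """Toy operator exposing the cost method costObj requires."""
    def cost(self, x):
        return float(np.sum(x ** 2))

inst = cost.costObj([SquareCost()], cost_interval=2, verbose=False)
for call in range(4):
    # get_cost returns False on off-interval calls; when the interval is
    # reached it evaluates the cost and returns the convergence test result.
    print(call, inst.get_cost(2))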
Example 3
    def __init__(
        self,
        x,
        grad,
        prox,
        cost,
        eta=1.0,
        eta_update=None,
        epsilon=1e-6,
        epoch_size=1,
        metric_call_period=5,
        metrics=None,
        **kwargs,
    ):
        # Avoid a mutable default argument for metrics
        if metrics is None:
            metrics = {}
        # Set default algorithm properties
        super().__init__(
            metric_call_period=metric_call_period,
            metrics=metrics,
            **kwargs,
        )
        self.iter = 0

        # Set the initial variable values
        self._check_input_data(x)
        self._x_old = np.copy(x)
        self._x_new = np.copy(x)
        self._speed_grad = np.zeros(x.shape, dtype=float)
        self._dir_grad = np.zeros_like(x)
        # Set the algorithm operators
        for operator in (grad, prox, cost):
            self._check_operator(operator)
        self._grad = grad
        self._prox = prox
        if cost == 'auto':
            self._cost_func = costObj([self._grad, self._prox])
        else:
            self._cost_func = cost
        # Set the algorithm parameters
        for param_val in (eta, epsilon):
            self._check_param(param_val)
        self._eta = eta
        self._eps = epsilon

        # Set the algorithm parameter update methods
        self._check_param_update(eta_update)
        self._eta_update = eta_update
        self.idx = 0
        self.epoch_size = epoch_size
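A usage sketch, assuming this is the generic gradient-descent base class behind the modopt.opt.algorithms optimisers instantiated in Example 4 (e.g. VanillaGenericGradOpt):

import numpy as np
from modopt.opt import algorithms, cost, gradient, proximity

data = np.arange(9).reshape(3, 3).astype(float)
grad_op = gradient.GradBasic(data, lambda x: x, lambda x: x)
prox_op = proximity.Positivity()
cost_op = cost.costObj([grad_op, prox_op], verbose=False)

# No auto_iterate here: the base class defers the run to iterate()
opt = algorithms.VanillaGenericGradOpt(data, grad=grad_op, prox=prox_op,
                                       cost=cost_op)
opt.iterate(max_iter=10)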
Example 4
    def setUp(self):
        """Set test parameter values."""
        self.data1 = np.arange(9).reshape(3, 3).astype(float)
        self.data2 = self.data1 + np.random.randn(*self.data1.shape) * 1e-6
        self.data3 = np.arange(9).reshape(3, 3).astype(float) + 1

        grad_inst = gradient.GradBasic(
            self.data1,
            func_identity,
            func_identity,
        )

        prox_inst = proximity.Positivity()
        prox_dual_inst = proximity.IdentityProx()
        linear_inst = linear.Identity()
        reweight_inst = reweight.cwbReweight(self.data3)
        cost_inst = cost.costObj([grad_inst, prox_inst, prox_dual_inst])
        self.setup = algorithms.SetUp()
        self.max_iter = 20

        self.fb_all_iter = algorithms.ForwardBackward(
            self.data1,
            grad=grad_inst,
            prox=prox_inst,
            cost=None,
            auto_iterate=False,
            beta_update=func_identity,
        )
        self.fb_all_iter.iterate(self.max_iter)

        self.fb1 = algorithms.ForwardBackward(
            self.data1,
            grad=grad_inst,
            prox=prox_inst,
            beta_update=func_identity,
        )

        self.fb2 = algorithms.ForwardBackward(
            self.data1,
            grad=grad_inst,
            prox=prox_inst,
            cost=cost_inst,
            lambda_update=None,
        )

        self.fb3 = algorithms.ForwardBackward(
            self.data1,
            grad=grad_inst,
            prox=prox_inst,
            beta_update=func_identity,
            a_cd=3,
        )

        self.fb4 = algorithms.ForwardBackward(
            self.data1,
            grad=grad_inst,
            prox=prox_inst,
            beta_update=func_identity,
            r_lazy=3,
            p_lazy=0.7,
            q_lazy=0.7,
        )

        self.fb5 = algorithms.ForwardBackward(
            self.data1,
            grad=grad_inst,
            prox=prox_inst,
            restart_strategy='adaptive',
            xi_restart=0.9,
        )

        self.fb6 = algorithms.ForwardBackward(
            self.data1,
            grad=grad_inst,
            prox=prox_inst,
            restart_strategy='greedy',
            xi_restart=0.9,
            min_beta=1.0,
            s_greedy=1.1,
        )

        self.gfb_all_iter = algorithms.GenForwardBackward(
            self.data1,
            grad=grad_inst,
            prox_list=[prox_inst, prox_dual_inst],
            cost=None,
            auto_iterate=False,
            gamma_update=func_identity,
            beta_update=func_identity,
        )
        self.gfb_all_iter.iterate(self.max_iter)

        self.gfb1 = algorithms.GenForwardBackward(
            self.data1,
            grad=grad_inst,
            prox_list=[prox_inst, prox_dual_inst],
            gamma_update=func_identity,
            lambda_update=func_identity,
        )

        self.gfb2 = algorithms.GenForwardBackward(
            self.data1,
            grad=grad_inst,
            prox_list=[prox_inst, prox_dual_inst],
            cost=cost_inst,
        )

        self.gfb3 = algorithms.GenForwardBackward(
            self.data1,
            grad=grad_inst,
            prox_list=[prox_inst, prox_dual_inst],
            cost=cost_inst,
            step_size=2,
        )

        self.condat_all_iter = algorithms.Condat(
            self.data1,
            self.data2,
            grad=grad_inst,
            prox=prox_inst,
            cost=None,
            prox_dual=prox_dual_inst,
            sigma_update=func_identity,
            tau_update=func_identity,
            rho_update=func_identity,
            auto_iterate=False,
        )
        self.condat_all_iter.iterate(self.max_iter)

        self.condat1 = algorithms.Condat(
            self.data1,
            self.data2,
            grad=grad_inst,
            prox=prox_inst,
            prox_dual=prox_dual_inst,
            sigma_update=func_identity,
            tau_update=func_identity,
            rho_update=func_identity,
        )

        self.condat2 = algorithms.Condat(
            self.data1,
            self.data2,
            grad=grad_inst,
            prox=prox_inst,
            prox_dual=prox_dual_inst,
            linear=linear_inst,
            cost=cost_inst,
            reweight=reweight_inst,
        )

        self.condat3 = algorithms.Condat(
            self.data1,
            self.data2,
            grad=grad_inst,
            prox=prox_inst,
            prox_dual=prox_dual_inst,
            linear=Dummy(),
            cost=cost_inst,
            auto_iterate=False,
        )

        self.pogm_all_iter = algorithms.POGM(
            u=self.data1,
            x=self.data1,
            y=self.data1,
            z=self.data1,
            grad=grad_inst,
            prox=prox_inst,
            auto_iterate=False,
            cost=None,
        )
        self.pogm_all_iter.iterate(self.max_iter)

        self.pogm1 = algorithms.POGM(
            u=self.data1,
            x=self.data1,
            y=self.data1,
            z=self.data1,
            grad=grad_inst,
            prox=prox_inst,
        )

        self.vanilla_grad = algorithms.VanillaGenericGradOpt(
            self.data1,
            grad=grad_inst,
            prox=prox_inst,
            cost=cost_inst,
        )
        self.ada_grad = algorithms.AdaGenericGradOpt(
            self.data1,
            grad=grad_inst,
            prox=prox_inst,
            cost=cost_inst,
        )
        self.adam_grad = algorithms.ADAMGradOpt(
            self.data1,
            grad=grad_inst,
            prox=prox_inst,
            cost=cost_inst,
        )
        self.momentum_grad = algorithms.MomentumGradOpt(
            self.data1,
            grad=grad_inst,
            prox=prox_inst,
            cost=cost_inst,
        )
        self.rms_grad = algorithms.RMSpropGradOpt(
            self.data1,
            grad=grad_inst,
            prox=prox_inst,
            cost=cost_inst,
        )
        self.saga_grad = algorithms.SAGAOptGradOpt(
            self.data1,
            grad=grad_inst,
            prox=prox_inst,
            cost=cost_inst,
        )

        self.dummy = Dummy()
        self.dummy.cost = func_identity
        self.setup._check_operator(self.dummy.cost)
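The helpers referenced throughout this setUp are not part of the excerpt; a minimal sketch of what they would need to look like for it to run (names taken from the calls above, bodies assumed):

import numpy as np

def func_identity(data, *args, **kwargs):
    """Return the input unchanged."""
    return data

def func_sq(data, *args, **kwargs):
    """Square the input (dummy cost)."""
    return data ** 2

def func_cube(data, *args, **kwargs):
    """Cube the input (dummy cost)."""
    return data ** 3

class Dummy:
    """Empty placeholder; the tests attach cost/op attributes to instances."""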
Example 5
    def __init__(
        self,
        x,
        grad,
        prox_list,
        cost='auto',
        gamma_param=1.0,
        lambda_param=1.0,
        gamma_update=None,
        lambda_update=None,
        weights=None,
        auto_iterate=True,
        metric_call_period=5,
        metrics=None,
        linear=None,
        **kwargs,
    ):

        # Set default algorithm properties
        super().__init__(
            metric_call_period=metric_call_period,
            metrics=metrics,
            **kwargs,
        )

        # Set the initial variable values
        self._check_input_data(x)
        self._x_old = self.xp.copy(x)

        # Set the algorithm operators
        for operator in [grad, cost] + prox_list:
            self._check_operator(operator)

        self._grad = grad
        self._prox_list = self.xp.array(prox_list)
        self._linear = linear

        if cost == 'auto':
            self._cost_func = costObj([self._grad] + prox_list)
        else:
            self._cost_func = cost

        # Check for a linear operator, which is needed for the metrics in the
        # FB algorithm
        if metrics and self._linear is None:
            raise ValueError(
                'When using metrics, you must pass a linear operator')

        if self._linear is None:
            self._linear = Identity()

        # Set the algorithm parameters
        for param_val in (gamma_param, lambda_param):
            self._check_param(param_val)

        self._gamma = self.step_size or gamma_param
        self._lambda_param = lambda_param

        # Set the algorithm parameter update methods
        for param_update in (gamma_update, lambda_update):
            self._check_param_update(param_update)

        self._gamma_update = gamma_update
        self._lambda_update = lambda_update

        # Set the proximity weights
        self._set_weights(weights)

        # Set initial z
        self._z = self.xp.array(
            [self._x_old for _ in range(self._prox_list.size)])

        # Automatically run the algorithm
        if auto_iterate:
            self.iterate()
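A usage sketch, assuming this is modopt.opt.algorithms.GenForwardBackward as instantiated in Example 4; with weights=None the proximity weights default to a uniform split via _set_weights:

import numpy as np
from modopt.opt import algorithms, gradient, proximity

data = np.arange(9).reshape(3, 3).astype(float)
grad_op = gradient.GradBasic(data, lambda x: x, lambda x: x)

gfb = algorithms.GenForwardBackward(
    data,
    grad=grad_op,
    prox_list=[proximity.Positivity(), proximity.IdentityProx()],
    auto_iterate=False,
)
gfb.iterate(max_iter=20)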
Example 6
    def __init__(
        self,
        x,
        grad,
        prox,
        cost='auto',
        beta_param=1.0,
        lambda_param=1.0,
        beta_update=None,
        lambda_update='fista',
        auto_iterate=True,
        metric_call_period=5,
        metrics=None,
        linear=None,
        **kwargs,
    ):

        # Set default algorithm properties
        super().__init__(
            metric_call_period=metric_call_period,
            metrics=metrics,
            **kwargs,
        )

        # Set the initial variable values
        self._check_input_data(x)
        self._x_old = self.copy_data(x)
        self._z_old = self.copy_data(x)

        # Set the algorithm operators
        for operator in (grad, prox, cost):
            self._check_operator(operator)

        self._grad = grad
        self._prox = prox
        self._linear = linear

        if cost == 'auto':
            self._cost_func = costObj([self._grad, self._prox])
        else:
            self._cost_func = cost

        # Check for a linear operator, which is needed for the metrics in the
        # FB algorithm
        if metrics and self._linear is None:
            raise ValueError(
                'When using metrics, you must pass a linear operator')

        if self._linear is None:
            self._linear = Identity()

        # Set the algorithm parameters
        for param_val in (beta_param, lambda_param):
            self._check_param(param_val)

        self._beta = self.step_size or beta_param
        self._lambda = lambda_param

        # Set the algorithm parameter update methods
        self._check_param_update(beta_update)
        self._beta_update = beta_update
        if isinstance(lambda_update, str) and lambda_update == 'fista':
            fista = FISTA(**kwargs)
            self._lambda_update = fista.update_lambda
            self._is_restart = fista.is_restart
            self._beta_update = fista.update_beta
        else:
            self._check_param_update(lambda_update)
            self._lambda_update = lambda_update
            self._is_restart = lambda *args, **kwargs: False

        # Automatically run the algorithm
        if auto_iterate:
            self.iterate()
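A usage sketch of the 'fista' branch above, assuming this is modopt.opt.algorithms.ForwardBackward; the restart keywords are forwarded through **kwargs to the FISTA instance (the same combination appears in Example 4):

import numpy as np
from modopt.opt import algorithms, gradient, proximity

data = np.arange(9).reshape(3, 3).astype(float)
grad_op = gradient.GradBasic(data, lambda x: x, lambda x: x)

fb = algorithms.ForwardBackward(
    data,
    grad=grad_op,
    prox=proximity.Positivity(),
    restart_strategy='greedy',
    xi_restart=0.9,
    min_beta=1.0,
    s_greedy=1.1,
    auto_iterate=False,
)
fb.iterate(max_iter=20)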
Example 7
    def __init__(
        self,
        x,
        y,
        grad,
        prox,
        prox_dual,
        linear=None,
        cost='auto',
        reweight=None,
        rho=0.5,
        sigma=1.0,
        tau=1.0,
        rho_update=None,
        sigma_update=None,
        tau_update=None,
        auto_iterate=True,
        max_iter=150,
        n_rewightings=1,
        metric_call_period=5,
        metrics=None,
        **kwargs,
    ):

        # Set default algorithm properties
        super().__init__(
            metric_call_period=metric_call_period,
            metrics=metrics,
            **kwargs,
        )

        # Set the initial variable values
        for input_data in (x, y):
            self._check_input_data(input_data)

        self._x_old = self.xp.copy(x)
        self._y_old = self.xp.copy(y)

        # Set the algorithm operators
        for operator in (grad, prox, prox_dual, linear, cost):
            self._check_operator(operator)

        self._grad = grad
        self._prox = prox
        self._prox_dual = prox_dual
        self._reweight = reweight
        if linear is None:
            self._linear = Identity()
        else:
            self._linear = linear
        if cost == 'auto':
            self._cost_func = costObj([
                self._grad,
                self._prox,
                self._prox_dual,
            ])
        else:
            self._cost_func = cost

        # Set the algorithm parameters
        for param_val in (rho, sigma, tau):
            self._check_param(param_val)

        self._rho = rho
        self._sigma = sigma
        self._tau = self.step_size or tau

        # Set the algorithm parameter update methods
        for param_update in (rho_update, sigma_update, tau_update):
            self._check_param_update(param_update)

        self._rho_update = rho_update
        self._sigma_update = sigma_update
        self._tau_update = tau_update

        # Automatically run the algorithm
        if auto_iterate:
            self.iterate(max_iter=max_iter, n_rewightings=n_rewightings)
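A usage sketch, assuming this is modopt.opt.algorithms.Condat as instantiated in Example 4; x is the primal variable, y the dual variable, and prox_dual acts in the dual domain:

import numpy as np
from modopt.opt import algorithms, gradient, proximity

data = np.arange(9).reshape(3, 3).astype(float)
grad_op = gradient.GradBasic(data, lambda x: x, lambda x: x)

condat = algorithms.Condat(
    data,
    np.copy(data),
    grad=grad_op,
    prox=proximity.Positivity(),
    prox_dual=proximity.IdentityProx(),
    auto_iterate=False,
)
condat.iterate(max_iter=20)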
Example 8
    def __init__(self,
                 x,
                 grad,
                 prox,
                 cost='auto',
                 beta_param=1.0,
                 lambda_param=1.0,
                 beta_update=None,
                 lambda_update='fista',
                 auto_iterate=True,
                 metric_call_period=5,
                 metrics=None,
                 linear=None):

        # Avoid a mutable default argument for metrics
        if metrics is None:
            metrics = {}

        # Set default algorithm properties
        super(ForwardBackward,
              self).__init__(metric_call_period=metric_call_period,
                             metrics=metrics,
                             linear=linear)

        # Set the initial variable values
        self._check_input_data(x)
        self._x_old = np.copy(x)
        self._z_old = np.copy(x)

        # Set the algorithm operators
        for operator in (grad, prox, cost):
            self._check_operator(operator)
        self._grad = grad
        self._prox = prox
        self._linear = linear

        if cost == 'auto':
            self._cost_func = costObj([self._grad, self._prox])
        else:
            self._cost_func = cost

        # Check for a linear operator, which is needed for the metrics in the
        # FB algorithm
        if metrics and self._linear is None:
            raise ValueError('When using metrics, you must pass a linear '
                             'operator')

        if self._linear is None:
            self._linear = Identity()

        # Set the algorithm parameters
        for param_val in (beta_param, lambda_param):
            self._check_param(param_val)
        self._beta = beta_param
        self._lambda = lambda_param

        # Set the algorithm parameter update methods
        if isinstance(lambda_update, str) and lambda_update == 'fista':
            self._lambda_update = FISTA().update_lambda
        else:
            self._check_param_update(lambda_update)
            self._lambda_update = lambda_update
        self._check_param_update(beta_update)
        self._beta_update = beta_update

        # Automatically run the algorithm
        if auto_iterate:
            self.iterate()
Example 9
def sparse_deconv_condatvu(data, psf, n_iter=300, n_reweights=1):
    """Sparse Deconvolution with Condat-Vu

    Parameters
    ----------
    data : np.ndarray
        Input data, 2D image
    psf : np.ndarray
        Input PSF, 2D image
    n_iter : int, optional
        Maximum number of iterations
    n_reweights : int, optional
        Number of reweightings

    Returns
    -------
    np.ndarray
        Deconvolved image

    """

    # Print the algorithm set-up
    print(condatvu_logo())

    # Define the wavelet filters
    filters = (get_cospy_filters(
        data.shape, transform_name='LinearWaveletTransformATrousAlgorithm'))

    # Set the reweighting scheme
    reweight = cwbReweight(get_weights(data, psf, filters))

    # Set the initial variable values
    primal = np.ones(data.shape)
    dual = np.ones(filters.shape)

    # Set the gradient operators
    grad_op = GradBasic(data, lambda x: psf_convolve(x, psf),
                        lambda x: psf_convolve(x, psf, psf_rot=True))

    # Set the linear operator
    linear_op = WaveletConvolve2(filters)

    # Set the proximity operators
    prox_op = Positivity()
    prox_dual_op = SparseThreshold(linear_op, reweight.weights)

    # Set the cost function
    cost_op = costObj([grad_op, prox_op, prox_dual_op],
                      tolerance=1e-6,
                      cost_interval=1,
                      plot_output=True,
                      verbose=False)

    # Set the optimisation algorithm
    alg = Condat(primal,
                 dual,
                 grad_op,
                 prox_op,
                 prox_dual_op,
                 linear_op,
                 cost_op,
                 rho=0.8,
                 sigma=0.5,
                 tau=0.5,
                 auto_iterate=False)

    # Run the algorithm
    alg.iterate(max_iter=n_iter)

    # Implement reweighting
    for rw_num in range(n_reweights):
        print(' - Reweighting: {}'.format(rw_num + 1))
        reweight.reweight(linear_op.op(alg.x_final))
        alg.iterate(max_iter=n_iter)

    # Return the final result
    return alg.x_final
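A hypothetical invocation sketch; the shapes are illustrative and psf_convolve, get_cospy_filters and the other helpers are assumed to come from the surrounding package:

import numpy as np

image = np.random.randn(64, 64)
psf = np.ones((64, 64))
psf /= psf.sum()  # normalised flat PSF, purely illustrative
deconvolved = sparse_deconv_condatvu(image, psf, n_iter=50, n_reweights=1)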
Example 10
def polychromatic_psf_field_est_2(im_stack_in, spectrums, wvl, D, opt_shift_est,
                                  nb_comp, field_pos=None, nb_iter=4,
                                  nb_subiter=100, mu=0.3, tol=0.1, sig_supp=3,
                                  sig=None, shifts=None, flux=None,
                                  nsig_shift_est=4, pos_en=True,
                                  simplex_en=False, wvl_en=True, wvl_opt=None,
                                  nsig=3, graph_cons_en=False):
    """ Main LambdaRCA function.
    
    Calls:
    
    * :func:`utils.get_noise_arr`
    * :func:`utils.diagonally_dominated_mat_stack` 
    * :func:`psf_learning_utils.full_displacement` 
    * :func:`utils.im_gauss_nois_est_cube` 
    * :func:`utils.thresholding_3D` 
    * :func:`utils.shift_est` 
    * :func:`utils.shift_ker_stack` 
    * :func:`utils.flux_estimate_stack` 
    * :func:`optim_utils.analysis` 
    * :func:`utils.cube_svd`
    * :func:`grads.polychrom_eigen_psf`
    * :func:`grads.polychrom_eigen_psf_coeff_graph`
    * :func:`grads.polychrom_eigen_psf_coeff`
    * :func:`psf_learning_utils.field_reconstruction`
    * :func:`operators.transport_plan_lin_comb_wavelet`
    * :func:`operators.transport_plan_marg_wavelet`
    * :func:`operators.transport_plan_lin_comb`
    * :func:`operators.transport_plan_lin_comb_coeff`
    * :func:`proxs.simplex_threshold`
    * :func:`proxs.Simplex`
    * :func:`proxs.KThreshold`
    """

    im_stack = copy(im_stack_in)
    if wvl_en:
        from utils import get_noise_arr

    print "--------------- Transport architecture setting ------------------"
    nb_im = im_stack.shape[-1]
    shap_obs = im_stack.shape
    shap = (shap_obs[0] * D, shap_obs[1] * D)
    P_stack = utils.diagonally_dominated_mat_stack(shap, nb_comp, sig=sig_supp,
                                                   thresh_en=True)
    i, j = where(P_stack[:, :, 0] > 0)
    supp = transpose(array([i, j]))
    t = (wvl - wvl.min()).astype(float) / (wvl.max() - wvl.min())

    neighbors_graph, weights_neighbors, cent, coord_map, knn = \
        psf_learning_utils.full_displacement(shap, supp, t, pol_en=True,
                                             cent=None, theta_param=1,
                                             pol_mod=True, coord_map=None,
                                             knn=None)

    print "------------------- Forward operator parameters estimation ------------------------"
    centroids = None
    if sig is None:
        sig,filters = utils.im_gauss_nois_est_cube(copy(im_stack),opt=opt_shift_est)

    if shifts is None:
        map = ones(im_stack.shape)
        for i in range(0,shap_obs[2]):
            map[:,:,i] *= nsig_shift_est*sig[i]
        print 'Shifts estimation...'
        psf_stack_shift = utils.thresholding_3D(copy(im_stack),map,0)
        shifts,centroids = utils.shift_est(psf_stack_shift)
        print 'Done...'
    else:
        print "---------- /!\ Warning: shifts provided /!\ ---------"
    ker,ker_rot = utils.shift_ker_stack(shifts,D)
    sig /=sig.min()
    for k in range(0,shap_obs[2]):
        im_stack[:,:,k] = im_stack[:,:,k]/sig[k]
    print " ------ ref energy: ",(im_stack**2).sum()," ------- "
    if flux is None:
        flux = utils.flux_estimate_stack(copy(im_stack),rad=4)

    if graph_cons_en:
        print("-------------------- Spatial constraint setting -----------------------")
        e_opt, p_opt, weights, comp_temp, data, basis, alph = analysis(
            im_stack, 0.1 * prod(shap_obs) * sig.min() ** 2, field_pos,
            nb_max=nb_comp)

    print("------------- Coeff init ------------")
    A, comp, cube_est = utils.cube_svd(im_stack, nb_comp=nb_comp)

    i = 0
    print(" --------- Optimization instances setting ---------- ")

    # Data fidelity related instances
    polychrom_grad = grad.polychrom_eigen_psf(im_stack, supp, neighbors_graph,
                                              weights_neighbors, spectrums, A,
                                              flux, sig, ker, ker_rot, D)

    if graph_cons_en:
        polychrom_grad_coeff = grad.polychrom_eigen_psf_coeff_graph(
            im_stack, supp, neighbors_graph, weights_neighbors, spectrums,
            P_stack, flux, sig, ker, ker_rot, D, basis)
    else:
        polychrom_grad_coeff = grad.polychrom_eigen_psf_coeff(
            im_stack, supp, neighbors_graph, weights_neighbors, spectrums,
            P_stack, flux, sig, ker, ker_rot, D)


    # Dual variable related linear operators instances
    dual_var_coeff = zeros((supp.shape[0], nb_im))
    if wvl_en and pos_en:
        lin_com = lambdaops.transport_plan_lin_comb_wavelet(
            A, supp, weights_neighbors, neighbors_graph, shap,
            wavelet_opt=wvl_opt)
    elif wvl_en:
        lin_com = lambdaops.transport_plan_marg_wavelet(
            supp, weights_neighbors, neighbors_graph, shap,
            wavelet_opt=wvl_opt)
    else:
        lin_com = lambdaops.transport_plan_lin_comb(A, supp, shap)

    if not graph_cons_en:
        lin_com_coeff = lambdaops.transport_plan_lin_comb_coeff(P_stack, supp)

    # Proximity operators related instances
    id_prox = Identity()
    if wvl_en and pos_en:
        noise_map = get_noise_arr(lin_com.op(polychrom_grad.MtX(im_stack))[1])
        dual_var_plan = np.array([zeros((supp.shape[0], nb_im)),
                                  zeros(noise_map.shape)])
        dual_prox_plan = lambdaprox.simplex_threshold(
            lin_com, nsig * noise_map, pos_en=(not simplex_en))
    elif wvl_en:
        # Noise estimation
        noise_map = get_noise_arr(lin_com.op(polychrom_grad.MtX(im_stack)))
        dual_var_plan = zeros(noise_map.shape)
        dual_prox_plan = prox.SparseThreshold(lin_com, nsig * noise_map)
    else:
        dual_var_plan = zeros((supp.shape[0], nb_im))
        if simplex_en:
            dual_prox_plan = lambdaprox.Simplex()
        else:
            dual_prox_plan = prox.Positivity()

    if graph_cons_en:
        iter_func = lambda x: floor(sqrt(x))
        prox_coeff = lambdaprox.KThreshold(iter_func)
    elif simplex_en:
        dual_prox_coeff = lambdaprox.Simplex()
    else:
        dual_prox_coeff = prox.Positivity()

    # ---- (Re)Setting hyperparameters
    delta = ((polychrom_grad.inv_spec_rad ** (-1) / 2) ** 2
             + 4 * lin_com.mat_norm ** 2)
    w = 0.9
    sigma_P = (w * (np.sqrt(delta) - polychrom_grad.inv_spec_rad ** (-1) / 2)
               / (2 * lin_com.mat_norm ** 2))
    tau_P = sigma_P
    rho_P = 1

    # Cost function instance
    cost_op = costObj([polychrom_grad])

    condat_min = optimalg.Condat(P_stack, dual_var_plan, polychrom_grad,
                                 id_prox, dual_prox_plan, lin_com,
                                 cost=cost_op, rho=rho_P, sigma=sigma_P,
                                 tau=tau_P, rho_update=None, sigma_update=None,
                                 tau_update=None, auto_iterate=False)
    print("------------------- Transport plans estimation ------------------")

    condat_min.iterate(max_iter=nb_subiter)  # ! actually runs optimisation
    P_stack = condat_min.x_final
    dual_var_plan = condat_min.y_final

    obs_est = polychrom_grad.MX(P_stack)
    res = im_stack - obs_est

    for i in range(0, nb_iter):
        print("----------------Iter ", i + 1, "/", nb_iter,
              "-------------------")

        # Parameters update
        polychrom_grad_coeff.set_P(P_stack)
        if not graph_cons_en:
            lin_com_coeff.set_P_stack(P_stack)
            # ---- (Re)Setting hyperparameters
            delta = ((polychrom_grad_coeff.inv_spec_rad ** (-1) / 2) ** 2
                     + 4 * lin_com_coeff.mat_norm ** 2)
            w = 0.9
            sigma_coeff = (w * (np.sqrt(delta)
                                - polychrom_grad_coeff.inv_spec_rad ** (-1) / 2)
                           / (2 * lin_com_coeff.mat_norm ** 2))
            tau_coeff = sigma_coeff
            rho_coeff = 1

        # Coefficients cost function instance
        cost_op_coeff = costObj([polychrom_grad_coeff])

        if graph_cons_en:
            # Set the step size to the inverse spectral radius of the
            # coefficient gradient
            beta_param = polychrom_grad_coeff.inv_spec_rad
            min_coeff = optimalg.ForwardBackward(alph, polychrom_grad_coeff,
                                                 prox_coeff,
                                                 beta_param=beta_param,
                                                 cost=cost_op_coeff,
                                                 auto_iterate=False)
        else:
            min_coeff = optimalg.Condat(A, dual_var_coeff,
                                        polychrom_grad_coeff, id_prox,
                                        dual_prox_coeff, lin_com_coeff,
                                        cost=cost_op_coeff, rho=rho_coeff,
                                        sigma=sigma_coeff, tau=tau_coeff,
                                        rho_update=None, sigma_update=None,
                                        tau_update=None, auto_iterate=False)

        print "------------------- Coefficients estimation ----------------------"
        min_coeff.iterate(max_iter=nb_subiter) # ! actually runs optimisation
        if graph_cons_en:
            prox_coeff.reset_iter()
            alph = min_coeff.x_final
            A = alph.dot(basis)
        else:
            A = min_coeff.x_final
            dual_var_coeff = min_coeff.y_final

        # Parameters update
        polychrom_grad.set_A(A)
        if not wvl_en:
            lin_com.set_A(A)
        if wvl_en:
            # Noise estimate update
            noise_map = get_noise_arr(lin_com.op(polychrom_grad.MtX(im_stack))[1])
            dual_prox_plan.update_weights(noise_map)

        # ---- (Re)Setting hyperparameters
        delta = ((polychrom_grad.inv_spec_rad ** (-1) / 2) ** 2
                 + 4 * lin_com.mat_norm ** 2)
        w = 0.9
        sigma_P = (w * (np.sqrt(delta)
                        - polychrom_grad.inv_spec_rad ** (-1) / 2)
                   / (2 * lin_com.mat_norm ** 2))
        tau_P = sigma_P
        rho_P = 1

        # Transport plan optimisation instance
        condat_min = optimalg.Condat(P_stack, dual_var_plan, polychrom_grad,
                                     id_prox, dual_prox_plan, lin_com,
                                     cost=cost_op, rho=rho_P, sigma=sigma_P,
                                     tau=tau_P, rho_update=None,
                                     sigma_update=None, tau_update=None,
                                     auto_iterate=False)
        print("------------------- Transport plans estimation ------------------")

        condat_min.iterate(max_iter=nb_subiter)  # ! actually runs optimisation
        P_stack = condat_min.x_final
        dual_var_plan = condat_min.y_final

        # Normalization
        for j in range(0, nb_comp):
            l1_P = sum(abs(P_stack[:, :, j]))
            P_stack[:, :, j] /= l1_P
            A[j, :] *= l1_P
            if graph_cons_en:
                alph[j, :] *= l1_P
        polychrom_grad.set_A(A)
        # Flux update
        obs_est = polychrom_grad.MX(P_stack)
        err_ref = 0.5 * sum((obs_est - im_stack) ** 2)
        flux_new = ((obs_est * im_stack).sum(axis=(0, 1))
                    / (obs_est ** 2).sum(axis=(0, 1)))
        print("Flux correction: ", flux_new)
        polychrom_grad.set_flux(polychrom_grad.get_flux() * flux_new)
        polychrom_grad_coeff.set_flux(polychrom_grad_coeff.get_flux() * flux_new)

        # Compute the residual
        obs_est = polychrom_grad.MX(P_stack)
        res = im_stack - obs_est
        err_rec = 0.5 * sum(res ** 2)
        print("err_ref : ", err_ref, " ; err_rec : ", err_rec)


    psf_est = psf_learning_utils.field_reconstruction(P_stack, shap, supp,
                                                      neighbors_graph,
                                                      weights_neighbors, A)

    return psf_est, P_stack, A, res
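A call sketch for the routine above; every shape and value is hypothetical, inferred only from the indexing in the function body (im_stack_in is (H, W, n_images) and D is the upsampling factor):

import numpy as np

psf_est, P_stack, A, res = polychromatic_psf_field_est_2(
    im_stack_in=np.random.rand(21, 21, 50),  # hypothetical: 50 observed PSFs
    spectrums=np.random.rand(10, 50),        # hypothetical: 10 bins x 50 objects
    wvl=np.linspace(550.0, 900.0, 10),       # illustrative wavelength grid
    D=2,                                     # upsampling factor
    opt_shift_est=None,                      # shift-estimator options (assumed)
    nb_comp=3,
    nb_iter=2,
    nb_subiter=50,
)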
Example 11
    def _fit(self):
        weights = self.A
        comp = self.S
        alpha = self.alpha
        #### Source updates set-up ####
        # initialize dual variable and compute Starlet filters for Condat source updates
        dual_var = np.zeros(self.im_hr_shape)
        if self.default_filters:
            self.Phi_filters = get_mr_filters(self.im_hr_shape[:2],
                                              opt=self.opt,
                                              coarse=True)
        rho_phi = np.sqrt(
            np.sum(np.sum(np.abs(self.Phi_filters), axis=(1, 2))**2))

        # Set up source updates, starting with the gradient
        source_grad = grads.SourceGrad(self.obs_data, self.obs_weights,
                                       weights, self.flux, self.sigs,
                                       self.shift_ker_stack,
                                       self.shift_ker_stack_adj, self.upfact,
                                       self.Phi_filters)

        # Sparsity prox in the Starlet domain (this is actually assuming
        # synthesis form); initialised at 0 and updated to the actual
        # thresholds later
        sparsity_prox = rca_prox.StarletThreshold(0)

        # and the linear recombination for the positivity constraint
        lin_recombine = rca_prox.LinRecombine(weights, self.Phi_filters)

        #### Weight updates set-up ####
        # gradient
        weight_grad = grads.CoeffGrad(self.obs_data, self.obs_weights, comp,
                                      self.VT, self.flux, self.sigs,
                                      self.shift_ker_stack,
                                      self.shift_ker_stack_adj, self.upfact)

        # cost function
        weight_cost = costObj([weight_grad], verbose=self.modopt_verb)
        source_cost = costObj([source_grad], verbose=self.modopt_verb)

        # k-thresholding for spatial constraint
        iter_func = lambda x: np.floor(np.sqrt(x)) + 1
        coeff_prox = rca_prox.KThreshold(iter_func)

        for k in range(self.nb_iter):
            #### Eigenpsf update ####
            # update gradient instance with new weights...
            source_grad.update_A(weights)

            # ... update linear recombination weights...
            lin_recombine.update_A(weights)

            # ... set optimization parameters...
            beta = source_grad.spec_rad + rho_phi
            tau = 1. / beta
            sigma = 1. / lin_recombine.norm * beta / 2
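            # Hedged note on this choice (assuming lin_recombine.norm holds
            # ||L||^2 for the linear recombination L): Condat's convergence
            # condition reads 1/tau - sigma * ||L||^2 >= beta / 2, and with
            # tau = 1/beta and sigma = beta / (2 ||L||^2) it holds with
            # equality: 1/tau - sigma * ||L||^2 = beta - beta/2 = beta/2.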

            # ... update sparsity prox thresholds...
            thresh = utils.reg_format(
                utils.acc_sig_maps(self.shap,
                                   self.shift_ker_stack_adj,
                                   self.sigs,
                                   self.flux,
                                   self.flux_ref,
                                   self.upfact,
                                   weights,
                                   sig_data=np.ones(
                                       (self.shap[2], )) * self.sig_min))
            thresholds = self.ksig * np.sqrt(
                np.array([
                    filter_convolve(Sigma_k**2, self.Phi_filters**2)
                    for Sigma_k in thresh
                ]))

            sparsity_prox.update_threshold(tau * thresholds)

            # and run source update:
            transf_comp = utils.apply_transform(comp, self.Phi_filters)
            if self.nb_reweight:
                reweighter = cwbReweight(thresholds)
                for _ in range(self.nb_reweight):
                    source_optim = optimalg.Condat(transf_comp,
                                                   dual_var,
                                                   source_grad,
                                                   sparsity_prox,
                                                   Positivity(),
                                                   linear=lin_recombine,
                                                   cost=source_cost,
                                                   max_iter=self.nb_subiter_S,
                                                   tau=tau,
                                                   sigma=sigma)
                    transf_comp = source_optim.x_final
                    reweighter.reweight(transf_comp)
                    thresholds = reweighter.weights
            else:
                source_optim = optimalg.Condat(transf_comp,
                                               dual_var,
                                               source_grad,
                                               sparsity_prox,
                                               Positivity(),
                                               linear=lin_recombine,
                                               cost=source_cost,
                                               max_iter=self.nb_subiter_S,
                                               tau=tau,
                                               sigma=sigma)
                transf_comp = source_optim.x_final
            comp = utils.rca_format(
                np.array([
                    filter_convolve(transf_compj, self.Phi_filters, True)
                    for transf_compj in transf_comp
                ]))

            # TODO: replace line below with Fred's component selection (to be
            # extracted from `low_rank_global_src_est_comb`)
            ind_select = range(comp.shape[2])

            #### Weight update ####
            if k < self.nb_iter - 1:
                # update sources and reset iteration counter for K-thresholding
                weight_grad.update_S(comp)
                coeff_prox.reset_iter()
                weight_optim = optimalg.ForwardBackward(
                    alpha,
                    weight_grad,
                    coeff_prox,
                    cost=weight_cost,
                    beta_param=weight_grad.inv_spec_rad,
                    auto_iterate=False)
                weight_optim.iterate(max_iter=self.nb_subiter_weights)
                alpha = weight_optim.x_final
                weights_k = alpha.dot(self.VT)

                # renormalize to break scale invariance
                weight_norms = np.sqrt(np.sum(weights_k**2, axis=1))
                comp *= weight_norms
                weights_k /= weight_norms.reshape(-1, 1)
                # TODO: replace line below with Fred's component selection
                ind_select = range(weights.shape[0])
                weights = weights_k[ind_select, :]
                supports = None  # TODO

        self.A = weights
        self.S = comp
        self.alpha = alpha
        source_grad.MX(transf_comp)
        self.current_rec = source_grad._current_rec
Example 12
def set_prox_op_and_cost(data, **kwargs):
    """Set the proximity operators and cost function

    This method sets the proximity operators and cost function instances.

    Parameters
    ----------
    data : np.ndarray
        Input noisy data (3D array)

    Returns
    -------
    dict
        Updated keyword arguments

    """

    # Create a list of proximity operators
    kwargs['prox_op'] = []

    # Set the first operator as a positivity constraint or simply identity
    if not kwargs['no_pos']:
        kwargs['prox_op'].append(Positivity())

    else:
        kwargs['prox_op'].append(IdentityProx())

    # Add a second proximity operator
    if kwargs['mode'] == 'all':

        kwargs['prox_op'].append(
            ProximityCombo([
                SparseThreshold(
                    kwargs['linear_op'].operators[0],
                    kwargs['reweight'].weights,
                ),
                LowRankMatrix(kwargs['lambda'],
                              thresh_type=kwargs['lowr_thresh_type'],
                              lowr_type=kwargs['lowr_type'],
                              operator=kwargs['grad_op'].trans_op)
            ]))

    elif kwargs['mode'] == 'lowr':

        kwargs['prox_op'].append(
            LowRankMatrix(kwargs['lambda'],
                          thresh_type=kwargs['lowr_thresh_type'],
                          lowr_type=kwargs['lowr_type'],
                          operator=kwargs['grad_op'].trans_op))

        operator_list = [kwargs['grad_op']] + kwargs['prox_op']

    elif kwargs['mode'] == 'sparse':

        kwargs['prox_op'].append(
            SparseThreshold(kwargs['linear_op'], kwargs['reweight'].weights))

    elif kwargs['mode'] == 'grad':

        kwargs['prox_op'].append(IdentityProx())

    # Set the cost function
    kwargs['cost_op'] = (costObj([kwargs['grad_op']] + kwargs['prox_op'],
                                 tolerance=kwargs['convergence'],
                                 cost_interval=kwargs['cost_window'],
                                 plot_output=kwargs['output'],
                                 verbose=not kwargs['quiet']))

    return kwargs
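A hedged sketch of the keyword set this helper expects; the key names are inferred from the lookups above and the values are purely illustrative:

import numpy as np
from modopt.opt.gradient import GradBasic
from modopt.opt.linear import Identity
from modopt.signal.reweight import cwbReweight

data = np.random.rand(4, 8, 8)  # 3D noisy input stack (illustrative)
grad_op = GradBasic(data, lambda x: x, lambda x: x)

kwargs = {
    'no_pos': False,                      # keep the positivity constraint
    'mode': 'sparse',                     # one of: all, lowr, sparse, grad
    'linear_op': Identity(),              # placeholder linear operator
    'reweight': cwbReweight(np.ones(8)),  # provides .weights
    'grad_op': grad_op,                   # must expose cost (and .trans_op)
    'lambda': 1e-3,                       # regularisation weight (lowr/all)
    'convergence': 1e-4,
    'cost_window': 1,
    'output': None,
    'quiet': True,
}
kwargs = set_prox_op_and_cost(data, **kwargs)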
Example 13
    def __init__(self,
                 x,
                 y,
                 grad,
                 prox,
                 prox_dual,
                 linear=None,
                 cost='auto',
                 reweight=None,
                 rho=0.5,
                 sigma=1.0,
                 tau=1.0,
                 rho_update=None,
                 sigma_update=None,
                 tau_update=None,
                 auto_iterate=True,
                 metric_call_period=5,
                 metrics=None):

        # Avoid a mutable default argument for metrics
        if metrics is None:
            metrics = {}

        # Set default algorithm properties
        super(Condat, self).__init__(
            metric_call_period=metric_call_period,
            metrics=metrics,
        )

        # Set the initial variable values
        for input_data in (x, y):
            self._check_input_data(input_data)
        self._x_old = np.copy(x)
        self._y_old = np.copy(y)

        # Set the algorithm operators
        for operator in (grad, prox, prox_dual, linear, cost):
            self._check_operator(operator)
        self._grad = grad
        self._prox = prox
        self._prox_dual = prox_dual
        self._reweight = reweight
        if linear is None:
            self._linear = Identity()
        else:
            self._linear = linear
        if cost == 'auto':
            self._cost_func = costObj(
                [self._grad, self._prox, self._prox_dual])
        else:
            self._cost_func = cost

        # Set the algorithm parameters
        for param_val in (rho, sigma, tau):
            self._check_param(param_val)
        self._rho = rho
        self._sigma = sigma
        self._tau = tau

        # Set the algorithm parameter update methods
        for param_update in (rho_update, sigma_update, tau_update):
            self._check_param_update(param_update)
        self._rho_update = rho_update
        self._sigma_update = sigma_update
        self._tau_update = tau_update

        # Automatically run the algorithm
        if auto_iterate:
            self.iterate()