# Imports assumed by the snippets below; thresh, find_n_pc, cube2matrix,
# matrix2cube and the PSF/wavelet helpers are defined elsewhere in the
# surrounding module.
import numpy as np
from warnings import warn
from scipy.linalg import svd, diagsvd


def svd_thresh(data, threshold=None, n_pc=None, thresh_type='hard'):
    """Threshold the singular values

    This method thresholds the input data using singular value decomposition

    Parameters
    ----------
    data : np.ndarray
        Input data array
    threshold : float, optional
        Threshold value
    n_pc : int or str, optional
        Number of principal components, specify an integer value or 'all'
    thresh_type : str {'hard', 'soft'}
        Type of thresholding (default is 'hard')

    Returns
    -------
    np.ndarray thresholded data

    Raises
    ------
    ValueError
        For invalid string entry for n_pc

    """

    if isinstance(n_pc, str) and n_pc != 'all':
        raise ValueError('Invalid value for "n_pc", specify an integer value '
                         'or "all"')

    # Get SVD of input data.
    u, s, v = svd(data, check_finite=False, lapack_driver='gesvd')

    # Find the threshold if not provided.
    if threshold is None:

        # Find the required number of principal components if not specified.
        if n_pc is None:
            n_pc = find_n_pc(u, factor=0.1)

        # If the number of PCs is too large, use all of the singular values.
        if n_pc == 'all' or n_pc >= s.size:
            n_pc = s.size - 1
            warn('Using all singular values.')

        threshold = s[n_pc]

    # Remove noise from singular values.
    s_new = thresh(s, threshold, thresh_type)

    #if np.all(s_new == s):
    #    warn('No change to singular values.')

    # Reshape the singular values to the shape of the input image.
    s_new = diagsvd(s_new, *data.shape)

    # Return the thresholded image.
    return np.dot(u, np.dot(s_new, v))
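

# Usage sketch added for illustration (not part of the original snippet): a
# minimal call to svd_thresh as defined above. It assumes the helper functions
# it relies on (thresh and find_n_pc) are available in the surrounding module.
noisy = np.random.randn(64, 64)
low_rank = svd_thresh(noisy, n_pc=5, thresh_type='hard')  # low-rank approximation
print(low_rank.shape)  # (64, 64)

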
def svd_thresh_coef(data, operator, threshold, thresh_type='hard'):
    """Threshold the singular values coefficients

    This method thresholds the input data using singular value decomposition

    Parameters
    ----------
    data : np.ndarray
        Input data array
    operator : class
        Operator class instance
    threshold : float
        Threshold value
    thresh_type : str {'hard', 'soft'}
        Type of thresholding (default is 'hard')

    Returns
    -------
    np.ndarray thresholded data

    """

    # Convert data cube to matrix.
    data_matrix = cube2matrix(data)

    # Get SVD of data matrix.
    u, s, v = np.linalg.svd(data_matrix, full_matrices=False)

    # Compute coefficients.
    a = np.dot(np.diag(s), v)

    # Compute threshold matrix.
    u_cube = matrix2cube(u, data.shape[1:])
    ti = np.array([np.linalg.norm(x) for x in operator(u_cube)])
    ti = np.repeat(ti, a.shape[1]).reshape(a.shape)
    threshold *= ti

    # Remove noise from coefficients.
    a_new = thresh(a, threshold, thresh_type)

    # Return the thresholded image.
    return np.dot(u, a_new)
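

# Usage sketch added for illustration (not part of the original snippet):
# svd_thresh_coef expects a callable operator acting on the data cube; here a
# plain identity function stands in for, e.g., a convolution operator. The
# helpers cube2matrix, matrix2cube and thresh are assumed to be available.
cube = np.random.randn(10, 16, 16)  # stack of ten 16x16 images
coef_thresh = svd_thresh_coef(cube, lambda x: x, threshold=0.1,
                              thresh_type='soft')
print(coef_thresh.shape)  # matrix form of the thresholded cube
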
Example #3
    def op(self, data, extra_factor=1.0):
        """Operator

        This method returns the input data thresholded by the weights

        Parameters
        ----------
        data : np.ndarray
            Input data array
        extra_factor : float
            Additional multiplication factor

        Returns
        -------
        np.ndarray thresholded data

        """

        threshold = self.weights * extra_factor

        return thresh(data, threshold, self.thresh_type)
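
# Illustration added for context (not part of the original snippet): the op
# method above thresholds the input element-wise, using self.weights scaled by
# extra_factor as the per-element threshold. A self-contained stand-in for the
# soft-thresholding case of the thresh() helper it relies on:
import numpy as np

def soft_thresh(data, threshold):
    """Shrink each value towards zero by the corresponding threshold."""
    return np.sign(data) * np.maximum(np.abs(data) - threshold, 0)

values = np.array([-3.0, 1.5, 0.2, 2.0])
weights = np.full(values.shape, 1.0)
print(soft_thresh(values, weights * 1.0))  # [-2.   0.5  0.   1. ]
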
def sp_runupdate_sparse(din, linear_filters, rho, sigma, tau, wv_thr_factor,
                        noise_est):
    """Sparsity-based primal-dual update

    This method performs the sparsity-based update of the primal-dual
    optimization variables (Condat optimization) over a bundled block of RDD
    data (cluster)

    Parameters
    ----------
    din : tuple
        Input bundle of RDD block data: four 3D np.ndarray objects nested as
        (((data, psf), x), y)
    linear_filters : np.ndarray
        The wavelet filters
    rho, sigma, tau : float
        Optimization parameters
    wv_thr_factor : np.ndarray (1x3)
        The wavelet threshold factors
    noise_est : float
        The noise standard deviation in the observed galaxy images

    Returns
    -------
    tuple
        The updated bundled block of RDD data

    """

    # Unbundle the RDD block into (datain, psfin, xin, yin). Each component
    # stays wrapped in a 1-tuple, giving the downstream NumPy operations a
    # leading singleton stack dimension.
    ((datain, psfin), xin), yin = din
    datain, psfin, xin, yin = (datain,), (psfin,), (xin,), (yin,)
    
    tauin = tau
    sigmain = sigma
    rhoin = rho
    extrafactor = 1.0 / sigmain

    # Wavelet-domain threshold weights derived from the PSFs and the noise
    # estimate.
    psfinrot = np.rot90(psfin, 2)
    win = noise_est * mycalc_norm(filter_convolve_stack(psfinrot,
                                                        linear_filters),
                                  wv_thr_factor)

    # Step 1 from eq.9: primal update (gradient step followed by the
    # positivity constraint).
    gtemp1 = psf_convolve(xin, psfin, psf_rot=False, psf_type='obj_var')
    gtemp = psf_convolve(gtemp1 - datain, psfin, psf_rot=True,
                         psf_type='obj_var')

    xtemp = (xin - tauin * gtemp
             - tauin * filter_convolve_stack(yin, linear_filters,
                                             filter_rot=True))

    x_prox = xtemp * (xtemp > 0)
    
    # Step 2 from eq.9.
    threshold = np.squeeze(np.array(win)) * extrafactor

    ytemp = yin + sigmain * filter_convolve_stack(2 * x_prox - xin,
                                                  linear_filters)
    yytemp = thresh(ytemp / sigmain, threshold, 'soft')

    y_prox = ytemp - sigmain * yytemp

    # Step 3 from eq.9: relaxation of the primal and dual variables.
    xout = rhoin * x_prox + (1 - rhoin) * np.array(xin)
    yout = rhoin * y_prox + (1 - rhoin) * np.array(yin)

    # Re-bundle and return the RDD block with the updated primal (xout) and
    # dual (yout) variables, preserving the input structure
    # (((data, psf), x), y).
    return ((datain[0], psfin[0]), xout[0]), yout[0]
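

# Sketch added for illustration of how this update could be mapped over a
# Spark RDD of bundled blocks (`sc`, `bundled_blocks`, `wavelet_filters` and
# the parameter values are placeholders, not part of the original code):
#
#     blocks = sc.parallelize(bundled_blocks)  # elements: (((data, psf), x), y)
#     updated = blocks.map(
#         lambda block: sp_runupdate_sparse(block, wavelet_filters,
#                                           0.8, 0.5, 0.5,
#                                           np.array([3.0, 3.0, 4.0]), 0.01))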