Example #1
def conjugate_gradient_tv(K, Kadj, data, Lambda, n_it, mu=1e-4, return_energy=True):
    '''
    Conjugate Gradient algorithm to minimize the objective function
        0.5*||K*x - d||_2^2 + Lambda*TV_mu (x)
    where TV_mu is the Moreau-Yosida regularization of the Total Variation.

    K : forward operator
    Kadj : backward operator, adjoint of K
    data: acquired data
    Lambda : parameter weighting the TV regularization
    mu : parameter of Moreau-Yosida approximation of TV (small positive value)
    n_it : number of iterations
    '''

    x = np.zeros_like(Kadj(data)) # start from 0
    grad_f = -Kadj(data)
    grad_F = grad_f + Lambda*grad_tv_smoothed(x, mu)
    d = -np.copy(grad_F)

    if return_energy: en = np.zeros(n_it)

    for k in range(0, n_it):
        grad_f_old = grad_f
        grad_F_old = grad_F
        ATAd = Kadj(K(d))
        # Calculate step size
        alpha = dot(d, -grad_F_old)/dot(d, ATAd)
        # Update variables
        x = x + alpha*d
        grad_f = grad_f_old + alpha*ATAd # TODO: re-compute the gradient every few iterations to avoid error accumulation
        grad_F = grad_f + Lambda*grad_tv_smoothed(x,mu)
        beta = dot(grad_F, grad_F - grad_F_old)/norm2sq(grad_F_old) # Polak-Ribiere
        if beta < 0:
            beta = 0
        d = -grad_F + beta*d
        # Energy
        if return_energy:
            fid = 0.5*norm2sq(K(x)-data) # 0.5 factor to match the objective above
            tv = tv_smoothed(x,mu)
            eng = fid+Lambda*tv
            en[k] = eng
            if (k % 10 == 0): # TODO: more flexible
                print("%d : Energy = %e \t Fid = %e\t TV = %e" %(k, eng, fid, tv))

        # Stopping criterion
        if np.abs(alpha) < 1e-15: # TODO: try other bounds
            print("Warning: minimum step reached, interrupting at iteration %d" % k)
            break
    if return_energy: return en, x
    else: return x
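A minimal usage sketch (not from the original project): TV-regularized denoising with the identity as forward operator. It assumes numpy is imported as np and that the module-level helpers used above (dot, norm2sq, tv_smoothed, grad_tv_smoothed) are available; the image and Lambda value are placeholders.

# Hypothetical usage of conjugate_gradient_tv for denoising (K = Id)
import numpy as np

K = lambda x: x                      # identity forward operator
Kadj = lambda y: y                   # the identity is its own adjoint
noisy = np.random.rand(256, 256).astype(np.float32)   # stand-in for a noisy image

en, rec = conjugate_gradient_tv(K, Kadj, noisy, Lambda=0.5, n_it=200, mu=1e-4)
# en[k] is the objective value at iteration k, rec is the denoised image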
Example #2
File: fista.py    Project: pierrepaleo/portal
def fista_wavelets(data, W, K, Kadj, Lambda, Lip=None, n_it=100, return_all=True, normalize=False):
    """
    Algorithm for solving the regularized inverse problem
        0.5*||K x - data||_2^2 + Lambda*||W x||_1
    where K is some forward operator and W is a wavelet transform.
    FISTA is used to solve this problem, provided that the wavelet transform is semi-orthogonal:
        W^T W = alpha * Id
    which is the case for DWT/SWT with orthogonal filters.

    Parameters
    ----------
    data: numpy.ndarray
        data to reconstruct from
    W: Wavelets instance
        Wavelet instance (from pypwt import Wavelets; W = Wavelets(img, "wname", levels, ...))
    K: function
        Operator of the  forward model
    Kadj: function
        Adjoint operator of K. We should have ||K x||_2^2 = < x | Kadj K x >
    Lambda: float
        Regularization parameter.
    Lip: float (Optional, default is None)
        Largest eigenvalue of (Kadj K). If None, it is automatically computed.
    n_it: integer
        Number of iterations
    return_all: bool
        If True, two arrays are returned: the objective function and the result.
    normalize: bool (Optional, default is False)
        If True, the thresholding is normalized (the threshold is smaller for the coefficients in finer scales).
        Mind that the threshold should then be adapted (roughly twice as large as for normalize=False).
    """
    if Lip is None:
        print("Warn: Lipschitz constant not provided, computing it with 20 iterations")
        Lip = power_method(K, Kadj, data, 20)**2 * 1.2
        print("Lip = %e" % Lip)

    if return_all: en = np.zeros(n_it)
    x = np.zeros_like(Kadj(data))
    y = np.zeros_like(x)
    for k in range(0, n_it):
        grad_y = Kadj(K(y) - data)
        x_old = x
        W.set_image((y - (1.0/Lip)*grad_y).astype(np.float32))
        W.forward()
        W.soft_threshold(Lambda/Lip, normalize=normalize)
        W.inverse()
        x = W.image
        y = x + ((k-1.0)/(k+10.1))*(x - x_old) # TODO : see what would be the best parameter "a"
        # Calculate norms
        if return_all:
            fidelity = 0.5*norm2sq(K(x)-data)
            l1 = W.norm1()
            energy = fidelity + Lambda*l1
            en[k] = energy
            if (k%10 == 0): # TODO: more flexible
                print("[%d] : energy %e \t fidelity %e \t L1 %e" % (k, energy, fidelity, l1))
    if return_all: return en, x
    else: return x
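A hedged usage sketch (not part of the project): it reuses the Wavelets constructor form given in the docstring above; the wavelet name, number of levels and Lambda are placeholders, and pypwt plus the module helpers (power_method, norm2sq) are assumed to be importable.

# Hypothetical usage of fista_wavelets with an identity forward model
import numpy as np
from pypwt import Wavelets

K = lambda x: x
Kadj = lambda y: y
data = np.random.rand(256, 256).astype(np.float32)

W = Wavelets(data, "haar", 3)        # constructor call as sketched in the docstring
en, rec = fista_wavelets(data, W, K, Kadj, Lambda=0.1, n_it=100)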
Example #3
File: fista.py    Project: pierrepaleo/portal
def fista_l1_operator(data, K, Kadj, Lambda, H, Hinv, soft_thresh, Lip=None, n_it=100, return_all=True):
    '''
    Beck-Teboulle's forward-backward algorithm to minimize the objective function
        0.5*||K*x - d||_2^2 + Lambda*||H*x||_1
    where K and H are linear operators and H is invertible.

    K : forward operator
    Kadj : backward operator
    Lambda : weight of the regularization (the higher Lambda, the sparser the solution in the H domain)
    H : *invertible* linear operator (e.g. a sparsifying transform, like a wavelet transform).
    Hinv : inverse operator of H
    soft_thresh : *in-place* function applying soft thresholding (the proximal operator of the L1 norm) to the coefficients H(image)
    Lip : largest eigenvalue of Kadj*K
    n_it : number of iterations
    return_all: if True, an array containing the values of the objective function will be returned
    '''

    # Check that H, Hinv and soft_thresh are callable
    if not callable(H) or not callable(Hinv) or not callable(soft_thresh): raise ValueError('fista_l1_operator(): the H, Hinv and soft_thresh parameters must be callable')
    # Check that H and Hinv are inverses of each other, using a random test image
    u = np.random.rand(512, 512)
    Hu = H(u)
    if np.max(np.abs(u - Hinv(Hu))) > 1e-3: # FIXME: not sure what tolerance I should take
        raise ValueError('fista_l1_operator(): the H operator inverse does not seem reliable')
    # Check that soft_thresh is an in-place operator
    thr = soft_thresh(Hu, 1.0)
    if thr is not None: raise ValueError('fista_l1_operator(): the soft_thresh parameter must be an in-place modification of the coefficients')
    # Check whether the coefficients H(u) have a "norm1" method
    can_compute_l1 = callable(getattr(Hu, "norm1", None))

    if Lip is None:
        print("Warn: fista_l1(): Lipschitz constant not provided, computing it with 20 iterations")
        Lip = power_method(K, Kadj, data, 20)**2 * 1.2
        print("Lip = %e" % Lip)

    if return_all: en = np.zeros(n_it)
    x = np.zeros_like(Kadj(data))
    y = np.zeros_like(x)
    for k in range(0, n_it):
        grad_y = Kadj(K(y) - data)
        x_old = x
        w = H(y - (1.0/Lip)*grad_y)
        soft_thresh(w, Lambda/Lip)
        x = Hinv(w)
        y = x + ((k-1.0)/(k+10.1))*(x - x_old) # TODO : see what would be the best parameter "a"
        # Calculate norms
        if return_all:
            fidelity = 0.5*norm2sq(K(x)-data)
            l1 = w.norm1() if can_compute_l1 else 0.
            energy = fidelity + Lambda*l1
            en[k] = energy
            if (k%10 == 0): # TODO: more flexible
                print("[%d] : energy %e \t fidelity %e \t L1 %e" % (k, energy, fidelity, l1))
        elif (k%10 == 0): print("Iteration %d" % k)
    if return_all: return en, x
    else: return x
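A usage sketch under stated assumptions: H and Hinv are taken as the identity (so the problem reduces to plain L1 regularization) and soft_thresh is a small in-place numpy implementation, which satisfies the checks performed at the top of the function. All values are illustrative.

# Hypothetical in-place soft thresholding: w <- sign(w) * max(|w| - t, 0)
import numpy as np

def soft_thresh(w, t):
    np.copyto(w, np.sign(w) * np.maximum(np.abs(w) - t, 0))   # modifies w, returns None

H = lambda x: x.copy()               # "invertible" transform: identity
Hinv = lambda w: w.copy()
K = lambda x: x
Kadj = lambda y: y
data = np.random.rand(512, 512)

en, rec = fista_l1_operator(data, K, Kadj, 0.1, H, Hinv, soft_thresh, n_it=50)
# Since a plain ndarray has no norm1() method, the reported L1 term is 0 here.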
Example #4
def conjugate_gradient(K, Kadj, data, n_it, return_energy=True):
    '''
    Conjugate Gradient algorithm for least-squares fitting:
        0.5*||K*x - d||_2^2

    K : forward operator
    Kadj : backward operator, adjoint of K
    data: acquired data
    n_it : number of iterations
    '''

    x = np.zeros_like(Kadj(data)) # start from 0
    grad_f = -Kadj(data)
    d = -np.copy(grad_f)

    if return_energy: en = np.zeros(n_it)

    for k in range(0, n_it):
        grad_f_old = grad_f
        ATAd = Kadj(K(d))
        # Calculate step size
        alpha = dot(d, -grad_f_old)/dot(d, ATAd)
        # Update variables
        x = x + alpha*d
        grad_f = grad_f_old + alpha*ATAd # TODO: re-compute the gradient every few iterations to avoid error accumulation
        beta = dot(grad_f, grad_f - grad_f_old)/norm2sq(grad_f_old) # Polak-Ribiere
        if beta < 0:
            beta = 0
        d = -grad_f + beta*d
        # Energy
        if return_energy:
            eng = 0.5*norm2sq(K(x)-data) # 0.5 factor to match the objective above
            en[k] = eng
            if (k % 10 == 0): # TODO: more flexible
                print("%d : Energy = %e" %(k, eng))

        # Stopping criterion
        if np.abs(alpha) < 1e-15: # TODO: try other bounds
            print("Warning: minimum step reached, interrupting at iteration %d" % k)
            break
    if return_energy: return en, x
    else: return x
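A usage sketch with a small explicit matrix as the forward operator (not from the original project); it assumes the module helpers dot and norm2sq behave like np.dot and the squared L2 norm.

# Hypothetical usage: least-squares fit of an overdetermined linear system
import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((300, 100))       # forward model as an explicit matrix
x_true = rng.standard_normal(100)
data = A.dot(x_true)                      # noiseless measurements for illustration

K = lambda x: A.dot(x)
Kadj = lambda y: A.T.dot(y)

en, x_rec = conjugate_gradient(K, Kadj, data, n_it=50)
# x_rec should approach x_true since the system is noiseless and overdetermined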
Example #5
File: misc.py    Project: pierrepaleo/portal
def power_method(K, Kadj, data, n_it=10):
    '''
    Calculates the norm of the operator K,
    i.e. the square root of the largest eigenvalue of K^T*K:
        ||K|| = sqrt(lambda_max(K^T*K))

    K : forward operator
    Kadj : backward operator (adjoint of K)
    data : initial data (used to build the starting vector)
    n_it : number of power iterations (default: 10)
    '''
    x = np.copy(Kadj(data)) # Copy in case of Kadj = Id
    for k in range(0, n_it):
        x = Kadj(K(x))
        s = sqrt(norm2sq(x))
        x /= s
    return sqrt(s)
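A quick sanity check (illustrative, not from the project): for K = 3*Id the operator norm is 3, so power_method should return a value close to 3. The FISTA routines above then square this value (with a 1.2 safety margin) to get the Lipschitz constant.

# Hypothetical check of power_method on a scaled identity
import numpy as np

K = lambda x: 3.0 * x
Kadj = lambda y: 3.0 * y                  # a scaled identity is self-adjoint
data = np.random.rand(64, 64)

print(power_method(K, Kadj, data, n_it=20))     # expected output: ~3.0
Lip = power_method(K, Kadj, data, 20)**2 * 1.2  # as used by the FISTA functions above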
Example #6
File: fista.py    Project: pierrepaleo/portal
def fista_l1(data, K, Kadj, Lambda, Lip=None, n_it=100, return_all=True):
    '''
    Beck-Teboulle's forward-backward algorithm to minimize the objective function
        0.5*||K*x - d||_2^2 + Lambda*||x||_1
    where K is a linear operator.

    K : forward operator
    Kadj : backward operator
    Lambda : weight of the regularization (the higher Lambda, the sparser the solution)
    Lip : largest eigenvalue of Kadj*K
    n_it : number of iterations
    return_all: if True, an array containing the values of the objective function will be returned
    '''

    if Lip is None:
        print("Warn: fista_l1(): Lipschitz constant not provided, computing it with 20 iterations")
        Lip = power_method(K, Kadj, data, 20)**2 * 1.2
        print("Lip = %e" % Lip)

    if return_all: en = np.zeros(n_it)
    x = np.zeros_like(Kadj(data))
    y = np.zeros_like(x)
    for k in range(0, n_it):
        grad_y = Kadj(K(y) - data)
        x_old = x
        w = y - (1.0/Lip)*grad_y
        w = _soft_thresh(w, Lambda/Lip)
        x = w
        y = x + ((k-1.0)/(k+10.1))*(x - x_old) # TODO : see what would be the best parameter "a"
        # Calculate norms
        if return_all:
            fidelity = 0.5*norm2sq(K(x)-data)
            l1 = norm1(w)
            energy = fidelity + Lambda*l1
            en[k] = energy
            if (k%10 == 0): # TODO: more flexible
                print("[%d] : energy %e \t fidelity %e \t L1 %e" % (k, energy, fidelity, l1))
        #~ elif (k%10 == 0): print("Iteration %d" % k)
    if return_all: return en, x
    else: return x
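A usage sketch for sparse recovery from random measurements (illustrative values only); it assumes the module helpers _soft_thresh, norm1, norm2sq and power_method used inside fista_l1 are available.

# Hypothetical usage: recover a sparse vector from underdetermined measurements
import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((128, 256)) / np.sqrt(128)      # random sensing matrix
x_true = np.zeros(256)
x_true[rng.choice(256, size=10, replace=False)] = 1.0   # 10-sparse signal
data = A.dot(x_true)

K = lambda x: A.dot(x)
Kadj = lambda y: A.T.dot(y)

en, x_rec = fista_l1(data, K, Kadj, Lambda=0.01, n_it=300)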