Example #1
import numpy as np

def conjugate_gradient_TV(P, PT, sino, Lambda, mu=1e-4, n_it=300):
    '''
    Conjugate Gradient algorithm to minimize the objective function
        0.5*||P*x - d||_2^2 + Lambda*TV_mu(x)

    P : projection operator
    PT : backprojection operator
    sino: acquired data as a sinogram
    Lambda : parameter weighting the TV regularization
    mu : parameter of Moreau-Yosida approximation of TV (small positive value)
    n_it : number of iterations
    '''

    x = 0 * PT(sino)  # start from x = 0, with the shape of a backprojected image
    grad_f = -PT(sino)  # gradient of 0.5*||P*x - sino||^2 at x = 0
    grad_F = grad_f + Lambda * grad_tv_smoothed(x, mu)  # gradient of the full objective
    d = -np.copy(grad_F)  # initial descent direction (steepest descent)
    en = np.zeros(n_it)
    for k in range(0, n_it):
        grad_f_old = grad_f
        grad_F_old = grad_F
        ATAd = PT(P(d))  # normal operator P^T*P applied to the direction
        # Exact line search on the quadratic fidelity term
        alpha = mydot(d, -grad_F_old) / mydot(d, ATAd)
        # Update variables
        x = x + alpha * d
        grad_f = grad_f_old + alpha * ATAd
        grad_F = grad_f + Lambda * grad_tv_smoothed(x, mu)
        # Polak-Ribiere weight, reset to zero when negative (restart)
        beta = mydot(grad_F, grad_F - grad_F_old) / norm2sq(grad_F_old)
        if beta < 0:
            beta = 0
        d = -grad_F + beta * d
        # Energy
        fid = 0.5 * norm2sq(P(x) - sino)  # 0.5 factor matches the objective above
        tv = tv_smoothed(x, mu)
        eng = fid + Lambda * tv
        en[k] = eng
        if VERBOSE and (k % 10 == 0):
            print("%d : Energy = %e \t Fid = %e\t TV = %e" % (k, eng, fid, tv))
        # Stopping criterion
        if np.abs(alpha) < 1e-34:  # TODO : try other bounds
            print("Warning: minimum step reached, interrupting at iteration %d" % k)
            break
    return en, x
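
This snippet relies on helpers (mydot, norm2sq, tv_smoothed, grad_tv_smoothed) and a VERBOSE flag defined elsewhere in the source module. A minimal sketch of plausible implementations, assuming the usual smoothed total variation sum(sqrt(|grad x|^2 + mu^2)) and the gradient/div pair sketched after Example #2; the actual module may define them differently:

import numpy as np

VERBOSE = True  # assumed module-level flag controlling the progress prints

def mydot(a, b):
    # Euclidean inner product of two arrays of identical shape
    return np.dot(a.ravel(), b.ravel())

def norm2sq(a):
    # squared L2 norm: ||a||_2^2
    return mydot(a, a)

def tv_smoothed(x, mu):
    # smoothed TV: sum over pixels of sqrt(|grad x|^2 + mu^2)
    g = gradient(x)
    return np.sum(np.sqrt(g[0] ** 2 + g[1] ** 2 + mu ** 2))

def grad_tv_smoothed(x, mu):
    # gradient of the smoothed TV: -div(grad x / sqrt(|grad x|^2 + mu^2))
    g = gradient(x)
    n = np.sqrt(g[0] ** 2 + g[1] ** 2 + mu ** 2)
    return -div(g / n)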
Example #2
from math import sqrt

def power_method(P, PT, data, n_it=10):
    '''
    Calculates the norm of the operator K = [grad, P],
    i.e. the square root of the largest eigenvalue of K^T*K = P^T*P - div(grad):
        ||K|| = sqrt(lambda_max(K^T*K))

    P : forward projection
    PT : back projection
    data : acquired sinogram
    '''
    x = PT(data)  # initial iterate with the shape of an image
    for k in range(0, n_it):
        x = PT(P(x)) - div(gradient(x))  # apply K^T*K = P^T*P - div(grad)
        s = sqrt(norm2sq(x))  # s converges to lambda_max(K^T*K)
        x /= s  # normalize to prevent overflow
    return sqrt(s)
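
power_method assumes a discrete gradient/div pair in which div is the negative adjoint of gradient, so that K^T*K = P^T*P - div(grad) is symmetric. A standard forward-difference sketch (an assumption, not taken from the source):

import numpy as np

def gradient(x):
    # forward-difference gradient of a 2D image, returned with shape (2, H, W)
    g = np.zeros((2,) + x.shape, dtype=x.dtype)
    g[0, :-1, :] = x[1:, :] - x[:-1, :]  # vertical differences
    g[1, :, :-1] = x[:, 1:] - x[:, :-1]  # horizontal differences
    return g

def div(g):
    # discrete divergence, defined so that <div(g), x> = -<g, gradient(x)>
    d = np.zeros(g.shape[1:], dtype=g.dtype)
    d[:-1, :] += g[0, :-1, :]
    d[1:, :] -= g[0, :-1, :]
    d[:, :-1] += g[1, :, :-1]
    d[:, 1:] -= g[1, :, :-1]
    return d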
Example #3
import numpy as np

def chambolle_pock(P, PT, data, Lambda, L, n_it, return_energy=True):
    """
    Chambolle-Pock algorithm for the minimization of the objective function
        0.5*||P*x - d||_2^2 + Lambda*TV(x)

    P : projection operator
    PT : backprojection operator
    Lambda : weight of the TV penalization (the higher Lambda, the sparser the solution's gradient)
    L : norm of the operator K = [grad, P] (see power_method)
    n_it : number of iterations
    return_energy : if True, an array containing the values of the objective function is also returned
    """

    sigma = 1.0 / L  # dual step size; sigma * tau * L^2 <= 1 ensures convergence
    tau = 1.0 / L  # primal step size

    x = 0 * PT(data)  # primal variable (the image), started from 0
    p = 0 * gradient(x)  # dual variable associated with the TV term
    q = 0 * data  # dual variable associated with the data-fidelity term
    x_tilde = 0 * x  # extrapolated primal variable
    theta = 1.0  # over-relaxation parameter

    if return_energy:
        en = np.zeros(n_it)
    for k in range(0, n_it):
        # Update dual variables (proximal steps on the conjugate functions)
        p = proj_l2(p + sigma * gradient(x_tilde), Lambda)
        q = (q + sigma * P(x_tilde) - sigma * data) / (1.0 + sigma)
        # Update primal variable, then extrapolate
        x_old = x
        x = x + tau * div(p) - tau * PT(q)
        x_tilde = x + theta * (x - x_old)
        # Track the objective function value
        if return_energy:
            fidelity = 0.5 * norm2sq(P(x) - data)
            tv = norm1(gradient(x))
            energy = fidelity + Lambda * tv
            en[k] = energy
            if VERBOSE and k % 10 == 0:
                print(
                    "[%d] : energy %e \t fidelity %e \t TV %e"
                    % (k, energy, fidelity, tv)
                )
    if return_energy:
        return en, x
    else:
        return x
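
The remaining helpers, proj_l2 and norm1, are the pixelwise projection of the dual variable onto an l2 ball of radius Lambda and the isotropic TV semi-norm. A sketch under the same assumptions, followed by a hypothetical call sequence (P, PT and sino are placeholders for a projector/backprojector pair and a measured sinogram):

import numpy as np

def proj_l2(p, radius):
    # pixelwise projection of a gradient field onto the l2 ball of the given radius
    n = np.maximum(np.sqrt(p[0] ** 2 + p[1] ** 2) / radius, 1.0)
    return p / n

def norm1(g):
    # isotropic TV semi-norm: sum of pointwise gradient magnitudes
    return np.sum(np.sqrt(g[0] ** 2 + g[1] ** 2))

# Hypothetical usage: estimate L with the power method, then reconstruct.
# L = power_method(P, PT, sino, n_it=20)
# en, rec = chambolle_pock(P, PT, sino, Lambda=0.1, L=L, n_it=300)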