Example #1
def check_adjoint(K, Kadj, K_input_shape, Kadj_input_shape):
    '''
    Checks whether the operators K and Kadj are actually adjoints of each other, i.e. whether
        < K(x), y > = < x, Kadj(y) >
    for a random x and y; returns the absolute discrepancy.
    '''

    x = np.random.rand(*K_input_shape)
    y = np.random.rand(*Kadj_input_shape)
    err = abs(dot(K(x), y) - dot(x, Kadj(y)))
    return err
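This test relies on `dot` being an inner product over flattened arrays; that helper (and `norm2sq`, used by the next two examples) is not shown in the snippets. Below is a minimal sketch of plausible definitions together with a toy call of `check_adjoint` on a small matrix operator; the helper bodies and the matrix example are assumptions, not the original module's code.

import numpy as np

def dot(a, b):
    # inner product of the flattened arrays (assumed definition)
    return np.dot(np.ravel(a), np.ravel(b))

def norm2sq(a):
    # squared L2 norm of the flattened array (assumed definition)
    return dot(a, a)

if __name__ == "__main__":
    # toy check: a matrix and its transpose are exact adjoints
    A = np.random.rand(5, 3)
    K = lambda x: A.dot(x)        # forward operator
    Kadj = lambda y: A.T.dot(y)   # adjoint operator
    err = check_adjoint(K, Kadj, (3,), (5,))
    print("adjoint error: %e" % err)  # should be near machine precision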
Example #2
def conjugate_gradient_tv(K, Kadj, data, Lambda, n_it, mu=1e-4, return_energy=True):
    '''
    Conjugate Gradient algorithm to minimize the objective function
        0.5*||K*x - d||_2^2 + Lambda*TV_mu(x)
    where TV_mu is the Moreau-Yosida regularization of the Total Variation.

    K : forward operator
    Kadj : backward operator, adjoint of K
    data : acquired data
    Lambda : parameter weighting the TV regularization
    n_it : number of iterations
    mu : parameter of the Moreau-Yosida approximation of TV (small positive value)
    return_energy : if True, also return the energy at each iteration
    '''

    Kadj_data = Kadj(data)
    x = np.zeros_like(Kadj_data)  # start from x = 0
    grad_f = -Kadj_data           # gradient of the data-fidelity term at x = 0
    grad_F = grad_f + Lambda*grad_tv_smoothed(x, mu)
    d = -np.copy(grad_F)

    if return_energy: en = np.zeros(n_it)

    for k in range(0, n_it):
        grad_f_old = grad_f
        grad_F_old = grad_F
        ATAd = Kadj(K(d))
        # Calculate step size
        alpha = dot(d, -grad_F_old)/dot(d, ATAd)
        # Update variables
        x = x + alpha*d
        grad_f = grad_f_old + alpha*ATAd # TODO: re-compute gradient every K iterations to avoid error accumulation
        grad_F = grad_f + Lambda*grad_tv_smoothed(x,mu)
        beta = dot(grad_F, grad_F - grad_F_old)/norm2sq(grad_F_old) # Polak-Ribiere
        if beta < 0:
            beta = 0
        d = -grad_F + beta*d
        # Energy
        if return_energy:
            fid = norm2sq(K(x)-data)
            tv = tv_smoothed(x,mu)
            eng = fid+Lambda*tv
            en[k] = eng
            if (k % 10 == 0): # TODO: more flexible
                print("%d : Energy = %e \t Fid = %e\t TV = %e" %(k, eng, fid, tv))

        # Stopping criterion
        if np.abs(alpha) < 1e-15: # TODO: try other bounds
            print("Warning: minimum step reached, interrupting at iteration %d" % k)
            break
    if return_energy: return en, x
    else: return x
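conjugate_gradient_tv also relies on `tv_smoothed` and `grad_tv_smoothed`, which are not part of the snippet. A common choice (only an assumption here; the original module may define them differently) is the smoothed TV of a 2D image, TV_mu(x) = sum sqrt(|grad x|^2 + mu^2), whose gradient is -div(grad x / sqrt(|grad x|^2 + mu^2)). A minimal sketch under that assumption:

import numpy as np

def gradient(x):
    # forward-difference gradient of a 2D image, shape (2, H, W)
    g = np.zeros((2,) + x.shape)
    g[0, :-1, :] = x[1:, :] - x[:-1, :]   # differences along rows
    g[1, :, :-1] = x[:, 1:] - x[:, :-1]   # differences along columns
    return g

def div(g):
    # divergence, defined so that div is the negative adjoint of gradient
    d = np.zeros(g.shape[1:])
    d[:-1, :] += g[0, :-1, :]
    d[1:, :]  -= g[0, :-1, :]
    d[:, :-1] += g[1, :, :-1]
    d[:, 1:]  -= g[1, :, :-1]
    return d

def tv_smoothed(x, mu):
    # smoothed total variation: sum of sqrt(|grad x|^2 + mu^2)
    g = gradient(x)
    return np.sum(np.sqrt(g[0]**2 + g[1]**2 + mu**2))

def grad_tv_smoothed(x, mu):
    # gradient of tv_smoothed: -div(grad x / sqrt(|grad x|^2 + mu^2))
    g = gradient(x)
    n = np.sqrt(g[0]**2 + g[1]**2 + mu**2)
    return -div(g / n)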
Example #3
def conjugate_gradient(K, Kadj, data, n_it, return_energy=True):
    '''
    Conjugate Gradient algorithm for the least-squares problem
        0.5*||K*x - d||_2^2

    K : forward operator
    Kadj : backward operator, adjoint of K
    data : acquired data
    n_it : number of iterations
    return_energy : if True, also return the energy at each iteration
    '''

    Kadj_data = Kadj(data)
    x = np.zeros_like(Kadj_data)  # start from x = 0
    grad_f = -Kadj_data           # gradient of 0.5*||K*x - d||^2 at x = 0
    d = -np.copy(grad_f)

    if return_energy: en = np.zeros(n_it)

    for k in range(0, n_it):
        grad_f_old = grad_f
        ATAd = Kadj(K(d))
        # Calculate step size
        alpha = dot(d, -grad_f_old)/dot(d, ATAd)
        # Update variables
        x = x + alpha*d
        grad_f = grad_f_old + alpha*ATAd # TODO: re-compute gradient every K iterations to avoid error accumulation
        beta = dot(grad_f, grad_f - grad_f_old)/norm2sq(grad_f_old) # Polak-Ribiere
        if beta < 0:
            beta = 0
        d = -grad_f + beta*d
        # Energy
        if return_energy:
            eng = norm2sq(K(x)-data)
            en[k] = eng
            if (k % 10 == 0): # TODO: more flexible
                print("%d : Energy = %e" %(k, eng))

        # Stopping criterion
        if np.abs(alpha) < 1e-15: # TODO: try other bounds
            print("Warning: minimum step reached, interrupting at iteration %d" % k)
            break
    if return_energy: return en, x
    else: return x
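A toy run of conjugate_gradient, assuming numpy and the `dot`/`norm2sq` helpers sketched after Example #1; the matrix operator and problem sizes are illustrative only:

import numpy as np

np.random.seed(0)
A = np.random.rand(40, 20)            # toy forward operator as a matrix
x_true = np.random.rand(20)
data = A.dot(x_true)                  # noiseless synthetic data

K = lambda x: A.dot(x)
Kadj = lambda y: A.T.dot(y)
print("adjoint error: %e" % check_adjoint(K, Kadj, (20,), (40,)))

en, x_rec = conjugate_gradient(K, Kadj, data, n_it=30)
rel_err = np.linalg.norm(x_rec - x_true) / np.linalg.norm(x_true)
print("relative reconstruction error: %e" % rel_err)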