Example #1
def conjugate_gradient(K, Kadj, data, n_it, return_all=True):
    '''
    Conjugate Gradient algorithm for the least-squares problem:
    0.5 * ||K x - d||_2^2

    Parameters
    -----------
    K : forward operator
    Kadj : backward operator, adjoint of K
    data: acquired data
    n_it : number of iterations
    '''

    x = 0 * Kadj(data)  # start from 0
    grad_f = -Kadj(data)
    d = -np.copy(grad_f)

    if return_all: en = np.zeros(n_it)

    for k in range(0, n_it):
        grad_f_old = grad_f
        ATAd = Kadj(K(d))
        # Calculate step size
        alpha = dot(d, -grad_f_old) / dot(d, ATAd)
        # Update variables
        x = x + alpha * d
        grad_f = grad_f_old + alpha * ATAd  # TODO: re-compute gradient every K iterations to avoid error accumulation
        beta = dot(grad_f, grad_f - grad_f_old) / norm2sq(
            grad_f_old)  # Polak-Ribiere
        if beta < 0:
            beta = 0
        d = -grad_f + beta * d
        # Energy
        if return_all:
            eng = norm2sq(K(x) - data)
            en[k] = eng
            if (k % 10 == 0):  # TODO: more flexible
                print("%d : Energy = %e" % (k, eng))

        # Stopping criterion
        if np.abs(alpha) < 1e-15:  # TODO: try other bounds
            print(
                "Warning : minimum step reached, interrupting at iteration %d"
                % k)
            break
    if return_all: return en, x
    else: return x
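This snippet assumes the module-level helpers `dot` and `norm2sq` (flat inner product and squared L2 norm) and `numpy` imported as `np`. Below is a minimal usage sketch with plausible helper definitions and a toy matrix operator; everything introduced here is illustrative, not part of the original module.

import numpy as np

def dot(a, b):
    # Inner product over all entries, whatever the array shape
    return np.sum(a * b)

def norm2sq(a):
    # Squared L2 norm over all entries
    return dot(a, a)

# Toy least-squares problem: K is an explicit matrix applied as a linear operator
A = np.random.rand(50, 20)
K = lambda x: A.dot(x)
Kadj = lambda y: A.T.dot(y)
x_true = np.random.rand(20)
data = K(x_true)

en, x_rec = conjugate_gradient(K, Kadj, data, n_it=200)
print("relative error: %e" % (np.linalg.norm(x_rec - x_true) / np.linalg.norm(x_true)))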
Example #2
def chambolle_pock_tv_wavelets(data,
                               K,
                               Kadj,
                               W,
                               Lambda1,
                               Lambda2,
                               L=None,
                               n_it=100,
                               return_all=True,
                               x0=None):

    if L is None:
        print(
            "Warn: chambolle_pock(): Lipschitz constant not provided, computing it with 20 iterations"
        )
        L = power_method(K, Kadj, data, 20)
        L = sqrt(8. + L**2) * 1.2
        print("L = %e" % L)

    sigma = 1.0 / L
    tau = 1.0 / L

    if x0 is not None:
        x = x0
    else:
        x = 0 * Kadj(data)
    p = 0 * gradient(x)
    q = 0 * data
    x_tilde = 0 * x
    theta = 1.0

    if return_all: en = np.zeros(n_it)
    for k in range(0, n_it):
        # Update dual variables
        p = proj_linf(p + sigma * gradient(x_tilde), Lambda1)
        q = (q + sigma * K(x_tilde) - sigma * data) / (1.0 + sigma)
        # Update primal variables
        x_old = x
        x = x + tau * div(p) - tau * Kadj(q)
        #
        W.set_image(x)
        W.forward()
        W.soft_threshold(Lambda2, 0, 1)
        wnorm1 = W.norm1()
        W.inverse()
        x = W.image
        #
        x_tilde = x + theta * (x - x_old)
        # Calculate norms
        if return_all:
            fidelity = 0.5 * norm2sq(K(x) - data)
            tv = norm1(gradient(x))
            energy = 1.0 * fidelity + Lambda1 * tv + Lambda2 * wnorm1
            en[k] = energy
            if (k % 10 == 0):  # TODO: more flexible
                print("[%d] : energy %e \t fidelity %e \t TV %e" %
                      (k, energy, fidelity, tv))
    if return_all: return en, x
    else: return x
Example #3
def chambolle_pock_tv_relaxed2(data,
                               K,
                               Kadj,
                               Lambda,
                               L=None,
                               rho=1.9,
                               tau=None,
                               n_it=100,
                               return_all=True,
                               x0=None):

    if L is None:
        print(
            "Warn: chambolle_pock(): Lipschitz constant not provided, computing it with 20 iterations"
        )
        L = power_method(K, Kadj, data, 20)
        L = sqrt(8. + L**2) * 1.2
        print("L = %e" % L)

    if tau is None: tau = 1.0 / L
    sigma = 1.0 / (tau * L * L)

    if x0 is not None:
        x = x0
    else:
        x = 0 * Kadj(data)
    p = 0 * gradient(x)
    q = 0 * data
    x_tilde = 0 * x
    theta = 1.0

    if return_all: en = np.zeros(n_it)
    for k in range(0, n_it):
        # Update primal variables
        x_tilde = x + tau * div(p) - tau * Kadj(q)
        # Update dual variables
        p_tilde = proj_linf(p + sigma * gradient(2 * x_tilde - x), Lambda)
        q_tilde = (q + sigma * K(2 * x_tilde - x) - sigma * data) / (1.0 +
                                                                     sigma)
        # Relaxed version
        #~ x = rho*x_tilde + (1-rho)*x
        #~ p = rho*p_tilde + (1-rho)*p
        #~ q = rho*q_tilde + (1-rho)*q
        x = x_tilde + (rho - 1) * (x_tilde - x)
        p = p_tilde + (rho - 1) * (p_tilde - p)
        q = q_tilde + (rho - 1) * (q_tilde - q)

        # Calculate norms
        if return_all:
            fidelity = 0.5 * norm2sq(K(x) - data)
            tv = norm1(gradient(x))
            energy = 1.0 * fidelity + Lambda * tv
            en[k] = energy
            if (k % 10 == 0):  # TODO: more flexible
                print("[%d] : energy %e \t fidelity %e \t TV %e" %
                      (k, energy, fidelity, tv))
    if return_all: return en, x
    else: return x
Example #4
def chambolle_pock_l1_wavelets(data,
                               W,
                               K,
                               Kadj,
                               Lambda,
                               L=None,
                               n_it=100,
                               return_all=True,
                               x0=None,
                               pos_constraint=False):

    if L is None:
        print(
            "Warn: chambolle_pock(): Lipschitz constant not provided, computing it with 20 iterations"
        )
        L = power_method(K, Kadj, data, 20)
        L = sqrt(8. + L**2) * 1.2
        print("L = %e" % L)

    sigma = 1.0 / L
    tau = 1.0 / L

    if x0 is not None:
        x = x0
    else:
        x = 0 * Kadj(data)
    p = gradient(x)
    q = 0 * data
    x_tilde = 1.0 * x
    theta = 1.0

    if return_all: en = np.zeros(n_it)
    for k in range(0, n_it):
        # Update dual variables
        # The prox of the conjugate of the L1 data term ||K x - d||_1 is a
        # projection onto the L-infinity unit ball.
        q = proj_linf(q + sigma * K(x_tilde) - sigma * data)
        # Update primal variables
        x_old = x
        W.set_image(x - tau * Kadj(q))
        W.forward()
        W.soft_threshold(Lambda * tau, do_threshold_appcoeffs=1)
        W.inverse()
        x = W.image
        x_tilde = x + theta * (x - x_old)
        # Calculate norms
        if return_all:
            fidelity = 0.5 * norm2sq(K(x) - data)
            tv = norm1(gradient(x))
            energy = 1.0 * fidelity + Lambda * tv
            en[k] = energy
            if (k % 10 == 0):  # TODO: more flexible
                print("[%d] : energy %e \t fidelity %e \t TV %e" %
                      (k, energy, fidelity, tv))
    if return_all: return en, x
    else: return x
Example #5
def chambolle_pock_tv_l2(data,
                         K,
                         Kadj,
                         U,
                         Uadj,
                         Lambda,
                         Lambda2,
                         L=None,
                         n_it=100,
                         return_all=True,
                         x0=None):

    if L is None:
        print(
            "Warn: chambolle_pock(): Lipschitz constant not provided, computing it with 20 iterations"
        )
        L = power_method(K, Kadj, data, 20)
        Lr = power_method(U, Uadj, data, 20)
        L = sqrt(8. + L**2 + Lr**2) * 1.2
        print("L = %e" % L)

    sigma = 1.0 / L
    tau = 1.0 / L

    if x0 is not None:
        x = x0
    else:
        x = 0 * Kadj(data)
    p = 0 * gradient(x)
    q = 0 * data
    r = 0 * x
    x_tilde = 0 * x
    theta = 1.0

    if return_all: en = np.zeros(n_it)
    for k in range(0, n_it):
        # Update dual variables
        p = proj_linf(p + sigma * gradient(x_tilde), Lambda)
        q = (q + sigma * K(x_tilde) - sigma * data) / (1.0 + sigma)
        r = (r + sigma * U(x_tilde)) / (1.0 + sigma / Lambda2)
        # Update primal variables
        x_old = x
        x = x + tau * div(p) - tau * Kadj(q) - tau * Uadj(r)
        x_tilde = x + theta * (x - x_old)
        # Calculate norms
        if return_all:
            fidelity = 0.5 * norm2sq(K(x) - data)
            tv = norm1(gradient(x))
            energy = 1.0 * fidelity + Lambda * tv
            en[k] = energy
            if (k % 10 == 0):  # TODO: more flexible
                print("[%d] : energy %e \t fidelity %e \t TV %e" %
                      (k, energy, fidelity, tv))
    if return_all: return en, x
    else: return x
Example #6
def chambolle_pock_tv2(data,
                       K,
                       Kadj,
                       Lambda,
                       L=None,
                       gamma=1.0,
                       theta=1.0,
                       n_it=100,
                       return_all=True,
                       x0=None):

    if L is None:
        print(
            "Warn: chambolle_pock(): Lipschitz constant not provided, computing it with 20 iterations"
        )
        L = power_method(K, Kadj, data, 20)
        L = sqrt(8. + L**2) * 1.2
        print("L = %e" % L)

    sigma = 1.0 / L
    tau = 1.0 / L
    #gamma = 0.5 # Should be the uniform convexity parameter of "F"

    if x0 is not None:
        x = x0
    else:
        x = 0 * Kadj(data)
    p = 0 * gradient(x)
    q = 0 * data
    x_tilde = 0 * x
    # theta = 1.0 # theta = 0 gives another fast algorithm

    if return_all: en = np.zeros(n_it)
    for k in range(0, n_it):
        # Update dual variables
        p = proj_linf(p + sigma * gradient(x_tilde), Lambda)
        q = (q + sigma * K(x_tilde) - sigma * data) / (1.0 + sigma)
        # Update primal variables
        x_old = x
        x = x + tau * div(p) - tau * Kadj(q)
        theta = 1. / sqrt(1. + 2 * gamma * tau)
        tau = theta * tau
        sigma = sigma / theta
        x_tilde = x + theta * (x - x_old)
        # Calculate norms
        if return_all:
            fidelity = 0.5 * norm2sq(K(x) - data)
            tv = norm1(gradient(x))
            energy = 1.0 * fidelity + Lambda * tv
            en[k] = energy
            if (k % 10 == 0):  # TODO: more flexible
                print("[%d] : energy %e \t fidelity %e \t TV %e" %
                      (k, energy, fidelity, tv))
    if return_all: return en, x
    else: return x
Example #7
def chambolle_pock_l1_wavelets_precond(data,
                                       W,
                                       K,
                                       Kadj,
                                       Lambda,
                                       n_it=100,
                                       return_all=True,
                                       x0=None,
                                       pos_constraint=False):

    if x0 is not None:
        x = x0
    else:
        x = 0 * Kadj(data)
    p = 0 * gradient(x)
    q = 0 * data
    x_tilde = x
    theta = 1.0

    # Compute the diagonal preconditioner "Sigma" for alpha=1
    # Assuming K is a positive integral operator
    Sigma_k = 1. / K(np.ones_like(x))
    Sigma_grad = 1 / 2.0
    Sigma = 1 / (1. / Sigma_k + 1. / Sigma_grad)
    # Compute the diagonal preconditioner "Tau" for alpha = 1
    # Assuming Kadj is a positive operator
    Tau = 1. / (Kadj(np.ones_like(data)) + 2.)

    if return_all: en = np.zeros(n_it)
    for k in range(0, n_it):
        # Update primal variables
        x_old = x
        W.set_image(x - Tau * Kadj(q))
        W.forward()
        W.soft_threshold(Lambda, do_threshold_appcoeffs=1)
        W.inverse()
        x = W.image
        # Update dual variables
        q = proj_linf(q + Sigma_k * K(x + theta * (x - x_old)) -
                      Sigma_k * data)  # <=
        # Calculate norms
        if return_all:
            fidelity = 0.5 * norm2sq(K(x) - data)
            tv = norm1(gradient(x))
            energy = 1.0 * fidelity + Lambda * tv
            en[k] = energy
            if (k % 10 == 0):  # TODO: more flexible
                print("[%d] : energy %e \t fidelity %e \t TV %e" %
                      (k, energy, fidelity, tv))
    if return_all: return en, x
    else: return x
Example #8
def chambolle_pock_tv_precond(data,
                              K,
                              Kadj,
                              Lambda,
                              n_it=100,
                              return_all=True,
                              x0=None,
                              pos_constraint=False):

    if x0 is not None:
        x = x0
    else:
        x = 0 * Kadj(data)
    p = 0 * gradient(x)
    q = 0 * data
    x_tilde = x
    theta = 1.0

    # Compute the diagonal preconditioner "Sigma" for alpha=1
    # Assuming K is a positive integral operator
    Sigma_k = 1. / K(np.ones_like(x))
    Sigma_grad = 1 / 2.0
    Sigma = 1 / (1. / Sigma_k + 1. / Sigma_grad)
    # Compute the diagonal preconditioner "Tau" for alpha = 1
    # Assuming Kadj is a positive operator
    Tau = 1. / (Kadj(np.ones_like(data)) + 2.)

    if return_all: en = np.zeros(n_it)
    for k in range(0, n_it):
        # Update primal variables
        x_old = x
        x = x + Tau * div(p) - Tau * Kadj(q)
        if pos_constraint:
            x[x < 0] = 0
        # Update dual variables
        p = proj_linf(
            p + Sigma_grad * gradient(x + theta * (x - x_old)), Lambda
        )  # For discrete gradient, sum|D_i,j| = 2 along lines or cols
        q = (q + Sigma_k * K(x + theta * (x - x_old)) - Sigma_k * data) / (
            1.0 + Sigma_k)  # <=
        # Calculate norms
        if return_all:
            fidelity = 0.5 * norm2sq(K(x) - data)
            tv = norm1(gradient(x))
            energy = 1.0 * fidelity + Lambda * tv
            en[k] = energy
            if (k % 10 == 0):  # TODO: more flexible
                print("[%d] : energy %e \t fidelity %e \t TV %e" %
                      (k, energy, fidelity, tv))
    if return_all: return en, x
    else: return x
Example #9
def chambolle_pock_laplace(data,
                           K,
                           Kadj,
                           Lambda,
                           L=None,
                           n_it=100,
                           return_all=True):

    if L is None:
        print(
            "Warn: chambolle_pock(): Lipschitz constant not provided, computing it with 20 iterations"
        )
        L = power_method(K, Kadj, data, 20)
        L = sqrt(
            8.**2 + L**2
        ) * 1.2  # The Laplacian is self-adjoint and has norm 8 (the squared norm of the gradient)
        print("L = %e" % L)

    sigma = 1.0 / L
    tau = 1.0 / L

    x = 0 * Kadj(data)
    p = 0 * laplacian(x)
    q = 0 * data
    x_tilde = 0 * x
    theta = 1.0

    if return_all: en = np.zeros(n_it)
    for k in range(0, n_it):
        # Update dual variables
        # The prox of the conjugate of the L1 norm of the Laplacian is a
        # projection onto the L-infinity unit ball.
        p = proj_linf(p + sigma * laplacian(x_tilde), Lambda)
        q = (q + sigma * K(x_tilde) - sigma * data) / (1.0 + sigma)
        # Update primal variables
        x_old = x
        x = x - tau * laplacian(p) - tau * Kadj(q)
        x_tilde = x + theta * (x - x_old)
        # Calculate norms
        if return_all:
            fidelity = 0.5 * norm2sq(K(x) - data)
            tv = norm1(laplacian(x))
            energy = 1.0 * fidelity + Lambda * tv
            en[k] = energy
            if (k % 10 == 0):  # TODO: more flexible
                print("[%d] : energy %e \t fidelity %e \t TV %e" %
                      (k, energy, fidelity, tv))
    if return_all: return en, x
    else: return x
Example #10
def power_method(K, Kadj, data, n_it=10):
    '''
    Calculates the norm of the operator K,
    i.e. the square root of the largest eigenvalue of K^T K:
    ||K|| = sqrt(lambda_max(K^T K))

    K : forward operator
    Kadj : backward operator (adjoint of K)
    data : initial data
    '''
    x = np.copy(Kadj(data)) # Copy in case of Kadj = Id
    for k in range(0, n_it):
        x = Kadj(K(x))
        s = sqrt(norm2sq(x*1.0))
        x /= s
    return sqrt(s)
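A quick sanity check for power_method, assuming the `norm2sq` helper sketched earlier: for an explicit matrix operator, the estimate should approach the largest singular value.

import numpy as np

A = np.random.rand(30, 30)
K = lambda x: A.dot(x)
Kadj = lambda y: A.T.dot(y)
data = np.random.rand(30)

L_est = power_method(K, Kadj, data, n_it=50)
L_ref = np.linalg.svd(A, compute_uv=False)[0]  # largest singular value, i.e. ||A||_2
print("power_method: %.4f, exact: %.4f" % (L_est, L_ref))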
Example #11
def fista_l1(data, K, Kadj, Lambda, Lip=None, n_it=100, return_all=True):
    '''
    Beck-Teboulle's forward-backward (FISTA) algorithm to minimize the objective function
    0.5*||K*x - d||_2^2 + Lambda*||x||_1
    where K is a linear operator.

    Parameters
    -----------
    K : forward operator
    Kadj : backward operator
    Lambda : weight of the regularization (the higher Lambda, the sparser the solution)
    Lip : largest eigenvalue of Kadj*K
    n_it : number of iterations
    return_all: if True, an array containing the values of the objective function will be returned
    '''

    if Lip is None:
        print(
            "Warn: fista_l1(): Lipschitz constant not provided, computing it with 20 iterations"
        )
        Lip = power_method(K, Kadj, data, 20)**2 * 1.2
        print("Lip = %e" % Lip)

    if return_all: en = np.zeros(n_it)
    x = np.zeros_like(Kadj(data))
    y = np.zeros_like(x)
    for k in range(0, n_it):
        grad_y = Kadj(K(y) - data)
        x_old = x
        w = y - (1.0 / Lip) * grad_y
        w = _soft_thresh(w, Lambda / Lip)
        x = w
        y = x + ((k - 1.0) / (k + 10.1)) * (
            x - x_old)  # TODO : see what would be the best parameter "a"
        # Calculate norms
        if return_all:
            fidelity = 0.5 * norm2sq(K(x) - data)
            l1 = norm1(w)
            energy = fidelity + Lambda * l1
            en[k] = energy
            if (k % 10 == 0):  # TODO: more flexible
                print("[%d] : energy %e \t fidelity %e \t L1 %e" %
                      (k, energy, fidelity, l1))
        #~ elif (k%10 == 0): print("Iteration %d" % k)
    if return_all: return en, x
    else: return x
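fista_l1 relies on `_soft_thresh` and `norm1`, which are not shown in these examples, as well as the `norm2sq` helper sketched earlier. Below is a plausible sketch of both, followed by a small sparse-recovery call; the operator, sizes and Lambda are illustrative.

import numpy as np

def _soft_thresh(x, beta):
    # Proximal operator of beta*||x||_1: element-wise soft thresholding
    return np.sign(x) * np.maximum(np.abs(x) - beta, 0)

def norm1(x):
    return np.sum(np.abs(x))

A = np.random.randn(40, 100)
K = lambda x: A.dot(x)
Kadj = lambda y: A.T.dot(y)
x_true = np.zeros(100)
x_true[::20] = 1.0                      # sparse ground truth
data = K(x_true)

Lip = np.linalg.norm(A, 2) ** 2         # largest eigenvalue of Kadj*K
en, x_rec = fista_l1(data, K, Kadj, Lambda=0.1, Lip=Lip, n_it=300)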
Example #12
def chambolle_pock_deblur_tv(img,
                             G,
                             kern,
                             Lambda,
                             L=None,
                             n_it=100,
                             return_all=True,
                             bw=10):
    """
    prototype
    """

    if L is None: L = 2.83  # sqrt(8)
    sigma = 1.0 / L
    tau = 1.0 / L
    theta = 1.0

    x = 0 * img
    x_tilde = 0 * x
    y = 0 * x

    if return_all: en = np.zeros(n_it)
    for k in range(0, n_it):
        # y_{k+1} = prox_{sigma G^*} (y_k + sigma K xtilde_k)
        y = _soft_thresh(y + sigma * gradient(x_tilde), Lambda * sigma)
        # x_{k+1} = prox_{tau F} (x_n - tau K^* y_{k+1})
        x_old = np.copy(x)
        x = pinv_fourier(x + tau * div(y) + tau * G(img), kern, tau=tau, bw=bw)
        # xtilde{k+1} = x_{k+1} + theta (x_{k+1} - x_k)
        x_tilde = x + theta * (x - x_old)
        # Calculate norms
        if return_all:
            fidelity = 0.5 * norm2sq(G(x) - img)
            tv = norm1(gradient(x))
            energy = 1.0 * fidelity + Lambda * tv
            en[k] = energy
            if (k % 10 == 0):  # TODO: more flexible
                print("[%d] : energy %e \t fidelity %e \t TV %e" %
                      (k, energy, fidelity, tv))
    if return_all: return en, x
    else: return x
Example #13
def fista_wavelets_synth(data,
                         W,
                         K,
                         Kadj,
                         Lambda,
                         Lip=None,
                         n_it=100,
                         normalize=False,
                         dta=False):
    """
    Solve the synthesis formulation
    0.5*||K W^T w - data||_2^2  +  Lambda*||w||_1
    """
    dta = 1 if bool(dta) else 0
    normalize = 1 if bool(normalize) else 0
    if Lip is None:
        print(
            "Warn: Lipschitz constant not provided, computing it with 20 iterations"
        )
        Lip = power_method(K, Kadj, data, 20)**2 * 1.2 * (
            4**W.levels)  # composition with "W"
        print("Lip = %e" % Lip)

    def opW(x):
        W.set_image(x)
        W.forward()
        return deepcopy(W.coeffs)

    def opWadj(x):
        W.set_coeff(x[0], 0, check=False)
        for i in range(1, len(x)):
            W.set_coeff(x[i][0], 3 * (i - 1) + 1, check=False)
            W.set_coeff(x[i][1], 3 * (i - 1) + 2, check=False)
            W.set_coeff(x[i][2], 3 * (i - 1) + 3, check=False)
        W.inverse()
        return W.image * 4.**W.levels  # scaling to get the transpose

    def coeffs_add_scaled(y, x, alpha):
        """
        y = y + alpha*x
        """
        y[0] += alpha * x[0]
        for i in range(1, len(x)):
            y[i][0] += alpha * x[i][0]
            y[i][1] += alpha * x[i][1]
            y[i][2] += alpha * x[i][2]

    #

    def coeffs_soft_thresh(out,
                           x,
                           beta,
                           do_threshold_appcoeffs=False,
                           normalize=False):
        if do_threshold_appcoeffs: out[0] = _soft_thresh(x[0], beta)
        for i in range(1, len(x)):
            if normalize: beta /= 1.4142135623730951
            out[i][0] = _soft_thresh(x[i][0], beta)
            out[i][1] = _soft_thresh(x[i][1], beta)
            out[i][2] = _soft_thresh(x[i][2], beta)

    #

    def coeffs_memset(x):
        x[0] *= 0
        for i in range(1, len(x)):
            x[i][0] *= 0
            x[i][1] *= 0
            x[i][2] *= 0

    #

    def coeffs_norm2sq(x, order):
        res = 0
        res = x[0].ravel().dot(x[0].ravel())
        for i in range(1, len(x)):
            res += x[i][0].ravel().dot(x[i][0].ravel())
            res += x[i][1].ravel().dot(x[i][1].ravel())
            res += x[i][2].ravel().dot(x[i][2].ravel())
        return res

    def coeffs_norm1(x):
        res = 0
        res = np.sum(np.abs(x[0]))
        for i in range(1, len(x)):
            res += np.sum(np.abs(x[i][0]))
            res += np.sum(np.abs(x[i][1]))
            res += np.sum(np.abs(x[i][2]))
        return res

    x = opW(np.zeros_like(Kadj(data)))
    y = deepcopy(x)
    for k in range(0, n_it):

        grad_y = opW(Kadj(K(opWadj(y)) - data))
        x_old = deepcopy(x)
        # y - (1.0/Lip)*grad_y
        coeffs_add_scaled(y, grad_y, (-1.0 / Lip))
        # soft threshold this
        coeffs_soft_thresh(x,
                           y,
                           Lambda / Lip,
                           do_threshold_appcoeffs=dta,
                           normalize=normalize)

        # y = x + theta*(x - x_old)   = (1 + theta)*x - theta*x_old
        theta = (k - 1.0) / (
            k + 10.1)  # TODO: determine best parameter "a" (here 10.1)
        coeffs_memset(y)
        coeffs_add_scaled(y, x, 1 + theta)
        coeffs_add_scaled(y, x_old, -theta)

        # Calculate norms
        if (k % 10 == 0):
            fidelity = 0.5 * norm2sq(K(opWadj(x)) - data)
            l1 = coeffs_norm1(x)
            energy = fidelity + Lambda * l1
            print("[%d] : energy %e \t fidelity %e \t L1 %e" %
                  (k, energy, fidelity, l1))
    return opWadj(x)
Example #14
def fista_l1_operator(data,
                      K,
                      Kadj,
                      Lambda,
                      H,
                      Hinv,
                      soft_thresh,
                      Lip=None,
                      n_it=100,
                      return_all=True):
    '''
    Beck-Teboulle's forward-backward algorithm to minimize the objective function
    0.5*||K*x - d||_2^2 + Lambda*||H*x||_1
    where K and H are linear operators, and H is invertible.

    Parameters
    -----------
    K : forward operator
    Kadj : backward operator
    Lambda : weight of the regularization (the higher Lambda, the sparser the solution in the H domain)
    H : *invertible* linear operator (eg. sparsifying transform, like Wavelet transform).
    Hinv : inverse operator of H
    soft_thresh : *in-place* function doing the soft thresholding (proximal operator of L1 norm) of the coefficients H(image)
    Lip : largest eigenvalue of Kadj*K
    n_it : number of iterations
    return_all: if True, an array containing the values of the objective function will be returned
    '''

    # Check if H, Hinv and soft_thresh are callable
    if not callable(H) or not callable(Hinv) or not callable(soft_thresh):
        raise ValueError(
            'fista_l1() : the H, Hinv and soft_thresh parameters must be callable')
    # Check that H and Hinv are inverses of each other
    u = np.random.rand(512, 512)
    Hu = H(u)
    if np.max(np.abs(u - Hinv(Hu))
              ) > 1e-3:  # FIXME: not sure what tolerance I should take
        raise ValueError(
            'fista_l1() : the H operator inverse does not seem reliable')
    # Check that soft_thresh is an in-place operator
    thr = soft_thresh(Hu, 1.0)
    if thr is not None:
        raise ValueError(
            'fista_l1(): the soft_thresh parameter must be an in-place modification of the coefficients'
        )
    # Check if the coefficients H(u) have a method "norm1"
    can_compute_l1 = callable(getattr(Hu, "norm1", None))

    if Lip is None:
        print(
            "Warn: fista_l1(): Lipschitz constant not provided, computing it with 20 iterations"
        )
        Lip = power_method(K, Kadj, data, 20)**2 * 1.2
        print("Lip = %e" % Lip)

    if return_all: en = np.zeros(n_it)
    x = np.zeros_like(Kadj(data))
    y = np.zeros_like(x)
    for k in range(0, n_it):
        grad_y = Kadj(K(y) - data)
        x_old = x
        w = H(y - (1.0 / Lip) * grad_y)
        soft_thresh(w, Lambda / Lip)
        x = Hinv(w)
        y = x + ((k - 1.0) / (k + 10.1)) * (
            x - x_old)  # TODO : see what would be the best parameter "a"
        # Calculate norms
        if return_all:
            fidelity = 0.5 * norm2sq(K(x) - data)
            l1 = w.norm1() if can_compute_l1 else 0.
            energy = fidelity + Lambda * l1
            en[k] = energy
            if (k % 10 == 0):  # TODO: more flexible
                print("[%d] : energy %e \t fidelity %e \t L1 %e" %
                      (k, energy, fidelity, l1))
        elif (k % 10 == 0):
            print("Iteration %d" % k)
    if return_all: return en, x
    else: return x
Example #15
def chambolle_pock_tv(data,
                      K,
                      Kadj,
                      Lambda,
                      L=None,
                      n_it=100,
                      return_all=True,
                      x0=None,
                      pos_constraint=False):
    '''
    Chambolle-Pock algorithm for Total Variation regularization.
    The following objective function is minimized: 0.5*||K x - d||_2^2 + Lambda*TV(x)

    Parameters
    -----------

    K : function
        forward operator
    Kadj : function
        backward operator
    Lambda : float
        weight of the TV penalization (the higher Lambda, the stronger the regularization)
    L : float
        norm of the stacked operator [K, grad] (see power_method)
    n_it : int
        number of iterations
    return_all: bool
        if True, an array containing the values of the objective function will be returned
    x0: numpy.ndarray
        initial solution estimate
    '''

    if L is None:
        print(
            "Warn: chambolle_pock(): Lipschitz constant not provided, computing it with 20 iterations"
        )
        L = power_method(K, Kadj, data, 20)
        L = sqrt(8. + L**2) * 1.2
        print("L = %e" % L)

    sigma = 1.0 / L
    tau = 1.0 / L

    if x0 is not None:
        x = x0
    else:
        x = 0 * Kadj(data)
    p = gradient(x)
    q = 0 * data
    x_tilde = 1.0 * x
    theta = 1.0

    if return_all: en = np.zeros(n_it)
    for k in range(0, n_it):
        # Update dual variables
        # For isotropic TV, the prox is a projection onto the L2 unit ball.
        # For anisotropic TV, this is a projection onto the L-infinity unit ball.
        p = proj_linf(p + sigma * gradient(x_tilde), Lambda)
        q = (q + sigma * K(x_tilde) - sigma * data) / (1.0 + sigma)
        # Update primal variables
        x_old = x
        x = x + tau * div(p) - tau * Kadj(q)
        if pos_constraint:
            x[x < 0] = 0
        x_tilde = x + theta * (x - x_old)
        # Calculate norms
        if return_all:
            fidelity = 0.5 * norm2sq(K(x) - data)
            tv = norm1(gradient(x))
            energy = 1.0 * fidelity + Lambda * tv
            en[k] = energy
            if (k % 10 == 0):  # TODO: more flexible
                print("[%d] : energy %e \t fidelity %e \t TV %e" %
                      (k, energy, fidelity, tv))
    if return_all: return en, x
    else: return x
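The Chambolle-Pock examples all rely on module helpers `gradient`, `div`, `proj_linf`, `norm1` and `norm2sq`. The sketch below gives one compatible set of definitions (forward differences, anisotropic L-infinity projection) plus a tiny TV-denoising call with K = identity; the actual module implementations may differ.

import numpy as np

def gradient(img):
    # Forward differences with Neumann boundary, stacked as a (2, H, W) array
    g = np.zeros((2,) + img.shape, dtype=img.dtype)
    g[0, :-1, :] = img[1:, :] - img[:-1, :]
    g[1, :, :-1] = img[:, 1:] - img[:, :-1]
    return g

def div(p):
    # Negative adjoint of `gradient`: <gradient(x), p> = -<x, div(p)>
    d = np.zeros(p.shape[1:], dtype=p.dtype)
    d[:-1, :] += p[0, :-1, :]
    d[1:, :] -= p[0, :-1, :]
    d[:, :-1] += p[1, :, :-1]
    d[:, 1:] -= p[1, :, :-1]
    return d

def proj_linf(p, radius=1.0):
    # Projection onto the L-infinity ball of the given radius (component-wise clipping)
    return np.clip(p, -radius, radius)

def norm1(x):
    return np.sum(np.abs(x))

def norm2sq(x):
    return np.sum(x ** 2)

# Tiny usage sketch: TV denoising (K = identity)
img = np.random.rand(64, 64)
en, denoised = chambolle_pock_tv(img, lambda x: x, lambda y: y, Lambda=0.1, n_it=50)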
Example #16
def conjugate_gradient_tv(data,
                          K,
                          Kadj,
                          Lambda,
                          n_it,
                          mu=1e-4,
                          return_all=True,
                          x0=None,
                          recalculate_gradient=50):
    '''
    Conjugate Gradient algorithm to minimize the objective function
    1/2 ||K x - d||_2^2 + Lambda TV_mu (x)
    where TV_mu is the Moreau-Yosida regularization of the Total Variation.

    Parameters
    ------------
    K : forward operator
    Kadj : backward operator, adjoint of K
    data: acquired data
    Lambda : parameter weighting the TV regularization
    mu : parameter of Moreau-Yosida approximation of TV (small positive value)
    n_it : number of iterations
    x0: initial solution estimate
    recalculate_gradient: period (in iterations) at which the gradient is fully re-computed to avoid error accumulation
    '''

    if x0 is not None:
        x = x0
    else:
        x = 0 * Kadj(data)  # start from 0
    grad_f = -Kadj(data)
    grad_F = grad_f + Lambda * grad_tv_smoothed(x, mu)
    d = -np.copy(grad_F)

    if return_all: en = np.zeros(n_it)

    for k in range(0, n_it):
        grad_f_old = grad_f
        grad_F_old = grad_F
        ATAd = Kadj(K(d))
        # Calculate step size
        alpha = dot(d, -grad_F_old) / dot(d, ATAd)
        # Update variables
        x = x + alpha * d
        if (k % recalculate_gradient) == 0:
            grad_f = Kadj(K(x) - data)
        else:
            grad_f = grad_f_old + alpha * ATAd
        grad_F = grad_f + Lambda * grad_tv_smoothed(x, mu)
        beta = dot(grad_F, grad_F - grad_F_old) / norm2sq(
            grad_F_old)  # Polak-Ribiere
        if beta < 0:
            beta = 0
        d = -grad_F + beta * d
        # Energy
        if return_all:
            fid = norm2sq(K(x) - data)
            tv = tv_smoothed(x, mu)
            eng = fid + Lambda * tv
            en[k] = eng
            if (k % 10 == 0):  # TODO: more flexible
                print("%d : Energy = %e \t Fid = %e\t TV = %e" %
                      (k, eng, fid, tv))

        # Stopping criterion
        if np.abs(alpha) < 1e-15:  # TODO: try other bounds
            print(
                "Warning : minimum step reached, interrupting at iteration %d"
                % k)
            break
    if return_all: return en, x
    else: return x
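conjugate_gradient_tv additionally needs `tv_smoothed` and `grad_tv_smoothed`. Here is a hedged sketch of one common choice (pseudo-Huber smoothing of TV), reusing the `gradient`/`div` sketch above; the module's actual smoothing may differ in details.

import numpy as np

def tv_smoothed(x, mu):
    # Smoothed total variation: sum of sqrt(|grad x|^2 + mu^2)
    g = gradient(x)
    return np.sum(np.sqrt(g[0] ** 2 + g[1] ** 2 + mu ** 2))

def grad_tv_smoothed(x, mu):
    # Gradient of the smoothed TV: -div( grad x / sqrt(|grad x|^2 + mu^2) )
    g = gradient(x)
    n = np.sqrt(g[0] ** 2 + g[1] ** 2 + mu ** 2)
    return -div(g / n)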
Example #17
def fista_wavelets(data,
                   W,
                   K,
                   Kadj,
                   Lambda,
                   Lip=None,
                   n_it=100,
                   return_all=True,
                   normalize=False,
                   dta=False,
                   x0=None):
    """
    Algorithm for solving the regularized inverse problem
    ||K x - data||_2^2  +  Lambda*||W x||_1
    Where K is some forward operator, and W is a Wavelet transform.
    FISTA is used to solve this problem, provided that the Wavelet transform is semi-orthogonal:
    W^T W = alpha* Id
    which is the case for DWT/SWT with orthogonal filters.

    Parameters
    ----------
    data: numpy.ndarray
        data to reconstruct from
    W: Wavelets instance
        Wavelet instance (from pypwt import Wavelets; W = Wavelets(img, "wname", levels, ...))
    K: function
        Operator of the  forward model
    Kadj: function
        Adjoint operator of K. We should have ||K x||_2^2 = < x | Kadj K x >
    Lambda: float
        Regularization parameter.
    Lip: float (Optional, default is None)
        Largest eigenvalue of (Kadj K). If None, it is automatically computed.
    n_it: integer
        Number of iterations
    return_all: bool
        If True, two arrays are returned: the objective function and the result.
    normalize: bool (Optional, default is False)
        If True, the thresholding is normalized (the threshold is smaller for the coefficients in finer scales).
        Mind that the threshold should be adapted (roughly twice as large as for normalize=False).
    dta: bool, optional, default is False.
        Do Threshold Approximation coefficients. If set to True, the approximation coefficients are also thresholded.
    x0: numpy.ndarray
        initial solution estimate
    """
    dta = 1 if bool(dta) else 0
    normalize = 1 if bool(normalize) else 0
    if Lip is None:
        print(
            "Warn: Lipschitz constant not provided, computing it with 20 iterations"
        )
        Lip = power_method(K, Kadj, data, 20)**2 * 1.2
        print("Lip = %e" % Lip)

    if return_all: en = np.zeros(n_it)
    if x0 is not None:
        x = x0
        y = x0
    else:
        x = np.zeros_like(Kadj(data))
        y = np.zeros_like(x)
    for k in range(0, n_it):
        grad_y = Kadj(K(y) - data)
        x_old = x
        W.set_image((y - (1.0 / Lip) * grad_y).astype(np.float32))
        W.forward()
        W.soft_threshold(Lambda / Lip,
                         do_threshold_appcoeffs=dta,
                         normalize=normalize)
        W.inverse()
        x = W.image
        y = x + ((k - 1.0) / (k + 10.1)) * (
            x - x_old)  # TODO : see what would be the best parameter "a"
        # Calculate norms
        if return_all:
            fidelity = 0.5 * norm2sq(K(x) - data)
            l1 = W.norm1()
            energy = fidelity + Lambda * l1
            en[k] = energy
            if (k % 10 == 0):  # TODO: more flexible
                print("[%d] : energy %e \t fidelity %e \t L1 %e" %
                      (k, energy, fidelity, l1))
    if return_all: return en, x
    else: return x
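A usage sketch for fista_wavelets, assuming the pypwt package mentioned in the docstring is installed; the wavelet name, number of levels, operator and Lambda below are illustrative, not prescribed by the original code.

import numpy as np
from pypwt import Wavelets

img = np.random.rand(256, 256).astype(np.float32)   # stand-in for real data
K = lambda x: x                                     # identity => plain wavelet denoising
Kadj = lambda y: y

W = Wavelets(img, "haar", 3)                        # wavelet name and levels chosen arbitrarily here
en, rec = fista_wavelets(img, W, K, Kadj, Lambda=0.05, Lip=1.0, n_it=100)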
Example #18
def chambolle_pock_kl_tv(data,
                         K,
                         Kadj,
                         Lambda,
                         L=None,
                         n_it=100,
                         return_all=True,
                         x0=None):
    '''
    Chambolle-Pock algorithm for KL-TV.
    The following objective function is minimized: KL(K x, d) + Lambda*TV(x),
    where KL(x, y) is a modified Kullback-Leibler divergence.
    This method might be more effective than L2-TV for Poisson noise.

    K : forward operator
    Kadj : backward operator
    Lambda : weight of the TV penalization (the higher Lambda, the stronger the regularization)
    L : norm of the stacked operator [K, grad] (see power_method)
    n_it : number of iterations
    return_all: if True, an array containing the values of the objective function will be returned
    x0: initial solution estimate
    '''

    if L is None:
        print(
            "Warn: chambolle_pock(): Lipschitz constant not provided, computing it with 20 iterations"
        )
        L = power_method(K, Kadj, data, 20)
        L = sqrt(8. + L**2) * 1.2
        print("L = %e" % L)
    sigma = 1.0 / L
    tau = 1.0 / L

    if x0 is not None:
        x = x0
    else:
        x = 0 * Kadj(data)
    p = 0 * gradient(x)
    q = 0 * data
    x_tilde = 0 * x
    theta = 1.0

    #
    O = np.ones_like(q)
    #

    if return_all: en = np.zeros(n_it)
    for k in range(0, n_it):
        # Update dual variables (closed-form prox of the conjugate of the KL data term)
        tmp = q + sigma * K(x_tilde)
        q = 0.5 * (O + tmp - np.sqrt((tmp - O)**2 + 4 * sigma * data))
        tmp = p + sigma * gradient(x_tilde)
        p = Lambda * (tmp) / np.maximum(Lambda, np.abs(tmp))

        # Update primal variables
        x_old = x
        x = x + tau * div(p) - tau * Kadj(q)
        x_tilde = x + theta * (x - x_old)
        # Calculate norms
        if return_all:
            fidelity = 0.5 * norm2sq(K(x) - data)
            tv = norm1(gradient(x))
            energy = 1.0 * fidelity + Lambda * tv
            en[k] = energy
            if (k % 10 == 0):  # TODO: more flexible
                print("[%d] : energy %e \t fidelity %e \t TV %e" %
                      (k, energy, fidelity, tv))
    if return_all: return en, x
    else: return x
Example #19
def chambolle_pock_l1_tv(data,
                         K,
                         Kadj,
                         Lambda,
                         L=None,
                         n_it=100,
                         return_all=True,
                         x0=None):
    '''
    Chambolle-Pock algorithm for L1-TV.
    The following objective function is minimized: ||K x - d||_1 + Lambda*TV(x).
    This method is recommended over L2-TV for noise with strong outliers (e.g. salt & pepper).

    Parameters
    ------------
    K : forward operator
    Kadj : backward operator
    Lambda : weight of the TV penalization (the higher Lambda, the stronger the regularization)
    L : norm of the stacked operator [K, grad] (see power_method)
    n_it : number of iterations
    return_all: if True, an array containing the values of the objective function will be returned
    x0: initial solution estimate
    '''

    if L is None:
        print(
            "Warn: chambolle_pock(): Lipschitz constant not provided, computing it with 20 iterations"
        )
        L = power_method(K, Kadj, data, 20)
        L = sqrt(8. + L**2) * 1.2
        print("L = %e" % L)
    sigma = 1.0 / L
    tau = 1.0 / L

    if x0 is not None:
        x = x0
    else:
        x = 0 * Kadj(data)
    p = 0 * gradient(x)
    q = 0 * data
    x_tilde = 0 * x
    theta = 1.0

    if return_all: en = np.zeros(n_it)
    for k in range(0, n_it):
        # Update dual variables
        p = proj_l2(p + sigma * gradient(x_tilde), Lambda)
        q = proj_linf(
            q + sigma * K(x_tilde) - sigma * data
        )  # Here the projection onto the l-infinity ball is absolutely required !

        # Update primal variables
        x_old = x
        x = x + tau * div(p) - tau * Kadj(q)
        x_tilde = x + theta * (x - x_old)
        # Calculate norms
        if return_all:
            fidelity = 0.5 * norm2sq(K(x) - data)
            tv = norm1(gradient(x))
            energy = 1.0 * fidelity + Lambda * tv
            en[k] = energy
            if (k % 10 == 0):  # TODO: more flexible
                print("[%d] : energy %e \t fidelity %e \t TV %e" %
                      (k, energy, fidelity, tv))
    if return_all: return en, x
    else: return x
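chambolle_pock_l1_tv additionally uses `proj_l2`, the isotropic counterpart of `proj_linf`. Below is a sketch under the same (2, H, W) dual-field convention as above; again an assumption about the surrounding module, not its actual implementation.

import numpy as np

def proj_l2(p, radius=1.0):
    # Project each 2-vector p[:, i, j] onto the disk of the given radius
    n = np.maximum(np.sqrt(p[0] ** 2 + p[1] ** 2) / radius, 1.0)
    return p / n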