def tgv(operator, data, alpha, beta, grad=None, nonneg=True, datafit=None):
    """Set up the triple (G, F, A) for TGV-regularized reconstruction.

    The returned functionals and operator define the saddle-point problem
    ``min_x G(x) + F(A x)`` with a total generalized variation prior weighted
    by ``alpha`` (first order) and ``alpha * beta`` (second order).
    """
    space = operator.domain

    if grad is None:
        grad = gradient(space)

    E = symm_derivative(space)
    I = odl.IdentityOperator(grad.range)

    A1 = odl.ReductionOperator(operator,
                               odl.ZeroOperator(grad.range, operator.range))
    A2 = odl.ReductionOperator(grad, -I)
    A3 = odl.ReductionOperator(odl.ZeroOperator(space, E.range), E)
    A = odl.BroadcastOperator(A1, A2, A3)

    F1 = get_data_fit(datafit, data)
    F2 = alpha * odl.solvers.GroupL1Norm(grad.range)
    F3 = alpha * beta * odl.solvers.GroupL1Norm(E.range)
    F = odl.solvers.SeparableSum(F1, F2, F3)

    if nonneg:
        # Nonnegativity constraint acts on the image component only
        G = odl.solvers.SeparableSum(odl.solvers.IndicatorNonnegativity(space),
                                     odl.solvers.ZeroFunctional(E.domain))
    else:
        G = odl.solvers.SeparableSum(odl.solvers.ZeroFunctional(space),
                                     odl.solvers.ZeroFunctional(E.domain))

    return G, F, A
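
# A minimal usage sketch (hypothetical setup): the returned triple plugs into ODL's PDHG
# solver, with the primal variable living in the product space A.domain = space x grad.range.
# The forward operator, data and weights below are placeholders, and the module helpers
# gradient, symm_derivative and get_data_fit are assumed to be available.
import odl
from odl.util.testutils import noise_element

space = odl.uniform_discr([0, 0], [1, 1], [128, 128])        # assumed image space
operator = odl.IdentityOperator(space)                       # placeholder forward operator
data = operator(odl.phantom.shepp_logan(space, modified=True))

G, F, A = tgv(operator, data, alpha=1e-1, beta=1.0)

# Standard PDHG step sizes from an operator-norm estimate
op_norm = 1.1 * odl.power_method_opnorm(A, xstart=noise_element(A.domain))
tau = sigma = 1.0 / op_norm

x = A.domain.zero()                                          # x[0]: image, x[1]: vector field
odl.solvers.pdhg(x, G, F, A, niter=200, tau=tau, sigma=sigma)
x[0].show('TGV reconstruction')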
Example #2
def test_admm_lin_input_handling():
    """Test to see that input is handled correctly."""

    space = odl.uniform_discr(0, 1, 10)

    L = odl.ZeroOperator(space)
    f = g = odl.solvers.ZeroFunctional(space)

    # Check that the algorithm runs. With the above operators and functionals,
    # the algorithm should not modify the initial value.
    x0 = noise_element(space)
    x = x0.copy()
    niter = 3

    admm_linearized(x, f, g, L, tau=1.0, sigma=1.0, niter=niter)

    assert x == x0

    # Check that a provided callback is actually called
    class CallbackTest(Callback):
        was_called = False

        def __call__(self, *args, **kwargs):
            self.was_called = True

    callback = CallbackTest()
    assert not callback.was_called
    admm_linearized(x, f, g, L, tau=1.0, sigma=1.0, niter=niter,
                    callback=callback)
    assert callback.was_called
def get_operators(space):
    # Create the forward operator: a Gaussian blur implemented via the Fourier transform
    filter_width = 4  # standard deviation of the Gaussian filter
    ft = odl.trafos.FourierTransform(space)
    c = filter_width**2 / 4.0**2
    gaussian = ft.range.element(lambda x: np.exp(-(x[0]**2 + x[1]**2) * c))
    operator = ft.inverse * gaussian * ft

    # Normalize the operator and create pseudo-inverse
    opnorm = odl.power_method_opnorm(operator)
    operator = (1 / opnorm) * operator

    # A good pseudo-inverse is not needed here; keep a placeholder to preserve the interface.
    pseudoinverse = odl.ZeroOperator(space)

    # Create gradient operator and normalize it
    part_grad_0 = odl.PartialDerivative(space,
                                        0,
                                        method='forward',
                                        pad_mode='order0')
    part_grad_1 = odl.PartialDerivative(space,
                                        1,
                                        method='forward',
                                        pad_mode='order0')

    grad_norm = odl.power_method_opnorm(
        odl.BroadcastOperator(part_grad_0, part_grad_1),
        xstart=odl.util.testutils.noise_element(space))

    part_grad_0 = (1 / grad_norm) * part_grad_0
    part_grad_1 = (1 / grad_norm) * part_grad_1

    # Create tensorflow layer from odl operator
    with tf.name_scope('odl_layers'):
        odl_op_layer = odl.contrib.tensorflow.as_tensorflow_layer(
            operator, 'RayTransform')
        odl_op_layer_adjoint = odl.contrib.tensorflow.as_tensorflow_layer(
            operator.adjoint, 'RayTransformAdjoint')
        odl_grad0_layer = odl.contrib.tensorflow.as_tensorflow_layer(
            part_grad_0, 'PartialGradientDim0')
        odl_grad0_layer_adjoint = odl.contrib.tensorflow.as_tensorflow_layer(
            part_grad_0.adjoint, 'PartialGradientDim0Adjoint')
        odl_grad1_layer = odl.contrib.tensorflow.as_tensorflow_layer(
            part_grad_1, 'PartialGradientDim1')
        odl_grad1_layer_adjoint = odl.contrib.tensorflow.as_tensorflow_layer(
            part_grad_1.adjoint, 'PartialGradientDim1Adjoint')

    return (odl_op_layer, odl_op_layer_adjoint, odl_grad0_layer,
            odl_grad0_layer_adjoint, odl_grad1_layer, odl_grad1_layer_adjoint,
            part_grad_0, part_grad_1, operator, pseudoinverse)
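
# A minimal TF1-style usage sketch (assumed tensor convention): the layers returned above act
# on tensors of shape (batch, nx, ny, 1) matching space.shape, which is the convention used by
# odl.contrib.tensorflow.as_tensorflow_layer. The space size below is a placeholder.
import tensorflow as tf
import odl
import odl.contrib.tensorflow

space = odl.uniform_discr([-1, -1], [1, 1], [128, 128])       # assumed 2D image space
layers = get_operators(space)
odl_op_layer, odl_op_layer_adjoint = layers[0], layers[1]

x_tf = tf.placeholder(tf.float32, shape=[None, 128, 128, 1])  # batch of images
y_tf = odl_op_layer(x_tf)                                      # blur applied inside the graph
bp_tf = odl_op_layer_adjoint(y_tf)                             # adjoint of the blur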
def jtv(operator, data, alpha, sinfo, eta, nonneg=True, datafit=None):
    """Set up the triple (G, F, A) for joint total variation reconstruction.

    The prior couples the gradient of the unknown with the gradient of the
    side information ``sinfo`` (weighted by ``eta``) in a joint group-L1 norm.
    """
    space = operator.domain

    Dx = odl.PartialDerivative(space, 0)
    Dy = odl.PartialDerivative(space, 1)
    Z = odl.ZeroOperator(space)
    D = odl.BroadcastOperator(Dx, Dy, Z, Z)
    A = odl.BroadcastOperator(operator, D)

    F1 = get_data_fit(datafit, data)
    Q = odl.BroadcastOperator(Z, Z, Dx, Dy)
    N = odl.solvers.GroupL1Norm(D.range)
    F2 = alpha * N.translated(-eta * Q(sinfo))
    F = odl.solvers.SeparableSum(F1, F2)

    if nonneg:
        G = odl.solvers.IndicatorNonnegativity(space)
    else:
        G = odl.solvers.ZeroFunctional(space)

    return G, F, A
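
# A minimal usage sketch (hypothetical setup): unlike tgv, the primal variable of jtv lives
# directly in `space`. The side information is a placeholder image on the same space; odl and
# noise_element are reused from the tgv sketch above.
space = odl.uniform_discr([0, 0], [1, 1], [128, 128])         # assumed image space
operator = odl.IdentityOperator(space)                        # placeholder forward operator
sinfo = odl.phantom.shepp_logan(space, modified=True)         # placeholder side information
data = operator(sinfo)

G, F, A = jtv(operator, data, alpha=1e-1, sinfo=sinfo, eta=1e-2)

op_norm = 1.1 * odl.power_method_opnorm(A, xstart=noise_element(A.domain))
x = A.domain.zero()
odl.solvers.pdhg(x, G, F, A, niter=200, tau=1.0 / op_norm, sigma=1.0 / op_norm)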
Example #5
def test_forward_backward_basic():
    """Test for the forward-backward solver by minimizing ||x||_2^2.

    The general problem is of the form

        ``min_x f(x) + sum_i g_i(L_i x) + h(x)``

    and here we take f(x) = g(x) = 0, h(x) = ||x||_2^2 and L is the
    zero-operator.
    """

    space = odl.rn(10)

    lin_ops = [odl.ZeroOperator(space)]
    g = [odl.solvers.ZeroFunctional(space)]
    f = odl.solvers.ZeroFunctional(space)
    h = odl.solvers.L2NormSquared(space)

    x = noise_element(space)
    x_global_min = space.zero()

    forward_backward_pd(x, f, g, lin_ops, h, tau=0.5, sigma=[1.0], niter=10)

    assert all_almost_equal(x, x_global_min, places=HIGH_ACCURACY)
Example #6
def proximal_power_method(data, J, H, folder=None, proj=None, save2disk=False,
                          i_max=10, rule='Constant'):
    """Proximal power iteration for the nonlinear eigenproblem of J relative to H.

    Repeatedly evaluates the proximal operator of ``J`` (via PDHG on the
    composition ``J = J.left * J.right``), tracks the Rayleigh quotient
    ``J(u) / H(u - proj(u))`` and returns the last accepted iterate together
    with the recorded affinities and angles.
    """
    X = data.space
    g = J.left
    L = J.right

    if proj is None:
        proj = odl.ZeroOperator(X)

    # get primal dual step sizes
    (tau, sigma) = odl.solvers.pdhg_stepsize(L)

    #%% initialize

    # initialize iterates and eigenvalues
    ev_m = np.inf
    data -= proj(data)
    data /= H(data)
    u = data.copy()
    um = u.copy()

    # choose step size constant 0 < c < 1
    c = 0.9

    # plot initialization
    u.show('Iteration ' + str(0))

    # parse the step size rule
    rules = rule.split(' ')

    # define step size
    if rules[0] == 'Constant':
        alpha = c * H(um - proj(um)) / J(um)

    # initialize iteration counter and output vectors
    i = 0
    affs = []
    angles = []

    # output
    if save2disk and folder is not None:
        filename = '_it_' + str(i)
        plt.imsave(folder + '/' + filename + '.png', u, cmap='gray')

    #%% run power method
    it = 0  # counter for proximal backtracking
    it_max = 10  # maximum number of prox evaluations
    while i < i_max:

        # define variable step size
        if rules[0] == 'Variable':
            alpha = c / J(um)
        elif rules[0] == 'Feld':
            if len(rules) > 1:
                dt = float(rules[1])
            else:
                dt = 1.
            alpha = c * dt / (1 + dt * J(um))

        # define data fidelity
        f = (0.5 / alpha) * odl.solvers.L2NormSquared(X).translated(um)

        # evaluate the proximal operator
        odl.solvers.pdhg(u, f, g, L, 1000, tau, sigma)

        # compute subgradient
        sg = (um - u) / alpha

        # subtract mean to be on the safe side
        u -= proj(u)

        # evaluate proximal eigenvalue
        ev_prox = u.inner(um) / (H(um)**2)

        # normalize to create the iterate
        u /= H(u)

        # evaluate Rayleigh quotient
        ev = J(u) / H(u - proj(u))

        # check whether Rayleigh quotient decreased
        if ev < ev_m:
            it = 0

            # compute eigenvector affinity (at most 1)
            aff = odl.solvers.L2NormSquared(X)(sg) / J(sg)
            affs.append(aff)

            # evaluate angle
            angle = 1 - um.inner(u) / (H(u) * H(um))
            angles.append(angle)

            # output
            print(20 * '<>')
            print('Affinity: {:.2f}'.format(aff))
            print('Angle: {:.2E} '.format(angle))
            print('Subdiff Eigenvalue: {:.2f} '.format(ev))
            print('Prox Eigenvalue: {:.2f} '.format(ev_prox))
            print('Theoretical Prox Eigenvalue: {:.2f} '.format(1 -
                                                                alpha * ev))
            print(20 * '<>')

            # update iteration counter and plot image
            i += 1
            u.show('Iteration ' + str(i))

            # save result to disk
            if save2disk and folder is not None:
                filename = 'it_' + str(i)
                plt.imsave(folder + '/' + filename + '.png', u, cmap='gray')

            # update old variables
            um = u.copy()
            ev_m = ev
        else:
            it += 1
            c *= 0.9

            if it >= it_max:

                # evaluate angle
                tmp = um.copy()
                f = (0.5 /
                     alpha) * odl.solvers.L2NormSquared(X).translated(tmp)
                odl.solvers.pdhg(um, f, g, L, 1000, tau, sigma)
                angle = 1 - um.inner(tmp) / (H(um) * H(tmp))
                angles.append(angle)

                # evaluate affinity
                sg = (um - tmp) / alpha
                aff = odl.solvers.L2NormSquared(X)(sg) / J(sg)
                affs.append(aff)

                print('Stopped: the Rayleigh quotient could not be improved further')
                break
            else:
                continue

    return um, affs, angles
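
# A minimal usage sketch (hypothetical setup): J is assumed to be a functional composed with an
# operator, so that J.left / J.right exist as used above (e.g. isotropic TV built as
# GroupL1Norm * Gradient), and H is a norm functional on the same space. All names below are
# placeholders.
import odl

X = odl.uniform_discr([0, 0], [1, 1], [64, 64])               # assumed image space
grad = odl.Gradient(X)
J = odl.solvers.GroupL1Norm(grad.range) * grad                # TV; J.left = norm, J.right = grad
H = odl.solvers.L2Norm(X)                                     # normalization functional

data = odl.phantom.shepp_logan(X, modified=True)
u_star, affs, angles = proximal_power_method(data, J, H, i_max=20)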
Example #7
def nossek_flow(data, J, H, folder=None, proj=None,
                save2disk=False, i_max=10):
    """Explicit flow variant of the nonlinear power iteration for J relative to H.

    Uses a subgradient-scaled step size and the same proximal evaluation of
    ``J`` via PDHG as ``proximal_power_method``, returning the last accepted
    iterate together with the recorded affinities and angles.
    """
    X = data.space
    g = J.left
    L = J.right

    if proj is None:
        proj = odl.ZeroOperator(X)

    # get primal dual step sizes
    (tau, sigma) = odl.solvers.pdhg_stepsize(L)

    #%% initialize

    # initialize iterates and eigenvalues
    ev_m = np.inf
    data -= proj(data)
    data /= H(data)
    u = data.copy()
    um = u.copy()

    # choose step size constant 0 < c < 1
    c = 0.9

    # plot initialization
    u.show('Iteration ' + str(0))

    dt = c * H(um)

    # initialize iteration counter and output vectors
    i = 0
    affs = []
    angles = []

    # output
    if save2disk and folder is not None:
        filename = '_it_' + str(i)
        plt.imsave(folder + '/' + filename + '.png', u, cmap='gray')

    #%% run flow
    it = 0  # counter for proximal backtracking
    it_max = 10  # maximum number of prox evaluations
    while i < i_max:

        # define proximal parameter
        if i == 0:
            sg = L.adjoint(L(um))

        norm_sg = H(sg)
        alpha = dt / norm_sg

        # define data fidelity
        f = (0.5 / alpha) * odl.solvers.L2NormSquared(X).translated(um)

        # evaluate the proximal operator
        odl.solvers.pdhg(u, f, g, L, 1000, tau, sigma)

        # compute subgradient
        sg = (um - u) / alpha

        # subtract mean to be on the safe side
        u -= proj(u)

        # normalize to create the iterate
        u /= 1 - dt / H(um)

        # evaluate Rayleigh quotient
        ev = J(u) / H(u - proj(u))

        # check whether Rayleigh quotient decreased
        if ev < ev_m:
            it = 0

            # compute eigenvector affinity (at most 1)
            aff = odl.solvers.L2NormSquared(X)(sg) / J(sg)
            affs.append(aff)

            # evaluate angle
            # angle = 1 - um.inner(u)/(H(u)*H(um))
            angle = 1 - sg.inner(u) / (H(sg) * H(u))
            angles.append(angle)

            # output
            print(20 * '<>')
            print('Affinity: {:.2f}'.format(aff))
            print('Angle: {:.2E} '.format(angle))
            print('Subdiff Eigenvalue: {:.2f} '.format(ev))
            print(20 * '<>')

            # update iteration counter and plot image
            i += 1
            u.show('Iteration ' + str(i))

            # save result to disk
            if save2disk and folder is not None:
                filename = 'it_' + str(i)
                plt.imsave(folder + '/' + filename + '.png', u, cmap='gray')

            # update old variables
            um = u.copy()
            ev_m = ev
        else:
            it += 1

            if it >= it_max:

                # evaluate angle
                tmp = um.copy()
                f = (0.5 /
                     alpha) * odl.solvers.L2NormSquared(X).translated(tmp)
                odl.solvers.pdhg(um, f, g, L, 1000, tau, sigma)
                angle = 1 - um.inner(tmp) / (H(um) * H(tmp))
                angles.append(angle)

                # evaluate affinity
                sg = (um - tmp) / alpha
                aff = odl.solvers.L2NormSquared(X)(sg) / J(sg)
                affs.append(aff)

                print('Stopped: the Rayleigh quotient could not be improved further')
                break
            else:
                continue

    return um, affs, angles
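
# nossek_flow is called in the same way as proximal_power_method; a minimal call reusing the
# hypothetical J, H and data from the sketch above:
u_flow, affs_flow, angles_flow = nossek_flow(data, J, H, i_max=20)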
Example #8
def test_forward_backward_input_handling():
    """Test to see that input is handled correctly."""

    space1 = odl.uniform_discr(0, 1, 10)

    lin_ops = [odl.ZeroOperator(space1), odl.ZeroOperator(space1)]
    g = [
        odl.solvers.ZeroFunctional(space1),
        odl.solvers.ZeroFunctional(space1)
    ]
    f = odl.solvers.ZeroFunctional(space1)
    h = odl.solvers.ZeroFunctional(space1)

    # Check that the algorithm runs. With the above operators, the algorithm
    # returns the input.
    x0 = noise_element(space1)
    x = x0.copy()
    niter = 3

    forward_backward_pd(x,
                        f,
                        g,
                        lin_ops,
                        h,
                        tau=1.0,
                        sigma=[1.0, 1.0],
                        niter=niter)

    assert x == x0

    # Test that the sizes need to agree:
    # Too few sigma_i values
    with pytest.raises(ValueError):
        forward_backward_pd(x,
                            f,
                            g,
                            lin_ops,
                            h,
                            tau=1.0,
                            sigma=[1.0],
                            niter=niter)

    # Too many functionals g_i (more than the number of operators)
    g_too_many = [
        odl.solvers.ZeroFunctional(space1),
        odl.solvers.ZeroFunctional(space1),
        odl.solvers.ZeroFunctional(space1)
    ]
    with pytest.raises(ValueError):
        forward_backward_pd(x,
                            f,
                            g_too_many,
                            lin_ops,
                            h,
                            tau=1.0,
                            sigma=[1.0, 1.0],
                            niter=niter)

    # Test for correct space
    space2 = odl.uniform_discr(1, 2, 10)
    x = noise_element(space2)
    with pytest.raises(ValueError):
        forward_backward_pd(x,
                            f,
                            g,
                            lin_ops,
                            h,
                            tau=1.0,
                            sigma=[1.0, 1.0],
                            niter=niter)
Example #9
    def inf_action(self, lie_alg_element):
        """Return the infinitesimal action, which here is the zero operator."""
        assert lie_alg_element in self.lie_group.associated_algebra
        return odl.ZeroOperator(self.domain)