Example no. 1
    def Myfunc(self):
        # Huber-smoothed TV term, defined on the range of the gradient
        huber_norm = odl.solvers.Huber(
            odl.Gradient(self.space).range, self.gamma)
        # Objective: ||A(x) - g||_2^2 + lambda_ * Huber(grad(x))
        func = odl.solvers.L2NormSquared(self.ray_trafo.range) * (
            self.ray_trafo -
            self.g) + self.lambda_ * huber_norm * odl.Gradient(self.space)
        return func
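Since both the squared L2 data term and the Huber term are differentiable, the returned functional can be minimized with a smooth solver. A minimal usage sketch (hypothetical: `problem` stands for an instance of the surrounding class; the solver choice and iteration count are illustrative, not part of the original):

obj = problem.Myfunc()  # `problem` is a hypothetical instance
x = problem.space.zero()
# Backtracking line search on the objective itself
line_search = odl.solvers.BacktrackingLineSearch(obj)
odl.solvers.steepest_descent(obj, x, line_search=line_search, maxiter=100)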
Example no. 2
    def _reconstruct(self, observation, *args, **kwargs):
        xspace = self.ray_trafo.domain
        yspace = self.ray_trafo.range

        grad = odl.Gradient(self.ray_trafo.domain)

        # Assemble all operators into a list.
        lin_ops = [self.ray_trafo, grad]

        # Create functionals for the l2 distance and l1 norm.
        g_funcs = [odl.solvers.L2NormSquared(yspace).translated(observation),
                   self.gamma * odl.solvers.L1Norm(grad.range)]

        # Functional of the bound constraint 0 <= x <= 1
        f = odl.solvers.IndicatorBox(xspace, 0, 1)

        # Find scaling constants so that the solver converges.
        # See the douglas_rachford_pd documentation for more information.
        xstart = self.fbp_op(observation)
        opnorm_A = odl.power_method_opnorm(self.ray_trafo, xstart=xstart)
        opnorm_grad = odl.power_method_opnorm(grad, xstart=xstart)
        sigma = [1 / opnorm_A ** 2, 1 / opnorm_grad ** 2]
        tau = 1.0

        # Solve using the Douglas-Rachford Primal-Dual method
        x = xspace.zero()
        odl.solvers.douglas_rachford_pd(x, f, g_funcs, lin_ops,
                                        tau=tau,
                                        sigma=sigma,
                                        niter=self.iterations)
        return x
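With these step sizes the convergence criterion of the Douglas-Rachford solver holds by construction. A minimal sanity check (a sketch, using the criterion tau * sum_i(sigma_i * ||L_i||^2) < 4 referenced in the douglas_rachford_pd documentation and checked again in Example no. 12 below):

# With tau = 1 and sigma_i = 1 / ||L_i||^2, the left-hand side equals 2.
check_value = tau * (sigma[0] * opnorm_A ** 2 +
                     sigma[1] * opnorm_grad ** 2)
assert check_value < 4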
Example no. 3
File: misc.py Project: zwq1230/odl
import numpy as np
import odl


def total_variation(domain, grad=None):
    """Total variation functional.

    Parameters
    ----------
    domain : odl space
        Domain of the TV functional.
    grad : gradient operator, optional
        Gradient operator of the total variation functional. This may be
        any linear operator, thereby generalizing TV.
        Default: forward differences with Neumann boundary conditions.

    Examples
    --------
    Check that the total variation of a constant is zero

    >>> import odl.contrib.spdhg as spdhg, odl
    >>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
    >>> tv = spdhg.total_variation(space)
    >>> x = space.one()
    >>> tv(x) < 1e-10
    True
    """

    if grad is None:
        grad = odl.Gradient(domain, method='forward', pad_mode='symmetric')
        # 2 * sqrt(sum(1 / h_i^2)) is an upper bound of the operator norm
        grad.norm = 2 * np.sqrt(sum(1 / grad.domain.cell_sides**2))

    f = odl.solvers.GroupL1Norm(grad.range, exponent=2)

    return f * grad
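A complementary check (a sketch; the step image and the comparison are illustrative): a non-constant element should have strictly positive total variation.

import odl
import odl.contrib.spdhg as spdhg

space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
tv = spdhg.total_variation(space)
step = space.element(lambda x: x[0] > 1.5)  # 0/1 step image
print(tv(step) > 0)  # expected: True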
Example no. 4
def TVdenoise2D(x, la, mu, iterations=1):
    diff = odl.Gradient(x.space, method='forward')

    dimension = diff.range.size

    f = x.copy()
    b = diff.range.zero()
    d = diff.range.zero()

    fig = None

    scale = 1 / diff.domain.grid.cell_volume
    for i in odl.util.ProgressRange("denoising", iterations):
        # Iterate using Gauss-Seidel
        x = (f * mu +
             (diff.adjoint(diff(x)) + scale * x + diff.adjoint(d - b)) * la
             ) / (mu + dimension * la)

        # d = sign(diff(x) + b) * max(|diff(x) + b| - 1/la, 0)
        s = diff(x) + b
        d = s.ufuncs.sign() * (s.ufuncs.absolute().
                               ufuncs.add(-1.0 / la).
                               ufuncs.maximum(0.0))

        b = b + diff(x) - d

        fig = x.show(fig=fig)
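A usage sketch, assuming TVdenoise2D as defined above is in scope (the phantom, noise level, and parameter values are illustrative):

import odl

space = odl.uniform_discr([0, 0], [1, 1], [128, 128])
noisy = odl.phantom.shepp_logan(space, modified=True)
noisy += 0.1 * odl.phantom.white_noise(space)
TVdenoise2D(noisy, la=1.0, mu=5.0, iterations=100)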
Example no. 5
    def tv_reconstruction(self, y, param=def_lambda):
        """
        NOTE: I'll follow the example from the odl github: https://github.com/odlgroup/odl/blob/master/examples/solvers/pdhg_tomography.py
        NOTE: The only thing I changed was swap what g and functional were supposed to be. That's it.
        """
        # internal method to evaluate tv on a single element y with shape [width, height]

        # the operators
        gradients = odl.Gradient(self.space, method='forward')
        broad_op = odl.BroadcastOperator(self.operator, gradients)
        # define empty functional to fit the chambolle_pock framework
        functional = odl.solvers.ZeroFunctional(broad_op.domain)

        # the norms
        l1_norm = param * odl.solvers.L1Norm(gradients.range)
        l2_norm_squared = odl.solvers.L2NormSquared(self.range).translated(y)
        g = odl.solvers.SeparableSum(l2_norm_squared, l1_norm)

        # Find parameters
        op_norm = 1.1 * odl.power_method_opnorm(broad_op)
        tau = 10.0 / op_norm
        sigma = 0.1 / op_norm
        niter = 200

        # find starting point
        x = self.space.element(
            self.model.inverse(np.expand_dims(y, axis=-1))[..., 0])

        # Run the optimization algorithm
        # odl.solvers.chambolle_pock_solver(x, functional, g, broad_op, tau = tau, sigma = sigma, niter=niter)
        odl.solvers.pdhg(x, functional, g, broad_op, tau=tau, sigma=sigma, niter=niter)
        return x
Example no. 6
    def tv_reconstruction(self, y, param=def_lambda):
        # internal method to evaluate tv on a single element y with shape [width, height]

        # the operators
        gradients = odl.Gradient(self.space, method='forward')
        broad_op = odl.BroadcastOperator(self.operator, gradients)
        # define empty functional to fit the chambolle_pock framework
        g = odl.solvers.ZeroFunctional(broad_op.domain)

        # the norms
        l1_norm = param * odl.solvers.L1Norm(gradients.range)
        l2_norm_squared = odl.solvers.L2NormSquared(self.range).translated(y)
        functional = odl.solvers.SeparableSum(l2_norm_squared, l1_norm)

        # Find parameters
        op_norm = 1.1 * odl.power_method_opnorm(broad_op)
        tau = 10.0 / op_norm
        sigma = 0.1 / op_norm
        niter = 200

        # find starting point
        x = self.space.element(
            self.model.inverse(np.expand_dims(y, axis=-1))[..., 0])

        # Run the optimization algorithm
        # odl.solvers.chambolle_pock_solver(x, functional, g, broad_op, tau = tau, sigma = sigma, niter=niter)
        odl.solvers.pdhg(x,
                         functional,
                         g,
                         broad_op,
                         tau=tau,
                         sigma=sigma,
                         niter=niter)
        return x
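Both variants use the standard PDHG step-size rule tau * sigma * ||K||^2 <= 1. A quick sanity check (a sketch; the tolerance guards against floating-point round-off):

# tau * sigma * op_norm**2 = (10 / op_norm) * (0.1 / op_norm) * op_norm**2
# = 1, the boundary of the admissible region (op_norm already includes the
# safety factor 1.1 on top of the estimated operator norm).
assert tau * sigma * op_norm ** 2 <= 1.0 + 1e-10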
Example no. 7
    def reconstruction(proj_data, lam):
        lam = float(lam)

        print('lam = {}'.format(lam))

        # We do not allow negative parameters, so return a bogus result
        if lam <= 0:
            return np.inf * space.one()

        # Construct operators and functionals
        gradient = odl.Gradient(space)
        op = odl.BroadcastOperator(ray_trafo, gradient)

        f = odl.solvers.ZeroFunctional(op.domain)

        l2_norm = odl.solvers.L2NormSquared(
            ray_trafo.range).translated(proj_data)
        l1_norm = lam * odl.solvers.GroupL1Norm(gradient.range)
        g = odl.solvers.SeparableSum(l2_norm, l1_norm)

        # Select solver parameters
        op_norm = 1.5 * odl.power_method_opnorm(op, maxiter=10)

        # Run the algorithm
        x = op.domain.zero()
        odl.solvers.pdhg(x,
                         f,
                         g,
                         op,
                         niter=200,
                         tau=1.0 / op_norm,
                         sigma=1.0 / op_norm)

        return x
Example no. 8
    def __init__(self, lie_group, domain, gradient=None):
        LieAction.__init__(self, lie_group, domain)
        assert lie_group.size == domain.ndim + 1
        if gradient is None:
            # Should use method='central'; other methods introduce a bias.
            self.gradient = odl.Gradient(self.domain, method='central')
        else:
            self.gradient = gradient
Example no. 9
    def Matrix_Comp_C(self):
        if not hasattr(self, 'CtC'):
            grad = odl.Gradient(self.reco_space)
            self.grad_FDK = grad * self.FDK_bin
            self.CtC = odl.operator.oputils.matrix_representation(
                self.grad_FDK.adjoint * self.grad_FDK)
            self.gradFDK_norm = np.linalg.norm(self.CtC)**(1 / 2)
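            # Note: np.linalg.norm of a matrix defaults to the Frobenius
            # norm, so sqrt(||C^T C||_F) is an upper-bound estimate of the
            # operator norm ||grad_FDK||_2, not its exact value.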
Example no. 10
def gradient(space,
             sinfo=None,
             mode=None,
             gamma=1,
             eta=1e-2,
             show_sinfo=False,
             prefix=None):

    grad = odl.Gradient(space, method='forward', pad_mode='symmetric')

    if sinfo is not None:
        if mode == 'direction':
            norm = odl.PointwiseNorm(grad.range)
            grad_sinfo = grad(sinfo)
            ngrad_sinfo = norm(grad_sinfo)

            for i in range(len(grad_sinfo)):
                grad_sinfo[i] /= ngrad_sinfo.ufuncs.max()

            ngrad_sinfo = norm(grad_sinfo)
            ngrad_sinfo_eta = np.sqrt(ngrad_sinfo**2 + eta**2)

            xi = grad.range.element([g / ngrad_sinfo_eta
                                     for g in grad_sinfo])  # UGLY

            Id = odl.operator.IdentityOperator(grad.range)
            xiT = odl.PointwiseInner(grad.range, xi)
            xixiT = odl.BroadcastOperator(*[x * xiT for x in xi])

            grad = (Id - gamma * xixiT) * grad

            if show_sinfo:
                misc.save_image(ngrad_sinfo, prefix + '_sinfo_norm')
                misc.save_vfield(xi.asarray(), filename=prefix + '_sinfo_xi')
                misc.save_vfield_cmap(filename=prefix + '_sinfo_xi_cmap')

        elif mode == 'location':
            norm = odl.PointwiseNorm(grad.range)
            ngrad_sinfo = norm(grad(sinfo))
            ngrad_sinfo /= ngrad_sinfo.ufuncs.max()

            w = eta / np.sqrt(ngrad_sinfo**2 + eta**2)
            grad = odl.DiagonalOperator(odl.MultiplyOperator(w), 2) * grad

            if show_sinfo:
                misc.save_image(ngrad_sinfo, prefix + '_sinfo_norm')
                misc.save_image(w, prefix + '_w')

        else:
            grad = None

    return grad
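A usage sketch for the two side-information modes (illustrative; sinfo stands for a hypothetical structural prior image, and show_sinfo keeps its default so the misc helpers are not needed):

space = odl.uniform_discr([0, 0], [1, 1], [64, 64])
sinfo = odl.phantom.shepp_logan(space, modified=True)

# Directional variant: projects out the prior's edge directions
grad_dir = gradient(space, sinfo=sinfo, mode='direction', gamma=1, eta=1e-2)

# Location variant: down-weights the gradient where the prior has edges
grad_loc = gradient(space, sinfo=sinfo, mode='location', eta=1e-2)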
Example no. 11
def operators_smooth():
    size = 512
    space = odl.uniform_discr([-256, -256], [256, 256], [size, size],
                              dtype='float32',
                              weighting=1.0)
    angle_partition = odl.uniform_partition(0, 2 * np.pi, 1000)
    detector_partition = odl.uniform_partition(-360, 360, 1000)
    geometry = odl.tomo.Parallel2dGeometry(angle_partition, detector_partition)
    T = odl.tomo.RayTransform(space, geometry)
    fbp = odl.tomo.fbp_op(T, frequency_scaling=0.45, filter_type='Hann')
    T_norm = T.norm(estimate=True)
    T = (1 / T_norm) * T
    W = odl.Gradient(space)
    return [T, W]
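A usage sketch (illustrative): T is the normalized ray transform and W the gradient operator, e.g. as building blocks for a TV-regularized objective.

T, W = operators_smooth()
phantom = odl.phantom.shepp_logan(T.domain, modified=True)
sino = T(phantom)   # normalized forward projection
edges = W(phantom)  # spatial gradient of the phantom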
Example no. 12
def check_params(res_level):
    """Check the convergence criterion for the DR solver at ``res_level``."""
    ray_trafo = odl.tomo.RayTransform(res_level.space, geometry,
                                      impl='astra_cuda')
    ray_trafo_norm = 1.2 * odl.power_method_opnorm(ray_trafo, maxiter=4)
    print('norm of the ray transform: {}'.format(ray_trafo_norm))
    grad = odl.Gradient(res_level.space, pad_mode='order1')
    grad_xstart = odl.phantom.shepp_logan(grad.domain, modified=True)
    grad_norm = 1.5 * odl.power_method_opnorm(grad, xstart=grad_xstart,
                                              maxiter=10)
    print('norm of the gradient: {}'.format(grad_norm))

    # Here we check the convergence criterion for the Douglas-Rachford solver
    check_value = tau * (res_level.sigma_ray * ray_trafo_norm ** 2 +
                         res_level.sigma_grad * grad_norm ** 2)
    print('check_value = {}, must be < 4 for convergence'.format(check_value))
    convergence_criterion = check_value < 4
    assert convergence_criterion
Example no. 13
    def __init__(self, space, lagr_mult, tau, grad=None):
        self.N = round(len(space) / 2)
        self.space = space
        self.image_space = self.space[0]
        self.vf_space = self.space[self.N]
        self.lagr_mult = lagr_mult
        self.tau = tau
        if grad is None:
            grad = odl.Gradient(space[0],
                                method='forward',
                                pad_mode='symmetric')
            grad.norm = 2 * np.sqrt(sum(1 / grad.domain.cell_sides**2))
        self.grad = grad
        super(AugmentedLagrangeTerm, self).__init__(space=space,
                                                    linear=False,
                                                    grad_lipschitz=np.nan)
Example no. 14
    def __init__(self, space, tau=1., alpha=1., grad=None):
        self.N = round(len(space) / 2)  # number of time steps

        if grad is None:
            grad = odl.Gradient(space[0],
                                method='forward',
                                pad_mode='symmetric')
            grad.norm = 2 * np.sqrt(sum(1 / grad.domain.cell_sides**2))

        self.grad = grad
        self.space = space
        self.image_space = self.space[0]
        self.vf_space = self.space[self.N]
        self.alpha = alpha
        self.tau = tau
        super(L2OpticalFlowConstraint, self).__init__(space=space,
                                                      linear=False,
                                                      grad_lipschitz=np.nan)
Example no. 15
def check_params(res_level):
    """Check the convergence criterion for the DR solver at ``res_level``."""
    grad = odl.Gradient(res_level.space, pad_mode='order1')
    grad_xstart = odl.phantom.shepp_logan(grad.domain, modified=True)
    grad_norm = 1.5 * odl.power_method_opnorm(grad, xstart=grad_xstart,
                                              maxiter=10)
    print('norm of the gradient: {}'.format(grad_norm))

    res_level = ResLevel(res_level.space, res_level.num_iter,
                         res_level.regularizer, res_level.reg_param,
                         sigma_ray=1.5 / tau,
                         sigma_grad=1.5 / (tau * grad_norm ** 2))

    # Here we check the convergence criterion for the Douglas-Rachford solver
    check_value = tau * (res_level.sigma_ray +
                         res_level.sigma_grad * grad_norm ** 2)
    print('check_value = {}, must be < 4 for convergence'.format(check_value))
    convergence_criterion = check_value < 4
    assert convergence_criterion

    return res_level
Example no. 16
def tnv(operator, data, alpha, sinfo, eta, nonneg=True, datafit=None):

    space = operator.domain
    grad = odl.Gradient(space)

    P = odl.ComponentProjection(grad.range**2, 0)
    D = P.adjoint * grad
    Q = odl.ComponentProjection(grad.range**2, 1)
    A = odl.BroadcastOperator(operator, D)

    F1 = get_data_fit(datafit, data)
    N = odl.solvers.NuclearNorm(D.range, outer_exp=1, singular_vector_exp=1)
    F2 = alpha * N.translated(-Q.adjoint(eta * grad(sinfo)))
    F = odl.solvers.SeparableSum(F1, F2)

    if nonneg:
        G = odl.solvers.IndicatorNonnegativity(space)
    else:
        G = odl.solvers.ZeroFunctional(space)

    return G, F, A
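A usage sketch, assuming a forward operator ray_trafo, data data, and a side-information image side_info (all hypothetical here); the returned triple plugs into PDHG as min_x G(x) + F(A(x)):

G, F, A = tnv(ray_trafo, data, alpha=0.1, sinfo=side_info, eta=1e-2)

op_norm = 1.1 * odl.power_method_opnorm(A)
x = A.domain.zero()
odl.solvers.pdhg(x, G, F, A, tau=1.0 / op_norm, sigma=1.0 / op_norm,
                 niter=200)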
Example no. 17
    def tv_reconstruction(self, y, param=1000000):
        # the operators
        gradients = odl.Gradient(self.space, method='forward')
        operator = odl.BroadcastOperator(self.ray_transf, gradients)
        # define empty functional to fit the chambolle_pock framework
        g = odl.solvers.ZeroFunctional(operator.domain)

        # compute transformed data
        # ensure y stays away from 0
        y_cut = np.maximum(y, 0.03)
        data = -(np.log(y_cut)) / self.attenuation_coeff

        # the norms
        l1_norm = param * odl.solvers.L1Norm(gradients.range)
        l2_norm_squared = odl.solvers.L2NormSquared(
            self.ray_transf.range).translated(data)
        functional = odl.solvers.SeparableSum(l2_norm_squared, l1_norm)

        # Find parameters
        op_norm = 1.1 * odl.power_method_opnorm(operator)
        tau = 10.0 / op_norm
        sigma = 0.1 / op_norm
        niter = 5000

        # find starting point
        x = self.fbp(data)

        # Run the optimization algorithm
        odl.solvers.chambolle_pock_solver(x,
                                          functional,
                                          g,
                                          operator,
                                          tau=tau,
                                          sigma=sigma,
                                          niter=niter)

        # plot results
        plt.figure(1)
        plt.imshow(x)
        plt.show()
Example no. 18
    def gradient(self):
        """Gradient operator of this functional."""

        func = self
        spatial_grad = odl.Gradient(func.f.space, pad_mode='order1')

        class TranslationCostFixedTemplGrad(odl.Operator):
            """Gradient operator of `TranslationCostFixedTempl`."""
            def __init__(self):
                """Initialize a new instance."""
                super(TranslationCostFixedTemplGrad,
                      self).__init__(domain=func.domain,
                                     range=func.domain,
                                     linear=False)

            def _call(self, t):
                """Evaluate the gradient in ``t``."""
                # Translated f
                f_transl = func.trans_op(t)

                # Compute the cost gradient
                cost_arg = func.op(f_transl)
                if func.g is not None:
                    cost_arg -= func.g
                grad_cost = func.cost.gradient(cost_arg)

                # Apply derivative adjoint of `op` at the translated f
                # to the cost gradient. This is the left factor in the
                # inner product.
                factor_l = func.op.derivative(f_transl).adjoint(grad_cost)

                # Compute the right factors, consisting in grad(f_t)
                factors_r = spatial_grad(f_transl)

                # Take the inner products in f.space of factor_l and
                # the components of factors_r. The negative of this vector
                # is the desired result.
                return [-factor_l.inner(fac_r) for fac_r in factors_r]

        return TranslationCostFixedTemplGrad()
Example no. 19
    def reconstruction(proj_data, parameters):
        # Extract the separate parameters
        lam, sigma = parameters

        print('lam = {}, sigma = {}'.format(lam, sigma))

        # We do not allow negative parameters, so return a bogus result
        if lam <= 0 or sigma <= 0:
            return np.inf * space.one()

        # Create data term ||Ax - b||_2^2
        l2_norm = odl.solvers.L2NormSquared(ray_trafo.range)
        data_discrepancy = l2_norm * (ray_trafo - proj_data)

        # Create regularizing functional huber(|grad(x)|)
        gradient = odl.Gradient(space)
        l1_norm = odl.solvers.GroupL1Norm(gradient.range)
        smoothed_l1 = odl.solvers.MoreauEnvelope(l1_norm, sigma=sigma)
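        # (The Moreau envelope is a differentiable approximation of the
        # group-L1 norm; this smoothing is what makes the quasi-Newton
        # solver below applicable to an otherwise nonsmooth objective.)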
        regularizer = smoothed_l1 * gradient

        # Create full objective functional
        obj_fun = data_discrepancy + lam * regularizer

        # Pick parameters
        maxiter = 30
        num_store = 5

        # Run the algorithm
        x = ray_trafo.domain.zero()
        odl.solvers.bfgs_method(obj_fun,
                                x,
                                maxiter=maxiter,
                                num_store=num_store,
                                hessinv_estimate=odl.ScalingOperator(
                                    space,
                                    1 / odl.power_method_opnorm(ray_trafo)**2))

        return x
Example no. 20
    def __init__(self, space, alpha=1, grad=None):
        if len(space) != 2:
            raise ValueError('domain does not have the right shape; '
                             'len(space) == 2 expected')

        if grad is None:
            grad = odl.Gradient(space[0],
                                method='forward',
                                pad_mode='symmetric')
            grad.norm = 2 * np.sqrt(sum(1 / grad.domain.cell_sides**2))

        self.alpha = alpha
        self.grad = grad
        self.image_space_time = space[0]
        self.image_space = self.image_space_time[0]
        self.vf_space_time = space[1]
        self.vf_space = self.vf_space_time[0]
        self.im_vf_space = ProductSpace(self.image_space, self.vf_space)
        self.N = len(self.image_space_time)  # number of time steps
        super(L1OpticalFlowConstraint, self).__init__(space=space,
                                                      linear=False,
                                                      grad_lipschitz=np.nan)
Example no. 21
image /= image.max()

# Discretized spaces
space = odl.uniform_discr([0, 0], shape, shape)

# Original image
orig = space.element(image)

# Add noise
image += np.random.normal(0, 0.1, shape)

# Data of noisy image
noisy = space.element(image)

# Gradient operator
gradient = odl.Gradient(space, method='forward')

# Matrix of operators
op = odl.BroadcastOperator(odl.IdentityOperator(space), gradient)

# Set up the functionals

# l2-squared data matching
l2_norm = odl.solvers.L2NormSquared(space).translated(noisy)

# Isotropic TV-regularization: l1-norm of grad(x)
l1_norm = 0.15 * odl.solvers.L1Norm(gradient.range)

# Make separable sum of functionals, order must correspond to the operator K
f = odl.solvers.SeparableSum(l2_norm, l1_norm)
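The excerpt stops before the solver call. A possible continuation (a sketch, not part of the original snippet), following the PDHG pattern of the other examples; the iteration count is illustrative:

# Zero functional for the term acting directly on x
g = odl.solvers.ZeroFunctional(op.domain)

# Step sizes from the usual PDHG rule tau * sigma * ||K||^2 <= 1
op_norm = 1.1 * odl.power_method_opnorm(op, xstart=noisy)
tau = sigma = 1.0 / op_norm

x = op.domain.zero()
odl.solvers.pdhg(x, g, f, op, tau=tau, sigma=sigma, niter=200)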
Example no. 22
# Discretization
reco_space = adutils.get_discretization(use_2D=True)

# Forward operator (in the form of a broadcast operator)
ray_trafo = adutils.get_ray_trafo(reco_space, use_2D=True)

# Data
data = adutils.get_data(ray_trafo, use_2D=True)


# --- Set up the inverse problem --- #


# Initialize gradient operator
gradient = odl.Gradient(reco_space, method='forward')

gradient_back = odl.Gradient(reco_space, method='backward')
eps = odl.DiagonalOperator(gradient_back, reco_space.ndim)

# Create the domain of the problem, given by the reconstruction space and the
# range of the gradient on the reconstruction space.
domain = odl.ProductSpace(reco_space, gradient.range)

# Column vector of three operators defined as:
# 1. Computes ``A(x)``
# 2. Computes ``grad(x) - y``
# 3. Computes ``eps(y)``
op = odl.BroadcastOperator(
    ray_trafo * odl.ComponentProjection(domain, 0),
    odl.ReductionOperator(gradient, odl.ScalingOperator(gradient.range, -1)),
    eps * odl.ComponentProjection(domain, 1))
Example no. 23
For further details and a description of the solution method used, see
https://odlgroup.github.io/odl/guide/pdhg_guide.html in the ODL documentation.
"""

import numpy as np
import odl
import matplotlib.pyplot as plt

# Define ground truth, space and noisy data
shape = [100, 100]
space = odl.uniform_discr([0, 0], shape, shape)
orig = odl.phantom.smooth_cuboid(space)
d = odl.phantom.salt_pepper_noise(orig, fraction=0.2)

# Define objective functional
op = odl.Gradient(space)  # operator
norm_op = np.sqrt(8) + 1e-4  # norm with forward differences is well-known
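# (For forward differences each partial derivative has norm at most
# 2 / h_i, so ||grad||^2 <= sum_i 4 / h_i^2 = 8 on this grid with unit
# cells; the 1e-4 is a small safety margin.)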
lam = 2  # Regularization parameter
const = 0.5
g = const / lam * odl.solvers.L1Norm(space).translated(d)  # data fit
f = const * odl.solvers.Huber(op.range, gamma=.01)  # regularization
obj_fun = f * op + g  # combined functional
mu_f = 1 / f.grad_lipschitz  # Strong convexity of "f*"

# Define algorithm parameters


class CallbackStore(odl.solvers.Callback):  # Callback to store function values
    def __init__(self):
        self.iteration_count = 0
        self.iteration_counts = []
Example no. 24
using a gradient descent method (ADAM).
"""

import tensorflow as tf
import numpy as np
import odl
import odl.contrib.tensorflow

sess = tf.InteractiveSession()

# Create ODL data structures
space = odl.uniform_discr([-64, -64], [64, 64], [128, 128], dtype='float32')
geometry = odl.tomo.parallel_beam_geometry(space)
ray_transform = odl.tomo.RayTransform(space, geometry)
grad = odl.Gradient(space)

# Create data
phantom = odl.phantom.shepp_logan(space, True)
data = ray_transform(phantom)
noisy_data = data + odl.phantom.white_noise(data.space)

# Create tensorflow layers from odl operators
ray_transform_layer = odl.contrib.tensorflow.as_tensorflow_layer(
    ray_transform, name='RayTransform')
grad_layer = odl.contrib.tensorflow.as_tensorflow_layer(grad, name='Gradient')
x = tf.Variable(tf.zeros(shape=space.shape), name="x")

# Create constant right hand side
y = tf.constant(np.asarray(noisy_data))
Example no. 25
ellipses = [[1, 0.8, 0.8, 0, 0, 0],
            [1, 0.4, 0.4, 0.2, 0.2, 0]]

domain = odl.ProductSpace(space, len(ellipses))
phantom = domain.element()
phantom[0] = odl.phantom.ellipse_phantom(space, [ellipses[0]])
phantom[1] = odl.phantom.ellipse_phantom(space, [ellipses[1]])
phantom[0] -= phantom[1]

phantom.show('phantom', indices=np.s_[:])

diagop = odl.DiagonalOperator(ray_trafo, domain.size)
redop = odl.ReductionOperator(ray_trafo, domain.size)

# gradient
grad = odl.Gradient(ray_trafo.domain)
grad_n = odl.DiagonalOperator(grad, domain.size)

# Create data
data = diagop(phantom)
data_sum = redop(phantom)

# Add noise to data
scale_poisson = 1 / np.mean(data)  # 1 quantum per pixel on average
data += odl.phantom.poisson_noise(data * scale_poisson) / scale_poisson

scale_white_noise = 0.1 * np.mean(data_sum)  # 10% white noise
data_sum += odl.phantom.white_noise(data_sum.space) * scale_white_noise

# Create box constraint functional
f = odl.solvers.IndicatorBox(domain, 0, 1)
Example no. 26
dpart = odl.uniform_partition(-3 * xlim, 3 * xlim, 100)
geometry = geometry.conebeam.FanFlatGeometry(apart,
                                             dpart,
                                             src_radius=2 * xlim,
                                             det_radius=2 * xlim)
# Add noise
ray_trafo = odl.tomo.RayTransform(space, geometry)
g = ray_trafo(f_true)
g_noisy = g + 0.2 * odl.phantom.white_noise(ray_trafo.range)

# %%
# Implement the Huber regularization and the gc method.
lambda_ = 0.01  # regularization parameter
meanError = []
for gamma in gamma_array:
    huber_norm = odl.solvers.Huber(odl.Gradient(space).range, gamma)
    func = odl.solvers.L2NormSquared(ray_trafo.range) * (
        ray_trafo - g_noisy) + lambda_ * huber_norm * odl.Gradient(space)
    sig_ini = space.zero()
    if not os.path.exists("./pic/gamma_%s_lambda_0.01/" % gamma):
        os.makedirs("./pic/gamma_%s_lambda_0.01/" % gamma)
    error = []
    callback = (
        odl.solvers.CallbackPrintIteration(step=10)
        & save_error
        #& odl.solvers.CallbackShow(step=10,saveto='./pic/gamma_%s_lambda_0.01/real_transverse_iterate_{}.png'%gamma
    )
    # Now we use the steepest-descent solver and a backtracking line search
    # in order to find the minimum of the functional.
    line_search = odl.solvers.BacktrackingLineSearch(func, max_num_iter=50)
    CGN(f=func,
Example no. 27
            with odl.util.Timer(reco_method):
                odl.solvers.conjugate_gradient_normal(sum_ray_trafo,
                                                      reco,
                                                      noisy_data,
                                                      niter=10,
                                                      callback=callback)
        else:
            odl.solvers.conjugate_gradient_normal(sum_ray_trafo,
                                                  reco,
                                                  noisy_data,
                                                  niter=10,
                                                  callback=callback)
        multigrid.graphics.show_both(*reco)

    elif reco_method == 'TV':
        insert_grad = odl.Gradient(insert_discr, pad_mode='order1')

        coarse_grad = odl.Gradient(coarse_discr, pad_mode='order1')

        # Differentiable part, build as ||. - g||^2 o P
        data_func = odl.solvers.L2NormSquared(
            sum_ray_trafo.range).translated(noisy_data) * sum_ray_trafo
        reg_param_1 = 8e-3
        reg_func_1 = reg_param_1 * (odl.solvers.L2NormSquared(coarse_discr) *
                                    odl.ComponentProjection(pspace, 0))
        smooth_func = data_func + reg_func_1

        # Non-differentiable part composed with linear operators
        reg_param = 8e-3
        nonsmooth_func = reg_param * odl.solvers.L1Norm(insert_grad.range)
Example no. 28
# Create the forward operator
ray_trafo = odl.tomo.RayTransform(reco_space, geometry)

# --- Generate artificial data --- #

# Create phantom
discr_phantom = odl.phantom.shepp_logan(reco_space, modified=True)

# Create sinogram of forward projected phantom with noise
data = ray_trafo(discr_phantom)
data += odl.phantom.white_noise(ray_trafo.range) * np.mean(data) * 0.1

# --- Set up the inverse problem --- #

# Initialize gradient operator
gradient = odl.Gradient(reco_space)

# Column vector of two operators
op = odl.BroadcastOperator(ray_trafo, gradient)

# Do not use the f functional, set it to zero.
f = odl.solvers.ZeroFunctional(op.domain)

# Create functionals for the dual variable

# l2-squared data matching
l2_norm = odl.solvers.L2NormSquared(ray_trafo.range).translated(data)

# Isotropic TV-regularization i.e. the l1-norm
l1_norm = 0.015 * odl.solvers.L1Norm(gradient.range)
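The excerpt ends before the solver is run. A possible continuation (a sketch, not part of the original), in the same pattern as Example no. 7:

# Combine the dual functionals; the order matches op = (ray_trafo, gradient)
g = odl.solvers.SeparableSum(l2_norm, l1_norm)

op_norm = 1.1 * odl.power_method_opnorm(op)
x = op.domain.zero()
odl.solvers.pdhg(x, f, g, op, tau=1.0 / op_norm, sigma=1.0 / op_norm,
                 niter=200)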
Example no. 29
    rot_mat = np.array([[np.cos(phi), -np.sin(phi)],
                        [np.sin(phi), np.cos(phi)]])
    test_pts = rot_mat.T.dot(test_pts.T).T
    eps = 4 * np.max(space.cell_sides) / np.min([a, b])
    cond_pts = (test_pts[:, 0] / a)**2 + (test_pts[:, 1] / b)**2
    bdry = np.where((cond_pts >= 1 - eps) & (cond_pts <= 1 + eps))[0]

    normal = ((test_pts[:, 0] / a) / np.sqrt(cond_pts),
              (test_pts[:, 1] / b) / np.sqrt(cond_pts))

    angle = np.zeros(space.shape)
    angle.ravel()[bdry] = np.arctan2(normal[1][bdry], normal[0][bdry])
    return np.mod(angle, np.pi)


grad = odl.Gradient(space, pad_mode='symmetric')
pwnorm = odl.PointwiseNorm(grad.range)


def random_ellipses(n):
    assert n >= 1

    fval = np.random.uniform(0.1, 2)
    center = np.random.uniform(-1, 1, size=2)
    a, b = np.random.uniform(0.05, max_ab, size=2)
    phi = np.random.uniform(0, np.pi)

    ell = ellipse(fval, center, a, b, phi)

    grad_ell = [x.asarray() for x in grad(ell)]
    grad_ell_norm = pwnorm(grad_ell).asarray()
Example no. 30
X = odl.uniform_discr(min_pt=[0, 0], max_pt=shape, shape=shape)

# Wrap image as space element, generate noisy variant and display
image /= image.max()
x_true = X.element(np.rot90(image, -1))
# To get predictable randomness, we explicitly seed the random number generator
with odl.util.NumpyRandomSeed(123):
    y = x_true + 0.1 * odl.phantom.white_noise(X)

x_true.show(title='Original image (x_true)', force_show=True)
y.show(title='Noisy image (y)', force_show=True)

# %% Set up problem components

ident = odl.IdentityOperator(X)
grad = odl.Gradient(X)  # need this here for L1Norm below

# Function without linear operator
f = odl.solvers.IndicatorNonnegativity(X)

# Functions to be composed with linear operators. L[i] applies to g[i].
alpha = 0.15
g = [
    odl.solvers.L2NormSquared(X).translated(y),
    alpha * odl.solvers.L1Norm(grad.range)
]
L = [ident, grad]

# We check if everything makes sense by evaluating the total functional at 0
x = X.zero()
print(f(x) + sum(g[i](L[i](x)) for i in range(len(g))))
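At x = 0 this should print ||y||^2: the nonnegativity indicator and the TV term both vanish there. A possible continuation (a sketch, not part of the original; the step sizes follow the pattern of Example no. 2):

# Solve min_x f(x) + sum_i g[i](L[i](x)) with Douglas-Rachford primal-dual
opnorms = [odl.power_method_opnorm(op, xstart=y) for op in L]
sigma = [1 / nrm ** 2 for nrm in opnorms]
odl.solvers.douglas_rachford_pd(x, f, g, L, tau=1.0, sigma=sigma, niter=100)
x.show(title='Denoised image (x)', force_show=True)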