예제 #1
0
    def tv_reconstruction(self, y, param=def_lambda):
        """Total-variation regularized reconstruction of a single element.

        Follows the ODL example
        https://github.com/odlgroup/odl/blob/master/examples/solvers/pdhg_tomography.py
        (only the names of ``g`` and ``functional`` were swapped there).

        Parameters
        ----------
        y : array-like with shape [width, height]
            Measurement to reconstruct from.
        param : float, optional
            Weight of the TV (L1-of-gradient) regularization term.
        """
        # Stack the forward operator and the spatial gradient.
        grad_op = odl.Gradient(self.space, method='forward')
        stacked_op = odl.BroadcastOperator(self.operator, grad_op)

        # Zero functional on the primal variable (fits the PDHG framework).
        zero_func = odl.solvers.ZeroFunctional(stacked_op.domain)

        # Data fit (translated squared L2) plus weighted TV term, combined
        # in the same order as the components of `stacked_op`.
        tv_term = param * odl.solvers.L1Norm(grad_op.range)
        data_fit = odl.solvers.L2NormSquared(self.range).translated(y)
        dual_func = odl.solvers.SeparableSum(data_fit, tv_term)

        # Step sizes from a slightly inflated operator-norm estimate.
        op_norm = 1.1 * odl.power_method_opnorm(stacked_op)
        tau = 10.0 / op_norm
        sigma = 0.1 / op_norm
        niter = 200

        # Warm start from the model's learned inverse.
        x = self.space.element(
            self.model.inverse(np.expand_dims(y, axis=-1))[..., 0])

        # Solve with the primal-dual hybrid gradient method.
        odl.solvers.pdhg(x, zero_func, dual_func, stacked_op,
                         tau=tau, sigma=sigma, niter=niter)
        return x
예제 #2
0
    def reconstruction(proj_data, lam):
        """TV-regularized least-squares reconstruction of ``proj_data``.

        ``lam`` weighs the isotropic TV term; non-positive values are
        rejected with an infinite-valued image so parameter searches
        discard them.
        """
        lam = float(lam)

        print('lam = {}'.format(lam))

        # Negative regularization parameters are invalid -> bogus result.
        if lam <= 0:
            return np.inf * space.one()

        # Forward operator stacked with the spatial gradient.
        grad = odl.Gradient(space)
        stacked = odl.BroadcastOperator(ray_trafo, grad)

        # Zero functional on the primal variable.
        f = odl.solvers.ZeroFunctional(stacked.domain)

        # Data fit plus weighted isotropic TV, ordered like `stacked`.
        data_fit = odl.solvers.L2NormSquared(
            ray_trafo.range).translated(proj_data)
        tv_term = lam * odl.solvers.GroupL1Norm(grad.range)
        g = odl.solvers.SeparableSum(data_fit, tv_term)

        # Conservative operator-norm estimate for the step sizes.
        op_norm = 1.5 * odl.power_method_opnorm(stacked, maxiter=10)

        # Solve with PDHG from a zero starting point.
        x = stacked.domain.zero()
        odl.solvers.pdhg(x, f, g, stacked, niter=200,
                         tau=1.0 / op_norm, sigma=1.0 / op_norm)

        return x
예제 #3
0
    def tv_reconstruction(self, y, param=def_lambda):
        """TV-regularized reconstruction of a single element ``y``.

        Solves ``min_x ||A x - y||_2^2 + param * ||grad x||_1`` with PDHG.

        Parameters
        ----------
        y : array-like with shape [width, height]
            Measurement data.
        param : float, optional
            Weight of the TV regularization term.
        """
        # the operators
        gradients = odl.Gradient(self.space, method='forward')
        broad_op = odl.BroadcastOperator(self.operator, gradients)
        # define empty functional to fit the chambolle_pock framework
        g = odl.solvers.ZeroFunctional(broad_op.domain)

        # the norms
        l1_norm = param * odl.solvers.L1Norm(gradients.range)
        l2_norm_squared = odl.solvers.L2NormSquared(self.range).translated(y)
        functional = odl.solvers.SeparableSum(l2_norm_squared, l1_norm)

        # Find parameters
        op_norm = 1.1 * odl.power_method_opnorm(broad_op)
        tau = 10.0 / op_norm
        sigma = 0.1 / op_norm
        niter = 200

        # find starting point
        x = self.space.element(
            self.model.inverse(np.expand_dims(y, axis=-1))[..., 0])

        # Run the optimization algorithm.
        # BUG FIX: ``odl.solvers.pdhg(x, f, g, L)`` minimizes f(x) + g(L x),
        # so the zero functional (defined on the domain) must be passed as
        # ``f`` and the separable sum (defined on the range) as ``g``.  The
        # original call used the old ``chambolle_pock_solver`` argument order
        # (cf. the NOTE in the sibling implementation of this method).
        odl.solvers.pdhg(x, g, functional, broad_op,
                         tau=tau, sigma=sigma, niter=niter)
        return x
def tgv(operator, data, alpha, beta, grad=None, nonneg=True, datafit=None):
    """Set up the TGV (total generalized variation) regularized problem.

    Returns the triple ``(G, F, A)`` for a problem of the form
    ``min G(x) + F(A x)``, matching the convention of the sibling models
    ``h1``, ``jtv`` and ``tnv``.

    Parameters
    ----------
    operator : forward operator; its domain is the image space.
    data : data to fit.
    alpha : weight of the first-order (gradient) term.
    beta : relative weight of the second-order (symmetrized derivative) term.
    grad : optional pre-built gradient operator; created if ``None``.
    nonneg : if ``True``, constrain the image component to be nonnegative.
    datafit : data-fit choice forwarded to ``get_data_fit``.
    """
    space = operator.domain

    if grad is None:
        grad = gradient(space)

    E = symm_derivative(space)
    I = odl.IdentityOperator(grad.range)

    # A acts on the product variable (x, w):
    #   A1(x, w) -> operator(x);  A2(x, w) -> grad(x) - w;  A3(x, w) -> E(w)
    A1 = odl.ReductionOperator(operator,
                               odl.ZeroOperator(grad.range, operator.range))
    A2 = odl.ReductionOperator(grad, -I)
    A3 = odl.ReductionOperator(odl.ZeroOperator(space, E.range), E)
    A = odl.BroadcastOperator(*[A1, A2, A3])

    F1 = get_data_fit(datafit, data)
    F2 = alpha * odl.solvers.GroupL1Norm(grad.range)
    F3 = alpha * beta * odl.solvers.GroupL1Norm(E.range)
    F = odl.solvers.SeparableSum(F1, F2, F3)

    # BUG FIX: the branches were inverted -- ``nonneg=True`` must impose the
    # nonnegativity constraint, as in the sibling models ``h1``, ``jtv``
    # and ``tnv``.
    if nonneg:
        G = odl.solvers.SeparableSum(odl.solvers.IndicatorNonnegativity(space),
                                     odl.solvers.ZeroFunctional(E.domain))
    else:
        G = odl.solvers.SeparableSum(odl.solvers.ZeroFunctional(space),
                                     odl.solvers.ZeroFunctional(E.domain))

    return G, F, A
예제 #5
0
def test_chambolle_pock_solver_produce_space():
    """Chambolle-Pock on a broadcast (product-space range) operator.

    With both functionals set to zero, a single iteration starting from
    ``x_0`` must yield ``x_0 - tau * sigma * K^T K x_0``, which is
    verified against the explicit computation.
    """
    # Discretized image space holding the test data.
    space = odl.uniform_discr(0, 1, DATA.size)

    # Broadcast of the identity and a scaled identity.
    identity = odl.IdentityOperator(space)
    prod_op = odl.BroadcastOperator(identity, -2 * identity)

    # Keep the starting point; the solver overwrites its copy in place.
    discr_vec_0 = prod_op.domain.element(DATA)
    discr_vec = discr_vec_0.copy()

    # Zero functionals so the iteration reduces to pure operator steps.
    g = odl.solvers.ZeroFunctional(prod_op.domain)
    f = odl.solvers.ZeroFunctional(prod_op.range).convex_conj

    # Run a single iteration of the algorithm.
    chambolle_pock_solver(discr_vec, f, g, prod_op, tau=TAU, sigma=SIGMA,
                          theta=THETA, niter=1)

    # Explicit single-step result for comparison.
    expected = discr_vec_0 - TAU * SIGMA * prod_op.adjoint(
        prod_op(discr_vec_0))
    assert all_almost_equal(discr_vec, expected, PLACES)
 def _call(self, x, out):
     # Apply P = Id - gamma * (xi xi^T) to ``x`` and write the result into
     # ``out`` (in-place operator evaluation).
     # NOTE(review): ``vfield`` and ``domain`` come from the enclosing
     # scope, which is not visible here -- confirm their types.  Also note
     # that ``x`` inside the comprehension below shadows the argument only
     # within the comprehension (Python 3 scoping), so ``P(x)`` still
     # receives the method argument.
     xi = vfield
     Id = IdentityOperator(domain)
     # Pointwise inner product with the vector field: xi^T.
     xiT = odl.PointwiseInner(domain, xi)
     # xi xi^T built as a broadcast of component-scaled inner products.
     xixiT = odl.BroadcastOperator(*[x * xiT for x in xi])
     gamma = 1  # fixed weight of the xi xi^T term
     P = (Id - gamma * xixiT)
     out.assign(P(x))
    def transl_op_fixed_im(self, im):
        """Deformation operator for the fixed template ``im``, composed
        with the embedding of the parameter space."""
        if isinstance(self.image_space, odl.ProductSpace):
            # One fixed-template deformation per channel of the product space.
            channel_ops = [defm.LinDeformFixedTempl(im[0]),
                           defm.LinDeformFixedTempl(im[1])]
            deformation = odl.BroadcastOperator(*channel_ops)
        else:
            deformation = defm.LinDeformFixedTempl(im)

        return deformation * self.embedding
def gradient(space,
             sinfo=None,
             mode=None,
             gamma=1,
             eta=1e-2,
             show_sinfo=False,
             prefix=None):
    """Build a (possibly side-information weighted) gradient operator.

    Parameters
    ----------
    space : image space the gradient acts on.
    sinfo : optional side-information image used to steer the gradient.
    mode : ``'direction'`` removes the component along the side-information
        edge direction; ``'location'`` down-weights locations with strong
        side-information edges; any other value with ``sinfo`` set yields
        ``None``.
    gamma : weight of the directional projection term (``'direction'``).
    eta : smoothing parameter that keeps denominators/weights away from 0.
    show_sinfo : if ``True``, save diagnostic images via ``misc``.
    prefix : filename prefix for the saved diagnostics.

    Returns
    -------
    An ODL operator, or ``None`` for an unrecognized ``mode`` with ``sinfo``.
    """

    grad = odl.Gradient(space, method='forward', pad_mode='symmetric')

    if sinfo is not None:
        if mode == 'direction':
            norm = odl.PointwiseNorm(grad.range)
            grad_sinfo = grad(sinfo)
            ngrad_sinfo = norm(grad_sinfo)

            # Normalize the side-information gradient by its maximal
            # pointwise norm (in place, component by component).
            for i in range(len(grad_sinfo)):
                grad_sinfo[i] /= ngrad_sinfo.ufuncs.max()

            # Recompute the norms after normalization; eta smooths them.
            ngrad_sinfo = norm(grad_sinfo)
            ngrad_sinfo_eta = np.sqrt(ngrad_sinfo**2 + eta**2)

            # Edge-direction field of the side information.
            xi = grad.range.element([g / ngrad_sinfo_eta
                                     for g in grad_sinfo])  # UGLY

            Id = odl.operator.IdentityOperator(grad.range)
            xiT = odl.PointwiseInner(grad.range, xi)
            xixiT = odl.BroadcastOperator(*[x * xiT for x in xi])

            # Remove (gamma-weighted) the gradient component along xi.
            grad = (Id - gamma * xixiT) * grad

            if show_sinfo:
                misc.save_image(ngrad_sinfo, prefix + '_sinfo_norm')
                misc.save_vfield(xi.asarray(), filename=prefix + '_sinfo_xi')
                misc.save_vfield_cmap(filename=prefix + '_sinfo_xi_cmap')

        elif mode == 'location':
            norm = odl.PointwiseNorm(grad.range)
            ngrad_sinfo = norm(grad(sinfo))
            ngrad_sinfo /= ngrad_sinfo.ufuncs.max()

            # Weights near 1 in flat regions, small near strong edges.
            w = eta / np.sqrt(ngrad_sinfo**2 + eta**2)
            grad = odl.DiagonalOperator(odl.MultiplyOperator(w), 2) * grad

            if show_sinfo:
                misc.save_image(ngrad_sinfo, prefix + '_sinfo_norm')
                misc.save_image(w, prefix + '_w')

        else:
            grad = None

    return grad
def jtv(operator, data, alpha, sinfo, eta, nonneg=True, datafit=None):
    """Joint-TV model: returns ``(G, F, A)`` for ``min G(x) + F(A x)``.

    The regularizer couples the image derivatives with the (eta-scaled)
    derivatives of the side information ``sinfo`` through a group-L1 norm
    on the stacked four-component derivative space.
    """
    space = operator.domain

    # Image derivatives occupy the first two slots; zero operators keep
    # the last two slots for the side-information derivatives.
    Dx = odl.PartialDerivative(space, 0)
    Dy = odl.PartialDerivative(space, 1)
    Z = odl.ZeroOperator(space)
    D = odl.BroadcastOperator(Dx, Dy, Z, Z)
    A = odl.BroadcastOperator(operator, D)

    # Data fit plus group-L1 term translated by the side information,
    # which fills the complementary slots via Q.
    F1 = get_data_fit(datafit, data)
    Q = odl.BroadcastOperator(Z, Z, Dx, Dy)
    N = odl.solvers.GroupL1Norm(D.range)
    F2 = alpha * N.translated(-eta * Q(sinfo))
    F = odl.solvers.SeparableSum(F1, F2)

    # Optional nonnegativity constraint on the image.
    G = (odl.solvers.IndicatorNonnegativity(space) if nonneg
         else odl.solvers.ZeroFunctional(space))

    return G, F, A
def get_operators(space):
    """Build a normalized Gaussian-blur operator, normalized partial
    gradients and the corresponding tensorflow layers for ``space``.

    Returns the tuple ``(op_layer, op_layer_adjoint, grad0_layer,
    grad0_layer_adjoint, grad1_layer, grad1_layer_adjoint,
    part_grad_0, part_grad_1, operator, pseudoinverse)``.
    """
    # Gaussian blur implemented as a Fourier-domain multiplication.
    filter_width = 4  # standard deviation of the Gaussian filter
    ft = odl.trafos.FourierTransform(space)
    c = filter_width**2 / 4.0**2
    gaussian = ft.range.element(lambda x: np.exp(-(x[0]**2 + x[1]**2) * c))
    operator = ft.inverse * gaussian * ft

    # Scale the operator to unit norm.
    opnorm = odl.power_method_opnorm(operator)
    operator = (1 / opnorm) * operator

    # Do not need good pseudo-inverse, but keep to have same interface.
    pseudoinverse = odl.ZeroOperator(space)

    # Partial derivatives, jointly normalized by the gradient norm.
    part_grad_0 = odl.PartialDerivative(space, 0, method='forward',
                                        pad_mode='order0')
    part_grad_1 = odl.PartialDerivative(space, 1, method='forward',
                                        pad_mode='order0')

    grad_norm = odl.power_method_opnorm(
        odl.BroadcastOperator(part_grad_0, part_grad_1),
        xstart=odl.util.testutils.noise_element(space))

    part_grad_0 = (1 / grad_norm) * part_grad_0
    part_grad_1 = (1 / grad_norm) * part_grad_1

    # Wrap the ODL operators as tensorflow layers.
    as_layer = odl.contrib.tensorflow.as_tensorflow_layer
    with tf.name_scope('odl_layers'):
        odl_op_layer = as_layer(operator, 'RayTransform')
        odl_op_layer_adjoint = as_layer(operator.adjoint,
                                        'RayTransformAdjoint')
        odl_grad0_layer = as_layer(part_grad_0, 'PartialGradientDim0')
        odl_grad0_layer_adjoint = as_layer(part_grad_0.adjoint,
                                           'PartialGradientDim0Adjoint')
        odl_grad1_layer = as_layer(part_grad_1, 'PartialGradientDim1')
        odl_grad1_layer_adjoint = as_layer(part_grad_1.adjoint,
                                           'PartialGradientDim1Adjoint')

    return (odl_op_layer, odl_op_layer_adjoint, odl_grad0_layer,
            odl_grad0_layer_adjoint, odl_grad1_layer, odl_grad1_layer_adjoint,
            part_grad_0, part_grad_1, operator, pseudoinverse)
def h1(operator, data, alpha, grad=None, nonneg=True, datafit=None):
    """H1 (squared-gradient) model: returns ``(G, F, A)`` for
    ``min G(x) + F(A x)``."""
    space = operator.domain

    # Build a default gradient if none was supplied.
    if grad is None:
        grad = gradient(space)

    # Forward operator stacked with the gradient.
    A = odl.BroadcastOperator(operator, grad)

    # Data fit plus squared-L2 smoothness penalty, ordered like A.
    data_term = get_data_fit(datafit, data)
    smooth_term = alpha * odl.solvers.L2NormSquared(grad.range)
    F = odl.solvers.SeparableSum(data_term, smooth_term)

    # Optional nonnegativity constraint on the image.
    G = (odl.solvers.IndicatorNonnegativity(space) if nonneg
         else odl.solvers.ZeroFunctional(space))

    return G, F, A
def get_operators(space, geometry):
    """Build the normalized ray transform, its FBP pseudo-inverse,
    normalized partial gradients and the matching tensorflow layers.

    Returns ``(op_layer, op_layer_adjoint, grad0_layer,
    grad0_layer_adjoint, grad1_layer, grad1_layer_adjoint,
    operator, pseudoinverse)``.
    """
    # Ray transform and its filtered-backprojection inverse.
    operator = odl.tomo.RayTransform(space, geometry)
    pseudoinverse = odl.tomo.fbp_op(operator)

    # Scale the operator to unit norm; rescale the FBP accordingly.
    opnorm = odl.power_method_opnorm(operator)
    operator = (1 / opnorm) * operator
    pseudoinverse = pseudoinverse * opnorm

    # Partial derivatives, jointly normalized by the gradient norm.
    part_grad_0 = odl.PartialDerivative(space, 0, method='forward',
                                        pad_mode='order0')
    part_grad_1 = odl.PartialDerivative(space, 1, method='forward',
                                        pad_mode='order0')

    grad_norm = odl.power_method_opnorm(
        odl.BroadcastOperator(part_grad_0, part_grad_1),
        xstart=odl.util.testutils.noise_element(space))

    part_grad_0 = (1 / grad_norm) * part_grad_0
    part_grad_1 = (1 / grad_norm) * part_grad_1

    # Wrap the ODL operators as tensorflow layers.
    as_layer = odl.contrib.tensorflow.as_tensorflow_layer
    with tf.name_scope('odl_layers'):
        odl_op_layer = as_layer(operator, 'RayTransform')
        odl_op_layer_adjoint = as_layer(operator.adjoint,
                                        'RayTransformAdjoint')
        odl_grad0_layer = as_layer(part_grad_0, 'PartialGradientDim0')
        odl_grad0_layer_adjoint = as_layer(part_grad_0.adjoint,
                                           'PartialGradientDim0Adjoint')
        odl_grad1_layer = as_layer(part_grad_1, 'PartialGradientDim1')
        odl_grad1_layer_adjoint = as_layer(part_grad_1.adjoint,
                                           'PartialGradientDim1Adjoint')

    return (odl_op_layer, odl_op_layer_adjoint, odl_grad0_layer,
            odl_grad0_layer_adjoint, odl_grad1_layer, odl_grad1_layer_adjoint,
            operator, pseudoinverse)
def tnv(operator, data, alpha, sinfo, eta, nonneg=True, datafit=None):
    """Total-nuclear-variation model: returns ``(G, F, A)`` for
    ``min G(x) + F(A x)``.

    The image gradient is embedded as component 0 of a doubled gradient
    space; the nuclear norm is translated by the eta-scaled gradient of
    ``sinfo`` placed in component 1.
    """
    space = operator.domain
    grad = odl.Gradient(space)

    # Embed the image gradient into component 0 of grad.range ** 2.
    P = odl.ComponentProjection(grad.range**2, 0)
    D = P.adjoint * grad
    Q = odl.ComponentProjection(grad.range**2, 1)
    A = odl.BroadcastOperator(operator, D)

    # Data fit plus nuclear-norm coupling with the side information.
    data_term = get_data_fit(datafit, data)
    N = odl.solvers.NuclearNorm(D.range, outer_exp=1, singular_vector_exp=1)
    coupling_term = alpha * N.translated(-Q.adjoint(eta * grad(sinfo)))
    F = odl.solvers.SeparableSum(data_term, coupling_term)

    # Optional nonnegativity constraint on the image.
    G = (odl.solvers.IndicatorNonnegativity(space) if nonneg
         else odl.solvers.ZeroFunctional(space))

    return G, F, A
예제 #14
0
    def tv_reconsruction(self, y, param=1000000):
        """TV-regularized reconstruction from raw intensity data ``y``.

        The intensities are log-transformed into line integrals, then
        ``min_x ||R x - data||_2^2 + param * ||grad x||_1`` is solved with
        the Chambolle-Pock method, starting from the FBP image.

        Parameters
        ----------
        y : raw detector intensities.
        param : float, optional
            Weight of the TV regularization term.

        Returns
        -------
        The reconstructed image (also displayed with matplotlib).
        """
        # the operators
        gradients = odl.Gradient(self.space, method='forward')
        operator = odl.BroadcastOperator(self.ray_transf, gradients)
        # define empty functional to fit the chambolle_pock framework
        g = odl.solvers.ZeroFunctional(operator.domain)

        # compute transformed data
        # clip intensities away from 0 so the log stays well defined
        y_cut = np.maximum(y, 0.03)
        data = -(np.log(y_cut)) / self.attenuation_coeff

        # the norms
        l1_norm = param * odl.solvers.L1Norm(gradients.range)
        l2_norm_squared = odl.solvers.L2NormSquared(
            self.ray_transf.range).translated(data)
        functional = odl.solvers.SeparableSum(l2_norm_squared, l1_norm)

        # Find parameters
        op_norm = 1.1 * odl.power_method_opnorm(operator)
        tau = 10.0 / op_norm
        sigma = 0.1 / op_norm
        niter = 5000

        # find starting point: filtered backprojection of the data
        x = self.fbp(data)

        # Run the optimization algorithm
        odl.solvers.chambolle_pock_solver(x,
                                          functional,
                                          g,
                                          operator,
                                          tau=tau,
                                          sigma=sigma,
                                          niter=niter)

        # plot results
        plt.figure(1)
        plt.imshow(x)
        plt.show()

        # FIX: return the reconstruction instead of discarding it
        # (backward compatible -- previous callers received ``None``).
        return x
예제 #15
0
# Initialize gradient operator
gradient = odl.Gradient(reco_space, method='forward')

# Backward-difference gradient, stacked ndim times; applied below as
# ``eps(y)`` to the second product-space component.
gradient_back = odl.Gradient(reco_space, method='backward')
eps = odl.DiagonalOperator(gradient_back, reco_space.ndim)

# Create the domain of the problem, given by the reconstruction space and the
# range of the gradient on the reconstruction space.
domain = odl.ProductSpace(reco_space, gradient.range)

# Column vector of three operators defined as:
# 1. Computes ``A(x)``
# 2. Computes ``grad(x) - y``
# 3. Computes ``eps(y)``
op = odl.BroadcastOperator(
    ray_trafo * odl.ComponentProjection(domain, 0),
    odl.ReductionOperator(gradient, odl.ScalingOperator(gradient.range, -1)),
    eps * odl.ComponentProjection(domain, 1))

# Do not use the g functional, set it to zero.
g = odl.solvers.ZeroFunctional(op.domain)

# Create functionals for the dual variable

# l2-squared data matching
l2_norm = odl.solvers.L2NormSquared(ray_trafo.range).translated(data)

# The l1-norms scaled by regularization parameters
l1_norm_1 = 0.001 * odl.solvers.L1Norm(gradient.range)
l1_norm_2 = 1e-4 * odl.solvers.L1Norm(eps.range)

# Combine functionals, order must correspond to the operator K
예제 #16
0
# create data: ground truth corrupted by white Gaussian noise
data = odl.phantom.white_noise(X, mean=groundtruth, stddev=0.1, seed=1807)

# save images and data (only once; keyed on the ground-truth file)
if not os.path.exists('{}/groundtruth.png'.format(folder_main)):
    misc.save_image(groundtruth, 'groundtruth', folder_main, 1, clim=clim)
    misc.save_image(data, 'data', folder_main, 2, clim=clim)

alpha = .12  # set regularisation parameter
gamma = 0.99  # gamma^2 is upper bound of step size constraint

# create forward operators: the two partial derivatives for the TV term
Dx = odl.PartialDerivative(X, 0, pad_mode='symmetric')
Dy = odl.PartialDerivative(X, 1, pad_mode='symmetric')
A = odl.BroadcastOperator(Dx, Dy)
Y = A.range

# set up functional f: one L1 norm per derivative component
f = odl.solvers.SeparableSum(*[odl.solvers.L1Norm(Yi) for Yi in Y])
# set up functional g: data-fit term weighted by 1 / (2 * alpha)
g = 1 / (2 * alpha) * odl.solvers.L2NormSquared(X).translated(data)

obj_fun = f * A + g  # define objective function
mu_g = 1 / alpha  # define strong convexity constants

# create target / compute a saddle point
file_target = '{}/target.npy'.format(folder_main)
if not os.path.exists(file_target):

    # compute a saddle point with PDHG and time the reconstruction
# Adjoint currently bugged, needs to be fixed
proj._adjoint *= proj(phantom0).inner(proj(phantom0)) / phantom0.inner(proj.adjoint(proj(phantom0)))

# Not scaled correctly
proj = proj/5.0

# Create product space
proj_op = odl.diagonal_operator([proj, proj])

energies = np.linspace(0.5, 1.0, 5)
spectrum_low = np.exp(-((energies-0.5) * 2)**2)
spectrum_high = np.exp(-((energies-1.0) * 2)**2)
A_low = SpectralDetector(proj.range, energies, spectrum_low)
A_high = SpectralDetector(proj.range, energies, spectrum_high)

detector_op = odl.BroadcastOperator(A_low, A_high)

# Compose operators
spectral_proj = detector_op * proj_op

# Create data
phantom = spectral_proj.domain.element([phantom0, phantom1])
proj_op(phantom).show(title='materials', clim=[0, 5])
projections = spectral_proj(phantom)
projections.show(title='spectral', clim=[0, 5])

# Reconstruct with big op
if 0:
    partial = (odl.solvers.util.ShowPartial(indices=np.s_[0, :, :, :, n//2], clim=[0, 5]) &
               odl.solvers.util.ShowPartial(indices=np.s_[1, :, n//2, :, n//2]) &
               odl.solvers.util.ShowPartial(indices=np.s_[1, :, :, :, n//2], clim=[0, 1]) &
                            G, F, A = models.tgv(operator,
                                                 data,
                                                 regparam,
                                                 beta,
                                                 grad=grad,
                                                 datafit='l1')

                        else:
                            D = None

                        norm_As = []
                        for Ai in A:
                            xs = odl.phantom.white_noise(Ai.domain, seed=1807)
                            norm_As.append(Ai.norm(estimate=True, xstart=xs))

                        Atilde = odl.BroadcastOperator(
                            *[Ai / norm_Ai for Ai, norm_Ai in zip(A, norm_As)])
                        Ftilde = odl.solvers.SeparableSum(
                            *[Fi * norm_Ai for Fi, norm_Ai in zip(F, norm_As)])

                        obj_fun = Ftilde * Atilde + G

                        Atilde_norm = Atilde.norm(estimate=True)

                        x = Atilde.domain.zero()
                        scaling = 1
                        sigma = scaling / Atilde_norm
                        tau = 0.999 / (scaling * Atilde_norm)

                        cb = (odl.solvers.CallbackPrintIteration(step=step,
                                                                 end=', ')
                              & odl.solvers.CallbackPrintTiming(
예제 #19
0
# --- Generate artificial data --- #

# Create phantom
discr_phantom = odl.phantom.shepp_logan(reco_space, modified=True)

# Create sinogram of forward projected phantom with additive white noise
# scaled to roughly 10% of the mean data value
data = ray_trafo(discr_phantom)
data += odl.phantom.white_noise(ray_trafo.range) * np.mean(data) * 0.1

# --- Set up the inverse problem --- #

# Initialize gradient operator
gradient = odl.Gradient(reco_space)

# Column vector of two operators
op = odl.BroadcastOperator(ray_trafo, gradient)

# Do not use the f functional, set it to zero.
f = odl.solvers.ZeroFunctional(op.domain)

# Create functionals for the dual variable

# l2-squared data matching
l2_norm = odl.solvers.L2NormSquared(ray_trafo.range).translated(data)

# Isotropic TV-regularization i.e. the l1-norm
l1_norm = 0.015 * odl.solvers.L1Norm(gradient.range)

# Combine functionals, order must correspond to the operator K
g = odl.solvers.SeparableSum(l2_norm, l1_norm)
예제 #20
0
ray_trafo = odl.tomo.RayTransform(reco_space, geometry)

# --- Generate artificial data --- #

# Create phantom and noisy projection data (noise ~ 10% of the mean)
phantom = odl.phantom.shepp_logan(reco_space, modified=True)
data = ray_trafo(phantom)
data += odl.phantom.white_noise(ray_trafo.range) * np.mean(data) * 0.1

# --- Set up the inverse problem --- #

# Gradient operator for the TV part
grad = odl.Gradient(reco_space)

# Stacking of the two operators
L = odl.BroadcastOperator(ray_trafo, grad)

# Data matching and regularization functionals; the order of the
# separable sum must match the components of L
data_fit = odl.solvers.L2NormSquared(ray_trafo.range).translated(data)
reg_func = 0.015 * odl.solvers.L1Norm(grad.range)
g = odl.solvers.SeparableSum(data_fit, reg_func)

# We don't use the f functional, setting it to zero
f = odl.solvers.ZeroFunctional(L.domain)

# --- Select parameters and solve using ADMM --- #

# Estimated operator norm, add 10 percent for some safety margin
op_norm = 1.1 * odl.power_method_opnorm(L, maxiter=20)

niter = 200  # Number of iterations
예제 #21
0
# create ground truth on a uniform grid, scaled to [0, 100]
X = odl.uniform_discr([0, 0], simage, simage)
groundtruth = 100 * X.element(image_raw)
clim = [0, 100]
tol_norm = 1.05

# create forward operators: partial derivatives for TV plus a 2D blur
Dx = odl.PartialDerivative(X, 0, pad_mode='symmetric')
Dy = odl.PartialDerivative(X, 1, pad_mode='symmetric')
kernel = images.blurring_kernel(shape=[15, 15])
convolution = misc.Blur2D(X, kernel)
K = odl.uniform_discr([0, 0], kernel.shape, kernel.shape)
kernel = K.element(kernel)

# the blur channel is rescaled by scale / clim[1] before broadcasting
scale = 1e+3
A = odl.BroadcastOperator(Dx, Dy, scale / clim[1] * convolution)
Y = A.range

# create data: Poisson noise applied to blurred image + flat background
background = 200 * Y[2].one()
data = odl.phantom.poisson_noise(A[2](groundtruth) + background, seed=1807)

# save images and data (only once; keyed on the ground-truth file)
if not os.path.exists('{}/groundtruth.png'.format(folder_main)):
    misc.save_image(groundtruth, 'groundtruth', folder_main, 1, clim=clim)
    misc.save_image(data - background, 'data', folder_main, 2, clim=[0, scale])
    misc.save_image(kernel, 'kernel', folder_main, 3)

alpha = 0.1  # set regularisation parameter
gamma = 0.99  # auxiliary step size parameter < 1
예제 #22
0
# Symmetrized-derivative operator on the vector field y; the commented
# variant fixed the range W explicitly instead of deriving it from E.
#sym_gradient = odl.operator.ProductSpaceOperator(
#    [[Dx, 0], [0, Dy], [0.5*Dy, 0.5*Dx]], range=W)
E = odl.operator.ProductSpaceOperator(
    [[Dx, 0], [0, Dy], [0.5*Dy, 0.5*Dx], [0.5*Dy, 0.5*Dx]])
W = E.range

# Create the domain of the problem, given by the reconstruction space and the
# range of the gradient on the reconstruction space.
domain = odl.ProductSpace(U, V)

# Column vector of three operators defined as:
# 1. Computes ``Ax``
# 2. Computes ``Gx - y``
# 3. Computes ``Ey``
op = odl.BroadcastOperator(
    A * odl.ComponentProjection(domain, 0),
    odl.ReductionOperator(G, odl.ScalingOperator(V, -1)),
    E * odl.ComponentProjection(domain, 1))

# Do not use the g functional, set it to zero.
g = odl.solvers.ZeroFunctional(domain)

# l2-squared data matching
l2_norm = odl.solvers.L2NormSquared(A.range).translated(data)

# parameters: alpha weighs the first-order term, alpha*beta the second
alpha = 1e-1
beta = 1

# The l1-norms scaled by regularization parameters
l1_norm_1 = alpha * odl.solvers.L1Norm(V)
l1_norm_2 = alpha * beta * odl.solvers.L1Norm(W)
예제 #23
0
def pdhg(x, f, g, A, tau, sigma, niter, **kwargs):
    """Computes a saddle point with PDHG.

    This algorithm is the same as "algorithm 1" in [CP2011a] but with
    extrapolation on the dual variable.


    Parameters
    ----------
    x : primal variable
        This variable is both input and output of the method.
    f : function
        Functional Y -> IR_infty that has a convex conjugate with a
        proximal operator, i.e. f.convex_conj.proximal(sigma) : Y -> Y.
    g : function
        Functional X -> IR_infty that has a proximal operator, i.e.
        g.proximal(tau) : X -> X.
    A : function
        Operator A : X -> Y that possesses an adjoint: A.adjoint
    tau : scalar / vector / matrix
        Step size for primal variable. Note that the proximal operator of g
        has to be well-defined for this input.
    sigma : scalar
        Scalar / vector / matrix used as step size for dual variable. Note that
        the proximal operator related to f (see above) has to be well-defined
        for this input.
    niter : int
        Number of iterations

    Other Parameters
    ----------------
    y: dual variable
        Dual variable is part of a product space
    z: variable
        Adjoint of dual variable, z = A^* y.
    theta : scalar
        Extrapolation factor.
    callback : callable
        Function called with the current iterate after each iteration.

    References
    ----------
    [CP2011a] Chambolle, A and Pock, T. *A First-Order
    Primal-Dual Algorithm for Convex Problems with Applications to
    Imaging*. Journal of Mathematical Imaging and Vision, 40 (2011),
    pp 120-145.
    """

    # Subset-selection function for spdhg: always pick the single block,
    # which makes the stochastic algorithm deterministic.
    def fun_select(k):
        return [0]

    # Lift f and A into one-component product-space objects so that the
    # generic stochastic PDHG routine can be reused as-is.
    f = odl.solvers.SeparableSum(f)
    A = odl.BroadcastOperator(A, 1)

    # Optional dual variable, lifted into the one-component product space.
    y = kwargs.pop('y', None)
    if y is None:
        y_new = None
    else:
        y_new = A.range.element([y])

    spdhg_generic(x, f, g, A, tau, [sigma], niter, fun_select, y=y_new,
                  **kwargs)

    # Write the updated dual variable back into the caller's object.
    if y is not None:
        y.assign(y_new[0])
예제 #24
0
# Create phantom
phantom = odl.phantom.shepp_logan(space, modified=True)

# Create sinogram of forward projected phantom with white noise scaled
# to ~5% of the mean absolute data value
data = operator(phantom)
data += odl.phantom.white_noise(operator.range) * np.mean(np.abs(data)) * 0.05


# --- Set up the inverse problem --- #


# Initialize gradient operator
gradient = odl.Gradient(space)

# Column vector of two operators
op = odl.BroadcastOperator(operator, gradient)

# Do not use the g functional, set it to zero.
g = odl.solvers.ZeroFunctional(op.domain)

# Create functionals for the dual variable

# l2-squared data matching
l2_norm = odl.solvers.L2NormSquared(operator.range).translated(data)

# Isotropic TV-regularization i.e. the l1-norm
l1_norm = 0.3 * odl.solvers.L1Norm(gradient.range)

# Combine functionals, order must correspond to the operator K
f = odl.solvers.SeparableSum(l2_norm, l1_norm)
예제 #25
0
파일: map_atv.py 프로젝트: petpp/spdhg_pet
            tmp = X.element()
            tmp_op = mMR.operator_mmr()
            tmp_op.toodl(image, tmp)
            fldr = '{}/pics'.format(folder_param)
            misc.save_image(tmp.asarray(), 'image_pet', fldr, planes=planes)
            tmp_op.toodl(image_mr, tmp)
            misc.save_image(tmp.asarray(), 'image_mr', fldr, planes=planes)
            tmp_op.toodl(image_ct, tmp)
            misc.save_image(tmp.asarray(), 'image_ct', fldr, planes=planes)

        # --- get target --- BE CAREFUL, THIS TAKES TIME
        file_target = '{}/target.npy'.format(folder_param)
        if not os.path.exists(file_target):
            print('file {} does not exist. Compute it.'.format(file_target))

            A = odl.BroadcastOperator(K, D)
            f = odl.solvers.SeparableSum(KL, L1)

            norm_A = misc.norm(A, '{}/norm_tv.npy'.format(folder_main))
            sigma = rho / norm_A
            tau = rho / norm_A

            niter_target = nepoch_target

            step = 10
            cb = (CallbackPrintIteration(step=step, end=', ')
                  & CallbackPrintTiming(step=step, cumulative=False, end=', ')
                  & CallbackPrintTiming(
                      step=step, cumulative=True, fmt='total={:.3f} s'))

            x_opt = X.zero()
예제 #26
0
# Create phantom
phantom = odl.phantom.shepp_logan(space, modified=True)

# Create the convolved version of the phantom with ~10% additive noise
data = convolution(phantom)
data += odl.phantom.white_noise(convolution.range) * np.mean(data) * 0.1
data.show('Convolved data')

# Set up PDHG:

# Initialize gradient operator
gradient = odl.Gradient(space, method='forward')

# Column vector of two operators
op = odl.BroadcastOperator(convolution, gradient)

# Create the functional for unconstrained primal variable
g = odl.solvers.ZeroFunctional(op.domain)

# l2-squared data matching
l2_norm_squared = odl.solvers.L2NormSquared(space).translated(data)

# Isotropic TV-regularization i.e. the l1-norm
l1_norm = 0.01 * odl.solvers.L1Norm(gradient.range)

# Make separable sum of functionals, order must be the same as in `op`
f = odl.solvers.SeparableSum(l2_norm_squared, l1_norm)

# --- Select solver parameters and solve using PDHG --- #
예제 #27
0
    os.makedirs(folder_main)

# Output folders: one per run (subfolder) plus an npy dump folder.
folder_today = '{}/{}'.format(folder_main, subfolder)
if not os.path.exists(folder_today):
    os.makedirs(folder_today)

folder_npy = '{}/npy'.format(folder_today)
if not os.path.exists(folder_npy):
    os.makedirs(folder_npy)

# create geometry of operator
X = odl.uniform_discr(min_pt=[-1, -1], max_pt=[1, 1], shape=[nvoxelx, nvoxelx])

geometry = odl.tomo.parallel_beam_geometry(X, num_angles=200, det_shape=250)

# One ray transform per element of `geometry`, broadcast into a product.
G = odl.BroadcastOperator(
    *[odl.tomo.RayTransform(X, g, impl='astra_cpu') for g in geometry])

# create ground truth
Y = G.range
groundtruth = X.element(images.resolution_phantom(shape=X.shape))
clim = [0, 1]
tol_norm = 1.05

# save images and data (data file is the cache key)
file_data = '{}/data.npy'.format(folder_main)
if not os.path.exists(file_data):
    sino = G(groundtruth)

    support = X.element(groundtruth.ufuncs.greater(0))
    factors = -G(0.005 / X.cell_sides[0] * support)
    factors.ufuncs.exp(out=factors)
        ), -1))

phantom /= 1000.0  # convert to g/cm^3

# Forward-project and apply Poisson noise at the given photon count.
data = nonlinear_operator(phantom)
noisy_data = odl.phantom.poisson_noise(
    data * photons_per_pixel) / photons_per_pixel

# --- Set up the inverse problem --- #

# Initialize gradient operator
gradient = odl.Gradient(space)

# Column vector of two operators
# scaling the operator acts as a pre-conditioner, improving convergence.
op = odl.BroadcastOperator(nonlinear_operator, gradient)

# Do not use the g functional, set it to zero.
g = odl.solvers.ZeroFunctional(op.domain)

# Create functionals for the dual variable

# Kullback-Leibler divergence as data fit (matches the Poisson noise above).
# NOTE(review): this uses ``operator.range`` while the broadcast above uses
# ``nonlinear_operator`` -- confirm that both refer to the same range.
data_discr = odl.solvers.KullbackLeibler(operator.range, noisy_data)

# Isotropic TV-regularization i.e. the l1-norm
l1_norm = 0.00011 * odl.solvers.GroupL1Norm(gradient.range)

# Combine functionals, order must correspond to the operator K
f = odl.solvers.SeparableSum(data_discr, l1_norm)
예제 #29
0
space = odl.uniform_discr([0, 0], shape, shape)

# Original image
orig = space.element(image)

# Add noise
image += np.random.normal(0, 0.1, shape)

# Data of noisy image
noisy = space.element(image)

# Gradient operator
gradient = odl.Gradient(space, method='forward')

# Matrix of operators
op = odl.BroadcastOperator(odl.IdentityOperator(space), gradient)

# Set up the functionals

# l2-squared data matching
l2_norm = odl.solvers.L2NormSquared(space).translated(noisy)

# Isotropic TV-regularization: l1-norm of grad(x)
l1_norm = 0.15 * odl.solvers.L1Norm(gradient.range)

# Make separable sum of functionals, order must correspond to the operator K
f = odl.solvers.SeparableSum(l2_norm, l1_norm)

# Non-negativity constraint
g = odl.solvers.IndicatorNonnegativity(op.domain)
예제 #30
0
# Volume partition with each axis rescaled by 1 / sqrt(128).
vol = odl.uniform_partition([-v / sqrt(128) for v in vol],
                            [v / sqrt(128) for v in vol], vol)

# Restrict reconstruction volume and data to a 64-slice axial sub-volume.
vol = odl.uniform_discr_frompartition(vol[:, :, 500:564], dtype='float32')
data = ascontiguousarray(data[:, :, 500:564], dtype='float32')

# Parallel-beam 3D geometry built from angles + rescaled detector partition.
PSpace = (angles,
          odl.uniform_partition([-v / sqrt(128) for v in data.shape[1:]],
                                [v / sqrt(128) for v in data.shape[1:]],
                                data.shape[1:]))
PSpace = odl.tomo.Parallel3dAxisGeometry(*PSpace, axis=[0, 0, 1])

# Operators
Radon = odl.tomo.RayTransform(vol, PSpace)
grad = odl.Gradient(vol)
op = odl.BroadcastOperator(Radon, grad)
g = odl.solvers.functional.default_functionals.IndicatorNonnegativity(
    op.domain)
fidelity = odl.solvers.L2NormSquared(Radon.range).translated(data)
TV = .0003 * odl.solvers.L1Norm(grad.range)
# fidelity = odl.solvers.L1Norm(Radon.range).translated(data)
# TV = .03 * odl.solvers.L1Norm(grad.range)
f = odl.solvers.SeparableSum(fidelity, TV)
# NOTE(review): ``data`` is rescaled in place AFTER being handed to
# ``.translated`` above; whether ``fidelity`` sees the rescaled data depends
# on whether ``translated`` copies its argument -- confirm this is intended.
data /= abs(Radon.adjoint(data).__array__()).max()

# fbp = odl.tomo.fbp_op(Radon)
# fbp = fbp(data)
# fbp.show(title='FBP', force_show='True')
# exit()

# op_norm = [odl.power_method_opnorm(Radon), odl.power_method_opnorm(grad)]