Example #1
"""Solve the optimization problem

    min_x  10 ||laplacian(x) - g||_2^2 + || |grad(x)| ||_1

where ``laplacian`` is the spatial Laplacian, ``grad`` is the spatial
gradient and ``g`` is the given noisy data.
"""

import numpy as np
import odl
import proximal

# Create space defined on a square from [0, 0] to [100, 100] with (100 x 100)
# points
space = odl.uniform_discr([0, 0], [100, 100], [100, 100])

# Create ODL operator for the Laplacian
laplacian = odl.Laplacian(space)

# Create right hand side
phantom = odl.phantom.shepp_logan(space, modified=True)
phantom.show('original image')
rhs = laplacian(phantom)
rhs += odl.phantom.white_noise(space) * np.std(rhs) * 0.1
rhs.show('rhs')

# Convert laplacian to ProxImaL operator
proximal_lang_laplacian = odl.as_proximal_lang_operator(laplacian)

# Convert to array
rhs_arr = rhs.asarray()

# Set up optimization problem
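# The listing is truncated here. Below is a minimal sketch of how the setup
# could continue with ProxImaL; the use of proximal.Variable, sum_squares,
# norm1, grad and Problem is an assumption, not verbatim from the source.
x = proximal.Variable(space.shape)
funcs = [10 * proximal.sum_squares(proximal_lang_laplacian(x) - rhs_arr),
         proximal.norm1(proximal.grad(x))]

# Solve the problem and map the solution back into the ODL space
prob = proximal.Problem(funcs)
prob.solve(verbose=True)
space.element(x.value).show('result')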
Example #2

# The opening of this example (imports, the continuous ``space`` and the
# start of ``boundary_values``) is reconstructed by analogy with Example #5,
# which uses the same boundary data; treat these lines as an assumption.
import numpy as np
import odl

# Continuous function space on the unit square (assumed)
space = odl.FunctionSpace(odl.IntervalProd([0, 0], [1, 1]))


def boundary_values(point):
    """Dirichlet boundary data on the unit square."""
    x, y = point
    result = 0.25 * np.sin(np.pi * y) * (x == 0)
    result += 1.00 * np.sin(np.pi * y) * (x == 1)
    result += 0.50 * np.sin(np.pi * x) * (y == 0)
    result += 0.50 * np.sin(np.pi * x) * (y == 1)
    return result


n_last = 1
for n in [5, 50, 500]:
    # Discrete reconstruction space
    domain = odl.uniform_discr_fromspace(space, [n, n],
                                         nodes_on_bdry=True,
                                         interp='linear')

    # Define right hand side
    rhs = domain.element(boundary_values)

    # Define operator
    laplacian = odl.Laplacian(domain) * (-1.0 / n**2)

    if n_last == 1:
        # Pick initial guess
        vec = domain.zero()
    else:
        # Extend last value if possible
        extension = vec.space.extension(vec.ntuple)
        vec = domain.element(extension)

    # Solve with conjugate gradient
    odl.solvers.conjugate_gradient(laplacian,
                                   vec,
                                   rhs,
                                   niter=n,
                                   partial=odl.solvers.ShowPartial())

    # Remember the current grid size so that the next, finer grid starts
    # from the previous solution instead of from zero (otherwise the
    # ``else`` branch above is never reached)
    n_last = n
Example #3
# --- Segmentation starts here ---

# Create the "conv" operator that adds neighbor regularization

# Create convolution with gaussian operator
ft = odl.trafos.FourierTransform(domain)
const = filter_width**2 / 4.0**2
gaussian = ft.range.element(lambda x: np.exp(-(x[0]**2 + x[1]**2) * const))
convolution = ft.inverse * gaussian * ft
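# (By the convolution theorem, multiplying by ``gaussian`` in the Fourier
# domain and transforming back is the same as convolving with a Gaussian
# kernel in space, so ``convolution`` acts as a smoothing operator.)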

# Add extra weight on the diagonal
diag = odl.IdentityOperator(domain)
conv = diag + alpha * convolution

# Create Laplacian
lap = odl.Laplacian(domain, pad_mode='constant')

# Create initial guess: mu[k] is a 0/1 mask assigning each point of ``y`` to
# class k, thresholding at the midpoints between consecutive class values
# ``c[k]``
mu = [
    np.less(y, (c[0] + c[1]) / 2),
    np.logical_and(np.greater_equal(y, (c[0] + c[1]) / 2),
                   np.less(y, (c[1] + c[2]) / 2)),
    np.greater_equal(y, (c[1] + c[2]) / 2)
]
x = y.copy()

callback = (odl.solvers.CallbackShow('bias', display_step=1)
            & odl.solvers.CallbackPrintIteration())

# Store some values
I = len(c)
Example #4
# The exponent is computed from the data as follows:
# - Compute a smoothed Laplacian L(g) of the data g (see below)
# - Take its absolute value and smooth it more aggressively
# - Multiply by 2 / max(L(g)), then clip at value 1.
#   This is to make the regions with high values broader.
# - Use 2 minus the result as exponent
def exp_kernel(x, **kwargs):
    """Unnormalized isotropic Gaussian with standard deviation ``s``."""
    s = kwargs.pop('s', 0.5)
    scaled = [xi / (np.sqrt(2) * s) for xi in x]
    return np.exp(-sum(xi**2 for xi in scaled))


# Pre-smoothing convolution
fourier = odl.trafos.FourierTransform(reco_space)
pre_kernel = reco_space.element(exp_kernel, s=0.05)
pre_kernel_ft = fourier(pre_kernel) * (2 * np.pi)
pre_conv = fourier.inverse * pre_kernel_ft * fourier
smoothed_lapl = odl.Laplacian(reco_space, pad_mode='symmetric') * pre_conv
# Smoothed Laplacian of the data
abs_lapl = np.abs(smoothed_lapl(data))
# Remove jumps at the boundary, they're artificial
avg = np.mean(abs_lapl)
abs_lapl[:5, :] = avg
abs_lapl[-5:, :] = avg
abs_lapl[:, :5] = avg
abs_lapl[:, -5:] = avg
# Post-smoothing
post_kernel = reco_space.element(exp_kernel, s=0.4)
post_kernel_ft = fourier(post_kernel) * (2 * np.pi)
post_conv = fourier.inverse * post_kernel_ft * fourier
conv_abs_lapl = np.maximum(post_conv(abs_lapl), 0)
conv_abs_lapl -= np.min(conv_abs_lapl)
conv_abs_lapl *= 2 / np.max(conv_abs_lapl)
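# The listing stops here. Following the recipe in the comments above, the
# remaining steps would clip at 1 and use 2 minus the result as the exponent
# (a sketch, not verbatim from the source):
exponent = 2 - np.minimum(conv_abs_lapl, 1)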
Example #5
import numpy as np

# Internal
import odl

n = 100

# Discrete reconstruction space
domain = odl.uniform_discr([0, 0], [1, 1], [n, n])

# Define right hand side
x, y = domain.grid.coord_vectors
rhs_arr = np.zeros([n, n])
rhs_arr[0, :] = 0.25 * np.sin(np.pi * y)
rhs_arr[-1, :] = 1.00 * np.sin(np.pi * y)
rhs_arr[:, 0] = 0.50 * np.sin(np.pi * x)
rhs_arr[:, -1] = 0.50 * np.sin(np.pi * x)
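# Scale by n**2 = 1 / h**2: with the finite-difference Laplacian, prescribing
# Dirichlet boundary values amounts to moving them to the right-hand side
# divided by the squared grid spacing (interpretation of the factor below)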
rhs_arr *= n**2
rhs = domain.element(rhs_arr)

# Define operator
laplacian = -odl.Laplacian(domain)
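# (The sign flip makes the discrete operator positive definite, which
# matters because ODL's plain ``conjugate_gradient`` solver is intended for
# self-adjoint, positive-definite operators.)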

# Solve with conjugate gradient
x = domain.zero()
odl.solvers.conjugate_gradient(laplacian,
                               x,
                               rhs,
                               niter=300,
                               partial=odl.solvers.ShowPartial())