Example #1
    def __init__(self, ray_trafo, niter=None, **kwargs):
        """
        Parameters
        ----------
        ray_trafo : :class:`odl.tomo.RayTransform`
            Ray transform from which the FBP operator is constructed.
        niter : int, optional
            Number of iteration blocks
        """
        super().__init__(ray_trafo, **kwargs)

        # NOTE: self.ray_trafo is possibly normalized, while ray_trafo is not
        self.non_normed_ray_trafo = ray_trafo

        if niter is not None:
            self.niter = niter
            if kwargs.get('hyper_params', {}).get('niter') is not None:
                warn("hyper parameter 'niter' overridden by constructor "
                     "argument")

        self.ray_trafo_mod = OperatorModule(self.ray_trafo)
        self.ray_trafo_adj_mod = OperatorModule(self.ray_trafo.adjoint)

        partial0 = odl.PartialDerivative(self.ray_trafo.domain, axis=0)
        partial1 = odl.PartialDerivative(self.ray_trafo.domain, axis=1)
        self.reg_mod = OperatorModule(partial0.adjoint * partial0 +
                                      partial1.adjoint * partial1)
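For reference, the regularizer assembled above from odl.PartialDerivative acts as a discrete negative Laplacian, the gradient of the smoothness penalty 0.5 * ||grad x||^2. A minimal standalone sketch (the 64x64 space is an illustrative assumption, not taken from the original class):

import odl

space = odl.uniform_discr([0, 0], [1, 1], [64, 64])
partial0 = odl.PartialDerivative(space, axis=0)
partial1 = odl.PartialDerivative(space, axis=1)
reg = partial0.adjoint * partial0 + partial1.adjoint * partial1
x = odl.phantom.shepp_logan(space, modified=True)
y = reg(x)  # same operator as the one wrapped in self.reg_mod above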
Example #2
def get_operators(space):
    # Create the forward operator
    filter_width = 4  # standard deviation of the Gaussian filter
    ft = odl.trafos.FourierTransform(space)
    c = filter_width**2 / 4.0**2
    gaussian = ft.range.element(lambda x: np.exp(-(x[0]**2 + x[1]**2) * c))
    operator = ft.inverse * gaussian * ft

    # Normalize the operator and create pseudo-inverse
    opnorm = odl.power_method_opnorm(operator)
    operator = (1 / opnorm) * operator

    # A good pseudo-inverse is not needed here; keep one to preserve the same
    # interface.
    pseudoinverse = odl.ZeroOperator(space)

    # Create gradient operator and normalize it
    part_grad_0 = odl.PartialDerivative(space,
                                        0,
                                        method='forward',
                                        pad_mode='order0')
    part_grad_1 = odl.PartialDerivative(space,
                                        1,
                                        method='forward',
                                        pad_mode='order0')

    grad_norm = odl.power_method_opnorm(
        odl.BroadcastOperator(part_grad_0, part_grad_1),
        xstart=odl.util.testutils.noise_element(space))

    part_grad_0 = (1 / grad_norm) * part_grad_0
    part_grad_1 = (1 / grad_norm) * part_grad_1

    # Create tensorflow layer from odl operator
    with tf.name_scope('odl_layers'):
        odl_op_layer = odl.contrib.tensorflow.as_tensorflow_layer(
            operator, 'RayTransform')
        odl_op_layer_adjoint = odl.contrib.tensorflow.as_tensorflow_layer(
            operator.adjoint, 'RayTransformAdjoint')
        odl_grad0_layer = odl.contrib.tensorflow.as_tensorflow_layer(
            part_grad_0, 'PartialGradientDim0')
        odl_grad0_layer_adjoint = odl.contrib.tensorflow.as_tensorflow_layer(
            part_grad_0.adjoint, 'PartialGradientDim0Adjoint')
        odl_grad1_layer = odl.contrib.tensorflow.as_tensorflow_layer(
            part_grad_1, 'PartialGradientDim1')
        odl_grad1_layer_adjoint = odl.contrib.tensorflow.as_tensorflow_layer(
            part_grad_1.adjoint, 'PartialGradientDim1Adjoint')

    return (odl_op_layer, odl_op_layer_adjoint, odl_grad0_layer,
            odl_grad0_layer_adjoint, odl_grad1_layer, odl_grad1_layer_adjoint,
            part_grad_0, part_grad_1, operator, pseudoinverse)
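A hypothetical call site for the function above (the space parameters are illustrative assumptions, not values from the original project):

space = odl.uniform_discr([-64, -64], [64, 64], [128, 128], dtype='float32')
(odl_op_layer, odl_op_layer_adjoint,
 odl_grad0_layer, odl_grad0_layer_adjoint,
 odl_grad1_layer, odl_grad1_layer_adjoint,
 part_grad_0, part_grad_1, operator, pseudoinverse) = get_operators(space)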
Example #3
    def init_model(self):
        self.op_mod = OperatorModule(self.op)
        self.op_adj_mod = OperatorModule(self.op.adjoint)
        partial0 = odl.PartialDerivative(self.op.domain, axis=0)
        partial1 = odl.PartialDerivative(self.op.domain, axis=1)
        self.reg_mod = OperatorModule(partial0.adjoint * partial0 +
                                      partial1.adjoint * partial1)
        if self.hyper_params['init_fbp']:
            fbp = fbp_op(
                self.non_normed_op,
                filter_type=self.hyper_params['init_filter_type'],
                frequency_scaling=self.hyper_params['init_frequency_scaling'])
            if self.normalize_by_opnorm:
                fbp = OperatorRightScalarMult(fbp, self.opnorm)
            self.init_mod = OperatorModule(fbp)
        else:
            self.init_mod = None
        self.model = IterativeNet(n_iter=self.niter,
                                  n_memory=5,
                                  op=self.op_mod,
                                  op_adj=self.op_adj_mod,
                                  op_init=self.init_mod,
                                  op_reg=self.reg_mod,
                                  use_sigmoid=self.hyper_params['use_sigmoid'],
                                  n_layer=self.hyper_params['nlayer'],
                                  internal_ch=self.hyper_params['internal_ch'],
                                  kernel_size=self.hyper_params['kernel_size'],
                                  batch_norm=self.hyper_params['batch_norm'],
                                  prelu=self.hyper_params['prelu'],
                                  lrelu_coeff=self.hyper_params['lrelu_coeff'])

        def weights_init(m):
            if isinstance(m, torch.nn.Conv2d):
                m.bias.data.fill_(0.0)
                if self.hyper_params['init_weight_xavier_normal']:
                    torch.nn.init.xavier_normal_(
                        m.weight, gain=self.hyper_params['init_weight_gain'])

        self.model.apply(weights_init)

        if self.use_cuda:
            # WARNING: using DataParallel here does not work, probably
            # because astra_cuda is not thread-safe
            self.model = self.model.to(self.device)
Example #4
def symm_derivative(space):
    Dx = odl.PartialDerivative(space,
                               0,
                               method='backward',
                               pad_mode='symmetric')
    Dy = odl.PartialDerivative(space,
                               1,
                               method='backward',
                               pad_mode='symmetric')

    # Create the symmetrized derivative operator.
    # TODO: Weighted product spaces are currently not supported in ODL, so we
    # use a workaround: the mixed-derivative row is duplicated below so that
    # it enters the norm with weight 2, emulating
    # W = odl.ProductSpace(U, 3, weighting=[1, 1, 2])
    # sym_gradient = odl.operator.ProductSpaceOperator(
    #    [[Dx, 0], [0, Dy], [0.5*Dy, 0.5*Dx]], range=W)
    return odl.operator.ProductSpaceOperator([[Dx, 0], [0, Dy],
                                              [0.5 * Dy, 0.5 * Dx],
                                              [0.5 * Dy, 0.5 * Dx]])
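A hedged usage sketch for symm_derivative (the space and vector field are illustrative assumptions): the operator maps a 2-component field to four components, with the mixed row duplicated so that it carries weight 2 in the norm.

import odl

space = odl.uniform_discr([0, 0], [1, 1], [32, 32])
E = symm_derivative(space)
vfield = E.domain.element([space.one(), space.one()])
sym = E(vfield)  # components: Dx v0, Dy v1, and the duplicated mixed part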
Example #5
def get_operators(space, geometry):
    # Create the forward operator
    operator = odl.tomo.RayTransform(space, geometry)
    pseudoinverse = odl.tomo.fbp_op(operator)

    # Normalize the operator and create pseudo-inverse
    opnorm = odl.power_method_opnorm(operator)
    operator = (1 / opnorm) * operator

    pseudoinverse = pseudoinverse * opnorm

    # Create gradient operator and normalize it
    part_grad_0 = odl.PartialDerivative(space, 0, method='forward',
                                        pad_mode='order0')
    part_grad_1 = odl.PartialDerivative(space, 1, method='forward',
                                        pad_mode='order0')

    grad_norm = odl.power_method_opnorm(
        odl.BroadcastOperator(part_grad_0, part_grad_1),
        xstart=odl.util.testutils.noise_element(space))

    part_grad_0 = (1 / grad_norm) * part_grad_0
    part_grad_1 = (1 / grad_norm) * part_grad_1

    # Create tensorflow layer from odl operator
    with tf.name_scope('odl_layers'):
        odl_op_layer = odl.contrib.tensorflow.as_tensorflow_layer(
                operator, 'RayTransform')
        odl_op_layer_adjoint = odl.contrib.tensorflow.as_tensorflow_layer(
                operator.adjoint, 'RayTransformAdjoint')
        odl_grad0_layer = odl.contrib.tensorflow.as_tensorflow_layer(
                part_grad_0, 'PartialGradientDim0')
        odl_grad0_layer_adjoint = odl.contrib.tensorflow.as_tensorflow_layer(
                part_grad_0.adjoint, 'PartialGradientDim0Adjoint')
        odl_grad1_layer = odl.contrib.tensorflow.as_tensorflow_layer(
                part_grad_1, 'PartialGradientDim1')
        odl_grad1_layer_adjoint = odl.contrib.tensorflow.as_tensorflow_layer(
                part_grad_1.adjoint, 'PartialGradientDim1Adjoint')

    return (odl_op_layer, odl_op_layer_adjoint, odl_grad0_layer,
            odl_grad0_layer_adjoint, odl_grad1_layer, odl_grad1_layer_adjoint,
            operator, pseudoinverse)
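A hedged example of constructing the inputs for this variant (the geometry is an illustrative assumption, not taken from the original script):

space = odl.uniform_discr([-64, -64], [64, 64], [128, 128], dtype='float32')
geometry = odl.tomo.parallel_beam_geometry(space, num_angles=60)
layers_and_ops = get_operators(space, geometry)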
Example #6
def jtv(operator, data, alpha, sinfo, eta, nonneg=True, datafit=None):

    space = operator.domain

    Dx = odl.PartialDerivative(space, 0)
    Dy = odl.PartialDerivative(space, 1)
    Z = odl.ZeroOperator(space)
    D = odl.BroadcastOperator(Dx, Dy, Z, Z)
    A = odl.BroadcastOperator(operator, D)

    F1 = get_data_fit(datafit, data)
    Q = odl.BroadcastOperator(Z, Z, Dx, Dy)
    N = odl.solvers.GroupL1Norm(D.range)
    F2 = alpha * N.translated(-eta * Q(sinfo))
    F = odl.solvers.SeparableSum(F1, F2)

    if nonneg:
        G = odl.solvers.IndicatorNonnegativity(space)
    else:
        G = odl.solvers.ZeroFunctional(space)

    return G, F, A
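The triple (G, F, A) returned here matches ODL's primal-dual hybrid gradient solver, which minimizes f(x) + g(L x) for pdhg(x, f, g, L, ...); here f = G, g = F and L = A. A hedged wiring sketch; the step sizes and iteration count are illustrative, and operator, data and sinfo must come from the surrounding script:

G, F, A = jtv(operator, data, alpha=0.1, sinfo=sinfo, eta=1.0)
op_norm = 1.1 * odl.power_method_opnorm(
    A, xstart=odl.phantom.white_noise(A.domain), maxiter=20)
x = A.domain.zero()
odl.solvers.pdhg(x, G, F, A, niter=200,
                 tau=1.0 / op_norm, sigma=1.0 / op_norm)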
Example #7
groundtruth = X.element(image_gray)
clim = [0, 1]

# create data
data = odl.phantom.white_noise(X, mean=groundtruth, stddev=0.1, seed=1807)

# save images and data
if not os.path.exists('{}/groundtruth.png'.format(folder_main)):
    misc.save_image(groundtruth, 'groundtruth', folder_main, 1, clim=clim)
    misc.save_image(data, 'data', folder_main, 2, clim=clim)

alpha = .12  # set regularisation parameter
gamma = 0.99  # gamma^2 is the upper bound in the step-size constraint

# create forward operators
Dx = odl.PartialDerivative(X, 0, pad_mode='symmetric')
Dy = odl.PartialDerivative(X, 1, pad_mode='symmetric')
A = odl.BroadcastOperator(Dx, Dy)
Y = A.range

# set up functional f
f = odl.solvers.SeparableSum(*[odl.solvers.L1Norm(Yi) for Yi in Y])
# set up functional g
g = 1 / (2 * alpha) * odl.solvers.L2NormSquared(X).translated(data)

obj_fun = f * A + g  # define objective function
mu_g = 1 / alpha  # strong convexity constant of g

# create target / compute a saddle point
file_target = '{}/target.npy'.format(folder_main)
if not os.path.exists(file_target):
    pass  # saddle-point computation truncated in this excerpt
Example #8

# NOTE: `space` and `angle_partition` are defined earlier in the original
# script; this excerpt begins partway through it.
detector_partition = odl.uniform_partition(-360, 360, 1000)
geometry = odl.tomo.FanFlatGeometry(angle_partition,
                                    detector_partition,
                                    src_radius=500,
                                    det_radius=500)

operator = odl.tomo.RayTransform(space, geometry)
pseudoinverse = odl.tomo.fbp_op(operator)

# Create tensorflow layer from odl operator
odl_op_layer = odl.contrib.tensorflow.as_tensorflow_layer(
    operator, 'RayTransform')
odl_op_layer_adjoint = odl.contrib.tensorflow.as_tensorflow_layer(
    operator.adjoint, 'RayTransformAdjoint')

partial0 = odl.PartialDerivative(space, axis=0)
partial1 = odl.PartialDerivative(space, axis=1)
odl_op_regularizer = odl.contrib.tensorflow.as_tensorflow_layer(
    partial0.adjoint * partial0 + partial1.adjoint * partial1, 'Regularizer')

# User-selected parameters
n_data = 1
n_memory = 5
n_iter = 10
mu_water = 0.02
photons_per_pixel = 10000
epsilon = 1.0 / photons_per_pixel

# Helper functions to load data from disk; please excuse the ugly code.
global f
f = []
Example #9
phantom = odl.phantom.tgv_phantom(U)
phantom.show(title='Phantom')

# Create sinogram of forward projected phantom with noise
data = A(phantom)
data += odl.phantom.white_noise(A.range) * np.mean(data) * 0.1

data.show(title='Simulated data')

# --- Set up the inverse problem --- #

# Initialize gradient operator
G = odl.Gradient(U, method='forward', pad_mode='symmetric')
V = G.range

Dx = odl.PartialDerivative(U, 0, method='backward', pad_mode='symmetric')
Dy = odl.PartialDerivative(U, 1, method='backward', pad_mode='symmetric')

# Create the symmetrized derivative operator.
# TODO: Weighted product spaces are currently not supported in ODL, so we use
# a workaround: the mixed-derivative row is duplicated below so that it enters
# the norm with weight 2, emulating
# W = odl.ProductSpace(U, 3, weighting=[1, 1, 2])
# sym_gradient = odl.operator.ProductSpaceOperator(
#     [[Dx, 0], [0, Dy], [0.5*Dy, 0.5*Dx]], range=W)
E = odl.operator.ProductSpaceOperator(
    [[Dx, 0], [0, Dy], [0.5*Dy, 0.5*Dx], [0.5*Dy, 0.5*Dx]])
W = E.range

# Create the domain of the problem, given by the reconstruction space and the
# range of the gradient on the reconstruction space.
domain = odl.ProductSpace(U, V)
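The combined TGV operator on this domain can be assembled exactly as in Example #13 below; a short hedged sketch:

Id = odl.IdentityOperator(V)
D = odl.ProductSpaceOperator([[G, -Id], [0, E]])
# D maps (u, v) to (grad(u) - v, E(v)); its domain equals `domain` above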
Example #10
import odl.contrib.datasets.images as images
import numpy as np

# set ground truth and data
image_gray = images.building(gray=True)
X = odl.uniform_discr([0, 0], image_gray.shape, image_gray.shape)
groundtruth = X.element(image_gray)
data = odl.phantom.white_noise(X, mean=groundtruth, stddev=0.1, seed=1807)

# set parameter
alpha = .12  # regularisation parameter
nepoch = 100

# set functionals and operator
A = odl.BroadcastOperator(
    *[odl.PartialDerivative(X, d, pad_mode='symmetric') for d in [0, 1]])
f = odl.solvers.SeparableSum(*[odl.solvers.L1Norm(Yi) for Yi in A.range])
g = 1 / (2 * alpha) * odl.solvers.L2NormSquared(X).translated(data)

# set sampling
n = 2  # number of subsets
prob = [1 / n] * n  # probability that a subset gets selected
S = [[0], [1]]  # all possible subsets to select from


def fun_select(k):  # subset selection function
    return S[int(np.random.choice(n, 1, p=prob))]
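An illustrative check (not part of the original script): each call to fun_select draws one of the singleton subsets uniformly at random, which is the sampling pattern a stochastic solver such as SPDHG expects.

np.random.seed(0)
samples = [fun_select(k) for k in range(5)]  # e.g. [[0], [1], [1], [0], [1]]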


# set parameters for algorithm
Ai_norm = [2, 2]
Example #11
    def __init__(self, final=True):
        # Ensure that the needed folders are in place
        self.create_folders()

        # create a tensorflow session
        self.sess = tf.InteractiveSession()

        # load MNIST data
        mnist = learn.datasets.load_dataset("mnist")
        self.train_data = mnist.train.images  # Returns np.array
        self.train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
        self.number_training_data = self.train_data.shape[0]
        self.eval_data = mnist.test.images  # Returns np.array
        self.eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)
        self.number_eval_data = self.eval_data.shape[0]

        # ODL operator setup
        grid_endpoints = math.floor(self.pic_size / 2) + 1
        self.space = odl.uniform_discr([-grid_endpoints, -grid_endpoints],
                                       [grid_endpoints, grid_endpoints],
                                       [self.pic_size, self.pic_size],
                                       dtype='float32')
        angle_partition = odl.uniform_partition(0, 2 * np.pi, 5)
        detector_partition = odl.uniform_partition(-36, 36, 25)

        # generate geometry with the uniform angle distributions defined above
        geometry = odl.tomo.FanFlatGeometry(angle_partition,
                                            detector_partition,
                                            src_radius=4 * self.pic_size,
                                            det_radius=4 * self.pic_size)

        # define radon transf and fbp
        self.ray_transf = odl.tomo.RayTransform(self.space, geometry)
        self.fbp = odl.tomo.fbp_op(self.ray_transf)

        # Gradient of Regulariser term
        partial0 = odl.PartialDerivative(self.space, axis=0)
        partial1 = odl.PartialDerivative(self.space, axis=1)
        self.grad_reg_op = partial0.adjoint * partial0 + partial1.adjoint * partial1

        # generate Tensorflow layers
        self.tf_ray = odl.contrib.tensorflow.as_tensorflow_layer(
            self.ray_transf, 'RayTransform')
        self.tf_ray_adj = odl.contrib.tensorflow.as_tensorflow_layer(
            self.ray_transf.adjoint, 'RayTransformAdj')
        self.tf_reg = odl.contrib.tensorflow.as_tensorflow_layer(
            self.grad_reg_op, 'Regulariser')

        # placeholders for forward model
        self.x_ini = tf.placeholder(
            shape=[None, self.pic_size, self.pic_size, 1],
            dtype=tf.float32,
            name='InitialGuess')
        self.x_true = tf.placeholder(
            shape=[None, self.pic_size, self.pic_size, 1],
            dtype=tf.float32,
            name='GroundTruth')
        self.y = tf.placeholder(
            shape=[None, self.ray_transf.range.shape[0],
                   self.ray_transf.range.shape[1], 1],
            dtype=tf.float32,
            name='Measurement_Data')
        self.labels = tf.placeholder(shape=[None],
                                     dtype=tf.float32,
                                     name='CorrectLabels')
        self.ohl = tf.one_hot(tf.cast(self.labels, tf.int32), depth=10)

        # set up the forward model
        x = self.x_ini
        self.weights_recon = self.get_weights()
        for i in range(self.iterations):
            # calculate the gradient of the data error
            with tf.name_scope('Data_gradient'):
                measurement = tf.exp(-self.attenuation_coeff * self.tf_ray(x))
                g_x = self.attenuation_coeff * self.tf_ray_adj(self.y -
                                                               measurement)
                tf.summary.scalar('Data_gradient_Norm', tf.norm(g_x))
                g_reg = self.tf_reg(x)
                tf.summary.scalar('Regulariser_gradient_Norm', tf.norm(g_reg))

                # apply the network model defined in self.forward_model
                x_update = self.forward_model(x, g_x, g_reg,
                                              self.weights_recon)

                tf.summary.scalar('x_update', tf.norm(x_update))
                x = x + x_update
        self.result = x

        # define L2 loss function
        with tf.name_scope('L2-Loss'):
            self.lossL2 = tf.reduce_mean(
                tf.reduce_sum((self.result - self.x_true)**2, axis=(1, 2)))

        self.global_step = tf.Variable(0, name='global_step', trainable=False)

        # Optimizer for L2 loss
        with tf.name_scope('L2-optimizer'):
            self.optimizer_L2 = tf.train.AdamOptimizer(
                self.learning_rate).minimize(self.lossL2,
                                             global_step=self.global_step,
                                             var_list=self.weights_recon)

        # Finish setup. This should always run unless __init__ is called from
        # a subclass's __init__.
        if final:
            self.finish_setup()
Example #12
# Functionals and operators for the total variation. This is the l1 norm of the
# (discretized) gradient of the reconstruction. For each of the dimensions
# we create two functionals and two operators.

# Start with empty lists ...
tv_functionals = []
tv_operators = []
tv_stepsizes = []

# ... and for each dimension of the reconstruction space ...
reco_shape = reco_space.shape
reco_dim = len(reco_shape)
for dim in range(reco_dim):
    # ... add two operators taking only the even and odd elements,
    # respectively, in that dimension.
    partial_der = odl.PartialDerivative(reco_space, dim, pad_mode='order0')
    all_points = list(np.ndindex(reco_shape))
    even_pts = [list(p) for p in all_points if p[dim] % 2 == 0]
    even_pts = np.array(even_pts).T.tolist()
    odd_pts = [list(p) for p in all_points if p[dim] % 2 == 1]
    odd_pts = np.array(odd_pts).T.tolist()
    op1 = reco_space.cell_sides[dim] * odl.SamplingOperator(
        reco_space, even_pts) * partial_der
    op2 = reco_space.cell_sides[dim] * odl.SamplingOperator(
        reco_space, odd_pts) * partial_der
    tv_functionals += [
        odl.solvers.L1Norm(op1.range),
        odl.solvers.L1Norm(op2.range)
    ]
    tv_operators += [op1, op2]
    tv_stepsizes += [0.5 / reco_shape[dim], 0.5 / reco_shape[dim]]
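A hedged sketch of how these lists might feed a primal-dual solver that takes one step size per operator; the solver choice, tau, niter and the omission of the data-fit terms are illustrative simplifications, not from the original script:

x = reco_space.zero()
odl.solvers.douglas_rachford_pd(
    x, odl.solvers.ZeroFunctional(reco_space), tv_functionals, tv_operators,
    niter=100, tau=0.1, sigma=tv_stepsizes)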
Example #13
        background = Y.element(background)
        factors = Y.element(factors)

        # define operator
        K = mMR.operator_mmr(factors=factors)
        U = K.domain
        norm_K = misc.norm(K, '{}/norm_1subset.npy'.format(folder_norms))

        KL = misc.kullback_leibler(Y, data, background)

        gradient = odl.Gradient(U)
        Id = odl.IdentityOperator(gradient.range)

        PD = [
            odl.PartialDerivative(U,
                                  i,
                                  method='backward',
                                  pad_mode='symmetric') for i in range(3)
        ]

        E = odl.operator.ProductSpaceOperator([[PD[0], 0, 0], [0, PD[1], 0],
                                               [0, 0, PD[2]],
                                               [PD[1], PD[0], 0],
                                               [PD[2], 0, PD[0]],
                                               [0, PD[2], PD[1]]])

        D = odl.ProductSpaceOperator([[gradient, -Id], [0, E]])
        norm_D = misc.norm(D, '{}/norm_D.npy'.format(folder_param))
        norm_vfield = odl.PointwiseNorm(gradient.range)

        def save_image(x, n, f):
            misc.save_image(x[0].asarray(), n, f, planes=planes, clim=clim)