Example #1
    def _reconstruct(self, observation, *args, **kwargs):
        xspace = self.ray_trafo.domain
        yspace = self.ray_trafo.range

        grad = odl.Gradient(self.ray_trafo.domain)

        # Assemble all operators into a list.
        lin_ops = [self.ray_trafo, grad]

        # Create functionals for the l2 distance and l1 norm.
        g_funcs = [odl.solvers.L2NormSquared(yspace).translated(observation),
                   self.gamma * odl.solvers.L1Norm(grad.range)]

        # Functional of the bound constraint 0 <= x <= 1
        f = odl.solvers.IndicatorBox(xspace, 0, 1)

        # Find scaling constants so that the solver converges.
        # See the douglas_rachford_pd documentation for more information.
        xstart = self.fbp_op(observation)
        opnorm_A = odl.power_method_opnorm(self.ray_trafo, xstart=xstart)
        opnorm_grad = odl.power_method_opnorm(grad, xstart=xstart)
        sigma = [1 / opnorm_A ** 2, 1 / opnorm_grad ** 2]
        tau = 1.0

        # Solve using the Douglas-Rachford Primal-Dual method
        x = xspace.zero()
        odl.solvers.douglas_rachford_pd(x, f, g_funcs, lin_ops,
                                        tau=tau,
                                        sigma=sigma,
                                        niter=self.iterations)
        return x
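The sigma choice above makes the step sizes satisfy the douglas_rachford_pd convergence criterion by construction: with sigma_i = 1 / ||L_i||^2 and tau = 1, the check value tau * sum_i(sigma_i * ||L_i||^2) is exactly 2, safely below the bound of 4 that later examples test explicitly. A minimal sketch of that check, with hypothetical values standing in for the computed norms:

# Hypothetical values standing in for the computed operator norms
opnorm_A, opnorm_grad = 250.0, 3.5
tau = 1.0
sigma = [1 / opnorm_A ** 2, 1 / opnorm_grad ** 2]

# tau * sum_i(sigma_i * ||L_i||^2) reduces to 1 + 1 = 2 < 4,
# independently of the actual norm values
check_value = tau * (sigma[0] * opnorm_A ** 2 + sigma[1] * opnorm_grad ** 2)
assert check_value < 4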
Example #2
    def _reconstruct(self, observation, out):
        observation = self.observation_space.element(observation)
        out[:] = self.x0
        gradient = Gradient(self.op.domain)
        L = [self.op, gradient]
        f = ZeroFunctional(self.op.domain)
        l2_norm = 0.5 * L2NormSquared(self.op.range).translated(observation)
        l12_norm = self.lam * GroupL1Norm(gradient.range)
        g = [l2_norm, l12_norm]
        op_norm = power_method_opnorm(self.op, maxiter=20)
        gradient_norm = power_method_opnorm(gradient, maxiter=20)
        sigma_ray_trafo = 45.0 / op_norm**2
        sigma_gradient = 45.0 / gradient_norm**2
        sigma = [sigma_ray_trafo, sigma_gradient]
        h = ZeroFunctional(self.op.domain)
        forward_backward_pd(out,
                            f,
                            g,
                            L,
                            h,
                            self.tau,
                            sigma,
                            self.niter,
                            callback=self.callback)

        return out
Example #3
def get_operators(space):
    # Create the forward operator
    filter_width = 4  # standard deviation of the Gaussian filter
    ft = odl.trafos.FourierTransform(space)
    c = filter_width**2 / 4.0**2
    gaussian = ft.range.element(lambda x: np.exp(-(x[0]**2 + x[1]**2) * c))
    operator = ft.inverse * gaussian * ft

    # Normalize the operator and create pseudo-inverse
    opnorm = odl.power_method_opnorm(operator)
    operator = (1 / opnorm) * operator

    # A good pseudo-inverse is not needed, but keep it to have the same interface.
    pseudoinverse = odl.ZeroOperator(space)

    # Create gradient operator and normalize it
    part_grad_0 = odl.PartialDerivative(space,
                                        0,
                                        method='forward',
                                        pad_mode='order0')
    part_grad_1 = odl.PartialDerivative(space,
                                        1,
                                        method='forward',
                                        pad_mode='order0')

    grad_norm = odl.power_method_opnorm(
        odl.BroadcastOperator(part_grad_0, part_grad_1),
        xstart=odl.util.testutils.noise_element(space))

    part_grad_0 = (1 / grad_norm) * part_grad_0
    part_grad_1 = (1 / grad_norm) * part_grad_1

    # Create tensorflow layer from odl operator
    with tf.name_scope('odl_layers'):
        odl_op_layer = odl.contrib.tensorflow.as_tensorflow_layer(
            operator, 'RayTransform')
        odl_op_layer_adjoint = odl.contrib.tensorflow.as_tensorflow_layer(
            operator.adjoint, 'RayTransformAdjoint')
        odl_grad0_layer = odl.contrib.tensorflow.as_tensorflow_layer(
            part_grad_0, 'PartialGradientDim0')
        odl_grad0_layer_adjoint = odl.contrib.tensorflow.as_tensorflow_layer(
            part_grad_0.adjoint, 'PartialGradientDim0Adjoint')
        odl_grad1_layer = odl.contrib.tensorflow.as_tensorflow_layer(
            part_grad_1, 'PartialGradientDim1')
        odl_grad1_layer_adjoint = odl.contrib.tensorflow.as_tensorflow_layer(
            part_grad_1.adjoint, 'PartialGradientDim1Adjoint')

    return (odl_op_layer, odl_op_layer_adjoint, odl_grad0_layer,
            odl_grad0_layer_adjoint, odl_grad1_layer, odl_grad1_layer_adjoint,
            part_grad_0, part_grad_1, operator, pseudoinverse)
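The blurring operator is built via the convolution theorem: ft.inverse * gaussian * ft multiplies by a Gaussian in the Fourier domain, which equals convolving with its inverse transform in image space. A minimal NumPy sketch of that identity in 1-D (the discrete, circular version):

import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal(64)
k = rng.standard_normal(64)

# Circular convolution computed directly ...
direct = np.array([sum(x[j] * k[(i - j) % 64] for j in range(64))
                   for i in range(64)])
# ... and via pointwise multiplication in the Fourier domain
via_ft = np.fft.ifft(np.fft.fft(x) * np.fft.fft(k)).real
assert np.allclose(direct, via_ft)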
Example #4
    def __init__(self, ray_trafo, hyper_params=None, iterations=None, gamma=None, **kwargs):
        """
        Parameters
        ----------
        ray_trafo : `odl.tomo.operators.RayTransform`
            The forward operator
        """

        super().__init__(
            reco_space=ray_trafo.domain, observation_space=ray_trafo.range,
            hyper_params=hyper_params, **kwargs)

        self.ray_trafo = ray_trafo
        self.domain_shape = ray_trafo.domain.shape
        self.opnorm = odl.power_method_opnorm(ray_trafo)
        self.fbp_op = fbp_op(
            ray_trafo, frequency_scaling=0.1, filter_type='Hann')

        if iterations is not None:
            self.iterations = iterations
            if (hyper_params or {}).get('iterations') is not None:
                warn("hyper parameter 'iterations' overridden by constructor argument")

        if gamma is not None:
            self.gamma = gamma
            if (hyper_params or {}).get('gamma') is not None:
                warn("hyper parameter 'gamma' overridden by constructor argument")
Example #5
    def reconstruction(proj_data, lam):
        lam = float(lam)

        print('lam = {}'.format(lam))

        # We do not allow negative parameters, so return a bogus result
        if lam <= 0:
            return np.inf * space.one()

        # Construct operators and functionals
        gradient = odl.Gradient(space)
        op = odl.BroadcastOperator(ray_trafo, gradient)

        f = odl.solvers.ZeroFunctional(op.domain)

        l2_norm = odl.solvers.L2NormSquared(
            ray_trafo.range).translated(proj_data)
        l1_norm = lam * odl.solvers.GroupL1Norm(gradient.range)
        g = odl.solvers.SeparableSum(l2_norm, l1_norm)

        # Select solver parameters
        op_norm = 1.5 * odl.power_method_opnorm(op, maxiter=10)

        # Run the algorithm
        x = op.domain.zero()
        odl.solvers.pdhg(x,
                         f,
                         g,
                         op,
                         niter=200,
                         tau=1.0 / op_norm,
                         sigma=1.0 / op_norm)

        return x
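The pdhg step sizes follow the usual rule tau * sigma * ||K||^2 < 1 (stated explicitly in the Chambolle-Pock examples further down); inflating the norm estimate by 1.5 leaves a comfortable margin even if the power method underestimates the true norm. A minimal sketch of the check, with a hypothetical true norm:

true_norm = 80.0              # hypothetical true value of ||K||
op_norm = 1.5 * true_norm     # inflated estimate, as in the code above
tau = sigma = 1.0 / op_norm

# tau * sigma * ||K||^2 = 1 / 1.5**2 ~= 0.44 < 1
assert tau * sigma * true_norm ** 2 < 1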
Example #6
 def _reconstruct(self, observation, out):
     observation = self.observation_space.element(observation)
     out_ = out
     if out not in self.reco_space:
         out_ = self.reco_space.zero()
     out_[:] = self.x0
     gradient = Gradient(self.op.domain)
     L = BroadcastOperator(self.op, gradient)
     f = ZeroFunctional(self.op.domain)
     l2_norm = L2NormSquared(self.op.range).translated(observation)
     l1_norm = self.lam * L1Norm(gradient.range)
     g = SeparableSum(l2_norm, l1_norm)
     op_norm = 1.1 * power_method_opnorm(L, maxiter=20)
     sigma = self.tau * op_norm**2
     admm.admm_linearized(out_,
                          f,
                          g,
                          L,
                          self.tau,
                          sigma,
                          self.niter,
                          callback=self.callback)
     if out not in self.reco_space:
         out[:] = out_
     return out
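The step sizes for admm_linearized here (and in Example #29 below) are coupled so that tau * ||L||^2 <= sigma, which setting sigma = tau * op_norm**2 with the 1.1-inflated norm estimate guarantees. A minimal sketch of the check, with a hypothetical true norm:

true_norm = 12.0              # hypothetical true value of ||L||
op_norm = 1.1 * true_norm     # inflated estimate, as in the code above
tau = 0.1                     # any positive tau works here
sigma = tau * op_norm ** 2

# tau * ||L||^2 <= sigma holds because op_norm >= true_norm
assert tau * true_norm ** 2 <= sigma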
Example #7
    def tv_reconstruction(self, y, param=def_lambda):
        """
        NOTE: I'll follow the example from the odl github: https://github.com/odlgroup/odl/blob/master/examples/solvers/pdhg_tomography.py
        NOTE: The only thing I changed was to swap what g and functional were supposed to be. That's it.
        """
        # internal method to evaluate tv on a single element y with shape [width, height]

        # the operators
        gradients = odl.Gradient(self.space, method='forward')
        broad_op = odl.BroadcastOperator(self.operator, gradients)
        # define empty functional to fit the chambolle_pock framework
        functional = odl.solvers.ZeroFunctional(broad_op.domain)

        # the norms
        l1_norm = param * odl.solvers.L1Norm(gradients.range)
        l2_norm_squared = odl.solvers.L2NormSquared(self.range).translated(y)
        g = odl.solvers.SeparableSum(l2_norm_squared, l1_norm)

        # Find parameters
        op_norm = 1.1 * odl.power_method_opnorm(broad_op)
        tau = 10.0 / op_norm
        sigma = 0.1 / op_norm
        niter = 200

        # find starting point
        x = self.space.element(
            self.model.inverse(np.expand_dims(y, axis=-1))[..., 0])

        # Run the optimization algorithm
        # odl.solvers.chambolle_pock_solver(x, functional, g, broad_op, tau = tau, sigma = sigma, niter=niter)
        odl.solvers.pdhg(x, functional, g, broad_op, tau=tau, sigma=sigma, niter=niter)
        return x
Example #8
    def tv_reconstruction(self, y, param=def_lambda):
        # internal method to evaluate tv on a single element y with shape [width, height]

        # the operators
        gradients = odl.Gradient(self.space, method='forward')
        broad_op = odl.BroadcastOperator(self.operator, gradients)
        # define empty functional to fit the chambolle_pock framework
        f = odl.solvers.ZeroFunctional(broad_op.domain)

        # the norms
        l1_norm = param * odl.solvers.L1Norm(gradients.range)
        l2_norm_squared = odl.solvers.L2NormSquared(self.range).translated(y)
        g = odl.solvers.SeparableSum(l2_norm_squared, l1_norm)

        # Find parameters
        op_norm = 1.1 * odl.power_method_opnorm(broad_op)
        tau = 10.0 / op_norm
        sigma = 0.1 / op_norm
        niter = 200

        # find starting point
        x = self.space.element(
            self.model.inverse(np.expand_dims(y, axis=-1))[..., 0])

        # Run the optimization algorithm (note: pdhg expects the zero
        # functional first, the composed functional second)
        # odl.solvers.chambolle_pock_solver(x, g, f, broad_op, tau=tau, sigma=sigma, niter=niter)
        odl.solvers.pdhg(x,
                         f,
                         g,
                         broad_op,
                         tau=tau,
                         sigma=sigma,
                         niter=niter)
        return x
Example #9
def check_params(res_level):
    """Check the convergence criterion for the DR solver at ``res_level``."""
    ray_trafo = odl.tomo.RayTransform(res_level.space, geometry,
                                      impl='astra_cuda')
    ray_trafo_norm = 1.2 * odl.power_method_opnorm(ray_trafo, maxiter=4)
    print('norm of the ray transform: {}'.format(ray_trafo_norm))
    grad = odl.Gradient(res_level.space, pad_mode='order1')
    grad_xstart = odl.phantom.shepp_logan(grad.domain, modified=True)
    grad_norm = 1.5 * odl.power_method_opnorm(grad, xstart=grad_xstart,
                                              maxiter=10)
    print('norm of the gradient: {}'.format(grad_norm))

    # Here we check the convergence criterion for the Douglas-Rachford solver
    check_value = tau * (res_level.sigma_ray * ray_trafo_norm ** 2 +
                         res_level.sigma_grad * grad_norm ** 2)
    print('check_value = {}, must be < 4 for convergence'.format(check_value))
    convergence_criterion = check_value < 4
    assert convergence_criterion
Example #10
def get_operators(space, geometry):
    # Create the forward operator
    operator = odl.tomo.RayTransform(space, geometry)
    pseudoinverse = odl.tomo.fbp_op(operator)

    # Normalize the operator and create pseudo-inverse
    opnorm = odl.power_method_opnorm(operator)
    operator = (1 / opnorm) * operator

    pseudoinverse = pseudoinverse * opnorm

    # Create gradient operator and normalize it
    part_grad_0 = odl.PartialDerivative(space, 0, method='forward',
                                        pad_mode='order0')
    part_grad_1 = odl.PartialDerivative(space, 1, method='forward',
                                        pad_mode='order0')

    grad_norm = odl.power_method_opnorm(
        odl.BroadcastOperator(part_grad_0, part_grad_1),
        xstart=odl.util.testutils.noise_element(space))

    part_grad_0 = (1 / grad_norm) * part_grad_0
    part_grad_1 = (1 / grad_norm) * part_grad_1

    # Create tensorflow layer from odl operator
    with tf.name_scope('odl_layers'):
        odl_op_layer = odl.contrib.tensorflow.as_tensorflow_layer(
                operator, 'RayTransform')
        odl_op_layer_adjoint = odl.contrib.tensorflow.as_tensorflow_layer(
                operator.adjoint, 'RayTransformAdjoint')
        odl_grad0_layer = odl.contrib.tensorflow.as_tensorflow_layer(
                part_grad_0, 'PartialGradientDim0')
        odl_grad0_layer_adjoint = odl.contrib.tensorflow.as_tensorflow_layer(
                part_grad_0.adjoint, 'PartialGradientDim0Adjoint')
        odl_grad1_layer = odl.contrib.tensorflow.as_tensorflow_layer(
                part_grad_1, 'PartialGradientDim1')
        odl_grad1_layer_adjoint = odl.contrib.tensorflow.as_tensorflow_layer(
                part_grad_1.adjoint, 'PartialGradientDim1Adjoint')

    return (odl_op_layer, odl_op_layer_adjoint, odl_grad0_layer,
            odl_grad0_layer_adjoint, odl_grad1_layer, odl_grad1_layer_adjoint,
            operator, pseudoinverse)
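Note the compensating scaling: the operator is divided by opnorm and the FBP pseudo-inverse multiplied by it, so the composition pseudoinverse(operator(x)) is unchanged by the normalization. A toy sketch of the cancellation (the 2x2 MatrixOperator and its exact inverse are stand-ins chosen for illustration):

import numpy as np
import odl

M = np.array([[2.0, 1.0],
              [0.0, 2.0]])
A = odl.MatrixOperator(M)
A_pinv = odl.MatrixOperator(np.linalg.inv(M))  # exact inverse as 'pseudo-inverse'

opnorm = odl.power_method_opnorm(A, xstart=A.domain.element([1.0, 1.0]),
                                 maxiter=50)
A_scaled = (1 / opnorm) * A
A_pinv_scaled = opnorm * A_pinv

x = A.domain.element([1.0, -2.0])
print(A_pinv_scaled(A_scaled(x)))  # recovers [1.0, -2.0] up to rounding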
Example #11
def norm(op, file_norm):
    if not os.path.exists(file_norm):
        print('file {} does not exist. Compute it.'.format(file_norm))
        rnd = odl.phantom.uniform_noise(op.domain)
        norm_op = 1.05 * odl.power_method_opnorm(op, maxiter=100, xstart=rnd)
        np.save(file_norm, norm_op)
    else:
        print('file {} exists. Load it.'.format(file_norm))
        norm_op = np.load(file_norm)

    return norm_op
Example #12
    def __init__(self, size):
        super(CT, self).__init__(size)
        self.space = odl.uniform_discr([-64, -64], [64, 64], [self.size[0], self.size[1]],
                                       dtype='float32')

        geometry = odl.tomo.parallel_beam_geometry(self.space, num_angles=30)
        op = odl.tomo.RayTransform(self.space, geometry)

        # Ensure operator has fixed operator norm for scale invariance
        opnorm = odl.power_method_opnorm(op)
        self.operator = (1 / opnorm) * op
        self.fbp = opnorm * odl.tomo.fbp_op(op)
        self.adjoint_operator = (1 / opnorm) * op.adjoint

        # Create tensorflow layer from odl operator
        self.ray_transform = as_tensorflow_layer(self.operator, 'RayTransform')
Example #13
 def _reconstruct(self, observation, out):
     observation = self.observation_space.element(observation)
     out[:] = self.x0
     l2_norm = L2NormSquared(self.op.range)
     discrepancy = l2_norm * (self.op - observation)
     gradient = Gradient(self.op.domain)
     l1_norm = GroupL1Norm(gradient.range)
     smoothed_l1 = MoreauEnvelope(l1_norm, sigma=0.03)
     regularizer = smoothed_l1 * gradient
     f = discrepancy + self.lam * regularizer
     opnorm = power_method_opnorm(self.op)
     hessinv_estimate = ScalingOperator(self.op.domain, 1 / opnorm**2)
     newton.bfgs_method(f,
                        out,
                        maxiter=self.niter,
                        hessinv_estimate=hessinv_estimate,
                        callback=self.callback)
     return out
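The MoreauEnvelope wrapper is what makes a quasi-Newton method applicable here: it replaces the non-differentiable l1 term by a smooth approximation. Pointwise, the Moreau envelope of the absolute value is the Huber function; a small self-contained sketch of that closed form (huber is an illustrative helper, not part of odl):

import numpy as np

def huber(x, sigma):
    """Moreau envelope of abs: min_y (|y| + (x - y)**2 / (2 * sigma))."""
    return np.where(np.abs(x) <= sigma,
                    x ** 2 / (2 * sigma),    # smooth quadratic near zero
                    np.abs(x) - sigma / 2)   # linear tail, like |x|

print(huber(np.array([-0.1, 0.0, 0.01, 0.1]), sigma=0.03))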
Example #14
    def __init__(self, size):
        self.space = odl.uniform_discr([-256, -256], [256, 256], [512, 512],
                                       dtype='float32')

        geometry = odl.tomo.parallel_beam_geometry(self.space, num_angles=360)
        op = odl.tomo.RayTransform(self.space, geometry)

        # Ensure operator has fixed operator norm for scale invariance
        opnorm = odl.power_method_opnorm(op)
        self.operator = (1 / opnorm) * op
        self.fbp = (opnorm) * odl.tomo.fbp_op(op)
        self.adjoint_operator = (1 / opnorm) * op.adjoint

        # Create tensorflow layer from odl operator
        self.ray_transform = odl.contrib.tensorflow.as_tensorflow_layer(
            self.operator, 'RayTransform')
        self.ray_transform_adj = odl.contrib.tensorflow.as_tensorflow_layer(
            self.adjoint_operator, 'AdjRayTransform')
Example #15
def check_params(res_level):
    """Check the convergence criterion for the DR solver at ``res_level``."""
    grad = odl.Gradient(res_level.space, pad_mode='order1')
    grad_xstart = odl.phantom.shepp_logan(grad.domain, modified=True)
    grad_norm = 1.5 * odl.power_method_opnorm(grad, xstart=grad_xstart,
                                              maxiter=10)
    print('norm of the gradient: {}'.format(grad_norm))

    res_level = ResLevel(res_level.space, res_level.num_iter,
                         res_level.regularizer, res_level.reg_param,
                         sigma_ray=1.5 / tau,
                         sigma_grad=1.5 / (tau * grad_norm ** 2))

    # Here we check the convergence criterion for the Douglas-Rachford solver
    check_value = tau * (res_level.sigma_ray +
                         res_level.sigma_grad * grad_norm ** 2)
    print('check_value = {}, must be < 4 for convergence'.format(check_value))
    convergence_criterion = check_value < 4
    assert convergence_criterion

    return res_level
Example #16
    def tv_reconstruction(self, y, param=1000000):
        # the operators
        gradients = odl.Gradient(self.space, method='forward')
        operator = odl.BroadcastOperator(self.ray_transf, gradients)
        # define empty functional to fit the chambolle_pock framework
        g = odl.solvers.ZeroFunctional(operator.domain)

        # compute transformed data
        # ensure y stays away from 0
        y_cut = np.maximum(y, 0.03)
        data = -(np.log(y_cut)) / self.attenuation_coeff

        # the norms
        l1_norm = param * odl.solvers.L1Norm(gradients.range)
        l2_norm_squared = odl.solvers.L2NormSquared(
            self.ray_transf.range).translated(data)
        functional = odl.solvers.SeparableSum(l2_norm_squared, l1_norm)

        # Find parameters
        op_norm = 1.1 * odl.power_method_opnorm(operator)
        tau = 10.0 / op_norm
        sigma = 0.1 / op_norm
        niter = 5000

        # find starting point
        x = self.fbp(data)

        # Run the optimization algorithm
        odl.solvers.chambolle_pock_solver(x,
                                          functional,
                                          g,
                                          operator,
                                          tau=tau,
                                          sigma=sigma,
                                          niter=niter)

        # plot results
        plt.figure(1)
        plt.imshow(x)
        plt.show()
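The preprocessing step inverts the Beer-Lambert law: measured intensities y = exp(-mu * Ax) are mapped back to line integrals by -log(y)/mu, and clipping at 0.03 keeps the logarithm finite for near-zero intensities. A quick check with hypothetical numbers:

import numpy as np

mu = 0.2                                   # hypothetical attenuation coefficient
line_integrals = np.array([0.0, 1.5, 3.0])
y = np.exp(-mu * line_integrals)           # simulated intensities
recovered = -np.log(np.maximum(y, 0.03)) / mu
print(recovered)  # [0.  1.5 3. ]; the clipping only matters near zero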
Example #17
    def reconstruction(proj_data, parameters):
        # Extract the separate parameters
        lam, sigma = parameters

        print('lam = {}, sigma = {}'.format(lam, sigma))

        # We do not allow negative parameters, so return a bogus result
        if lam <= 0 or sigma <= 0:
            return np.inf * space.one()

        # Create data term ||Ax - b||_2^2
        l2_norm = odl.solvers.L2NormSquared(ray_trafo.range)
        data_discrepancy = l2_norm * (ray_trafo - proj_data)

        # Create regularizing functional huber(|grad(x)|)
        gradient = odl.Gradient(space)
        l1_norm = odl.solvers.GroupL1Norm(gradient.range)
        smoothed_l1 = odl.solvers.MoreauEnvelope(l1_norm, sigma=sigma)
        regularizer = smoothed_l1 * gradient

        # Create full objective functional
        obj_fun = data_discrepancy + lam * regularizer

        # Pick parameters
        maxiter = 30
        num_store = 5

        # Run the algorithm
        x = ray_trafo.domain.zero()
        odl.solvers.bfgs_method(obj_fun,
                                x,
                                maxiter=maxiter,
                                num_store=num_store,
                                hessinv_estimate=odl.ScalingOperator(
                                    space,
                                    1 / odl.power_method_opnorm(ray_trafo)**2))

        return x
Example #18
    def __init__(self, size):
        self.space = odl.uniform_discr([-64, -64], [64, 64],
                                       [size[0], size[1]],
                                       dtype='float32')

        geometry = odl.tomo.parallel_beam_geometry(self.space, num_angles=30)
        op = odl.tomo.RayTransform(self.space, geometry)

        # Ensure operator has fixed operator norm for scale invariance
        opnorm = odl.power_method_opnorm(op)
        self.operator = (1 / opnorm) * op
        self.fbp = (opnorm) * odl.tomo.fbp_op(op)
        self.adjoint_operator = (1 / opnorm) * op.adjoint

        # the spaces
        self.meas_space = self.operator.range.shape
        self.image_space = (128, 128)

        # Create tensorflow layer from odl operator
        self.ray_transform = odl.contrib.tensorflow.as_tensorflow_layer(
            self.operator, 'RayTransform')
        self.ray_transform_adj = odl.contrib.tensorflow.as_tensorflow_layer(
            self.adjoint_operator, 'AdjRayTransform')
Example #19
size = 128

space = odl.uniform_discr([-64, -64], [64, 64], [size, size],
                          dtype='float64')

print("generating geometry")
geometry = odl.tomo.parallel_beam_geometry(space, num_angles=n_angles)

print("generating operator")
# operator = odl.tomo.RayTransform(space, geometry)
operator = odl.tomo.RayTransform(space, geometry, impl=astra_impl)  # this operator is used to create the corresponding phantoms (with the fully specified layer)

# the operator is a callable object (simply calling it applies the underlying function)

# Ensure operator has fixed operator norm for scale invariance
opnorm = odl.power_method_opnorm(operator)
operator = (1 / opnorm) * operator

# Create tensorflow layer from odl operator
print("\ngenerating operator tensorflow layer")
odl_op_layer = odl.contrib.tensorflow.as_tensorflow_layer(operator, 'RayTransform')

print("generating adjoint operator tensorflow layer")
odl_op_layer_adjoint = odl.contrib.tensorflow.as_tensorflow_layer(operator.adjoint, 'RayTransformAdjoint')

partial_op = partial.PartialRay(space, impl=astra_impl)

print("preparing the partial layer")
odl_op_partial_layer = partial.tensor_partial_layer(partial_op, 'PartialRayTransform')
odl_op_partial_layer_adjoint = partial.tensor_partial_layer(partial_op.adjoint, 'PartialRayTransformAdjoint')
Example #20
# l2-squared data matching
l2_norm = odl.solvers.L2NormSquared(ray_trafo.range).translated(data)

# The l1-norms scaled by regularization parameters
l1_norm_1 = 0.001 * odl.solvers.L1Norm(gradient.range)
l1_norm_2 = 1e-4 * odl.solvers.L1Norm(eps.range)

# Combine functionals, order must correspond to the operator K
f = odl.solvers.SeparableSum(l2_norm, l1_norm_1, l1_norm_2)


# --- Select solver parameters and solve using Chambolle-Pock --- #


# Estimated operator norm, add 10 percent to ensure ||K||_2^2 * sigma * tau < 1
op_norm = 1.1 * odl.power_method_opnorm(op)

niter = 400  # Number of iterations
tau = 1.0 / op_norm  # Step size for the primal variable
sigma = 1.0 / op_norm  # Step size for the dual variable
gamma = 0.5

# Optionally pass callback to the solver to display intermediate results
callback = (odl.solvers.CallbackPrintIteration() &
            odl.solvers.CallbackShow(clim=[0.018, 0.022], indices=0, step=10))

# Choose a starting point
x = op.domain.zero()

# Run the algorithm (g, the primal-part functional, is not defined in this
# snippet; a plain ZeroFunctional is assumed here)
g = odl.solvers.ZeroFunctional(op.domain)
odl.solvers.chambolle_pock_solver(
    x, f, g, op, tau=tau, sigma=sigma, niter=niter, gamma=gamma,
    callback=callback)
Example #21
 def opnorm(self):
     if self._opnorm is None:
         self._opnorm = odl.power_method_opnorm(self.non_normed_op)
     return self._opnorm
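power_method_opnorm, used throughout these snippets, estimates the operator norm (the largest singular value) by power iteration on the normal operator, which is why several calls pass xstart and maxiter to control the estimate. A minimal NumPy sketch of the same idea for a plain matrix (estimate_opnorm is an illustrative stand-in, not the odl implementation):

import numpy as np

def estimate_opnorm(A, niter=100, seed=0):
    """Estimate ||A||_2 by power iteration on A.T @ A."""
    x = np.random.default_rng(seed).standard_normal(A.shape[1])
    for _ in range(niter):
        x = A.T @ (A @ x)
        x /= np.linalg.norm(x)
    return np.linalg.norm(A @ x)

A = np.array([[3.0, 0.0],
              [0.0, 1.0]])
print(estimate_opnorm(A))      # ~3.0
print(np.linalg.norm(A, 2))    # exact largest singular value: 3.0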
Example #22
    def __init__(self,
                 ray_trafo,
                 epochs=None,
                 batch_size=None,
                 lr=None,
                 normalize_by_opnorm=None,
                 num_workers=8,
                 use_cuda=True,
                 show_pbar=True,
                 fbp_impl='astra_cuda',
                 hyper_params=None,
                 log_dir=None,
                 log_num_validation_samples=0,
                 save_best_learned_params_path=None,
                 **kwargs):
        """
        Parameters
        ----------
        ray_trafo : :class:`odl.tomo.RayTransform`
            Ray transform from which the FBP operator is constructed.
        epochs : int, optional
            Number of epochs to train (a hyper parameter).
        batch_size : int, optional
            Batch size (a hyper parameter).
        lr : float, optional
            Base learning rate (a hyper parameter).
        normalize_by_opnorm : bool, optional
            Whether to normalize :attr:`ray_trafo` by its operator norm.
        num_workers : int, optional
            Number of parallel workers to use for loading data.
        use_cuda : bool, optional
            Whether to use cuda for the U-Net.
        show_pbar : bool, optional
            Whether to show tqdm progress bars during the epochs.
        fbp_impl : str, optional
            The backend implementation passed to
            :class:`odl.tomo.RayTransform` in case no `ray_trafo` is specified.
            Then ``dataset.get_ray_trafo(impl=fbp_impl)`` is used to get the
            ray transform and FBP operator.
        log_dir : str, optional
            Tensorboard log directory (name of sub-directory in utils/logs).
            If `None`, no logs are written.
        log_num_validation_samples : int, optional
            Number of validation images to store in tensorboard logs.
            This option only takes effect if ``log_dir is not None``.
        save_best_learned_params_path : str, optional
            Save best model weights during training under the specified path by
            calling :meth:`save_learned_params`.
        """
        super().__init__(reco_space=ray_trafo.domain,
                         observation_space=ray_trafo.range,
                         hyper_params=hyper_params,
                         **kwargs)
        self.ray_trafo = ray_trafo
        self.num_workers = num_workers
        self.use_cuda = use_cuda
        self.show_pbar = show_pbar
        self.fbp_impl = fbp_impl
        self.log_dir = log_dir
        self.log_num_validation_samples = log_num_validation_samples
        self.save_best_learned_params_path = save_best_learned_params_path
        self.model = None

        if epochs is not None:
            self.epochs = epochs
            if (hyper_params or {}).get('epochs') is not None:
                warn("hyper parameter 'epochs' overridden by constructor "
                     "argument")

        if batch_size is not None:
            self.batch_size = batch_size
            if (hyper_params or {}).get('batch_size') is not None:
                warn("hyper parameter 'batch_size' overridden by constructor "
                     "argument")

        if lr is not None:
            self.lr = lr
            if (hyper_params or {}).get('lr') is not None:
                warn("hyper parameter 'lr' overridden by constructor argument")

        if normalize_by_opnorm is not None:
            self.normalize_by_opnorm = normalize_by_opnorm
            if (hyper_params or {}).get('normalize_by_opnorm') is not None:
                warn("hyper parameter 'normalize_by_opnorm' overridden by "
                     "constructor argument")

        if self.normalize_by_opnorm:
            self.opnorm = odl.power_method_opnorm(self.ray_trafo)
            self.ray_trafo = (1. / self.opnorm) * self.ray_trafo

        self.device = (torch.device('cuda:0') if self.use_cuda
                       and torch.cuda.is_available() else torch.device('cpu'))
Example #23
# Create data discrepancy functionals
alpha = 0.8
g_kl = [(1 - alpha) * odl.solvers.KullbackLeibler(ray_trafo.range, prior=d)
        for d in data]
g_l2 = alpha * odl.solvers.L2NormSquared(ray_trafo.range).translated(data_sum)

# Create L1 functional for the TV regularization
g_l1 = [0.2 * odl.solvers.L1Norm(grad.range)] * domain.size

# Assemble functionals
g = [odl.solvers.SeparableSum(*g_kl),
     g_l2,
     odl.solvers.SeparableSum(*g_l1)]

opnorm = odl.power_method_opnorm(ray_trafo, maxiter=2)
gradnorm = odl.power_method_opnorm(grad, maxiter=10)
tau = 0.5
sigma = [1 / (opnorm) ** 2, 1 / (ndim * opnorm) ** 2, 1 / (gradnorm) ** 2]

lin_ops = [diagop, redop, grad_n]

# Solve
callback = (odl.solvers.CallbackShow(display_step=10) &
            odl.solvers.CallbackPrintIteration())

x = domain.one()
odl.solvers.douglas_rachford_pd(x, f, g, lin_ops,
                                tau=tau, sigma=sigma,
                                niter=200, callback=callback)
x.show('result', indices=np.s_[:])
Example #24
        comp_proj_0 = odl.ComponentProjection(pspace, 0)
        comp_proj_1 = odl.ComponentProjection(pspace, 1)

        lin_ops = [insert_grad * comp_proj_1, coarse_grad * comp_proj_0]

        # Column vector of two operators
        #lin_ops = odl.BroadcastOperator(outside_lin_ops, insert_lin_ops)

        nonsmooth_funcs = [nonsmooth_func, nonsmooth_func_coarse]

        box_constr = odl.solvers.IndicatorBox(pspace, np.min(phantom_f),
                                              np.max(phantom_f))
        f = box_constr

        # eta^-1 is the Lipschitz constant of the smooth functional gradient
        ray_trafo_norm = 1.1 * odl.power_method_opnorm(
            sum_ray_trafo, xstart=phantom, maxiter=2)
        print('norm of the ray transform: {}'.format(ray_trafo_norm))
        eta = 1 / (2 * ray_trafo_norm**2 + 2 * reg_param_1)
        print('eta = {}'.format(eta))
        grad_norm_insert = 1.1 * odl.power_method_opnorm(
            insert_grad, xstart=phantom_insert, maxiter=4)
        grad_norm_coarse = 1.1 * odl.power_method_opnorm(
            coarse_grad, xstart=phantom_c, maxiter=4)
        grad_norm = [grad_norm_insert, grad_norm_coarse]
        print('norm of the gradient: {}'.format(grad_norm))

        # tau and sigma are like step sizes
        sigma = [4e-3, 4e-3]
        tau = 1.0 * sigma[0]
        # Here we check the convergence criterion for the forward-backward solver
        # 1. This is required such that the square root is well-defined
Example #25
print('number of minibatches during testing = %d' % len(eval_dataloader))
############################################
######### forward operator and FBP ######################
import simulate_projections_for_train_and_test
from simulate_projections_for_train_and_test import img_size, space_range, num_angles, det_shape, noise_std_dev, geom
space = odl.uniform_discr([-space_range, -space_range], [space_range, space_range],\
                              (img_size, img_size), dtype='float32', weighting=1.0)
if (geom == 'parallel_beam'):
    geometry = odl.tomo.geometry.parallel.parallel_beam_geometry(
        space, num_angles=num_angles, det_shape=det_shape)
else:
    geometry = odl.tomo.geometry.conebeam.cone_beam_geometry(space, src_radius=1.5*space_range, \
                                                             det_radius=5.0, num_angles=num_angles, det_shape=det_shape)

fwd_op_odl = odl.tomo.RayTransform(space, geometry, impl='astra_cuda')
op_norm = 1.1 * odl.power_method_opnorm(fwd_op_odl)
print('operator norm = {:.4f}'.format(op_norm))

fbp_op_odl = odl.tomo.fbp_op(fwd_op_odl)
adjoint_op_odl = fwd_op_odl.adjoint

fwd_op = torch_wrapper.OperatorModule(fwd_op_odl).to(device)
fbp_op = torch_wrapper.OperatorModule(fbp_op_odl).to(device)
adjoint_op = torch_wrapper.OperatorModule(adjoint_op_odl).to(device)

####### variational optimizer for the learned convex prior ####################
sq_loss = torch.nn.MSELoss(reduction='mean')  #data-fidelity loss


def acr_optimizer(x_init, x_ground_truth, y_test, n_iter, lambda_acr, lr=0.80):
    x_cvx = x_init.clone().detach().requires_grad_(True).to(device)
Example #26
redop = odl.ReductionOperator(ray_trafo, domain.size)

# Assemble all operators
data = diagop(phantom)
data_sum = redop(phantom)

# Create functionals as needed
f = odl.solvers.IndicatorNonnegativity(domain)

alpha = 0.8
g_kl = [(1 - alpha) * odl.solvers.KullbackLeibler(ray_trafo.range, prior=d)
        for d in data]
g_l2 = alpha * odl.solvers.L2NormSquared(ray_trafo.range).translated(data_sum)
g = [odl.solvers.SeparableSum(*g_kl), g_l2]

opnorm = odl.power_method_opnorm(ray_trafo, maxiter=4)
tau = 1.0
sigma = [1 / (opnorm)**2, 1 / (ndim * opnorm)**2]

# Solve
callback = (odl.solvers.CallbackShow(display_step=10)
            & odl.solvers.CallbackPrintIteration())

x = domain.one()
odl.solvers.douglas_rachford_pd(x,
                                f,
                                g,
                                L=[diagop, redop],
                                tau=tau,
                                sigma=sigma,
                                niter=200,
                                callback=callback)
Example #27
# Isotropic TV-regularization: l1-norm of grad(x)
l1_norm = 0.1 * odl.solvers.L1Norm(gradient.range)

# Make separable sum of functionals, order must correspond to the operator K
f = odl.solvers.SeparableSum(kl_divergence, l1_norm)

# Optional: pass callback objects to solver
callback = (odl.solvers.CallbackPrintIteration() &
            odl.solvers.CallbackShow(display_step=20))


# --- Select solver parameters and solve using Chambolle-Pock --- #


# Estimated operator norm, add 10 percent to ensure ||K||_2^2 * sigma * tau < 1
op_norm = 1.1 * odl.power_method_opnorm(op)
tau = 10.0 / op_norm  # Step size for the primal variable
sigma = 0.1 / op_norm  # Step size for the dual variable

# Starting point
x = op.domain.zero()

# Run algorithm (and display intermediates)
odl.solvers.chambolle_pock_solver(
    x, f, g, op, tau=tau, sigma=sigma, niter=100, callback=callback)

# Display images
orig.show(title='original image')
noisy.show(title='noisy image')
x.show(title='denoised', show=True)  # show and hold
Example #28
# Functions to be composed with linear operators. L[i] applies to g[i].
alpha = 0.15
g = [
    odl.solvers.L2NormSquared(X).translated(y),
    alpha * odl.solvers.L1Norm(grad.range)
]
L = [ident, grad]

# We check if everything makes sense by evaluating the total functional at 0
x = X.zero()
print(f(x) + sum(g[i](L[i](x)) for i in range(len(g))))

# %% Choose solver parameters

grad_norm = 1.1 * odl.power_method_opnorm(grad, xstart=y, maxiter=20)
opnorms = [1, grad_norm]  # identity has norm 1


def check_params(tau, sigmas):
    sum_part = sum(sigma * opnorm**2 for sigma, opnorm in zip(sigmas, opnorms))
    print('Sum evaluates to', sum_part)
    check_value = tau * sum_part

    assert check_value < 4, 'value must be < 4, got {}'.format(check_value)
    print('Values ok, check evaluates to {}, must be < 4'.format(check_value))


tau = 1.5
c = 3.0 / (len(opnorms) * tau)
sigmas = [c / opnorm**2 for opnorm in opnorms]
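Calling the check with the values chosen above succeeds regardless of the estimated norms: each term sigma_i * opnorm_i**2 equals c = 3.0 / (len(opnorms) * tau) = 1.0, so the sum is 2.0 and the check value is tau * 2.0 = 3.0 < 4.

check_params(tau, sigmas)
# -> Sum evaluates to 2.0 (up to floating-point rounding)
# -> Values ok, check evaluates to 3.0, must be < 4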
Example #29
# Stacking of the two operators
L = odl.BroadcastOperator(ray_trafo, grad)

# Data matching and regularization functionals
data_fit = odl.solvers.L2NormSquared(ray_trafo.range).translated(data)
reg_func = 0.015 * odl.solvers.L1Norm(grad.range)
g = odl.solvers.SeparableSum(data_fit, reg_func)

# We don't use the f functional, setting it to zero
f = odl.solvers.ZeroFunctional(L.domain)

# --- Select parameters and solve using ADMM --- #

# Estimated operator norm, add 10 percent for some safety margin
op_norm = 1.1 * odl.power_method_opnorm(L, maxiter=20)

niter = 200  # Number of iterations
sigma = 2.0  # Step size for g.proximal
tau = sigma / op_norm**2  # Step size for f.proximal

# Optionally pass a callback to the solver to display intermediate results
callback = (odl.solvers.CallbackPrintIteration(step=10)
            & odl.solvers.CallbackShow(step=10))

# Choose a starting point
x = L.domain.zero()

# Run the algorithm
odl.solvers.admm_linearized(x, f, g, L, tau, sigma, niter, callback=callback)
Example #30
# G = spatial gradient X2 -> X2^2

G = odl.Gradient(X2, pad_mode='symmetric')

D = odl.solvers.L2NormSquared(R22.range).translated(g2)
alpha = 1e-3
S = alpha * odl.solvers.GroupL1Norm(G.range)
P = odl.solvers.IndicatorBox(X2, 0, np.inf)

# Arguments for the solver
f_func = P
g_funcs = [D, S]
L_ops = [R22, G]

# Operator norm estimation for the step size parameters
R22_norm = odl.power_method_opnorm(R22, maxiter=10)
# TODO: choose a different starting point
G_norm = odl.power_method_opnorm(G, xstart=f2_FBP2, maxiter=10)

# We need tau * sum[i](sigma_i * opnorm_i^2) < 4 for convergence, so we
# choose tau and set sigma_i = c / (tau * opnorm_i^2) such that sum[i](c) < 4
tau = 1.0
opnorms = [R22_norm, G_norm]
sigmas = [3.0 / (tau * len(opnorms) * opnorm**2) for opnorm in opnorms]

callback = (odl.solvers.CallbackPrintIteration(step=20)
            & odl.solvers.CallbackShow(step=20, clim=[0, 3]))

f_tv_orig = f2_FBP2.copy()
odl.solvers.douglas_rachford_pd(f_tv_orig,
                                f_func,
                                g_funcs,
                                L_ops,
                                tau=tau,
                                sigma=sigmas,
                                # niter is not shown in this snippet; 200 is
                                # assumed, matching the other examples
                                niter=200,
                                callback=callback)
Example #31
prox_convconj_l1 = odl.solvers.proximal_cconj_l1(gradient.range, lam=0.2,
                                                 isotropic=True)

# Combine proximal operators: the order must match the order of operators in K
proximal_dual = odl.solvers.combine_proximals(prox_convconj_l2,
                                              prox_convconj_l1)

# Proximal operator related to the primal variable, a non-negativity constraint
proximal_primal = odl.solvers.proximal_nonnegativity(op.domain)


# --- Select solver parameters and solve using Chambolle-Pock --- #


# Estimated operator norm, add 10 percent to ensure ||K||_2^2 * sigma * tau < 1
op_norm = 1.1 * odl.power_method_opnorm(op, 100, noisy)

niter = 400  # Number of iterations
tau = 1.0 / op_norm  # Step size for the primal variable
sigma = 1.0 / op_norm  # Step size for the dual variable

# Optional: pass callback objects to solver
callback = (odl.solvers.CallbackPrintIteration() &
            odl.solvers.CallbackShow(display_step=20))

# Starting point
x = op.domain.zero()

# Run algorithms (and display intermediates)
odl.solvers.chambolle_pock_solver(
    op, x, tau=tau, sigma=sigma, proximal_primal=proximal_primal,
    proximal_dual=proximal_dual, niter=niter, callback=callback)
Example #32
# Create phantom (the "unknown" solution)
phantom = odl.phantom.shepp_logan(space, modified=True)

# Apply convolution to phantom to create data
g = A(phantom)

# Display the results using the show method
kernel.show('kernel')
phantom.show('phantom')
g.show('convolved phantom')

# Landweber

# Need operator norm for step length (omega)
opnorm = odl.power_method_opnorm(A)

f = space.zero()
odl.solvers.landweber(A, f, g, niter=100, omega=1/opnorm**2)
f.show('landweber')

# Conjugate gradient

f = space.zero()
odl.solvers.conjugate_gradient_normal(A, f, g, niter=100)
f.show('conjugate gradient')

# Tikhonov with identity

B = odl.IdentityOperator(space)
a = 0.1
Example #33
# l2-squared data matching
l2_norm = odl.solvers.L2NormSquared(ray_trafo.range).translated(data)

# Isotropic TV-regularization, i.e. the l1-norm
l1_norm = 0.03 * odl.solvers.L1Norm(gradient.range)

# Combine functionals, order must correspond to the operator K
f = odl.solvers.SeparableSum(l2_norm, l1_norm)


# --- Select solver parameters and solve using Chambolle-Pock --- #


# Estimated operator norm, add 10 percent to ensure ||K||_2^2 * sigma * tau < 1
op_norm = 1.3 * odl.power_method_opnorm(op, maxiter=6)

niter = 100  # Number of iterations
tau = 1.0 / op_norm  # Step size for the primal variable
sigma = 1.0 / op_norm  # Step size for the dual variable
gamma = 0.2

# Optionally pass callback to the solver to display intermediate results
callback = (odl.solvers.CallbackPrintIteration() &
            odl.solvers.CallbackShow(display_step=5))

# Choose a starting point
x = op.domain.zero()

# Run the algorithm (g, the primal-part functional, is not defined in this
# snippet; a plain ZeroFunctional is assumed here)
g = odl.solvers.ZeroFunctional(op.domain)
odl.solvers.chambolle_pock_solver(
    x, f, g, op, tau=tau, sigma=sigma, niter=niter, gamma=gamma,
    callback=callback)
Example #34
l2_norm = odl.solvers.L2NormSquared(ray_trafo.range)
data_discrepancy = l2_norm * (ray_trafo - data)

# Create regularizing functional || |grad(x)| ||_1 and smooth the functional
# using the Moreau envelope.
# The parameter sigma controls the strength of the regularization.
gradient = odl.Gradient(reco_space)
l1_norm = odl.solvers.GroupL1Norm(gradient.range)
smoothed_l1 = odl.solvers.MoreauEnvelope(l1_norm, sigma=0.03)
regularizer = smoothed_l1 * gradient

# Create full objective functional
obj_fun = data_discrepancy + 0.03 * regularizer

# Create initial estimate of the inverse Hessian by a diagonal estimate
opnorm = odl.power_method_opnorm(ray_trafo)
hessinv_estimate = odl.ScalingOperator(reco_space, 1 / opnorm ** 2)

# Optionally pass callback to the solver to display intermediate results
callback = (odl.solvers.CallbackPrintIteration() &
            odl.solvers.CallbackShow())

# Pick parameters
maxiter = 30
num_store = 5  # only save some vectors (Limited memory)

# Choose a starting point
x = ray_trafo.domain.zero()

# Run the algorithm
odl.solvers.bfgs_method(
    obj_fun, x, maxiter=maxiter, num_store=num_store,
    hessinv_estimate=hessinv_estimate, callback=callback)
Example #35
# l2-squared data matching
l2_norm = odl.solvers.L2NormSquared(space).translated(noisy)

# Isotropic TV-regularization: l1-norm of grad(x)
l1_norm = 0.15 * odl.solvers.L1Norm(gradient.range)

# Make separable sum of functionals, order must correspond to the operator K
f = odl.solvers.SeparableSum(l2_norm, l1_norm)

# Non-negativity constraint
g = odl.solvers.IndicatorNonnegativity(op.domain)

# --- Select solver parameters and solve using Chambolle-Pock --- #

# Estimated operator norm, add 10 percent to ensure ||K||_2^2 * sigma * tau < 1
op_norm = 1.1 * odl.power_method_opnorm(op, xstart=noisy)

niter = 200  # Number of iterations
tau = 1.0 / op_norm  # Step size for the primal variable
sigma = 1.0 / op_norm  # Step size for the dual variable

# Optional: pass callback objects to solver
callback = (odl.solvers.CallbackPrintIteration()
            & odl.solvers.CallbackShow(display_step=5))

# Starting point
x = op.domain.zero()

# Run algorithm (and display intermediates)
odl.solvers.chambolle_pock_solver(x,
                                  f,
                                  g,
                                  op,
                                  tau=tau,
                                  sigma=sigma,
                                  niter=niter,
                                  callback=callback)
Example #36
# Isotropic TV-regularization: l1-norm of grad(x)
l1_norm = 0.2 * odl.solvers.L1Norm(gradient.range)

# Make separable sum of functionals, order must correspond to the operator K
f = odl.solvers.SeparableSum(l2_norm, l1_norm)

# Non-negativity constraint
g = odl.solvers.IndicatorNonnegativity(op.domain)


# --- Select solver parameters and solve using Chambolle-Pock --- #


# Estimated operator norm, add 10 percent to ensure ||K||_2^2 * sigma * tau < 1
op_norm = 1.1 * odl.power_method_opnorm(op, xstart=noisy)

niter = 400  # Number of iterations
tau = 1.0 / op_norm  # Step size for the primal variable
sigma = 1.0 / op_norm  # Step size for the dual variable

# Optional: pass callback objects to solver
callback = (odl.solvers.CallbackPrintIteration() &
            odl.solvers.CallbackShow(display_step=20))

# Starting point
x = op.domain.zero()

# Run algorithm (and display intermediates)
odl.solvers.chambolle_pock_solver(
    x, f, g, op, tau=tau, sigma=sigma, niter=niter, callback=callback)