Example #1
def ExpOp_builder(bin_param, filter_space, interp):
    if interp == 'Full':
        # Full resolution: keep the filter space and use an identity expansion
        spf_space = filter_space
        Exp_op = odl.IdentityOperator(filter_space)
    elif interp == 'uniform':
        # Create binning scheme
        dpix = np.size(filter_space)   # number of filter pixels
        dsize = filter_space.max_pt    # half-extent of the symmetric filter domain
        filt_bin_space = odl.uniform_discr(-dsize, dsize, dpix // bin_param)
        spf_space = odl.uniform_discr(0, dsize, dpix // (2 * bin_param))
        resamp = odl.Resampling(filt_bin_space, filter_space)
        sym = SymOp(spf_space, filt_bin_space)
        Exp_op = resamp * sym
    else:
        if interp == 'constant':
            interp = 'nearest'
        elif interp == 'linear':
            pass
        else:
            raise ValueError('unknown `expansion operator type` ({})'
                             ''.format(interp))
        B = (ExpBin(bin_param, np.size(filter_space)) *
             filter_space.weighting.const)
        B[-1] -= 1 / 2 * filter_space.weighting.const

        # Create sparse filter space
        spf_part = odl.nonuniform_partition(B, min_pt=0, max_pt=B[-1])
        spf_weight = np.ravel(
            np.multiply.reduce(np.meshgrid(*spf_part.cell_sizes_vecs)))
        spf_fspace = odl.FunctionSpace(spf_part.set)
        spf_space = odl.DiscreteLp(spf_fspace,
                                   spf_part,
                                   odl.rn(spf_part.size, weighting=spf_weight),
                                   interp=interp)
        filt_pos_part = odl.uniform_partition(0, B[-1],
                                              np.size(filter_space) // 2)

        filt_pos_space = odl.uniform_discr_frompartition(filt_pos_part,
                                                         dtype='float64')
        lin_interp = odl.Resampling(spf_space, filt_pos_space)

        # Create symmetry operator
        sym = SymOp(filt_pos_space, filter_space)

        # Create sparse filter operator
        Exp_op = sym * lin_interp
    return spf_space, Exp_op
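
A minimal usage sketch of ExpOp_builder (the filter space and parameter values below are illustrative; with interp='Full' the helpers SymOp and ExpBin are not needed, since that branch only returns the filter space and an identity operator):

import numpy as np
import odl

# Illustrative 1D filter space on [-1, 1] with 64 cells
filter_space = odl.uniform_discr(-1, 1, 64)

# 'Full' keeps the complete filter space and an identity expansion operator
spf_space, Exp_op = ExpOp_builder(bin_param=2, filter_space=filter_space,
                                  interp='Full')
print(spf_space.shape, Exp_op)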
Example #2
    def generate_data(self, voxels_up, reco_space_up, f_up, **kwargs):
        factor = 2
        dpix_up = [factor * voxels_up[0], voxels_up[1]]
        dpix = [int(factor * self.voxels[0]), self.voxels[0]]
        src_radius = self.src_rad * self.volumesize[0] * 2
        det_radius = self.det_rad * self.volumesize[0] * 2
        # Make a circular scanning geometry
        angle_partition = odl.uniform_partition(0, 2 * np.pi, self.angles)
        # Make a flat detector space
        det_partition = odl.uniform_partition(-self.detecsize, self.detecsize,
                                              dpix_up)
        # Create data_space_up and data_space
        data_space = odl.uniform_discr((0, *-self.detecsize),
                                       (2 * np.pi, *self.detecsize),
                                       [self.angles, *dpix],
                                       dtype='float32')
        data_space_up = odl.uniform_discr((0, *-self.detecsize),
                                          (2 * np.pi, *self.detecsize),
                                          [self.angles, *dpix_up],
                                          dtype='float32')
        # Create geometry
        geometry = odl.tomo.ConeFlatGeometry(angle_partition,
                                             det_partition,
                                             src_radius=src_radius,
                                             det_radius=det_radius,
                                             axis=[0, 0, 1])

        # Forward projection on the upsampled reconstruction space
        FP = odl.tomo.RayTransform(reco_space_up, geometry, use_cache=False)

        # Resample the fine-detector projections onto the measurement data space
        resamp = odl.Resampling(data_space_up, data_space)
        if 'load_data_g' in kwargs:
            if isinstance(kwargs['load_data_g'], str):
                self.g = data_space.element(np.load(kwargs['load_data_g']))
            else:
                self.g = data_space.element(kwargs['load_data_g'])
        else:
            self.g = resamp(FP(f_up))

            if self.noise is None:
                pass
            elif self.noise[0] == 'Gaussian':
                self.g += data_space.element(
                        odl.phantom.white_noise(resamp.range) *
                        np.mean(self.g) * self.noise[1])
            elif self.noise[0] == 'Poisson':
                # 2**8 seems to be the minimal accepted I_0
                self.g = data_space.element(
                    self.add_poisson_noise(self.noise[1]))
            else:
                raise ValueError('unknown `noise type` ({})'
                                 ''.format(self.noise[0]))
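
The 'Gaussian' branch above perturbs the clean data with ODL white noise scaled by the data mean and a relative noise level; a minimal stand-alone sketch of that pattern (the space, data and noise level below are illustrative stand-ins for data_space, self.g and self.noise[1]):

import numpy as np
import odl

# Illustrative projection data space and clean data
data_space = odl.uniform_discr([0, -1], [2 * np.pi, 1], [360, 128],
                               dtype='float32')
g = data_space.one()

# Relative Gaussian noise: white noise scaled by the data mean and a level
noise_level = 0.05
g_noisy = g + odl.phantom.white_noise(data_space) * np.mean(g) * noise_level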
Example #3
det_partition = odl.uniform_partition(det_min_pt, det_max_pt, det_shape)
geometry = odl.tomo.CircularConeFlatGeometry(angle_partition, det_partition,
                                             src_radius, det_radius)

# Generate data
ray_trafo = odl.tomo.RayTransform(space_hires, geometry, impl='astra_cuda')
data = ray_trafo(templ_shifted)
noisy_data = (data +
              0.01 * np.max(data) * odl.phantom.white_noise(ray_trafo.range))

data_non_shifted = ray_trafo(templ)
data_non_shifted.show(indices=[0, slice(None), slice(None)])
data_non_shifted.show(indices=[15, slice(None), slice(None)])

# %%
resampl = odl.Resampling(space_hires, space_lowres)
cur_templ = resampl(templ)
cur_templ_shifted = resampl(templ_shifted)

# Perform multi-resolution loop to optimize for the shift
bfgs_max_iter = 10
# Shift vector to be optimized (one component per spatial axis), starting at zero
t = odl.rn(space_hires.ndim).zero()

with odl.util.Timer('Multigrid-BFGS:'):
    for cur_space, next_space in zip_longest(spaces, spaces[1:]):
        # Resample data by creating a ray transform with a coarser detector
        factor = space_hires.shape[0] // cur_space.shape[0]
        cur_det_shape = det_shape // factor
        cur_det_partition = odl.uniform_partition(det_min_pt, det_max_pt,
                                                  cur_det_shape)
        # The snippet is cut off in the source; presumably the call mirrors the
        # geometry above, using the coarser detector partition:
        cur_geometry = odl.tomo.CircularConeFlatGeometry(
            angle_partition, cur_det_partition, src_radius, det_radius)
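
odl.Resampling, used above to move templates and data between resolutions, maps elements of its domain onto its range by evaluating them on the new grid; a minimal sketch with illustrative spaces:

import odl

# Two discretizations of the same square at different resolutions
space_hires = odl.uniform_discr([-1, -1], [1, 1], [256, 256])
space_lowres = odl.uniform_discr([-1, -1], [1, 1], [64, 64])

# Map high-resolution elements onto the coarse grid
resample = odl.Resampling(space_hires, space_lowres)
x_low = resample(space_hires.one())
print(x_low.shape)  # (64, 64)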
Example #4
    # (the snippet starts mid-call here; assumed to be an operator-norm estimate)
    grad_norm = odl.power_method_opnorm(grad, maxiter=10)
    print('norm of the gradient: {}'.format(grad_norm))

    # Here we check the convergence criterion for the Douglas-Rachford solver
    check_value = tau * (res_level.sigma_ray * ray_trafo_norm ** 2 +
                         res_level.sigma_grad * grad_norm ** 2)
    print('check_value = {}, must be < 4 for convergence'.format(check_value))
    convergence_criterion = check_value < 4
    assert convergence_criterion


for res_level in res_levels:
    check_params(res_level)

# Start value at the very beginning, the FBP reco resampled to the first space
resampling = odl.Resampling(reco_fbp.space, res_levels[0].space)
reco = resampling(reco_fbp)
# reco = res_levels[0].space.zero()

for cur_res, next_res in zip_longest(res_levels, res_levels[1:]):
    # Functionals composed with operators, given in split form
    ray_trafo = odl.tomo.RayTransform(cur_res.space, geometry,
                                      impl='astra_cuda')
    l2_norm_sq = odl.solvers.L2NormSquared(ray_trafo.range)
    data_func = l2_norm_sq.translated(data)

    if cur_res.regularizer == 'L2':
        reg_func = cur_res.reg_param * odl.solvers.L2NormSquared(cur_res.space)
        reg_op = odl.IdentityOperator(cur_res.space)
    elif cur_res.regularizer == 'H1':
        grad = odl.Gradient(cur_res.space, pad_mode='order1')
        # (cut off in the source; assumed continuation mirroring the 'L2' branch)
        reg_func = cur_res.reg_param * odl.solvers.L2NormSquared(grad.range)
        reg_op = grad
    lam = 1.5
    # Here we check the convergence criterion for the Douglas-Rachford solver
    check_value = tau * (sigma_ray * ray_trafo_norm**2 + sigma_ident +
                         sigma_grad * grad_norm**2)
    print('check_value = {}, must be < 4 for convergence'.format(check_value))
    convergence_criterion = check_value < 4
    assert convergence_criterion

    callback = (odl.solvers.CallbackPrintIteration(step=2)
                & odl.solvers.CallbackPrint(g[0] * L[0], fmt='data fit:   {}')
                & odl.solvers.CallbackPrint(g[1] * L[1], fmt='reg lowres: {}')
                & odl.solvers.CallbackPrint(g[2] * L[2], fmt='reg detail: {}')
                & odl.solvers.CallbackShow(step=2, clim=[0.019, 0.023]))

    # Start value, resample & resize FBP reco
    resample_lowres = odl.Resampling(reco_fbp.space, space_lowres)
    reco = pspace.element(
        [resample_lowres(reco_fbp), 0.021 * space_detail.one()])

    if timing:
        callback = None
        with odl.util.Timer(reco_method):
            odl.solvers.douglas_rachford_pd(reco,
                                            f,
                                            g,
                                            L,
                                            tau,
                                            sigma,
                                            lam=lam,
                                            niter=80,
                                            callback=callback)

# (cut off in the source; assumed construction of the unscaled ray transform)
ray_trafo_unscaled = odl.tomo.RayTransform(reco_space, geometry,
                                           impl='astra_cuda')
ray_trafo_scaling = np.sqrt(2)
ray_trafo = ray_trafo_scaling * ray_trafo_unscaled

# Read the images as phantom and prior, down-sample them if needed
tmp_image = np.rot90(scipy.misc.imread('/home/aringh/Downloads/handnew2.png'),
                     k=-1)
phantom_full = image_space.element(tmp_image)

tmp_image = np.rot90(scipy.misc.imread('/home/aringh/Downloads/handnew1.png'),
                     k=-1)
prior_full = image_space.element(tmp_image)

if n_image != n:
    resample_op = odl.Resampling(image_space, reco_space)
    phantom = resample_op(phantom_full)
    prior = resample_op(prior_full)
else:
    phantom = phantom_full
    prior = prior_full

# Make sure they are nonnegative
prior = prior + 1e-4  # 1e-6
phantom = phantom + 1e-4  # 1e-6

# Show the phantom and the prior
phantom.show(title='Phantom', saveto='Phantom')
prior.show(title='Prior', saveto='Prior')

no_title = '_no_title'
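
The check_value < 4 test that appears twice above is the step-size condition for odl.solvers.douglas_rachford_pd, tau * sum_i sigma_i * ||L_i||**2 < 4. A minimal self-contained sketch of that check, with a gradient operator standing in for one of the L_i and illustrative step sizes (all names below are assumptions, not taken from the examples):

import odl

# Illustrative reconstruction space; the gradient stands in for one of the L_i
space = odl.uniform_discr([-1, -1], [1, 1], [64, 64])
grad = odl.Gradient(space, pad_mode='order1')

# Operator norms: the identity has norm 1, the gradient norm is estimated
ident_norm = 1.0
grad_norm = odl.power_method_opnorm(grad, xstart=odl.phantom.white_noise(space),
                                    maxiter=10)

# Step sizes chosen so that the condition holds by construction
tau = 1.0
sigma_ident = 1.0 / (tau * ident_norm ** 2)
sigma_grad = 1.0 / (tau * grad_norm ** 2)

check_value = tau * (sigma_ident * ident_norm ** 2 + sigma_grad * grad_norm ** 2)
print('check_value = {}, must be < 4 for convergence'.format(check_value))
assert check_value < 4  # equals 2.0 with the step sizes above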