def __init__(self, projections=np.array([]), projector=Projector()):
    self.z = projections
    self.K = projector
    self.a = 1
    self.u = np.zeros(projector.num_voxel)
    # One component per volume dimension. Use copies so the list entries
    # are independent arrays rather than aliases of the same buffer.
    self.w = [self.u.copy() for _ in range(self.u.ndim)]
    self.b = 1
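# Aside: a minimal NumPy sketch (not part of this module) of why the
# copies above matter. A comprehension that repeats the same array binds
# every list entry to one shared buffer:

import numpy as np

u = np.zeros(3)
aliased = [u for _ in range(2)]              # both entries are the SAME array
independent = [u.copy() for _ in range(2)]   # separate buffers

aliased[0][0] = 1.0
print(aliased[1][0])       # 1.0 -- the write is visible through both entries
independent[0][0] = 1.0
print(independent[1][0])   # 0.0 -- copies stay independent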
def test_adjoint_scaling(self):
    vol_rn = Rn(self.geom.vol_size)
    vol_rn_ones = vol_rn.element(1)
    proj_rn = Rn(self.geom.proj_size)
    proj_rn_ones = proj_rn.element(1)
    projector = Projector(self.geom, vol_rn, proj_rn)

    # Compare <A x, y> with <x, A^T y> for x = 1_I, y = 1_D
    proj1 = projector.forward(vol_rn_ones)
    vol1 = projector.backward(proj_rn_ones)
    n1 = proj1.inner(proj_rn_ones)
    n2 = vol_rn_ones.inner(vol1)
    print('<A x, y> = <x, Ad y> : {0} = {1}'.format(n1, n2))
    print('<A x, y> / <x, Ad y> - 1 = {0}'.format(n1 / n2 - 1))

    # Scaling factor for the unmatched ASTRA backprojector
    proj = projector.forward(vol_rn_ones)
    vol = projector.backward(proj)
    alpha = proj.norm() ** 2 / vol.inner(vol_rn_ones)
    print(alpha)

    projector.clear_astra_memory()
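# Aside: the inner-product identity tested above can be verified exactly
# with a dense stand-in matrix, where the adjoint of A really is A.T.
# A minimal NumPy sketch (not tied to the ASTRA projector):

import numpy as np

rng = np.random.RandomState(0)
A = rng.rand(5, 4)           # stand-in for the forward projector
x = np.ones(4)               # volume of ones
y = np.ones(5)               # projection of ones

lhs = A.dot(x).dot(y)        # <A x, y>_D
rhs = x.dot(A.T.dot(y))      # <x, A^T y>_I
assert np.isclose(lhs, rhs)  # matched adjoint: the ratio is exactly 1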
def test_ndim(self):
    vshape = (88, 77)
    vsize = np.prod(vshape)
    cols = 99
    angles = np.linspace(0, 2 * np.pi, 111, endpoint=False)
    psize = cols * np.size(angles)
    geom = Geometry(geometry_type='parallel', scale_factor=1,
                    volume_shape=vshape, det_col_count=cols,
                    det_row_count=1, angles=angles)
    print 'Vol size: ', vsize
    print 'Proj size:', psize
    print 'Voxel size:', geom.voxel_size

    vol_rn = Rn(vsize)
    proj_rn = Rn(psize)
    projector = Projector(geom, vol_rn, proj_rn)

    proj = projector.forward(vol_rn.element(1))
    p = proj.data.reshape(geom.proj_shape)
    print 'Proj at 0 degree: max = ', p[0, :].max()

    vol = projector.backward(proj_rn.element(1))
    print vol.data.max()

    projector.clear_astra_memory()
def test_gui(self):
    num_iter = 8
    u, p, cpd, l2_atp = ChambollePock(
        projections=np.ones((100, 180, 100)),
        projector=Projector()).least_squares(
            iterations=num_iter, L=363.569641113, verbose=True,
            non_negativity_constraint=True)
    self.assertEqual(u.__class__.__name__, 'ndarray')
    self.assertEqual(cpd.size, num_iter)
    self.assertEqual(l2_atp.size, num_iter)
def test_odlprojector_instance(self):
    # Create cubic unit volume
    vol_rn = Rn(self.geom.vol_size)
    vol = np.ones(self.geom.vol_shape)
    vol_rn_vec = vol_rn.element(vol.ravel())

    # Create projections
    proj_rn = Rn(self.geom.proj_size)
    proj = np.ones(self.geom.proj_size)
    proj_rn_vec = proj_rn.element(proj.ravel())

    # ||1_N||_2^2 = N for a vector of N ones
    vol_norm_0 = vol_rn_vec.norm()
    self.assertAlmostEqual(vol_norm_0 ** 2, self.geom.vol_size)
    proj_norm_0 = proj_rn_vec.norm()
    self.assertAlmostEqual(proj_norm_0 ** 2, self.geom.proj_size)

    # ODLProjector instance
    projector = Projector(self.geom, vol_rn, proj_rn)

    proj_rn_vec = projector.forward(vol_rn_vec)
    proj_norm_1 = proj_rn_vec.norm()
    self.assertNotEqual(proj_norm_0, proj_norm_1)

    vol_rn_vec = projector.backward(proj_rn_vec)
    vol_norm_1 = vol_rn_vec.norm()
    self.assertNotEqual(vol_norm_0, vol_norm_1)

    proj_rn_vec = projector.forward(vol_rn_vec)
    proj_norm_2 = proj_rn_vec.norm()
    self.assertNotEqual(proj_norm_1, proj_norm_2)

    vol_rn_vec = projector.backward(proj_rn_vec)
    vol_norm_2 = vol_rn_vec.norm()
    self.assertNotEqual(vol_norm_2, vol_norm_1)

    projector.clear_astra_memory()

    print 'vol norms:', vol_norm_0, vol_norm_1, vol_norm_2
    print 'proj norms', proj_norm_0, proj_norm_1, proj_norm_2
def adjoint_scaling_factor(self):
    """Compute the scaling factor of the adjoint projector.

    For A x = y, the adjoint A* of A is defined through:

        <A x, y>_D = <x, A* y>_I

    Assume A* = s B with B being the (unmatched) ASTRA backprojector.
    Testing the identity with x = 1_I and y = 1_D gives:

        s = <A 1_I, 1_D> / <1_I, B 1_D>

    The result is stored in 'self.adj_scal_fac'.

    Returns
    -------
    :rtype: float
    :returns: s
    """
    vol_rn = Rn(self.geom.vol_size)
    proj_rn = Rn(self.geom.proj_size)
    vol_rn_ones = vol_rn.element(1)
    proj_rn_ones = proj_rn.element(1)

    projector = Projector(self.geom)
    proj = projector.forward(vol_rn_ones)
    vol = projector.backward(proj_rn_ones)

    self.adj_scal_fac = proj.inner(proj_rn_ones) / vol_rn_ones.inner(vol)

    projector.clear_astra_memory()
    return self.adj_scal_fac
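# Aside: the estimate can be sanity-checked with a dense stand-in whose
# "backprojector" is a deliberately mis-scaled transpose, so the factor
# to recover is known in advance. A minimal NumPy sketch (hypothetical
# matrices, not the ASTRA projector):

import numpy as np

rng = np.random.RandomState(0)
A = rng.rand(6, 4)            # "forward projector"
B = 0.5 * A.T                 # backprojector, off by a factor of 2

x = np.ones(4)                # 1_I
y = np.ones(6)                # 1_D
s = A.dot(x).dot(y) / x.dot(B.dot(y))   # <A 1, 1> / <1, B 1>
print(s)                      # 2.0, so A* = s B recovers A.T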
def matrix_norm(self, iterations, vol_init=1.0, tv_norm=False,
                return_volume=False, intermediate_results=False):
    """The matrix norm ||K||_2 of 'K', defined here as the largest
    singular value of 'K'. Employs the generic power method to obtain a
    scalar 's' which tends to ||K||_2 as the number of iterations grows.

    The volume 'x' of the final iteration can optionally be returned, so
    that it can be passed as 'vol_init' to resume the iteration.

    Parameters
    ----------
    :type iterations: int
    :param iterations: Number of iterations of the generic power method.
    :type vol_init: float | ndarray (default 1.0)
    :param vol_init: in I, initial image to start with.
    :type intermediate_results: bool
    :param intermediate_results: Return an array of intermediate results
    instead of a scalar.
    :type return_volume: bool
    :param return_volume: Return the volume in order to resume the
    iteration by passing it over as initial volume.

    Returns
    -------
    :rtype: float | numpy.ndarray, numpy.ndarray (optional)
    :returns: s, vol
     s: Scalar of the final iteration or numpy.ndarray containing the
     results of all iterations.
     vol: Volume vector
    """
    geom = self.geom
    vol = self.recon_space.element(vol_init)
    proj = Rn(geom.proj_size).zero()
    projector = Projector(geom)

    tmp = None
    if intermediate_results:
        s = np.zeros(iterations)
    else:
        s = 0

    # Power method loop
    for n in range(iterations):

        # step 4: x_{n+1} <- K^T K x_n
        if tv_norm:
            # K = (A, grad) instead of K = A.
            # Compute -div grad x_n via a sum over a generator expression.
            tmp = -reduce(add, (partial(
                partial(vol.data.reshape(geom.vol_shape), dim,
                        geom.voxel_width[dim]), dim,
                geom.voxel_width[dim]) for dim in range(geom.vol_ndim)))

        # x_n <- A^T (A x_n)
        vol = projector.backward(projector.forward(vol))
        vol *= self.adj_scal_fac

        if tv_norm:
            # x_n <- x_n - div grad x_n
            vol.data[:] += tmp.ravel()

        # step 5: x_n <- x_n / ||x_n||_2
        vol /= vol.norm()

        # step 6: s_n <- ||K x_n||_2
        if intermediate_results:
            # proj <- A x_n
            proj = projector.forward(vol)
            s[n] = proj.norm()
            if tv_norm:
                s[n] = np.sqrt(s[n] ** 2 + reduce(add, (np.linalg.norm(
                    partial(vol.data.reshape(geom.vol_shape), dim,
                            geom.voxel_width[dim])) ** 2
                    for dim in range(geom.vol_ndim))))

    # step 6: s <- ||K x||_2
    if not intermediate_results:
        proj = projector.forward(vol)
        s = proj.norm()
        if tv_norm:
            s = np.sqrt(s ** 2 + reduce(add, (np.linalg.norm(
                partial(vol.data.reshape(geom.vol_shape), dim,
                        geom.voxel_width[dim])) ** 2
                for dim in range(geom.vol_ndim))))

    # Clear ASTRA memory
    projector.clear_astra_memory()

    if not return_volume:
        return s
    return s, vol.data
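# Aside: the loop above is the generic power method applied to K^T K. A
# self-contained NumPy sketch with a dense matrix (not the projector)
# shows the scalar converging to the largest singular value:

import numpy as np

rng = np.random.RandomState(0)
K = rng.rand(30, 20)          # dense stand-in for K
x = np.ones(20)               # vol_init = 1.0

for _ in range(50):
    x = K.T.dot(K.dot(x))     # step 4: x <- K^T K x
    x /= np.linalg.norm(x)    # step 5: x <- x / ||x||_2

s = np.linalg.norm(K.dot(x))      # step 6: s <- ||K x||_2
print(s, np.linalg.norm(K, 2))    # both values approximate sigma_max(K)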
def least_squares(self, iterations=1, L=None, tau=None, sigma=None,
                  theta=None, non_negativity_constraint=False,
                  tv_norm=False, verbose=True):
    """Least-squares problem with optional TV-regularisation and/or
    non-negativity constraint.

    Parameters
    ----------
    :type iterations: int (default 1)
    :param iterations: Number of iterations the optimization should
    run for.
    :type L: float (default None)
    :param L: Matrix norm of the forward projector. If 'None',
    matrix_norm is called with 20 iterations.
    :type tau: float (default 1/L)
    :param tau: Primal step size.
    :type sigma: float (default 1/L)
    :param sigma: Dual step size.
    :type theta: float (default 1)
    :param theta: Over-relaxation parameter.
    :type non_negativity_constraint: bool (default False)
    :param non_negativity_constraint: Add a non-negativity constraint to
    the optimization problem (via an indicator function).
    :type tv_norm: bool | float (default False)
    :param tv_norm: Unless False, the numerical value of the parameter
    lambda for TV-regularisation.
    :type verbose: bool (default True)
    :param verbose: Show intermediate reconstructions and convergence
    measures during the iteration.

    Returns
    -------
    :rtype: odl.Vector, odl.Vector, numpy.ndarray, numpy.ndarray
    :returns: u, p, cpd, l2_du
     u: vector of reconstructed volume
     p: vector of dual projection variable
     cpd: conditional primal-dual gap (convergence measure)
     l2_du: l2-norm of the volume update (convergence measure)
    """
    # step 1:
    if L is None:
        L = self.matrix_norm(20)
    if tau is None:
        tau = 1 / L
    if sigma is None:
        sigma = 1 / L
    if theta is None:
        theta = 1

    geom = self.geom
    g = self.proj  # domain: D

    # l2-norm of (volume update / tau)
    l2_du = np.zeros(iterations)
    # conditional primal-dual gap
    cpd = np.zeros(iterations)

    # step 2: initialize u and p with zeros
    u = self.recon_space.zero()  # domain: I
    p = g.space.zero()  # domain: D
    # q: spatial vector = list of ndarrays in I (not Rn vectors)
    if tv_norm:
        ndim = geom.vol_ndim
        # domain of q: V = [I, I, ...]
        q = [np.zeros(geom.vol_shape, dtype=u.data.dtype)
             for _ in range(ndim)]

    # step 3: ub <- u
    ub = u.copy()  # domain: I

    # initialize projector
    A = Projector(geom)

    # visual output instance
    disp = DisplayIntermediates(verbose=verbose,
                                vol=u.data.reshape(geom.vol_shape),
                                cpd=cpd, l2_du=l2_du)

    # step 4: repeat
    for n in range(iterations):

        # step 5: p_{n+1} <- (p_n + sigma(A ub_n - g)) / (1 + sigma)
        # Note: p is initialized with zeros, so the first iteration
        # needs no special case.
        p += sigma * (A.forward(ub) - g)
        p /= 1 + sigma

        # TV step 6: q_{n+1} <- lambda(q_n + sigma grad ub_n) /
        # max(lambda 1_I, |q_n + sigma grad ub_n|)
        if tv_norm:
            for dim in range(ndim):
                # q_n <- q_n + sigma * grad ub_n
                q[dim] += sigma * partial(
                    ub.data.reshape(self.geom.vol_shape), dim,
                    geom.voxel_width[dim])

            # |q_n|: isotropic TV
            # reuse div_q to save memory; q = [qi] with qi ndarrays
            div_q = np.sqrt(reduce(add, (qi ** 2 for qi in q)))

            # max(lambda 1_I, |q_n + sigma grad ub_n|)
            div_q[div_q < tv_norm] = tv_norm

            # q_n <- lambda * q_n / |q_n|
            for dim in range(ndim):
                q[dim] /= div_q
                q[dim] *= tv_norm

            # div q_{n+1}
            div_q = reduce(add, (partial(qi, dim, geom.voxel_width[dim])
                                 for (dim, qi) in enumerate(q)))
            div_q *= tau

        # step 6: u_{n+1} <- u_n - tau * A^T p_{n+1}
        # TV step 7: u_{n+1} <- u_n - tau * A^T p_{n+1} + div q_{n+1}

        # ub_tmp <- tau * A^T p
        ub_tmp = A.backward(p)
        ub_tmp *= tau
        ub_tmp *= self.adj_scal_fac

        # l2-norm of the volume update ub_tmp = tau * A^T p
        l2_du[n:] = ub_tmp.norm()
        if tv_norm:
            l2_du[n:] += np.linalg.norm(div_q.ravel())

        # store -u_n temporarily in ub for the over-relaxation step
        ub = -u.copy()

        # u <- u - tau * A^T p
        u -= ub_tmp

        # TV: u <- u + tau * div q
        if tv_norm:
            print('{0}: u - A^T p: min = {1}, max = {2}'.format(
                n, u.data.min(), u.data.max()))
            print('{0}: div q: min = {1}, max = {2}'.format(
                n, div_q.min(), div_q.max()))
            u.data[:] += div_q.ravel()

        # positivity constraint
        if non_negativity_constraint:
            u.data[u.data < 0] = 0

        # conditional primal-dual gap for current u and p:
        # 1/2 ||A u - g||_2^2 + 1/2 ||p||_2^2 + <p, g>_D
        cpd[n:] = (0.5 * p.space.norm(A.forward(u) - g) ** 2 +
                   0.5 * p.norm() ** 2 + p.inner(g))
        if tv_norm:
            cpd[n:] += tv_norm * np.linalg.norm(reduce(
                add, (partial(u.data.reshape(geom.vol_shape), dim,
                              geom.voxel_width[dim])
                      for dim in range(geom.vol_ndim))).ravel(), ord=1)

        # step 7 / TV step 8: ub_{n+1} <- u_{n+1} + theta(u_{n+1} - u_n)
        # ub <- u_{n+1} - u_n, remember ub = -u_n
        ub += u
        # ub <- theta * (u_{n+1} - u_n)
        ub *= theta
        # ub <- u_{n+1} + theta * (u_{n+1} - u_n)
        ub += u

        # visual output
        disp.update()

    A.clear_astra_memory()

    # Should avoid window freezing
    disp.show()

    return u, p, cpd, l2_du
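# Aside: stripped of the TV term, the constraint, and the ASTRA projector,
# the iteration above is the plain Chambolle-Pock algorithm for
# min_u 1/2 ||A u - g||_2^2. A self-contained NumPy sketch with a dense
# matrix (hypothetical sizes, same step-size choice tau = sigma = 1/L):

import numpy as np

rng = np.random.RandomState(0)
A = rng.rand(40, 25)           # dense stand-in for the projector
g = A.dot(rng.rand(25))        # consistent projection data

L = np.linalg.norm(A, 2)       # matrix norm ||A||_2
tau = sigma = 1.0 / L
theta = 1.0

u = np.zeros(25)               # primal variable (volume)
p = np.zeros(40)               # dual variable (projections)
ub = u.copy()                  # over-relaxed iterate

for n in range(500):
    # dual step: resolvent of F*(p) = 1/2 ||p||^2 + <p, g>
    p = (p + sigma * (A.dot(ub) - g)) / (1.0 + sigma)
    # primal step: G = 0, so a plain gradient-type update
    u_old = u
    u = u - tau * A.T.dot(p)
    # over-relaxation: ub <- u_{n+1} + theta (u_{n+1} - u_n)
    ub = u + theta * (u - u_old)

print(np.linalg.norm(A.dot(u) - g))   # residual decreases toward 0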
class ChanGolubMulletTestCase(unittest.TestCase):

    def setUp(self):
        # Timing
        self.start_time = time.time()

        # DATA
        self.d = ctdata.sets[14]
        self.d.load()

        # Parameters
        det_row_count, num_proj, det_col_count = self.d.shape
        num_voxel = (det_col_count, det_col_count, det_row_count)
        voxel_size = 2 * self.d.roi_cubic_width_mm / num_voxel[0]
        source_origin = self.d.distance_source_origin_mm / voxel_size
        origin_detector = self.d.distance_origin_detector_mm / voxel_size
        angles = self.d.angles_rad
        det_col_spacing = (self.d.detector_width_mm / det_col_count /
                           voxel_size)
        det_row_spacing = det_col_spacing

        # PROJECTOR
        self.projector = Projector(
            num_voxel=num_voxel, det_row_count=det_row_count,
            det_col_count=det_col_count, source_origin=source_origin,
            origin_detector=origin_detector,
            det_row_spacing=det_row_spacing,
            det_col_spacing=det_col_spacing, angles=angles)

        # ALGORITHM
        self.cgm = ChanGolubMullet(projections=self.d.projections,
                                   projector=self.projector)
        self.u_shape = num_voxel

    def tearDown(self):
        # Timing
        t = time.time() - self.start_time
        print "%s: %.3f" % (self.id(), t)
        # Clear ASTRA memory
        self.projector.clear()

    def test_initialization(self):
        self.assertTrue(issubclass(type(self.cgm), object))

    def test_g(self):
        g = self.cgm.g
        u_shape = self.cgm.K.volume_shape
        self.assertEqual(g.__class__.__name__, 'ndarray')
        self.assertEqual(np.shape(g), tuple(u_shape))

    def test_f(self):
        f = self.cgm.f
        fl = list(f)
        self.assertEqual(len(fl), len(self.d.shape))

    def test_func_du(self):
        func_du = self.cgm.func_du(np.zeros(self.u_shape))
        self.assertEqual(func_du.shape, self.u_shape)
        self.assertFalse(func_du.any())
class ChambollePockTestCase(unittest.TestCase):
    """Test case for the primal-dual Chambolle-Pock algorithm."""

    def setUp(self):
        # Timing
        self.start_time = time.time()

        # DATA
        self.d = ctdata.sets[14]
        self.d.load()

        # Parameters
        det_row_count, num_proj, det_col_count = self.d.shape
        num_voxel = (det_col_count, det_col_count, det_row_count)
        voxel_size = 2 * self.d.roi_cubic_width_mm / num_voxel[0]
        source_origin = self.d.distance_source_origin_mm / voxel_size
        origin_detector = self.d.distance_origin_detector_mm / voxel_size
        angles = self.d.angles_rad
        det_col_spacing = (self.d.detector_width_mm / det_col_count /
                           voxel_size)
        det_row_spacing = det_col_spacing

        # PROJECTOR
        self.projector = Projector(
            num_voxel=num_voxel, det_row_count=det_row_count,
            det_col_count=det_col_count, source_origin=source_origin,
            origin_detector=origin_detector,
            det_row_spacing=det_row_spacing,
            det_col_spacing=det_col_spacing, angles=angles)

        # ALGORITHM
        self.pc = ChambollePock(projections=self.d.projections,
                                projector=self.projector)
        self.u_shape = num_voxel

    def tearDown(self):
        # Timing
        t = time.time() - self.start_time
        print "%s: %.3f" % (self.id(), t)
        # Clear ASTRA memory
        self.projector.clear()

    def test_projection_data(self):
        d = self.d.projections
        print 'min:', d.min(), 'max:', d.max(), 'mean:', np.mean(d)
        self.assertTrue(self.d.projections.min() > 0)
        self.assertTrue(self.d.projections.max() < np.inf)

    def test_initialization(self):
        self.assertTrue(self.d.projections.size > 0)
        self.assertTrue(self.pc.K.volume_data is not None)
        self.pc.K.backward()
        self.assertEqual(str(self.d.dtype), 'float32')
        self.assertEqual(str(self.pc.K.volume_data.dtype), 'float32')
        self.assertTrue(np.prod(self.pc.K.volume_shape) > 0)
        self.assertTrue(issubclass(type(self.pc), object))
        self.assertEqual(self.pc.K, self.projector)

    def test_matrix_norm(self):
        # Start the power method
        num_iter = 2
        mat_norm_list, vol = self.pc.matrix_norm(
            num_iter, vol_init=1, intermediate_results=True,
            return_volume=True)
        self.assertEqual(np.size(mat_norm_list), num_iter)

        # Continue the iteration, re-using the returned volume
        mat_norm = self.pc.matrix_norm(3, vol_init=vol)
        self.assertEqual(np.size(mat_norm), 1)
        self.assertTrue(mat_norm > 0)
        self.assertNotEqual(mat_norm_list[-1], mat_norm)

        mat_norm = self.pc.matrix_norm(20, vol_init=1,
                                       intermediate_results=True)
        self.pc.K.clear()
        print mat_norm

    def test_least_squares(self):
        num_iter = 10
        u, p, cpd, l2_atp = self.pc.least_squares(
            iterations=num_iter, L=363.569641113, verbose=True,
            non_negativity_constraint=False)
        self.assertEqual(u.__class__.__name__, 'ndarray')
        self.assertEqual(cpd.size, num_iter)
        self.assertEqual(l2_atp.size, num_iter)
        self.pc.K.clear()

    def test_least_squares_with_non_negativity_constraint(self):
        num_iter = 4
        u, p, cpd, l2_atp = self.pc.least_squares(
            iterations=num_iter, L=363.569641113, verbose=True,
            non_negativity_constraint=True)
        self.assertEqual(u.__class__.__name__, 'ndarray')
        self.assertEqual(cpd.size, num_iter)
        self.assertEqual(l2_atp.size, num_iter)
        self.pc.K.clear()
def __init__(self, projections=np.array([]), projector=Projector()):
    self.y = projections.astype('float32', copy=False)
    self.K = projector
# Parameters
num_iter = 50
det_row_count, num_proj, det_col_count = d.shape
num_voxel = (det_col_count, det_col_count, det_row_count)
voxel_size = 2 * d.roi_cubic_width_mm / num_voxel[0]
source_origin = d.distance_source_origin_mm / voxel_size
origin_detector = d.distance_origin_detector_mm / voxel_size
angles = d.angles_rad
det_col_spacing = d.detector_width_mm / det_col_count / voxel_size
det_row_spacing = det_col_spacing

# Projector instance
p = Projector(num_voxel=num_voxel, det_row_count=det_row_count,
              det_col_count=det_col_count, source_origin=source_origin,
              origin_detector=origin_detector,
              det_row_spacing=det_row_spacing,
              det_col_spacing=det_col_spacing, angles=angles)

# Row sums of the system matrix: A 1_I
p.set_volume_data(1)
p.forward()
# Note: this references the projector's internal buffer and inverts the
# non-zero entries in place
row_sum = p.projection_data
row_sum[row_sum > 0] = 1.0 / row_sum[row_sum > 0]

# Column sums of the system matrix: A^T 1_D
p.set_projection_data(1)
p.backward()
col_sum = p.volume_data
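# Aside: the inverted row and column sums are exactly the diagonal
# preconditioners of SIRT, u <- u + C A^T R (g - A u), with
# R = diag(1 / row sums) and C = diag(1 / column sums). A minimal dense
# NumPy sketch of that update (hypothetical matrix, not the Projector API):

import numpy as np

rng = np.random.RandomState(0)
A = rng.rand(50, 30)               # dense stand-in for the system matrix
g = A.dot(np.ones(30))             # data from a known volume of ones

R = 1.0 / A.dot(np.ones(30))       # inverse row sums (all positive here)
C = 1.0 / A.T.dot(np.ones(50))     # inverse column sums

u = np.zeros(30)
for _ in range(50):
    # SIRT update: u <- u + C * A^T (R * (g - A u))
    u += C * A.T.dot(R * (g - A.dot(u)))

print(np.abs(u - 1).max())         # error shrinks toward 0 with iterations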