def test_ndim(self):
    vshape = (88, 77)
    vsize = np.prod(vshape)
    cols = 99
    angles = np.linspace(0, 2 * np.pi, 111, endpoint=False)
    psize = cols * np.size(angles)
    geom = Geometry(geometry_type='parallel', scale_factor=1,
                    volume_shape=vshape, det_col_count=cols,
                    det_row_count=1, angles=angles)
    print('Vol size:  ', vsize)
    print('Proj size: ', psize)
    print('Voxel size:', geom.voxel_size)

    vol_rn = Rn(vsize)
    proj_rn = Rn(psize)

    projector = Projector(geom, vol_rn, proj_rn)

    proj = projector.forward(vol_rn.element(1))
    p = proj.data.reshape(geom.proj_shape)
    print('Proj at 0 degrees: max =', p[0, :].max())

    vol = projector.backward(proj_rn.element(1))
    print(vol.data.max())

    projector.clear_astra_memory()
def adjoint_scaling_factor(self):
    """Compute the scaling factor of the adjoint projector.

    Consider A x = y. The adjoint A^* of A is defined through

        <A x, y>_D = <x, A^* y>_I .

    Assume A^* = s B with B being the ASTRA backprojector. Then

        s = <A x, y>_D / <x, B y>_I ,

    evaluated here with x = 1_I and y = 1_D.

    Returns
    -------
    :rtype: float
    :returns: s
    """
    vol_rn = Rn(self.geom.vol_size)
    proj_rn = Rn(self.geom.proj_size)
    vol_rn_ones = vol_rn.element(1)
    proj_rn_ones = proj_rn.element(1)

    projector = ODLProjector(self.geom, vol_rn, proj_rn)
    proj = projector.forward(vol_rn_ones)    # A x
    vol = projector.backward(proj_rn_ones)   # B y

    # s = <A x, y>_D / <x, B y>_I
    self.adj_scal_fac = proj.inner(proj_rn_ones) / vol_rn_ones.inner(vol)

    projector.clear_astra_memory()
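# Usage sketch: Geometry and ODLChambollePock are the project's own
# classes, constructed exactly as in the tests below. The resulting
# factor s makes s * B approximate the true adjoint A^* of the ASTRA
# forward projector.
def example_adjoint_scaling_factor():
    geom = Geometry(2)
    proj_vec = Rn(geom.proj_size).element(1)
    cp = ODLChambollePock(geom, proj_vec)
    cp.adjoint_scaling_factor()  # stores the factor in cp.adj_scal_fac
    print('adjoint scaling factor s =', cp.adj_scal_fac)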
def test_adjoint_scaling_factor(self):
    """Test that the back-projector B relates to the adjoint A^* of the
    forward projector A, defined through

        <A x, y>_D = <x, A^* y>_I ,

    via A^* = s B, where B is the implemented back-projector. The
    scaling factor then reads

        s = <A x, y>_D / <x, B y>_I ,

    or, using y = A x,

        s = <A x, A x>_D / <x, B A x>_I .
    """
    geom = Geometry(2)

    # x = ones() and y = A x
    vol_rn = Rn(geom.vol_size)
    vol_rn_ones = vol_rn.element(1)
    proj_rn = Rn(geom.proj_size)
    projector = ODLProjector(geom, vol_rn, proj_rn)
    proj = projector.forward(vol_rn_ones)
    vol = projector.backward(proj)
    s0 = proj.norm() ** 2 / vol_rn._inner(vol, vol_rn_ones)

    # x = ones(), y = ones()
    vol_rn = Rn(geom.vol_size)
    vol_rn_ones = vol_rn.element(1)
    proj_rn = Rn(geom.proj_size)
    proj_rn_ones = proj_rn.element(1)
    projector = ODLProjector(geom, vol_rn, proj_rn)
    proj = projector.forward(vol_rn_ones)
    vol = projector.backward(proj_rn_ones)
    s1 = proj.inner(proj_rn_ones) / vol_rn_ones.inner(vol)

    # implemented function
    proj_vec = Rn(geom.proj_size).element(1)
    cp = ODLChambollePock(geom, proj_vec)
    cp.adjoint_scaling_factor()
    s2 = cp.adj_scal_fac

    self.assertEqual(s1, s2)
    print('Scaling factors:', s0, s1, s2)

    projector.clear_astra_memory()
def test_adjoint_scaling(self):
    vol_rn = Rn(self.geom.vol_size)
    vol_rn_ones = vol_rn.element(1)
    proj_rn = Rn(self.geom.proj_size)
    proj_rn_ones = proj_rn.element(1)

    projector = Projector(self.geom, vol_rn, proj_rn)
    proj1 = projector.forward(vol_rn_ones)
    vol1 = projector.backward(proj_rn_ones)

    n1 = proj1.inner(proj_rn_ones)
    n2 = vol_rn_ones.inner(vol1)
    print('<A x, y> = <x, Ad y> : {0} = {1}'.format(n1, n2))
    print('<A x, y> / <x, Ad y> - 1 = {0}'.format(n1 / n2 - 1))

    proj = projector.forward(vol_rn_ones)
    vol = projector.backward(proj)
    alpha = proj.norm() ** 2 / vol_rn._inner(vol, vol_rn_ones)
    print(alpha)

    projector.clear_astra_memory()
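# The identity tested above, <A x, y>_D = <x, A^T y>_I, can be checked
# in isolation with a dense matrix and plain NumPy. This self-contained
# sketch is purely illustrative and independent of ASTRA/ODL.
def example_dense_adjoint_check():
    np.random.seed(0)
    A = np.random.randn(30, 20)     # stand-in for the forward projector
    x = np.ones(20)                 # volume-like vector
    y = np.ones(30)                 # projection-like vector
    lhs = np.dot(A.dot(x), y)       # <A x, y>_D
    rhs = np.dot(x, A.T.dot(y))     # <x, A^T y>_I
    # For an exact adjoint the ratio is 1; an unmatched backprojector
    # would instead yield the scaling factor s.
    print('<A x, y> / <x, A^T y> =', lhs / rhs)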
def test_adjoint_scaling_factor(self):
    # x
    vol_rn = Rn(self.geom.vol_size)
    vol_rn_ones = vol_rn.element(1)
    # y
    proj_rn = Rn(self.geom.proj_size)
    proj_rn_ones = proj_rn.element(1)
    # A
    projector = ODLProjector(self.geom, vol_rn, proj_rn)
    # A x
    proj = projector.forward(vol_rn_ones)
    # A^* y
    vol = projector.backward(proj_rn_ones)

    # scaling factor for x[:] = 1 and y[:] = 1
    s0 = proj.inner(proj_rn_ones) / vol_rn_ones.inner(vol)

    # A^* A x
    volp = projector.backward(proj)
    # scaling factor for x[:] = 1 and y = A x
    s1 = proj.norm() ** 2 / vol_rn._inner(volp, vol_rn_ones)

    cp = self.cp_class(self.geom, self.proj_vec)
    self.assertEqual(cp.adj_scal_fac, 1)
    cp.adjoint_scaling_factor()
    s2 = cp.adj_scal_fac
    self.assertFalse(s2 == 1)
    self.assertEqual(s0, s2)

    print('Test adjoint')
    print('  Scaling factors for backprojector:', s0, s1, s2)

    projector.clear_astra_memory()
def test_odlprojector_instance(self):
    # Create cubic unit volume
    vol_rn = Rn(self.geom.vol_size)
    vol = np.ones(self.geom.vol_shape)
    vol_rn_vec = vol_rn.element(vol.ravel())

    # Create projections
    proj_rn = Rn(self.geom.proj_size)
    proj = np.ones(self.geom.proj_size)
    proj_rn_vec = proj_rn.element(proj.ravel())

    vol_norm_0 = vol_rn_vec.norm()
    self.assertEqual(vol_norm_0 ** 2, np.sqrt(self.geom.vol_size) ** 2)
    proj_norm_0 = proj_rn_vec.norm()
    self.assertEqual(proj_norm_0 ** 2, np.sqrt(self.geom.proj_size) ** 2)

    # ODLProjector instance
    projector = Projector(self.geom, vol_rn, proj_rn)

    proj_rn_vec = projector.forward(vol_rn_vec)
    proj_norm_1 = proj_rn_vec.norm()
    self.assertNotEqual(proj_norm_0, proj_norm_1)

    vol_rn_vec = projector.backward(proj_rn_vec)
    vol_norm_1 = vol_rn_vec.norm()
    self.assertNotEqual(vol_norm_0, vol_norm_1)

    proj_rn_vec = projector.forward(vol_rn_vec)
    proj_norm_2 = proj_rn_vec.norm()
    self.assertNotEqual(proj_norm_1, proj_norm_2)

    vol_rn_vec = projector.backward(proj_rn_vec)
    vol_norm_2 = vol_rn_vec.norm()
    self.assertNotEqual(vol_norm_2, vol_norm_1)

    projector.clear_astra_memory()

    print('vol norms: ', vol_norm_0, vol_norm_1, vol_norm_2)
    print('proj norms:', proj_norm_0, proj_norm_1, proj_norm_2)
def matrix_norm(self, iterations, vol_init=1.0, tv_norm=False,
                return_volume=False, intermediate_results=False):
    """The matrix norm ||K||_2 of 'K', defined here as the largest
    singular value of 'K'. Employs the generic power method to obtain
    a scalar 's' which tends to ||K||_2 as the number of iterations
    increases.

    Parameters
    ----------
    :type iterations: int
    :param iterations: Number of iterations of the generic power method.
    :type vol_init: float | ndarray (default 1.0)
    :param vol_init: in I, initial image to start with.
    :type intermediate_results: bool
    :param intermediate_results: Return ndarray of intermediate results
    instead of a scalar.
    :type return_volume: bool
    :param return_volume: Return the volume as well, so that it can be
    passed as the initial volume to resume the iteration.

    Returns
    -------
    :rtype: float | numpy.ndarray, numpy.ndarray (optional)
    :returns: s, vol
     s: Scalar of the final iteration or numpy.ndarray containing the
     results of all iterations.
     vol: Volume vector
    """
    geom = self.geom
    vol = self.recon_space.element(vol_init)
    proj = Rn(geom.proj_size).zero()
    projector = ODLProjector(geom, vol.space, proj.space)

    tmp = None
    if intermediate_results:
        s = np.zeros(iterations)
    else:
        s = 0

    # Power method loop
    for n in range(iterations):

        # step 4: x_{n+1} <- K^T K x_n
        if tv_norm:
            # K = (A, grad) instead of K = A
            # Compute: - div grad x_n
            # use sum over generator expression
            tmp = -reduce(add, (partial(partial(
                vol.data.reshape(geom.vol_shape),
                dim, geom.voxel_size[dim]),
                dim, geom.voxel_size[dim])
                for dim in range(geom.vol_ndim)))

        # x_n <- A^T (A x_n)
        vol = projector.backward(projector.forward(vol))
        vol *= self.adj_scal_fac

        if tv_norm:
            # x_n <- x_n - div grad x_n
            vol.data[:] += tmp.ravel()

        # step 5: x_n <- x_n / ||x_n||_2
        vol /= vol.norm()

        # step 6: s_n <- ||K x||_2
        if intermediate_results:
            # proj <- A x_n
            proj = projector.forward(vol)
            s[n] = proj.norm()
            if tv_norm:
                s[n] = np.sqrt(s[n] ** 2 + reduce(add, (
                    np.linalg.norm(partial(
                        vol.data.reshape(geom.vol_shape),
                        dim, geom.voxel_size[dim])) ** 2
                    for dim in range(geom.vol_ndim))))

    # step 6: ||K x||_2
    if not intermediate_results:
        proj = projector.forward(vol)
        s = proj.norm()
        if tv_norm:
            s = np.sqrt(s ** 2 + reduce(add, (
                np.linalg.norm(partial(
                    vol.data.reshape(geom.vol_shape),
                    dim, geom.voxel_size[dim])) ** 2
                for dim in range(geom.vol_ndim))))

    # Clear ASTRA memory
    projector.clear_astra_memory()

    if not return_volume:
        return s
    else:
        return s, vol.data
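# Usage sketch (illustrative values only): estimate the operator norm
# with the power method above and derive the Chambolle-Pock step sizes
# from it, as least_squares() below does internally when L is None.
# 'cp' is assumed to be an instance of the class defining matrix_norm.
def example_matrix_norm(cp):
    s, vol = cp.matrix_norm(iterations=20, return_volume=True)
    # resume for 10 more iterations, starting from the returned volume
    s = cp.matrix_norm(iterations=10, vol_init=vol)
    tau = sigma = 1.0 / s  # standard choice: tau * sigma * L^2 <= 1
    return tau, sigma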
def least_squares(self, iterations=1, L=None, tau=None, sigma=None,
                  theta=None, non_negativiy_constraint=False,
                  tv_norm=False, verbose=True):
    """Least-squares problem with optional TV-regularisation and/or
    non-negativity constraint.

    Parameters
    ----------
    :type iterations: int (default 1)
    :param iterations: Number of iterations the optimization should
    run for.
    :type L: float (default None)
    :param L: Matrix norm of the forward projector. If 'None',
    matrix_norm is called with 20 iterations.
    :type tau: float (default 1/L)
    :param tau: Primal step size.
    :type sigma: float (default 1/L)
    :param sigma: Dual step size.
    :type theta: float (default 1)
    :param theta: Relaxation parameter.
    :type non_negativiy_constraint: bool (default False)
    :param non_negativiy_constraint: Add non-negativity constraint to
    the optimization problem (via indicator function).
    :type tv_norm: bool | float (default False)
    :param tv_norm: Unless False, coincides with the numerical value of
    the parameter lambda for TV-regularisation.
    :type verbose: bool (default True)
    :param verbose: Show intermediate reconstructions and convergence
    measures during the iteration.

    Returns
    -------
    :rtype: odl.Vector, odl.Vector, numpy.ndarray, numpy.ndarray
    :returns: u, p, cpd, l2_du
     u: vector of reconstructed volume
     p: vector of dual projection variable
     cpd: conditional primal-dual gap (convergence measure)
     l2_du: l2-norm of constraint-induced convergence measure
    """
    # step 1: choose step sizes
    if L is None:
        L = self.matrix_norm(20)
    if tau is None:
        tau = 1 / L
    if sigma is None:
        sigma = 1 / L
    if theta is None:
        theta = 1

    geom = self.geom
    g = self.proj  # domain: D

    # l2-norm of (volume update / tau)
    l2_du = np.zeros(iterations)
    # conditional primal-dual gap
    cpd = np.zeros(iterations)

    # step 2: initialize u and p with zeros
    u = self.recon_space.zero()  # domain: I
    p = g.space.zero()  # domain: D
    # q: spatial vector = list of ndarrays in I (not Rn vectors)
    if tv_norm:
        ndim = geom.vol_ndim
        # domain of q: V = [I, I, ...]
        q = [np.zeros(geom.vol_shape, dtype=u.data.dtype)
             for _ in range(ndim)]

    # step 3: ub <- u
    ub = u.copy()  # domain: I

    # initialize projector
    A = ODLProjector(geom, u.space, p.space)

    # visual output instance
    disp = DisplayIntermediates(verbose=verbose,
                                vol=u.data.reshape(geom.vol_shape),
                                cpd=cpd, l2_du=l2_du)

    # step 4: repeat
    for n in range(iterations):

        # step 5: p_{n+1} <- (p_n + sigma(A ub_n - g)) / (1 + sigma)
        # p <- p + sigma(A ub - g)
        p += sigma * (A.forward(ub) - g)
        # p <- p / (1 + sigma)
        p /= 1 + sigma

        # TV step 6: q_{n+1} <- lambda(q_n + sigma grad ub_n) /
        # max(lambda 1_I, |q_n + sigma grad ub_n|)
        if tv_norm:
            for dim in range(ndim):
                # q_n <- q_n + sigma * grad ub_n
                q[dim] += sigma * partial(
                    ub.data.reshape(self.geom.vol_shape), dim,
                    geom.voxel_size[dim])

            # |q_n|: isotropic TV
            # use div_q to save memory, q = [qi] where qi are ndarrays
            div_q = np.sqrt(reduce(add, (qi ** 2 for qi in q)))

            # max(lambda 1_I, |q_n + sigma grad ub_n|)
            div_q[div_q < tv_norm] = tv_norm

            # q_n <- lambda * q_n / |q_n|
            for dim in range(ndim):
                q[dim] /= div_q
                q[dim] *= tv_norm

            # div q_{n+1}
            div_q = reduce(add, (partial(qi, dim, geom.voxel_size[dim])
                                 for (dim, qi) in enumerate(q)))
            div_q *= tau

        # step 6: u_{n+1} <- u_{n} - tau * A^T p_{n+1}
        # TV step 7: u_{n+1} <- u_{n} - tau * A^T p_{n+1} + div q_{n+1}

        # ub_tmp <- A^T p
        ub_tmp = A.backward(p)
        ub_tmp *= tau
        ub_tmp *= self.adj_scal_fac

        # l2-norm per voxel of ub_tmp = A^T p
        l2_du[n:] = ub_tmp.norm()  # / u.data.size
        if tv_norm:
            l2_du[n:] += np.linalg.norm(div_q.ravel())  # / u.data.size

        # store current u_n temporarily in ub_n
        ub = -u.copy()

        # u <- u - tau ub_tmp
        u -= ub_tmp

        # TV: u <- u + tau div q
        if tv_norm:
            print('{0}: u - A^T p: min = {1}, max = {2}'.format(
                n, u.data.min(), u.data.max()))
            print('{0}: div q: min = {1}, max = {2}'.format(
                n, div_q.min(), div_q.max()))
            u.data[:] += div_q.ravel()

        # Positivity constraint
        if non_negativiy_constraint:
            u.data[u.data < 0] = 0

        # conditional primal-dual gap for current u and p:
        # 1/2 ||A u - g||_2^2 + 1/2 ||p||_2^2 + <p, g>_D
        cpd[n:] = (0.5 * p.space.norm(A.forward(u) - g) ** 2 +
                   0.5 * p.norm() ** 2 +
                   p.inner(g))  # / p.data.size
        if tv_norm:
            cpd[n:] += tv_norm * np.linalg.norm(reduce(
                add, (partial(u.data.reshape(geom.vol_shape), dim,
                              geom.voxel_size[dim])
                      for dim in range(geom.vol_ndim))
                ).ravel(), ord=1)  # / u.data.size

        # step 7 / TV step 8: ub_{n+1} <- u_{n+1} + theta(u_{n+1} - u_n)
        # ub <- ub + u_{n+1}, remember ub = -u_n
        ub += u
        # ub <- theta * ub
        ub *= theta
        # ub <- ub + u_{n+1}
        ub += u

        # visual output
        disp.update()

    A.clear_astra_memory()

    # Should avoid window freezing
    disp.show()

    return u, p, cpd, l2_du
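# Usage sketch, assuming an ODLChambollePock instance 'cp' constructed
# as in the tests above; the parameter values are illustrative only.
def example_least_squares(cp):
    # plain least squares, step sizes derived from the matrix norm
    u, p, cpd, l2_du = cp.least_squares(iterations=50, verbose=False)
    # TV-regularised variant with lambda = 0.1 and positivity constraint
    u, p, cpd, l2_du = cp.least_squares(
        iterations=50, tv_norm=0.1, non_negativiy_constraint=True,
        verbose=False)
    return u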