def test_IndicatorBox(self):
    ig = ImageGeometry(10, 10)
    im = ig.allocate(-1)

    # lower bound 0 is violated everywhere (values are -1): indicator is +inf
    ib = IndicatorBox(lower=0)
    a = ib(im)
    numpy.testing.assert_equal(a, numpy.inf)

    # all values lie above the lower bound -2: indicator is 0
    ib = IndicatorBox(lower=-2)
    a = ib(im)
    numpy.testing.assert_array_equal(0, a)

    # upper bound -2 is violated (values are -1): indicator is +inf
    ib = IndicatorBox(lower=-5, upper=-2)
    a = ib(im)
    numpy.testing.assert_equal(a, numpy.inf)
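
# Sketch (not part of the original test): the proximal map of IndicatorBox is
# the projection onto the box, i.e. a clip to [lower, upper] that is
# independent of the step size tau. Assumes the standard CIL Function API
# (proximal(x, tau)).
def test_IndicatorBox_proximal_sketch(self):
    ig = ImageGeometry(10, 10)
    x = ig.allocate(-1)
    # values below the lower bound are clipped up to it
    proj = IndicatorBox(lower=0, upper=1).proximal(x, tau=1.0)
    numpy.testing.assert_array_equal(proj.as_array(), 0)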

# Create operators
op1 = Gradient(ig)
op2 = Aop

# Create BlockOperator
operator = BlockOperator(op1, op2, shape=(2, 1))

# Compute operator norm
normK = operator.norm()

# Create functions
if noise == 'poisson':
    alpha = 20
    f2 = KullbackLeibler(noisy_data)
    g = IndicatorBox(lower=0)
    sigma = 1
    tau = 1 / (sigma * normK**2)
elif noise == 'gaussian':
    alpha = 200
    f2 = 0.5 * L2NormSquared(b=noisy_data)
    g = ZeroFunction()
    sigma = 10
    tau = 1 / (sigma * normK**2)

f1 = alpha * L2NormSquared()
f = BlockFunction(f1, f2)

# Setup and run the PDHG algorithm
pdhg = PDHG(f=f, g=g, operator=operator, tau=tau, sigma=sigma)
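
# A minimal sketch of actually running the algorithm configured above; the
# iteration counts are illustrative assumptions, not values from this script.
# The run interface matches the FISTA usage elsewhere in this file.
pdhg.max_iteration = 2000
pdhg.update_objective_interval = 200
pdhg.run(2000, verbose=True)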

back_proj = Aop.adjoint(noisy_data)

# Define Least Squares
f = FunctionOperatorComposition(L2NormSquared(b=noisy_data), Aop)

# Allocate solution
x_init = ig.allocate()

# Run FISTA for least squares
fista = FISTA(x_init=x_init, f=f, g=ZeroFunction())
fista.max_iteration = 10
fista.update_objective_interval = 2
fista.run(100, verbose=True)

# Run FISTA for least squares with lower/upper bound
fista0 = FISTA(x_init=x_init, f=f, g=IndicatorBox(lower=0, upper=1))
fista0.max_iteration = 10
fista0.update_objective_interval = 2
fista0.run(100, verbose=True)

# Run FISTA for regularised least squares, with squared norm of the Gradient
alpha = 20
Grad = Gradient(ig)
block_op = BlockOperator(Aop, alpha * Grad, shape=(2, 1))
block_data = BlockDataContainer(noisy_data, Grad.range_geometry().allocate())
f1 = FunctionOperatorComposition(L2NormSquared(b=block_data), block_op)

fista1 = FISTA(x_init=x_init, f=f1, g=IndicatorBox(lower=0, upper=1))
fista1.max_iteration = 2000
fista1.update_objective_interval = 200
fista1.run(2000, verbose=True)
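
# Sketch (assumes the standard CIL Algorithm interface used above): compare
# the three runs via their final objective values and solution ranges.
for name, algo in [('LS', fista), ('LS + box', fista0),
                   ('reg. LS + box', fista1)]:
    sol = algo.get_output()
    print(name, 'objective:', algo.objective[-1],
          'range:', (sol.as_array().min(), sol.as_array().max()))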

op22 = SymmetrizedGradient(op11.domain_geometry())
op21 = ZeroOperator(ig, op22.range_geometry())
op31 = Aop
op32 = ZeroOperator(op22.domain_geometry(), ag)

operator = BlockOperator(op11, -1 * op12,
                         op21, op22,
                         op31, op32,
                         shape=(3, 2))

normK = operator.norm()

# Create functions
if noise == 'poisson':
    alpha = 2
    beta = 3
    f3 = KullbackLeibler(noisy_data)
    g = BlockFunction(IndicatorBox(lower=0), ZeroFunction())

    # Primal & dual stepsizes
    sigma = 1
    tau = 1 / (sigma * normK**2)
elif noise == 'gaussian':
    alpha = 20
    beta = 50
    f3 = 0.5 * L2NormSquared(b=noisy_data)
    g = BlockFunction(ZeroFunction(), ZeroFunction())

    # Primal & dual stepsizes
    sigma = 10
    tau = 1 / (sigma * normK**2)
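
# Assumed continuation (not shown in this excerpt): the usual CIL TGV
# construction pairs two MixedL21Norm terms, weighted by alpha and beta, with
# the 3x2 block operator above. This completion is a sketch, not the
# original code.
f1 = alpha * MixedL21Norm()
f2 = beta * MixedL21Norm()
f = BlockFunction(f1, f2, f3)

pdhg = PDHG(f=f, g=g, operator=operator, tau=tau, sigma=sigma)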

def main():
    ###########################################################################
    # Parse input files
    ###########################################################################

    if trans_pattern is None:
        raise AssertionError("--trans missing")
    if sino_pattern is None:
        raise AssertionError("--sino missing")
    trans_files = sorted(glob(trans_pattern))
    sino_files = sorted(glob(sino_pattern))
    attn_files = sorted(glob(attn_pattern))
    rand_files = sorted(glob(rand_pattern))

    num_ms = len(sino_files)
    # Check some sinograms found
    if num_ms == 0:
        raise AssertionError("No sinograms found!")
    # Should have as many trans as sinos
    if num_ms != len(trans_files):
        raise AssertionError("#trans should match #sinos. "
                             "#sinos = " + str(num_ms) +
                             ", #trans = " + str(len(trans_files)))
    # If any rand, check num == num_ms
    if len(rand_files) > 0 and len(rand_files) != num_ms:
        raise AssertionError("#rand should match #sinos. "
                             "#sinos = " + str(num_ms) +
                             ", #rand = " + str(len(rand_files)))
    # For attn, there should be 0, 1 or num_ms images
    if len(attn_files) > 1 and len(attn_files) != num_ms:
        raise AssertionError("#attn should be 0, 1 or #sinos")

    ###########################################################################
    # Read input
    ###########################################################################

    if trans_type == "tm":
        trans = [reg.AffineTransformation(file) for file in trans_files]
    elif trans_type == "disp":
        trans = [reg.NiftiImageData3DDisplacement(file)
                 for file in trans_files]
    elif trans_type == "def":
        trans = [reg.NiftiImageData3DDeformation(file)
                 for file in trans_files]
    else:
        raise error("Unknown transformation type")

    sinos_raw = [pet.AcquisitionData(file) for file in sino_files]
    attns = [pet.ImageData(file) for file in attn_files]
    rands = [pet.AcquisitionData(file) for file in rand_files]

    # Loop over all sinograms
    sinos = [0] * num_ms
    for ind in range(num_ms):
        # If any sinograms contain negative values
        # (shouldn't be the case), set them to 0
        sino_arr = sinos_raw[ind].as_array()
        if (sino_arr < 0).any():
            print("Input sinogram " + str(ind) +
                  " contains -ve elements. Setting to 0...")
            sinos[ind] = sinos_raw[ind].clone()
            sino_arr[sino_arr < 0] = 0
            sinos[ind].fill(sino_arr)
        else:
            sinos[ind] = sinos_raw[ind]

        # If rebinning is desired
        segs_to_combine = 1
        if args['--numSegsToCombine']:
            segs_to_combine = int(args['--numSegsToCombine'])
        views_to_combine = 1
        if args['--numViewsToCombine']:
            views_to_combine = int(args['--numViewsToCombine'])
        if segs_to_combine * views_to_combine > 1:
            sinos[ind] = sinos[ind].rebin(segs_to_combine, views_to_combine)
            # only print first time
            if ind == 0:
                print(f"Rebinned sino dimensions: {sinos[ind].dimensions()}")

    ###########################################################################
    # Initialise recon image
    ###########################################################################

    if initial_estimate:
        image = pet.ImageData(initial_estimate)
    else:
        # Create image based on ProjData
        image = sinos[0].create_uniform_image(0.0, (nxny, nxny))
        # If using GPU, need to make sure that image is right size.
        if use_gpu:
            dim = (127, 320, 320)
            spacing = (2.03125, 2.08626, 2.08626)
        # elif non-default spacing desired
        elif args['--dxdy']:
            dim = image.dimensions()
            dxdy = float(args['--dxdy'])
            spacing = (image.voxel_sizes()[0], dxdy, dxdy)
        if use_gpu or args['--dxdy']:
            image.initialise(dim=dim, vsize=spacing)
            image.fill(0.0)

    ###########################################################################
    # Set up resamplers
    ###########################################################################

    resamplers = [get_resampler(image, trans=tran) for tran in trans]

    ###########################################################################
    # Resample attenuation images (if necessary)
    ###########################################################################

    resampled_attns = None
    if len(attns) > 0:
        resampled_attns = [0] * num_ms
        # if using GPU, dimensions of attn and recon images have to match
        ref = image if use_gpu else None
        for i in range(num_ms):
            # if we only have 1 attn image, then we need to resample into
            # space of each gate. However, if we have num_ms attn images, then
            # assume they are already in the correct position, so use None as
            # transformation.
            tran = trans[i] if len(attns) == 1 else None
            # If only 1 attn image, then resample that. If we have num_ms attn
            # images, then use each attn image of each frame.
            attn = attns[0] if len(attns) == 1 else attns[i]
            resam = get_resampler(attn, ref=ref, trans=tran)
            resampled_attns[i] = resam.forward(attn)

    ###########################################################################
    # Set up acquisition models
    ###########################################################################

    print("Setting up acquisition models...")
    # One independent acquisition model per motion state (a single shared
    # instance could not be set up with a different sinogram for each state)
    if not use_gpu:
        acq_models = [pet.AcquisitionModelUsingRayTracingMatrix()
                      for _ in range(num_ms)]
    else:
        acq_models = [pet.AcquisitionModelUsingNiftyPET()
                      for _ in range(num_ms)]
        for acq_model in acq_models:
            acq_model.set_use_truncation(True)
            acq_model.set_cuda_verbosity(verbosity)

    # If present, create ASM from ECAT8 normalisation data
    asm_norm = None
    if norm_file:
        asm_norm = pet.AcquisitionSensitivityModel(norm_file)

    # Loop over each motion state
    for ind in range(num_ms):
        # Create attn ASM if necessary
        asm_attn = None
        if resampled_attns:
            asm_attn = get_asm_attn(sinos[ind], resampled_attns[ind],
                                    acq_models[ind])

        # Get ASM dependent on attn and/or norm
        asm = None
        if asm_norm and asm_attn:
            if ind == 0:
                print("ASM contains norm and attenuation...")
            asm = pet.AcquisitionSensitivityModel(asm_norm, asm_attn)
        elif asm_norm:
            if ind == 0:
                print("ASM contains norm...")
            asm = asm_norm
        elif asm_attn:
            if ind == 0:
                print("ASM contains attenuation...")
            asm = asm_attn
        if asm:
            acq_models[ind].set_acquisition_sensitivity(asm)

        if len(rands) > 0:
            acq_models[ind].set_background_term(rands[ind])

        # Set up
        acq_models[ind].set_up(sinos[ind], image)

    ###########################################################################
    # Set up reconstructor
    ###########################################################################

    print("Setting up reconstructor...")

    # Create composition operators containing acquisition models and resamplers
    C = [CompositionOperator(am, res, preallocate=True)
         for am, res in zip(acq_models, resamplers)]

    # Configure the PDHG algorithm
    if args['--normK'] and not args['--onlyNormK']:
        normK = float(args['--normK'])
    else:
        kl = [KullbackLeibler(b=sino, eta=(sino * 0 + 1e-5)) for sino in sinos]
        f = BlockFunction(*kl)
        K = BlockOperator(*C)
        # Calculate normK
        print("Calculating norm of the block operator...")
        normK = K.norm(iterations=10)
        print("Norm of the BlockOperator ", normK)
    if args['--onlyNormK']:
        exit(0)

    # Optionally rescale sinograms and BlockOperator using normK
    scale_factor = 1. / normK if args['--normaliseDataAndBlock'] else 1.0
    kl = [KullbackLeibler(b=sino * scale_factor, eta=(sino * 0 + 1e-5))
          for sino in sinos]
    f = BlockFunction(*kl)
    K = BlockOperator(*C) * scale_factor

    # If preconditioned
    if precond:

        def get_nonzero_recip(data):
            """Get the reciprocal of a datacontainer. Voxels where input == 0
            will have their reciprocal set to 1 (instead of infinity)"""
            inv_np = data.as_array()
            inv_np[inv_np == 0] = 1
            inv_np = 1. / inv_np
            data.fill(inv_np)

        tau = K.adjoint(K.range_geometry().allocate(1))
        get_nonzero_recip(tau)

        tmp_sigma = K.direct(K.domain_geometry().allocate(1))
        sigma = 0. * tmp_sigma
        get_nonzero_recip(sigma[0])

        def precond_proximal(self, x, tau, out=None):
            """Modify proximal method to work with preconditioned tau"""
            pars = {
                'algorithm': FGP_TV,
                'input': np.asarray(x.as_array() / tau.as_array(),
                                    dtype=np.float32),
                'regularization_parameter': self.lambdaReg,
                'number_of_iterations': self.iterationsTV,
                'tolerance_constant': self.tolerance,
                'methodTV': self.methodTV,
                'nonneg': self.nonnegativity,
                'printingOut': self.printing
            }
            res, info = regularisers.FGP_TV(pars['input'],
                                            pars['regularization_parameter'],
                                            pars['number_of_iterations'],
                                            pars['tolerance_constant'],
                                            pars['methodTV'],
                                            pars['nonneg'],
                                            self.device)
            if out is not None:
                out.fill(res)
            else:
                out = x.copy()
                out.fill(res)
            out *= tau
            return out

        FGP_TV.proximal = precond_proximal
        print("Will run proximal with preconditioned tau...")

    # If not preconditioned
    else:
        sigma = float(args['--sigma'])
        # Use the given tau, otherwise calculate the default
        if args['--tau']:
            tau = float(args['--tau'])
        else:
            tau = 1 / (sigma * normK**2)

    if regularisation == 'none':
        G = IndicatorBox(lower=0)
    elif regularisation == 'FGP_TV':
        r_iterations = float(args['--reg_iters'])
        r_tolerance = 1e-7
        r_iso = 0
        r_nonneg = 1
        r_printing = 0
        device = 'gpu' if use_gpu else 'cpu'
        G = FGP_TV(r_alpha, r_iterations, r_tolerance,
                   r_iso, r_nonneg, r_printing, device)
    else:
        raise error("Unknown regularisation")

    if precond:

        def PDHG_new_update(self):
            """Modify the PDHG update to allow preconditioning"""
            # save previous iteration
            self.x_old.fill(self.x)
            self.y_old.fill(self.y)

            # Gradient ascent for the dual variable
            self.operator.direct(self.xbar, out=self.y_tmp)
            self.y_tmp *= self.sigma
            self.y_tmp += self.y_old

            self.f.proximal_conjugate(self.y_tmp, self.sigma, out=self.y)

            # Gradient descent for the primal variable
            self.operator.adjoint(self.y, out=self.x_tmp)
            self.x_tmp *= -1 * self.tau
            self.x_tmp += self.x_old

            self.g.proximal(self.x_tmp, self.tau, out=self.x)

            # Update
            self.x.subtract(self.x_old, out=self.xbar)
            self.xbar *= self.theta
            self.xbar += self.x

        PDHG.update = PDHG_new_update

    # Get filename
    outp_file = outp_prefix
    if descriptive_fname:
        if len(attn_files) > 0:
            outp_file += "_wAC"
        if norm_file:
            outp_file += "_wNorm"
        if use_gpu:
            outp_file += "_wGPU"
        outp_file += "_Reg-" + regularisation
        if regularisation == 'FGP_TV':
            outp_file += "-alpha" + str(r_alpha)
            outp_file += "-riters" + str(r_iterations)
        if args['--normK']:
            outp_file += '_userNormK' + str(normK)
        else:
            outp_file += '_calcNormK' + str(normK)
        if args['--normaliseDataAndBlock']:
            outp_file += '_wDataScale'
        else:
            outp_file += '_noDataScale'
        if not precond:
            outp_file += "_sigma" + str(sigma)
            outp_file += "_tau" + str(tau)
        else:
            outp_file += "_wPrecond"
        outp_file += "_nGates" + str(len(sino_files))
        if resamplers is None:
            outp_file += "_noMotion"

    pdhg = PDHG(f=f,
                g=G,
                operator=K,
                sigma=sigma,
                tau=tau,
                max_iteration=num_iters,
                update_objective_interval=update_obj_fn_interval,
                x_init=image,
                log_file=outp_file + ".log")

    def callback_save(iteration, objective_value, solution):
        """Callback function to save images"""
        if (iteration + 1) % save_interval == 0:
            out = solution if not nifti else reg.NiftiImageData(solution)
            out.write(outp_file + "_iters" + str(iteration + 1))

    pdhg.run(iterations=num_iters,
             callback=callback_save,
             verbose=True,
             very_verbose=True)

    if visualisations:
        # show reconstructed image
        out = pdhg.get_output()
        out_arr = out.as_array()
        z = out_arr.shape[0] // 2
        show_2D_array('Reconstructed image', out_arr[z, :, :])
        pylab.show()
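
    # Sketch of a final save after the run, mirroring callback_save above;
    # the "_final" filename suffix is an assumption, not from the original.
    final = pdhg.get_output()
    if nifti:
        final = reg.NiftiImageData(final)
    final.write(outp_file + "_final")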

# In[ ]:

# reg parameter
alpha = 0.001

# explicit case

# rescale KL:
# KL(lambda * x + eta, b) = lambda * KL(x + eta/lambda, b/lambda)
f1 = ScaledFunction(
    KullbackLeibler(b=(1 / am_norm) * acq_data, eta=(1 / am_norm) * rand),
    am_norm)
F = f1
G = IndicatorBox(lower=0)

# rescale operators
am_rescaled = ScaledOperator(am, (1 / am_norm))
K = am_rescaled

# In[ ]:

sigma = 1.0
tau = 1.0

pet.set_max_omp_threads(15)

# In[ ]:

pdhg = PDHG(f=F,