# Create functions
f1 = alpha * MixedL21Norm()
f2 = KullbackLeibler(noisy_data)
f = BlockFunction(f1, f2)
g = ZeroFunction()

normK = operator.norm()

# Primal & dual stepsizes
sigma = 5
tau = 1 / (sigma * normK**2)

# Setup and run the PDHG algorithm
pdhg = PDHG(f=f, g=g, operator=operator, tau=tau, sigma=sigma, memopt=True)
pdhg.max_iteration = 1000
pdhg.update_objective_interval = 200
pdhg.run(1000)

tindex = [0, int(phantom_2Dt.shape[0] / 2), phantom_2Dt.shape[0] - 1]
fig2, axes2 = plt.subplots(nrows=2, ncols=3, figsize=(5, 5))

# Ground Truth
axes2[0, 0].imshow(phantom_2Dt[tindex[0], :, :])
axes2[0, 0].set_title('Time {}'.format(tindex[0]))
axes2[0, 0].set_ylabel('Ground Truth')
axes2[0, 1].imshow(phantom_2Dt[tindex[1], :, :])
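
# A quick sanity check (a minimal sketch, reusing sigma, tau and normK from
# above): PDHG converges provided sigma * tau * ||K||^2 <= 1, and the choice
# tau = 1 / (sigma * normK**2) satisfies this with equality.
assert sigma * tau * normK**2 <= 1 + 1e-10, "PDHG stepsize condition violated"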
# Create BlockOperator
op_PDHG = BlockOperator(Grad, Aop, shape=(2, 1))

# Create functions
f1 = 0.5 * alpha**2 * L2NormSquared()
f2 = 0.5 * L2NormSquared(b=noisy_data)
f = BlockFunction(f1, f2)
g = ZeroFunction()

## Compute operator Norm
normK = op_PDHG.norm()

## Primal & dual stepsizes
sigma = 10
tau = 1 / (sigma * normK**2)

pdhg = PDHG(f=f, g=g, operator=op_PDHG, tau=tau, sigma=sigma)
pdhg.max_iteration = 1000
pdhg.update_objective_interval = 200
pdhg.run(1000, verbose=False)

# Show results
plt.figure(figsize=(10, 10))
plt.subplot(2, 1, 1)
plt.imshow(cgls.get_output().as_array())
plt.title('CGLS reconstruction')
plt.subplot(2, 1, 2)
plt.imshow(pdhg.get_output().as_array())
plt.title('PDHG reconstruction')
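
# Since CGLS and PDHG solve the same Tikhonov problem
#   min_u 0.5 * ||A u - b||^2 + 0.5 * alpha**2 * ||grad u||^2,
# their outputs should agree up to solver tolerance. A minimal check (a
# sketch, assuming the `cgls` object above has already been run):
diff = (cgls.get_output() - pdhg.get_output()).norm()
print('Norm of CGLS/PDHG difference:', diff)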
#%% Use PDHG to solve non-smooth version of problem for comparison

# Set up non-smooth TV regularisation term
operator = Grad
f = alpha * MixedL21Norm()

# Set algorithm parameters: primal and dual step sizes, sigma and tau,
# standard choices based on the operator's norm.
normK = operator.norm()
sigma = 1
tau = 1 / (sigma * normK**2)

# Setup and run the PDHG algorithm
print("Running PDHG with non-smooth TV.\nThis will take some time...")
pdhg = PDHG(f=f, g=f2, operator=operator, tau=tau, sigma=sigma)
pdhg.max_iteration = 10000
pdhg.update_objective_interval = 100
pdhg.run(very_verbose=True)

## Show PDHG reconstruction results
plt.figure(figsize=(20, 5))
plt.subplot(1, 4, 1)
plt.imshow(data.as_array())
plt.title('Ground Truth')
plt.colorbar()
plt.subplot(1, 4, 2)
plt.imshow(noisy_data.as_array())
plt.title('Noisy Data')
plt.colorbar()
plt.subplot(1, 4, 3)
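
# The objective values logged every `update_objective_interval` iterations are
# available as `pdhg.objective` (CIL's Algorithm base class); a quick sketch
# of a convergence plot, in a separate figure:
plt.figure()
plt.plot(pdhg.objective)
plt.title('PDHG objective value')
plt.show()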
f = BlockFunction(f1, f2)
g = ZeroFunction()

# Compute operator Norm
normK1 = operator1.norm()
normK2 = operator2.norm()

# Primal & dual stepsizes
sigma1 = 1
tau1 = 1 / (sigma1 * normK1**2)
sigma2 = 1
tau2 = 1 / (sigma2 * normK2**2)

# Setup and run the PDHG algorithm
pdhg1 = PDHG(f=f, g=g, operator=operator1, tau=tau1, sigma=sigma1)
pdhg1.max_iteration = 2000
pdhg1.update_objective_interval = 200
pdhg1.run(1000)

# Setup and run the PDHG algorithm
pdhg2 = PDHG(f=f, g=g, operator=operator2, tau=tau2, sigma=sigma2)
pdhg2.max_iteration = 2000
pdhg2.update_objective_interval = 200
pdhg2.run(1000)

#%%
tindex = [8, 16, 24]
fig2, axes2 = plt.subplots(nrows=3, ncols=3, figsize=(10, 10))

# Ground Truth
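
# A minimal sketch comparing the two solutions (assumes pdhg1 and pdhg2 above
# share the same image geometry):
diff = (pdhg1.get_output() - pdhg2.get_output()).norm()
print('Norm of difference between the two PDHG reconstructions:', diff)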
# Fidelity term depending on the noise model
if noise == 's&p':
    g = L1Norm(b=noisy_data)
elif noise == 'poisson':
    g = KullbackLeibler(noisy_data)
elif noise == 'gaussian':
    g = 0.5 * L2NormSquared(b=noisy_data)

# Compute operator Norm
normK = operator.norm()

# Primal & dual stepsizes
sigma = 1
tau = 1 / (sigma * normK**2)

# Setup and run the PDHG algorithm
pdhg1 = PDHG(f=f1, g=g, operator=operator, tau=tau, sigma=sigma)
pdhg1.max_iteration = 2000
pdhg1.update_objective_interval = 200
pdhg1.run(1000)

# Show results
plt.figure(figsize=(10, 10))
plt.subplot(1, 3, 1)
plt.imshow(data.as_array())
plt.title('Ground Truth')
plt.subplot(1, 3, 2)
plt.imshow(noisy_data.as_array())
plt.title('Noisy Data')
plt.subplot(1, 3, 3)
plt.imshow(pdhg1.get_output().as_array())
plt.title('TV Reconstruction')
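
# A scalar quality measure (a sketch mirroring the RMSE check used in the
# unit tests further below):
rmse = (pdhg1.get_output() - data).norm() / data.as_array().size
print('RMSE:', rmse)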
def main():

    ###########################################################################
    # Parse input files
    ###########################################################################

    if trans_pattern is None:
        raise AssertionError("--trans missing")
    if sino_pattern is None:
        raise AssertionError("--sino missing")
    trans_files = sorted(glob(trans_pattern))
    sino_files = sorted(glob(sino_pattern))
    attn_files = sorted(glob(attn_pattern))
    rand_files = sorted(glob(rand_pattern))

    num_ms = len(sino_files)
    # Check some sinograms found
    if num_ms == 0:
        raise AssertionError("No sinograms found!")
    # Should have as many trans as sinos
    if num_ms != len(trans_files):
        raise AssertionError("#trans should match #sinos. "
                             "#sinos = " + str(num_ms) +
                             ", #trans = " + str(len(trans_files)))
    # If any rand, check num == num_ms
    if len(rand_files) > 0 and len(rand_files) != num_ms:
        raise AssertionError("#rand should match #sinos. "
                             "#sinos = " + str(num_ms) +
                             ", #rand = " + str(len(rand_files)))
    # For attn, there should be 0, 1 or num_ms images
    if len(attn_files) > 1 and len(attn_files) != num_ms:
        raise AssertionError("#attn should be 0, 1 or #sinos")

    ###########################################################################
    # Read input
    ###########################################################################

    if trans_type == "tm":
        trans = [reg.AffineTransformation(file) for file in trans_files]
    elif trans_type == "disp":
        trans = [reg.NiftiImageData3DDisplacement(file)
                 for file in trans_files]
    elif trans_type == "def":
        trans = [reg.NiftiImageData3DDeformation(file)
                 for file in trans_files]
    else:
        raise error("Unknown transformation type")

    sinos_raw = [pet.AcquisitionData(file) for file in sino_files]
    attns = [pet.ImageData(file) for file in attn_files]
    rands = [pet.AcquisitionData(file) for file in rand_files]

    # Loop over all sinograms
    sinos = [0] * num_ms
    for ind in range(num_ms):
        # If any sinograms contain negative values
        # (shouldn't be the case), set them to 0
        sino_arr = sinos_raw[ind].as_array()
        if (sino_arr < 0).any():
            print("Input sinogram " + str(ind) +
                  " contains -ve elements. Setting to 0...")
            sinos[ind] = sinos_raw[ind].clone()
            sino_arr[sino_arr < 0] = 0
            sinos[ind].fill(sino_arr)
        else:
            sinos[ind] = sinos_raw[ind]
        # If rebinning is desired
        segs_to_combine = 1
        if args['--numSegsToCombine']:
            segs_to_combine = int(args['--numSegsToCombine'])
        views_to_combine = 1
        if args['--numViewsToCombine']:
            views_to_combine = int(args['--numViewsToCombine'])
        if segs_to_combine * views_to_combine > 1:
            sinos[ind] = sinos[ind].rebin(segs_to_combine, views_to_combine)
            # only print first time
            if ind == 0:
                print(f"Rebinned sino dimensions: {sinos[ind].dimensions()}")

    ###########################################################################
    # Initialise recon image
    ###########################################################################

    if initial_estimate:
        image = pet.ImageData(initial_estimate)
    else:
        # Create image based on ProjData
        image = sinos[0].create_uniform_image(0.0, (nxny, nxny))
        # If using GPU, need to make sure that image is right size.
        if use_gpu:
            dim = (127, 320, 320)
            spacing = (2.03125, 2.08626, 2.08626)
        # elif non-default spacing desired
        elif args['--dxdy']:
            dim = image.dimensions()
            dxdy = float(args['--dxdy'])
            spacing = (image.voxel_sizes()[0], dxdy, dxdy)
        if use_gpu or args['--dxdy']:
            image.initialise(dim=dim, vsize=spacing)
            image.fill(0.0)

    ###########################################################################
    # Set up resamplers
    ###########################################################################

    resamplers = [get_resampler(image, trans=tran) for tran in trans]

    ###########################################################################
    # Resample attenuation images (if necessary)
    ###########################################################################

    resampled_attns = None
    if len(attns) > 0:
        resampled_attns = [0] * num_ms
        # if using GPU, dimensions of attn and recon images have to match
        ref = image if use_gpu else None
        for i in range(num_ms):
            # if we only have 1 attn image, then we need to resample into
            # space of each gate. However, if we have num_ms attn images, then
            # assume they are already in the correct position, so use None as
            # transformation.
            tran = trans[i] if len(attns) == 1 else None
            # If only 1 attn image, then resample that. If we have num_ms attn
            # images, then use each attn image of each frame.
            attn = attns[0] if len(attns) == 1 else attns[i]
            resam = get_resampler(attn, ref=ref, trans=tran)
            resampled_attns[i] = resam.forward(attn)

    ###########################################################################
    # Set up acquisition models
    ###########################################################################

    print("Setting up acquisition models...")
    # NB: build one acquisition model per motion state;
    # `num_ms * [model]` would alias a single model num_ms times.
    if not use_gpu:
        acq_models = [pet.AcquisitionModelUsingRayTracingMatrix()
                      for _ in range(num_ms)]
    else:
        acq_models = [pet.AcquisitionModelUsingNiftyPET()
                      for _ in range(num_ms)]
        for acq_model in acq_models:
            acq_model.set_use_truncation(True)
            acq_model.set_cuda_verbosity(verbosity)

    # If present, create ASM from ECAT8 normalisation data
    asm_norm = None
    if norm_file:
        asm_norm = pet.AcquisitionSensitivityModel(norm_file)

    # Loop over each motion state
    for ind in range(num_ms):
        # Create attn ASM if necessary
        asm_attn = None
        if resampled_attns:
            asm_attn = get_asm_attn(sinos[ind], resampled_attns[ind],
                                    acq_models[ind])

        # Get ASM dependent on attn and/or norm
        asm = None
        if asm_norm and asm_attn:
            if ind == 0:
                print("ASM contains norm and attenuation...")
            asm = pet.AcquisitionSensitivityModel(asm_norm, asm_attn)
        elif asm_norm:
            if ind == 0:
                print("ASM contains norm...")
            asm = asm_norm
        elif asm_attn:
            if ind == 0:
                print("ASM contains attenuation...")
            asm = asm_attn
        if asm:
            acq_models[ind].set_acquisition_sensitivity(asm)

        if len(rands) > 0:
            acq_models[ind].set_background_term(rands[ind])

        # Set up
        acq_models[ind].set_up(sinos[ind], image)

    ###########################################################################
    # Set up reconstructor
    ###########################################################################

    print("Setting up reconstructor...")

    # Create composition operators containing acquisition models and resamplers
    C = [CompositionOperator(am, res, preallocate=True)
         for am, res in zip(acq_models, resamplers)]

    # Configure the PDHG algorithm
    if args['--normK'] and not args['--onlyNormK']:
        normK = float(args['--normK'])
    else:
        kl = [KullbackLeibler(b=sino, eta=(sino * 0 + 1e-5)) for sino in sinos]
        f = BlockFunction(*kl)
        K = BlockOperator(*C)
        # Calculate normK
        print("Calculating norm of the block operator...")
        normK = K.norm(iterations=10)
        print("Norm of the BlockOperator ",
              normK)

    if args['--onlyNormK']:
        exit(0)

    # Optionally rescale sinograms and BlockOperator using normK
    scale_factor = 1. / normK if args['--normaliseDataAndBlock'] else 1.0
    kl = [KullbackLeibler(b=sino * scale_factor, eta=(sino * 0 + 1e-5))
          for sino in sinos]
    f = BlockFunction(*kl)
    K = BlockOperator(*C) * scale_factor

    # If preconditioned
    if precond:

        def get_nonzero_recip(data):
            """Get the reciprocal of a datacontainer. Voxels where input == 0
            will have their reciprocal set to 1 (instead of infinity)."""
            inv_np = data.as_array()
            inv_np[inv_np == 0] = 1
            inv_np = 1. / inv_np
            data.fill(inv_np)

        # Diagonal preconditioning: tau = 1 / (K^T 1), sigma = 1 / (K 1)
        tau = K.adjoint(K.range_geometry().allocate(1))
        get_nonzero_recip(tau)

        sigma = K.direct(K.domain_geometry().allocate(1))
        for s in sigma.containers:
            get_nonzero_recip(s)

        def precond_proximal(self, x, tau, out=None):
            """Modify proximal method to work with preconditioned tau"""
            pars = {
                'algorithm': FGP_TV,
                'input': np.asarray(x.as_array() / tau.as_array(),
                                    dtype=np.float32),
                'regularization_parameter': self.lambdaReg,
                'number_of_iterations': self.iterationsTV,
                'tolerance_constant': self.tolerance,
                'methodTV': self.methodTV,
                'nonneg': self.nonnegativity,
                'printingOut': self.printing
            }

            res, info = regularisers.FGP_TV(pars['input'],
                                            pars['regularization_parameter'],
                                            pars['number_of_iterations'],
                                            pars['tolerance_constant'],
                                            pars['methodTV'],
                                            pars['nonneg'],
                                            self.device)
            if out is not None:
                out.fill(res)
            else:
                out = x.copy()
                out.fill(res)
            out *= tau
            return out

        # Monkey-patch the regulariser's proximal to use the element-wise tau
        FGP_TV.proximal = precond_proximal
        print("Will run proximal with preconditioned tau...")

    # If not preconditioned
    else:
        sigma = float(args['--sigma'])
        # Use the user-supplied tau if given, else calculate the default
        if args['--tau']:
            tau = float(args['--tau'])
        else:
            tau = 1 / (sigma * normK**2)

    if regularisation == 'none':
        G = IndicatorBox(lower=0)
    elif regularisation == 'FGP_TV':
        r_iterations = float(args['--reg_iters'])
        r_tolerance = 1e-7
        r_iso = 0
        r_nonneg = 1
        r_printing = 0
        device = 'gpu' if use_gpu else 'cpu'
        G = FGP_TV(r_alpha, r_iterations, r_tolerance,
                   r_iso, r_nonneg, r_printing, device)
    else:
        raise error("Unknown regularisation")

    if precond:

        def PDHG_new_update(self):
            """Modify the PDHG update to allow preconditioning"""
            # save previous iteration
            self.x_old.fill(self.x)
            self.y_old.fill(self.y)

            # Gradient ascent for the dual variable
            self.operator.direct(self.xbar, out=self.y_tmp)
            self.y_tmp *= self.sigma
            self.y_tmp += self.y_old
            self.f.proximal_conjugate(self.y_tmp, self.sigma, out=self.y)

            # Gradient descent for the primal variable
            self.operator.adjoint(self.y, out=self.x_tmp)
            self.x_tmp *= -1 * self.tau
            self.x_tmp += self.x_old
            self.g.proximal(self.x_tmp, self.tau, out=self.x)

            # Update
            self.x.subtract(self.x_old, out=self.xbar)
            self.xbar *= self.theta
            self.xbar += self.x

        PDHG.update = PDHG_new_update

    # Get filename
    outp_file = outp_prefix
    if descriptive_fname:
        if len(attn_files) > 0:
            outp_file += "_wAC"
        if norm_file:
            outp_file += "_wNorm"
        if use_gpu:
            outp_file += "_wGPU"
        outp_file += "_Reg-" + regularisation
        if regularisation == 'FGP_TV':
            outp_file += "-alpha" + str(r_alpha)
            outp_file += "-riters" + str(r_iterations)
        if args['--normK']:
            outp_file += '_userNormK' + str(normK)
        else:
            outp_file += '_calcNormK' + str(normK)
        if args['--normaliseDataAndBlock']:
            outp_file += '_wDataScale'
        else:
            outp_file += '_noDataScale'
        if not precond:
            outp_file += "_sigma" + str(sigma)
            outp_file += "_tau" + str(tau)
        else:
            outp_file += "_wPrecond"
        outp_file += "_nGates" + str(len(sino_files))
        if resamplers is None:
            outp_file += "_noMotion"

    pdhg = PDHG(f=f, g=G, operator=K, sigma=sigma,
                tau=tau, max_iteration=num_iters,
                update_objective_interval=update_obj_fn_interval,
                x_init=image, log_file=outp_file + ".log")

    def callback_save(iteration, objective_value, solution):
        """Callback function to save images"""
        if (iteration + 1) % save_interval == 0:
            out = solution if not nifti else reg.NiftiImageData(solution)
            out.write(outp_file + "_iters" + str(iteration + 1))

    pdhg.run(iterations=num_iters, callback=callback_save,
             verbose=True, very_verbose=True)

    if visualisations:
        # show reconstructed image
        out = pdhg.get_output()
        out_arr = out.as_array()
        z = out_arr.shape[0] // 2
        show_2D_array('Reconstructed image', out.as_array()[z, :, :])
        pylab.show()
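
# The script above assumes a `get_resampler` helper. A minimal sketch using
# SIRF's NiftyResample API (an assumption about the helper's behaviour, not
# necessarily the original implementation):
def get_resampler(image, ref=None, trans=None):
    """Return a NiftyResample object mapping `image` onto `ref` (defaults to
    `image` itself), optionally applying the transformation `trans`."""
    if ref is None:
        ref = image
    resampler = reg.NiftyResample()
    resampler.set_reference_image(ref)
    resampler.set_floating_image(image)
    resampler.set_padding_value(0)
    resampler.set_interpolation_type_to_linear()
    if trans is not None:
        resampler.add_transformation(trans)
    return resampler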
###############################################################################
# Setup and run the PDHG algorithm
print("Running PDHG reconstruction")

operator = Aop
f = L2NormSquared(b=sinogram)
g = ZeroFunction()

## Compute operator Norm
normK = operator.norm()

## Primal & dual stepsizes
sigma = 0.02
tau = 1 / (sigma * normK**2)

pdhg = PDHG()
pdhg.set_up(f=f, g=g, operator=operator, tau=tau, sigma=sigma)
pdhg.max_iteration = 1000
pdhg.update_objective_interval = 100
pdhg.run(1000, verbose=True)

#%%
###############################################################################
# Setup and run the FISTA algorithm
print("Running FISTA reconstruction")

fidelity = FunctionOperatorComposition(L2NormSquared(b=sinogram), Aop)
regularizer = ZeroFunction()

fista = FISTA()
fista.set_up(x_init=x_init, f=fidelity, g=regularizer)
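
# A hypothetical continuation mirroring the PDHG cell above (a sketch; the
# original script's FISTA settings are not shown here):
fista.max_iteration = 1000
fista.update_objective_interval = 100
fista.run(1000, verbose=True)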
am_rescaled = ScaledOperator(am, (1 / am_norm))
K = am_rescaled

# In[ ]:

sigma = 1.0
tau = 1.0

pet.set_max_omp_threads(15)

# In[ ]:

pdhg = PDHG(f=F, g=G, operator=K, sigma=sigma, tau=tau,
            max_iteration=1000, update_objective_interval=10)
pdhg.run(20, very_verbose=True)

# In[ ]:

plt.figure()
plt.imshow(pdhg.get_output().as_array()[75, :, :], cmap="inferno")
plt.colorbar()
plt.show()

# # SPDHG without regularization

# In[ ]:
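
# A sketch of the SPDHG setup this heading introduces, assuming the data have
# already been split into subsets; `fs` (one KullbackLeibler term per subset),
# `Ks` (one subset operator each) and `n_subsets` are hypothetical names, not
# defined above:
n_subsets = len(Ks)
spdhg = SPDHG(f=BlockFunction(*fs), g=G, operator=BlockOperator(*Ks),
              prob=[1 / n_subsets] * n_subsets,
              max_iteration=1000, update_objective_interval=10)
spdhg.run(20, very_verbose=True)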
def test_PDHG_Denoising(self):
    print("PDHG Denoising with 3 noises")
    # adapted from demo PDHG_TV_Color_Denoising.py in CIL-Demos repository
    # loader = TestData(data_dir=os.path.join(os.environ['SIRF_INSTALL_PATH'], 'share', 'ccpi'))
    # loader = TestData(data_dir=os.path.join(sys.prefix, 'share', 'ccpi'))
    loader = TestData()
    data = loader.load(TestData.PEPPERS, size=(256, 256))
    ig = data.geometry
    ag = ig

    which_noise = 0
    # Create noisy data.
    noises = ['gaussian', 'poisson', 's&p']
    noise = noises[which_noise]

    def setup(data, noise):
        if noise == 's&p':
            n1 = TestData.random_noise(data.as_array(), mode=noise,
                                       salt_vs_pepper=0.9, amount=0.2,
                                       seed=10)
        elif noise == 'poisson':
            scale = 5
            n1 = TestData.random_noise(data.as_array() / scale,
                                       mode=noise, seed=10) * scale
        elif noise == 'gaussian':
            n1 = TestData.random_noise(data.as_array(), mode=noise, seed=10)
        else:
            raise ValueError('Unsupported Noise ', noise)
        noisy_data = ig.allocate()
        noisy_data.fill(n1)

        # Regularisation Parameter depending on the noise distribution
        if noise == 's&p':
            alpha = 0.8
        elif noise == 'poisson':
            alpha = 1
        elif noise == 'gaussian':
            alpha = .3

        # fidelity
        if noise == 's&p':
            g = L1Norm(b=noisy_data)
        elif noise == 'poisson':
            g = KullbackLeibler(b=noisy_data)
        elif noise == 'gaussian':
            g = 0.5 * L2NormSquared(b=noisy_data)
        return noisy_data, alpha, g

    noisy_data, alpha, g = setup(data, noise)
    operator = Gradient(ig, correlation=Gradient.CORRELATION_SPACE)

    f1 = alpha * MixedL21Norm()

    # Compute operator Norm
    normK = operator.norm()

    # Primal & dual stepsizes
    sigma = 1
    tau = 1 / (sigma * normK**2)

    # Setup and run the PDHG algorithm
    pdhg1 = PDHG(f=f1, g=g, operator=operator, tau=tau, sigma=sigma)
    pdhg1.max_iteration = 2000
    pdhg1.update_objective_interval = 200
    pdhg1.run(1000, very_verbose=True)

    rmse = (pdhg1.get_output() - data).norm() / data.as_array().size
    print("RMSE", rmse)
    self.assertLess(rmse, 2e-4)

    which_noise = 1
    noise = noises[which_noise]
    noisy_data, alpha, g = setup(data, noise)
    operator = Gradient(ig, correlation=Gradient.CORRELATION_SPACE)
    f1 = alpha * MixedL21Norm()

    # Compute operator Norm
    normK = operator.norm()

    # Primal & dual stepsizes
    sigma = 1
    tau = 1 / (sigma * normK**2)

    # Setup and run the PDHG algorithm
    pdhg1 = PDHG(f=f1, g=g, operator=operator, tau=tau, sigma=sigma,
                 max_iteration=2000, update_objective_interval=200)
    pdhg1.run(1000)

    rmse = (pdhg1.get_output() - data).norm() / data.as_array().size
    print("RMSE", rmse)
    self.assertLess(rmse, 2e-4)

    which_noise = 2
    noise = noises[which_noise]
    noisy_data, alpha, g = setup(data, noise)
    operator = Gradient(ig, correlation=Gradient.CORRELATION_SPACE)
    f1 = alpha * MixedL21Norm()

    # Compute operator Norm
    normK = operator.norm()

    # Primal & dual stepsizes
    sigma = 1
    tau = 1 / (sigma * normK**2)

    # Setup and run the PDHG algorithm
    pdhg1 = PDHG(f=f1, g=g, operator=operator, tau=tau, sigma=sigma)
    pdhg1.max_iteration = 2000
    pdhg1.update_objective_interval = 200
    pdhg1.run(1000)

    rmse = (pdhg1.get_output() - data).norm() / data.as_array().size
    print("RMSE", rmse)
    self.assertLess(rmse, 2e-4)
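
# The three noise cases above repeat the same PDHG setup; a more compact,
# equivalent form (a sketch of a possible refactor, not the original test)
# would loop over the noise models inside the method:
#
#     for noise in noises:
#         noisy_data, alpha, g = setup(data, noise)
#         operator = Gradient(ig, correlation=Gradient.CORRELATION_SPACE)
#         normK = operator.norm()
#         pdhg = PDHG(f=alpha * MixedL21Norm(), g=g, operator=operator,
#                     tau=1 / normK**2, sigma=1,
#                     max_iteration=2000, update_objective_interval=200)
#         pdhg.run(1000)
#         rmse = (pdhg.get_output() - data).norm() / data.as_array().size
#         self.assertLess(rmse, 2e-4)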