def test_FISTA_Denoising(self):
    print("FISTA Denoising Poisson Noise Tikhonov")
    # adapted from demo FISTA_Tikhonov_Poisson_Denoising.py in CIL-Demos repository
    #loader = TestData(data_dir=os.path.join(sys.prefix, 'share','ccpi'))
    loader = TestData()
    data = loader.load(TestData.SHAPES)
    ig = data.geometry
    ag = ig
    N = 300

    # Create noisy data with Poisson noise
    scale = 5
    n1 = TestData.random_noise(data.as_array() / scale, mode='poisson', seed=10) * scale
    noisy_data = ImageData(n1)

    # Regularisation parameter
    alpha = 10

    # Set up and run the FISTA algorithm
    operator = Gradient(ig)
    fid = KullbackLeibler(b=noisy_data)
    reg = FunctionOperatorComposition(alpha * L2NormSquared(), operator)

    x_init = ig.allocate()
    fista = FISTA(x_init=x_init, f=reg, g=fid)
    fista.max_iteration = 3000
    fista.update_objective_interval = 500
    fista.run(verbose=True)

    rmse = (fista.get_output() - data).norm() / data.as_array().size
    print("RMSE", rmse)
    self.assertLess(rmse, 4.2e-4)
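# For orientation: a minimal numpy sketch of the update rule that
# fista.run(...) iterates in the test above (Beck & Teboulle's FISTA).
# grad_f, prox_g and the Lipschitz constant L are generic placeholders
# here, not part of the CIL API.
import numpy as np

def fista_sketch(x0, grad_f, prox_g, L, iterations=100):
    """x_{k+1} = prox_{g/L}(y_k - grad_f(y_k)/L), with momentum on y."""
    x = x0.copy()
    y = x0.copy()
    t = 1.0
    for _ in range(iterations):
        x_new = prox_g(y - grad_f(y) / L, 1.0 / L)
        t_new = (1.0 + np.sqrt(1.0 + 4.0 * t * t)) / 2.0
        y = x_new + ((t - 1.0) / t_new) * (x_new - x)   # momentum step
        x, t = x_new, t_new
    return x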
def setup(data, noise):
    # geometry was previously read from an undefined global; derive it
    # from the input data instead
    ig = data.geometry
    if noise == 's&p':
        n1 = TestData.random_noise(data.as_array(), mode=noise,
                                   salt_vs_pepper=0.9, amount=0.2, seed=10)
    elif noise == 'poisson':
        scale = 5
        n1 = TestData.random_noise(data.as_array() / scale, mode=noise, seed=10) * scale
    elif noise == 'gaussian':
        n1 = TestData.random_noise(data.as_array(), mode=noise, seed=10)
    else:
        raise ValueError('Unsupported Noise ', noise)
    noisy_data = ig.allocate()
    noisy_data.fill(n1)

    # Regularisation parameter, depending on the noise distribution
    if noise == 's&p':
        alpha = 0.8
    elif noise == 'poisson':
        alpha = 1
    elif noise == 'gaussian':
        alpha = .3

    # Fidelity term, matched to the noise model
    if noise == 's&p':
        g = L1Norm(b=noisy_data)
    elif noise == 'poisson':
        g = KullbackLeibler(b=noisy_data)
    elif noise == 'gaussian':
        g = 0.5 * L2NormSquared(b=noisy_data)

    return noisy_data, alpha, g
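# Hypothetical driver for the helper above; TestData is assumed to be
# available as in the surrounding demos.
data = TestData().load(TestData.SHAPES)
for noise in ['s&p', 'poisson', 'gaussian']:
    noisy_data, alpha, g = setup(data, noise)
    print(noise, alpha, type(g).__name__)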
# Regularisation parameter, depending on the noise distribution
if noise == 's&p':
    alpha = 0.8
elif noise == 'poisson':
    alpha = .3
elif noise == 'gaussian':
    alpha = .2

beta = 2 * alpha

# Fidelity term, matched to the noise model
if noise == 's&p':
    f3 = L1Norm(b=noisy_data)
elif noise == 'poisson':
    f3 = KullbackLeibler(b=noisy_data)
elif noise == 'gaussian':
    f3 = 0.5 * L2NormSquared(b=noisy_data)

if method == '0':
    # Create operators
    op11 = Gradient(ig)
    op12 = Identity(op11.range_geometry())

    op22 = SymmetrizedGradient(op11.domain_geometry())
    op21 = ZeroOperator(ig, op22.range_geometry())

    op31 = Identity(ig, ag)
    op32 = ZeroOperator(op22.domain_geometry(), ag)
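    # A sketch of how the TGV demo continues from here: the six operators are
    # assembled into the (3, 2) BlockOperator acting on the pair (u, w)
    # (cf. PDHG_TGV_Denoising.py in CIL-Demos):
    operator = BlockOperator(op11, -1 * op12,
                             op21, op22,
                             op31, op32, shape=(3, 2))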
def test_KullbackLeibler(self):
    print("test_KullbackLeibler")
    M, N, K = 2, 3, 4
    ig = ImageGeometry(N, M, K)

    u1 = ig.allocate('random_int', seed=500)
    g1 = ig.allocate('random_int', seed=100)
    b1 = ig.allocate('random_int', seed=1000)

    # with no data
    try:
        f = KullbackLeibler()
    except ValueError:
        print('Give data b=...\n')

    print('With negative data, no background\n')
    try:
        f = KullbackLeibler(b=-1 * g1)
    except ValueError:
        print('We have negative data\n')

    f = KullbackLeibler(b=g1)

    print('Check KullbackLeibler(x,x)=0\n')
    self.assertNumpyArrayAlmostEqual(0.0, f(g1))

    print('Check gradient .... is OK \n')
    res_gradient = f.gradient(u1)
    res_gradient_out = u1.geometry.allocate()
    f.gradient(u1, out=res_gradient_out)
    self.assertNumpyArrayAlmostEqual(res_gradient.as_array(),
                                     res_gradient_out.as_array(), decimal=4)

    print('Check proximal ... is OK\n')
    tau = 400.4
    res_proximal = f.proximal(u1, tau)
    res_proximal_out = u1.geometry.allocate()
    f.proximal(u1, tau, out=res_proximal_out)
    self.assertNumpyArrayAlmostEqual(res_proximal.as_array(),
                                     res_proximal_out.as_array(), decimal=5)

    print('Check conjugate ... is OK\n')
    if (1 - u1.as_array()).all():
        print('If 1-x<=0, Convex conjugate returns 0.0')
        self.assertNumpyArrayAlmostEqual(0.0, f.convex_conjugate(u1))

    print('Check KullbackLeibler with background\n')
    eta = b1
    f1 = KullbackLeibler(b=g1, eta=b1)

    tmp_sum = (u1 + eta).as_array()
    ind = tmp_sum >= 0
    tmp = scipy.special.kl_div(f1.b.as_array()[ind], tmp_sum[ind])
    self.assertNumpyArrayAlmostEqual(f1(u1), numpy.sum(tmp))

    res_proximal_conj_out = u1.geometry.allocate()
    proxc = f.proximal_conjugate(u1, tau)
    f.proximal_conjugate(u1, tau, out=res_proximal_conj_out)
    print(res_proximal_conj_out.as_array())
    print(proxc.as_array())
    numpy.testing.assert_array_almost_equal(proxc.as_array(),
                                            res_proximal_conj_out.as_array())
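# For reference, the identity behind the test above (a standalone numpy/scipy
# check, not part of the test suite): scipy.special.kl_div(b, v) evaluates
# b*log(b/v) - b + v pointwise, which is the integrand of KullbackLeibler
# with argument v = x + eta.
import numpy
import scipy.special

b = numpy.array([1.0, 2.0, 3.0])
v = numpy.array([2.0, 2.0, 1.0])
manual = b * numpy.log(b / v) - b + v
numpy.testing.assert_allclose(scipy.special.kl_div(b, v), manual)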
    asm_attn = pet.AcquisitionSensitivityModel(bin_eff)
    return asm_attn


# In[ ]:


# set up the acquisition model
am = pet.AcquisitionModelUsingRayTracingMatrix()
# ASM norm already there
asm_attn = get_asm_attn(acq_data, attns, am)

# Get ASM dependent on attn and/or norm
asm = pet.AcquisitionSensitivityModel(asm_norm, asm_attn)
am.set_acquisition_sensitivity(asm)
am.set_up(acq_data, image)

f_numba = KullbackLeibler(b=acq_data, eta=rand, use_numba=True)
f_numpy = KullbackLeibler(b=acq_data, eta=rand, use_numba=False)

fake_data = am.direct(image)

# time the numba implementation
t0 = time.time()
res_numba = f_numba(fake_data)
t1 = time.time()
dt_numba = t1 - t0

# time the pure-numpy implementation
t0 = time.time()
res_numpy = f_numpy(fake_data)
t1 = time.time()
dt_numpy = t1 - t0

print("numba {} {}s".format(res_numba, dt_numba))
print("numpy {} {}s".format(res_numpy, dt_numpy))
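# In[ ]:


# Sanity check (illustrative, not in the original notebook): the two code
# paths evaluate the same objective, so they should agree to floating-point
# tolerance on the same input.
import numpy
numpy.testing.assert_allclose(res_numba, res_numpy, rtol=1e-6)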
plt.subplot(2, 1, 1)
plt.imshow(data.as_array())
plt.title('Ground Truth')
plt.colorbar()
plt.subplot(2, 1, 2)
plt.imshow(noisy_data.as_array())
plt.title('Noisy Data')
plt.colorbar()
plt.show()

# Regularisation parameter
alpha = 10

# Set up and run the FISTA algorithm
operator = Gradient(ig)
fid = KullbackLeibler(b=noisy_data)


def KL_Prox_PosCone(x, tau, out=None):
    """Proximal of the Kullback-Leibler fidelity restricted to the positive
    cone: the positive root of the prox quadratic, clipped at 0."""
    if out is None:
        tmp = 0.5 * ((x - fid.bnoise - tau) +
                     ((x + fid.bnoise - tau) ** 2 + 4 * tau * fid.b).sqrt())
        return tmp.maximum(0)
    else:
        # in-place evaluation of the same expression as above
        x.add(fid.bnoise, out=out)
        out -= tau
        out *= out                      # (x + bnoise - tau)**2
        tmp = fid.b * (4 * tau)
        out += tmp                      # ... + 4*tau*b
        out.sqrt(out=out)
        out += x                        # + (x - bnoise - tau)
        out -= fid.bnoise
        out -= tau
        out *= 0.5
        out.maximum(0, out=out)         # project onto the positive cone
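# The closed form above is the positive root of the prox optimality condition
#     (u - x)/tau + 1 - b/(u + eta) = 0,
# clipped to the positive cone. A standalone scalar numpy check against
# brute-force minimisation (illustrative only; eta plays the role of bnoise):
import numpy as np

b_, eta_, x_, tau_ = 3.0, 0.1, 1.5, 0.7
u = np.linspace(1e-6, 10.0, 1_000_000)
objective = 0.5 / tau_ * (u - x_) ** 2 + (u + eta_) - b_ + b_ * np.log(b_ / (u + eta_))
u_star = u[np.argmin(objective)]
closed_form = max(0.0, 0.5 * ((x_ - eta_ - tau_) +
                              np.sqrt((x_ + eta_ - tau_) ** 2 + 4 * tau_ * b_)))
assert abs(u_star - closed_form) < 1e-4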
def main():
    ###########################################################################
    # Parse input files
    ###########################################################################

    if trans_pattern is None:
        raise AssertionError("--trans missing")
    if sino_pattern is None:
        raise AssertionError("--sino missing")
    trans_files = sorted(glob(trans_pattern))
    sino_files = sorted(glob(sino_pattern))
    attn_files = sorted(glob(attn_pattern))
    rand_files = sorted(glob(rand_pattern))

    num_ms = len(sino_files)
    # Check some sinograms found
    if num_ms == 0:
        raise AssertionError("No sinograms found!")
    # Should have as many trans as sinos
    if num_ms != len(trans_files):
        raise AssertionError("#trans should match #sinos. "
                             "#sinos = " + str(num_ms) +
                             ", #trans = " + str(len(trans_files)))
    # If any rand, check num == num_ms
    if len(rand_files) > 0 and len(rand_files) != num_ms:
        raise AssertionError("#rand should match #sinos. "
                             "#sinos = " + str(num_ms) +
                             ", #rand = " + str(len(rand_files)))
    # For attn, there should be 0, 1 or num_ms images
    if len(attn_files) > 1 and len(attn_files) != num_ms:
        raise AssertionError("#attn should be 0, 1 or #sinos")

    ###########################################################################
    # Read input
    ###########################################################################

    if trans_type == "tm":
        trans = [reg.AffineTransformation(file) for file in trans_files]
    elif trans_type == "disp":
        trans = [reg.NiftiImageData3DDisplacement(file)
                 for file in trans_files]
    elif trans_type == "def":
        trans = [reg.NiftiImageData3DDeformation(file)
                 for file in trans_files]
    else:
        raise error("Unknown transformation type")

    sinos_raw = [pet.AcquisitionData(file) for file in sino_files]
    attns = [pet.ImageData(file) for file in attn_files]
    rands = [pet.AcquisitionData(file) for file in rand_files]

    # Loop over all sinograms
    sinos = [0] * num_ms
    for ind in range(num_ms):
        # If any sinograms contain negative values
        # (shouldn't be the case), set them to 0
        sino_arr = sinos_raw[ind].as_array()
        if (sino_arr < 0).any():
            print("Input sinogram " + str(ind) +
                  " contains -ve elements. Setting to 0...")
            sinos[ind] = sinos_raw[ind].clone()
            sino_arr[sino_arr < 0] = 0
            sinos[ind].fill(sino_arr)
        else:
            sinos[ind] = sinos_raw[ind]

        # If rebinning is desired
        segs_to_combine = 1
        if args['--numSegsToCombine']:
            segs_to_combine = int(args['--numSegsToCombine'])
        views_to_combine = 1
        if args['--numViewsToCombine']:
            views_to_combine = int(args['--numViewsToCombine'])
        if segs_to_combine * views_to_combine > 1:
            sinos[ind] = sinos[ind].rebin(segs_to_combine, views_to_combine)
            # only print first time
            if ind == 0:
                print(f"Rebinned sino dimensions: {sinos[ind].dimensions()}")

    ###########################################################################
    # Initialise recon image
    ###########################################################################

    if initial_estimate:
        image = pet.ImageData(initial_estimate)
    else:
        # Create image based on ProjData
        image = sinos[0].create_uniform_image(0.0, (nxny, nxny))
        # If using GPU, need to make sure that image is right size.
        if use_gpu:
            dim = (127, 320, 320)
            spacing = (2.03125, 2.08626, 2.08626)
        # elif non-default spacing desired
        elif args['--dxdy']:
            dim = image.dimensions()
            dxdy = float(args['--dxdy'])
            spacing = (image.voxel_sizes()[0], dxdy, dxdy)
        if use_gpu or args['--dxdy']:
            image.initialise(dim=dim, vsize=spacing)
            image.fill(0.0)

    ###########################################################################
    # Set up resamplers
    ###########################################################################

    resamplers = [get_resampler(image, trans=tran) for tran in trans]

    ###########################################################################
    # Resample attenuation images (if necessary)
    ###########################################################################

    resampled_attns = None
    if len(attns) > 0:
        resampled_attns = [0] * num_ms
        # if using GPU, dimensions of attn and recon images have to match
        ref = image if use_gpu else None
        for i in range(len(attns)):
            # if we only have 1 attn image, then we need to resample into
            # space of each gate. However, if we have num_ms attn images, then
            # assume they are already in the correct position, so use None as
            # transformation.
            tran = trans[i] if len(attns) == 1 else None
            # If only 1 attn image, then resample that. If we have num_ms attn
            # images, then use each attn image of each frame.
            attn = attns[0] if len(attns) == 1 else attns[i]
            resam = get_resampler(attn, ref=ref, trans=tran)
            resampled_attns[i] = resam.forward(attn)

    ###########################################################################
    # Set up acquisition models
    ###########################################################################

    print("Setting up acquisition models...")
    # one acquisition model instance per motion state (multiplying a
    # one-element list would alias a single instance num_ms times)
    if not use_gpu:
        acq_models = [pet.AcquisitionModelUsingRayTracingMatrix()
                      for _ in range(num_ms)]
    else:
        acq_models = [pet.AcquisitionModelUsingNiftyPET()
                      for _ in range(num_ms)]
        for acq_model in acq_models:
            acq_model.set_use_truncation(True)
            acq_model.set_cuda_verbosity(verbosity)

    # If present, create ASM from ECAT8 normalisation data
    asm_norm = None
    if norm_file:
        asm_norm = pet.AcquisitionSensitivityModel(norm_file)

    # Loop over each motion state
    for ind in range(num_ms):
        # Create attn ASM if necessary
        asm_attn = None
        if resampled_attns:
            asm_attn = get_asm_attn(sinos[ind], resampled_attns[ind],
                                    acq_models[ind])

        # Get ASM dependent on attn and/or norm
        asm = None
        if asm_norm and asm_attn:
            if ind == 0:
                print("ASM contains norm and attenuation...")
            asm = pet.AcquisitionSensitivityModel(asm_norm, asm_attn)
        elif asm_norm:
            if ind == 0:
                print("ASM contains norm...")
            asm = asm_norm
        elif asm_attn:
            if ind == 0:
                print("ASM contains attenuation...")
            asm = asm_attn
        if asm:
            acq_models[ind].set_acquisition_sensitivity(asm)

        if len(rands) > 0:
            acq_models[ind].set_background_term(rands[ind])

        # Set up
        acq_models[ind].set_up(sinos[ind], image)

    ###########################################################################
    # Set up reconstructor
    ###########################################################################

    print("Setting up reconstructor...")

    # Create composition operators containing acquisition models and resamplers
    C = [CompositionOperator(am, res, preallocate=True)
         for am, res in zip(acq_models, resamplers)]

    # Configure the PDHG algorithm
    if args['--normK'] and not args['--onlyNormK']:
        normK = float(args['--normK'])
    else:
        kl = [KullbackLeibler(b=sino, eta=(sino * 0 + 1e-5)) for sino in sinos]
        f = BlockFunction(*kl)
        K = BlockOperator(*C)
        # Calculate normK
        print("Calculating norm of the block operator...")
        normK = K.norm(iterations=10)
        print("Norm of the BlockOperator ", normK)
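    # The pieces above assemble the motion-compensated objective
    #     min_x  sum_i KL( (A_i o R_i) x ; b_i )  +  G(x),
    # where R_i resamples the image into motion state i, A_i is that state's
    # acquisition model, and each CompositionOperator is one block K_i = A_i o R_i.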
    if args['--onlyNormK']:
        exit(0)

    # Optionally rescale sinograms and BlockOperator using normK
    scale_factor = 1. / normK if args['--normaliseDataAndBlock'] else 1.0
    kl = [KullbackLeibler(b=sino * scale_factor, eta=(sino * 0 + 1e-5))
          for sino in sinos]
    f = BlockFunction(*kl)
    K = BlockOperator(*C) * scale_factor

    # If preconditioned
    if precond:

        def get_nonzero_recip(data):
            """Get the reciprocal of a datacontainer. Voxels where input == 0
            will have their reciprocal set to 1 (instead of infinity)"""
            inv_np = data.as_array()
            inv_np[inv_np == 0] = 1
            inv_np = 1. / inv_np
            data.fill(inv_np)

        tau = K.adjoint(K.range_geometry().allocate(1))
        get_nonzero_recip(tau)

        tmp_sigma = K.direct(K.domain_geometry().allocate(1))
        sigma = 0. * tmp_sigma
        get_nonzero_recip(sigma[0])

        def precond_proximal(self, x, tau, out=None):
            """Modify proximal method to work with preconditioned tau"""
            pars = {'algorithm': FGP_TV,
                    'input': np.asarray(x.as_array() / tau.as_array(),
                                        dtype=np.float32),
                    'regularization_parameter': self.lambdaReg,
                    'number_of_iterations': self.iterationsTV,
                    'tolerance_constant': self.tolerance,
                    'methodTV': self.methodTV,
                    'nonneg': self.nonnegativity,
                    'printingOut': self.printing}

            res, info = regularisers.FGP_TV(pars['input'],
                                            pars['regularization_parameter'],
                                            pars['number_of_iterations'],
                                            pars['tolerance_constant'],
                                            pars['methodTV'],
                                            pars['nonneg'],
                                            self.device)
            if out is not None:
                out.fill(res)
            else:
                out = x.copy()
                out.fill(res)
            out *= tau
            return out

        FGP_TV.proximal = precond_proximal
        print("Will run proximal with preconditioned tau...")

    # If not preconditioned
    else:
        sigma = float(args['--sigma'])
        # If a tau is given on the command line, use it;
        # otherwise calculate the default tau
        if args['--tau']:
            tau = float(args['--tau'])
        else:
            tau = 1 / (sigma * normK ** 2)

    if regularisation == 'none':
        G = IndicatorBox(lower=0)
    elif regularisation == 'FGP_TV':
        r_iterations = float(args['--reg_iters'])
        r_tolerance = 1e-7
        r_iso = 0
        r_nonneg = 1
        r_printing = 0
        device = 'gpu' if use_gpu else 'cpu'
        G = FGP_TV(r_alpha, r_iterations, r_tolerance,
                   r_iso, r_nonneg, r_printing, device)
    else:
        raise error("Unknown regularisation")

    if precond:

        def PDHG_new_update(self):
            """Modify the PDHG update to allow preconditioning"""
            # save previous iteration
            self.x_old.fill(self.x)
            self.y_old.fill(self.y)

            # Gradient ascent for the dual variable
            self.operator.direct(self.xbar, out=self.y_tmp)
            self.y_tmp *= self.sigma
            self.y_tmp += self.y_old

            self.f.proximal_conjugate(self.y_tmp, self.sigma, out=self.y)

            # Gradient descent for the primal variable
            self.operator.adjoint(self.y, out=self.x_tmp)
            self.x_tmp *= -1 * self.tau
            self.x_tmp += self.x_old

            self.g.proximal(self.x_tmp, self.tau, out=self.x)

            # Update
            self.x.subtract(self.x_old, out=self.xbar)
            self.xbar *= self.theta
            self.xbar += self.x

        PDHG.update = PDHG_new_update

    # Get filename
    outp_file = outp_prefix
    if descriptive_fname:
        if len(attn_files) > 0:
            outp_file += "_wAC"
        if norm_file:
            outp_file += "_wNorm"
        if use_gpu:
            outp_file += "_wGPU"
        outp_file += "_Reg-" + regularisation
        if regularisation == 'FGP_TV':
            outp_file += "-alpha" + str(r_alpha)
            outp_file += "-riters" + str(r_iterations)
        if args['--normK']:
            outp_file += '_userNormK' + str(normK)
        else:
            outp_file += '_calcNormK' + str(normK)
        if args['--normaliseDataAndBlock']:
            outp_file += '_wDataScale'
        else:
            outp_file += '_noDataScale'
        if not precond:
            outp_file += "_sigma" + str(sigma)
            outp_file += "_tau" + str(tau)
        else:
            outp_file += "_wPrecond"
        outp_file += "_nGates" + str(len(sino_files))
        if resamplers is None:
            outp_file += "_noMotion"
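    # Aside: the `precond` branch above computes diagonal step sizes in the
    # style of the Pock-Chambolle rule, tau = 1/(K^T 1) and sigma = 1/(K 1)
    # elementwise (with zeros mapped to 1). A minimal numpy sketch of that
    # rule and its guarantee ||diag(sigma)^(1/2) K diag(tau)^(1/2)|| <= 1 on
    # a toy dense matrix (illustrative only, never called by the script):
    def _diag_precond_demo():
        import numpy as _np
        K_toy = _np.abs(_np.random.default_rng(0).normal(size=(5, 4)))
        sigma_d = 1.0 / K_toy.sum(axis=1)    # one entry per dual variable
        tau_d = 1.0 / K_toy.sum(axis=0)      # one entry per primal variable
        M = _np.diag(_np.sqrt(sigma_d)) @ K_toy @ _np.diag(_np.sqrt(tau_d))
        assert _np.linalg.norm(M, 2) <= 1 + 1e-10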
    pdhg = PDHG(f=f, g=G, operator=K, sigma=sigma, tau=tau,
                max_iteration=num_iters,
                update_objective_interval=update_obj_fn_interval,
                x_init=image, log_file=outp_file + ".log")

    def callback_save(iteration, objective_value, solution):
        """Callback function to save images"""
        if (iteration + 1) % save_interval == 0:
            out = solution if not nifti else reg.NiftiImageData(solution)
            out.write(outp_file + "_iters" + str(iteration + 1))

    pdhg.run(iterations=num_iters, callback=callback_save,
             verbose=True, very_verbose=True)

    if visualisations:
        # show reconstructed image
        out = pdhg.get_output()
        out_arr = out.as_array()
        z = out_arr.shape[0] // 2
        show_2D_array('Reconstructed image', out_arr[z, :, :])
        pylab.show()
# norm of the acquisition model, precomputed with the PowerMethod call below
#am_norm = PowerMethod(am, 20)[0]
am_norm = np.sqrt(4479081.53395)


# # PDHG without regularization

# In[ ]:


# reg parameter
alpha = 0.001

# explicit case

# rescale KL:
# KL(lambda * x + eta, b) = lambda * KL(x + eta/lambda, b/lambda)
f1 = ScaledFunction(
    KullbackLeibler(b=(1 / am_norm) * acq_data, eta=(1 / am_norm) * rand),
    am_norm)
F = f1
G = IndicatorBox(lower=0)

# rescale operators
am_rescaled = ScaledOperator(am, (1 / am_norm))
K = am_rescaled


# In[ ]:


sigma = 1.0
tau = 1.0

pet.set_max_omp_threads(15)
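# In[ ]:


# Quick numpy check of the rescaling identity used above (illustrative only):
#     KL(lam*x + eta; b) == lam * KL(x + eta/lam; b/lam)
import numpy as np

def _kl(v, b):
    return np.sum(v - b + b * np.log(b / v))

_rng = np.random.default_rng(1)
_x = _rng.uniform(0.5, 2.0, 10)
_eta = _rng.uniform(0.1, 0.5, 10)
_b = _rng.uniform(0.5, 2.0, 10)
_lam = 3.7
np.testing.assert_allclose(_kl(_lam * _x + _eta, _b),
                           _lam * _kl(_x + _eta / _lam, _b / _lam))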