def setUp(self):
    """Load the same single-slice emission image twice as test fixtures."""
    emission_path = os.path.join(
        examples_data_path('PET'), 'thorax_single_slice', 'emission.hv')
    self.image1 = pet.ImageData(emission_path)
    self.image2 = pet.ImageData(emission_path)
def get_x(input_path, input_prefix):
    """Load fixed/moving image pairs for training.

    Expects ``input_path/fixed/`` to hold one file per fixed volume and
    ``input_path/moving/<name>/`` to hold the moving volumes matched to each
    fixed one.  Only filenames containing *input_prefix* are loaded.

    Returns
    -------
    numpy.ndarray
        Float array with one entry per (fixed, moving) pair; each entry is
        the transposed stack of the two volumes, rescaled to [0, 1] and with
        NaNs replaced by 0.
    """
    print("Getting x")

    def _load(path):
        # Read a volume, drop singleton dims and rescale intensities to [0, 1].
        return rescale_linear(PET.ImageData(path).as_array().squeeze(), 0, 1)

    fixed_dir = input_path + "/fixed/"
    fixed_files = sorted(os.listdir(fixed_dir), key=human_sorting)
    print("Get x fixed")
    x_fixed = [_load(fixed_dir + name) for name in fixed_files
               if len(name.split(input_prefix)) > 1]
    print("Got x fixed")

    moving_dir = input_path + "/moving/"
    moving_subdirs = sorted(os.listdir(moving_dir), key=human_sorting)
    print("Get x moving")
    x_moving_fixed = []
    for subdir in moving_subdirs:
        subdir_path = moving_dir + subdir + "/"
        moving_files = sorted(os.listdir(subdir_path), key=human_sorting)
        x_moving_fixed.append(
            [_load(subdir_path + name) for name in moving_files
             if len(name.split(input_prefix)) > 1])
    print("Got x moving")

    x = [np.asarray([fixed, moving]).T
         for fixed, moving_list in zip(x_fixed, x_moving_fixed)
         for moving in moving_list]
    print("Got x")

    # BUGFIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the equivalent (float64) dtype.
    return np.nan_to_num(np.asarray(x)).astype(float)
def main(while_bool, generate_bool, fit_bool, test_bool, correct_bool):
    """Run the selected pipeline stages; repeat forever when while_bool."""
    model = None
    keep_running = True
    while keep_running:
        # decide up-front whether another pass follows this one
        keep_running = while_bool
        if generate_bool:
            print("Generate data")
            generate_data(
                PET.ImageData("blank_image.nii"), 1000, 1,
                "/home/alex/Documents/SIRF-SuperBuild_install/bin/stir_math",
                "/home/alex/Documents/SIRF-SuperBuild_install/bin/reg_resample")
        if fit_bool:
            print("Fit model")
            model = keras_reg.fit_model(model, False, True, True, False,
                                        "./training_data/", ".nii",
                                        "./results/", 1000)
        if test_bool:
            print("Test model")
            keras_reg.test_model(model, False, "./training_data/", ".nii",
                                 "./results/", "./results/")
        if correct_bool:
            print("Correct model")
            correct_data(
                "/home/alex/Documents/SIRF-SuperBuild_install/bin/reg_resample",
                "./training_data/", "./corrected_data/", "./results/")
def main():
    """Generate training data from a blank image, then fit the model."""
    blank = pet.ImageData('blank_image.hv')
    generate_data(blank, 100, 10)
    keras_reg.fit_model(False, False, True, "training_data/", ".nii",
                        "results/")
def test_main(rec=False, verb=False, throw=True):
    """Check that the ray-tracing acquisition model is adjoint under both
    storage schemes.

    Returns (failed, ntests) in the file's test-runner convention.
    """
    pet.MessageRedirector()
    # run the same check with both acquisition-data storage back-ends
    for scheme in ("file", "memory"):
        pet.AcquisitionData.set_storage_scheme(scheme)
        # remember engine verbosity so it can be restored afterwards
        original_verb = pet.get_verbosity()
        pet.set_verbosity(False)
        # create an acq_model that is explicitly a RayTracingMatrix
        am = pet.AcquisitionModelUsingRayTracingMatrix()
        # load sample data
        data_path = pet.examples_data_path('PET')
        raw_data_file = pet.existing_filepath(data_path,
                                              'Utahscat600k_ca_seg4.hs')
        ad = pet.AcquisitionData(raw_data_file)
        # create sample image
        image = pet.ImageData()
        image.initialise(dim=(31, 111, 111), vsize=(2.25, 2.25, 2.25))
        # set up Acquisition Model
        am.set_up(ad, image)
        # test for adjointness of forward/backward projection
        if not is_operator_adjoint(am, verbose=verb):
            raise AssertionError(
                'AcquisitionModelUsingRayTracingMatrix is not adjoint')
        # Reset original verbose-ness
        pet.set_verbosity(original_verb)
    return 0, 1
def try_stirtonifti(nifti_filename):
    """Round-trip *nifti_filename* through STIR and verify nothing changes.

    Loads the file both as a NiftiImageData3D and (via STIR) converted back
    to Nifti, compares the two, then resamples the STIR image onto the Nifti
    geometry and compares voxel values.  Raises AssertionError on mismatch.
    """
    time.sleep(0.5)
    sys.stderr.write('\n# --------------------------------------------------------------------------------- #\n')
    sys.stderr.write('# Starting STIR to Nifti test...\n')
    sys.stderr.write('# --------------------------------------------------------------------------------- #\n')
    time.sleep(0.5)
    # Load the image as a NiftiImageData3D
    image_nifti = reg.NiftiImageData3D(nifti_filename)
    # Read as STIRImageData, convert to NiftiImageData3D and save to file
    image_stir = pet.ImageData(nifti_filename)
    image_nifti_from_stir = reg.NiftiImageData3D(image_stir)
    image_nifti_from_stir.write('results/stir_to_nifti.nii',
                                image_nifti.get_original_datatype())
    # Compare the two
    if image_nifti != image_nifti_from_stir:
        raise AssertionError("Conversion from STIR to Nifti failed.")
    # Resample and then check that voxel values match
    resample = reg.NiftyResample()
    resample.set_floating_image(image_stir)
    resample.set_reference_image(image_nifti)
    resample.set_interpolation_type_to_nearest_neighbour()
    resample.process()
    # as_array() of both original images should match
    if not numpy.array_equal(image_nifti.as_array(),
                             resample.get_output().as_array()):
        raise AssertionError("as_array() of sirf.Reg.NiftiImageData and resampled sirf.STIR.ImageData are different.")
    time.sleep(0.5)
    sys.stderr.write('\n# --------------------------------------------------------------------------------- #\n')
    sys.stderr.write('# Finished STIR to Nifti test.\n')
    sys.stderr.write('# --------------------------------------------------------------------------------- #\n')
    time.sleep(0.5)
def test_BlockDataContainer_with_SIRF_DataContainer_subtract(self):
    """Scalar subtraction on a BlockDataContainer acts on every member."""
    os.chdir(self.cwd)
    first = pet.ImageData('emission.hv')
    second = pet.ImageData('emission.hv')
    first.fill(2)
    second.fill(1)
    print(first.shape, second.shape)
    result = BlockDataContainer(first, second).subtract(1.)
    # expected: each member reduced by one
    first.fill(1)
    second.fill(0)
    expected = BlockDataContainer(first, second)
    self.assertBlockDataContainerEqual(expected, result)
def test_BlockDataContainer_with_SIRF_DataContainer_multiply(self):
    """Multiplying by 1 leaves images and their BlockDataContainer unchanged."""
    os.chdir(self.cwd)
    img_a = pet.ImageData('emission.hv')
    img_b = pet.ImageData('emission.hv')
    img_a.fill(1.)
    img_b.fill(2.)
    print(img_a.shape, img_b.shape)
    # each image individually survives multiplication by 1
    for img in (img_a, img_b):
        product = img.multiply(1.)
        numpy.testing.assert_array_equal(img.as_array(), product.as_array())
    # and so does the container as a whole
    block = BlockDataContainer(img_a, img_b)
    self.assertBlockDataContainerEqual(block, block.multiply(1.))
def main(): """Do main.""" # Acq model and template sino acq_model = pet.AcquisitionModelUsingRayTracingMatrix() acq_data = pet.AcquisitionData(sino_file) # If norm is present asm_norm = None if norm_e8_file: # create acquisition sensitivity model from ECAT8 normalisation data asm_norm = pet.AcquisitionSensitivityModel(norm_e8_file) # If attenuation is present asm_attn = None if attn_im_file: attn_image = pet.ImageData(attn_im_file) if trans: attn_image = resample_attn_image(attn_image) asm_attn = pet.AcquisitionSensitivityModel(attn_image, acq_model) # temporary fix pending attenuation offset fix in STIR: # converting attenuation into 'bin efficiency' asm_attn.set_up(acq_data) bin_eff = pet.AcquisitionData(acq_data) bin_eff.fill(1.0) print('applying attenuation (please wait, may take a while)...') asm_attn.unnormalise(bin_eff) asm_attn = pet.AcquisitionSensitivityModel(bin_eff) # Get ASM dependent on attn and/or norm if asm_norm and asm_attn: print("AcquisitionSensitivityModel contains norm and attenuation...") asm = pet.AcquisitionSensitivityModel(asm_norm, asm_attn) elif asm_norm: print("AcquisitionSensitivityModel contains norm...") asm = asm_norm elif asm_attn: print("AcquisitionSensitivityModel contains attenuation...") asm = asm_attn else: raise ValueError("Need norm and/or attn") # only need to project again if normalisation is added # (since attenuation has already been projected) if asm_norm: asm_attn.set_up(acq_data) bin_eff = pet.AcquisitionData(acq_data) bin_eff.fill(1.0) print('getting sinograms for multiplicative factors...') asm.set_up(acq_data) asm.unnormalise(bin_eff) print('writing multiplicative sinogram: ' + outp_file) bin_eff.write(outp_file)
def test_BlockDataContainer_with_SIRF_DataContainer_add(self):
    """Scalar addition on a BlockDataContainer acts on every member."""
    os.chdir(self.cwd)
    first = pet.ImageData('emission.hv')
    second = pet.ImageData('emission.hv')
    first.fill(0)
    second.fill(1)
    print(first.shape, second.shape)
    # sanity-check scalar add/subtract on the single images first
    plus_one = first.add(1.)
    numpy.testing.assert_array_equal(second.as_array(), plus_one.as_array())
    minus_one = second.subtract(1.)
    numpy.testing.assert_array_equal(first.as_array(), minus_one.as_array())
    result = BlockDataContainer(first, second).add(1.)
    # expected: each member incremented by one
    first.fill(1)
    second.fill(2)
    expected = BlockDataContainer(first, second)
    self.assertBlockDataContainerEqual(expected, result)
def get_image():
    """Build a 127x320x320 test image containing three elliptical cylinders."""
    image = pet.ImageData()
    image.initialise((127, 320, 320), (2.03125, 2.08626, 2.08626))
    image.fill(0)
    # each entry: (cylinder parameters..., scale applied when added)
    cylinders = (
        (get_elliptical_cylinder(200, 100, 1000), 0.75),
        (get_elliptical_cylinder(100, 50, 300, (20, 30, 10)), 3),
        (get_elliptical_cylinder(10, 150, 700, (-20, 50, 50)), 1.5),
    )
    for cylinder, scale in cylinders:
        image.add_shape(cylinder, scale=scale)
    return image
def setUp(self):
    """Simulate single-slice data and build a Poisson log-likelihood fixture."""
    data_dir = os.path.join(examples_data_path('PET'), 'thorax_single_slice')
    image = pet.ImageData(os.path.join(data_dir, 'emission.hv'))
    acq_model = pet.AcquisitionModelUsingRayTracingMatrix()
    acq_model.set_num_tangential_LORs(5)
    template = pet.AcquisitionData(
        os.path.join(data_dir, 'template_sinogram.hs'))
    acq_model.set_up(template, image)
    simulated = acq_model.forward(image)
    objective = pet.make_Poisson_loglikelihood(simulated)
    objective.set_acquisition_model(acq_model)
    objective.set_up(image)
    self.obj_fun = objective
    self.image = image
def main():
    """Estimate scatter for the input prompts and display the result."""
    # direct all engine's messages to files
    msg_red = PET.MessageRedirector('info.txt', 'warn.txt', 'errr.txt')
    PET.AcquisitionData.set_storage_scheme('memory')
    # Create the Scatter Estimator
    # We can use a STIR parameter file like this
    # par_file_path = os.path.join(os.path.dirname(__file__), '..', '..', 'parameter_files')
    # se = PET.ScatterEstimator(PET.existing_filepath(par_file_path, 'scatter_estimation.par'))
    # However, we will just use all defaults here, and set variables below.
    se = PET.ScatterEstimator()
    prompts = PET.AcquisitionData(raw_data_file)
    se.set_input(prompts)
    se.set_attenuation_image(PET.ImageData(mu_map_file))
    if randoms_data_file is None:
        randoms = None
    else:
        randoms = PET.AcquisitionData(randoms_data_file)
        se.set_randoms(randoms)
    if not(norm_file is None):
        se.set_asm(PET.AcquisitionSensitivityModel(norm_file))
    if not(acf_file is None):
        se.set_attenuation_correction_factors(PET.AcquisitionData(acf_file))
    # could set number of iterations if you want to
    se.set_num_iterations(1)
    print("number of scatter iterations that will be used: %d" % se.get_num_iterations())
    se.set_output_prefix(output_prefix)
    se.set_up()
    se.process()
    scatter_estimate = se.get_output()
    ## show estimated scatter data
    scatter_estimate_as_array = scatter_estimate.as_array()
    show_2D_array('Scatter estimate', scatter_estimate_as_array[0, 0, :, :])
    ## let's draw some profiles to check
    # we will average over all sinograms to reduce noise
    PET_plot_functions.plot_sinogram_profile(prompts, randoms=randoms, scatter=scatter_estimate)
def setUp(self):
    """Copy example data to a scratch folder and build a Poisson
    log-likelihood objective from simulated single-slice data."""
    os.chdir(examples_data_path('PET'))
    # work on a disposable copy of the example data
    shutil.rmtree('working_folder/thorax_single_slice', True)
    shutil.copytree('thorax_single_slice',
                    'working_folder/thorax_single_slice')
    os.chdir('working_folder/thorax_single_slice')
    image = pet.ImageData('emission.hv')
    acq_model = pet.AcquisitionModelUsingRayTracingMatrix()
    acq_model.set_num_tangential_LORs(5)
    template = pet.AcquisitionData('template_sinogram.hs')
    acq_model.set_up(template, image)
    simulated = acq_model.forward(image)
    objective = pet.make_Poisson_loglikelihood(simulated)
    objective.set_acquisition_model(acq_model)
    objective.set_up(image)
    self.obj_fun = objective
    self.image = image
list_mu = [f for f in os.listdir(path_mu) if f.endswith(".nii")] #%% AC reconstruction tprint('Start AC Recon') for i, sino, random, mu in zip(range(len(path_sino)), sorted_alphanumeric(list_sino), sorted_alphanumeric(list_rando), sorted_alphanumeric(list_mu)): sino_pet = Pet.AcquisitionData(path_sino + sino) print(sino) randoms_pet = Pet.AcquisitionData(path_rando + random) print(random) mu_pet = Pet.ImageData(path_mu + mu) print(mu) # definitions for attenuation attn_acq_model = Pet.AcquisitionModelUsingRayTracingMatrix() asm_attn = Pet.AcquisitionSensitivityModel(mu_pet, attn_acq_model) # reconstruct the data (includes all) obj_fun = Pet.make_Poisson_loglikelihood(sino_pet) asm_attn.set_up(sino_pet) attn_factors = Pet.AcquisitionData(sino_pet) attn_factors.fill(1.0) asm_attn.unnormalise(attn_factors) asm_attn = Pet.AcquisitionSensitivityModel(attn_factors) asm = Pet.AcquisitionSensitivityModel(asm_norm, asm_attn) acq_model.set_acquisition_sensitivity(asm)
#%% Files attn_file = py_path + '/UKL_data/mu_Map/stir_mu_map.hv' # .nii possible, requires ITK print('mu-Map: {}'.format(attn_file)) # template for acq_data template_acq_data = Pet.AcquisitionData('Siemens_mMR', span=11, max_ring_diff=16, view_mash_factor=1) template_acq_data.write('template.hs') #%% resample mu-Map into correct space and transform via invers tm tprint('Start Resampling') attn_image = Pet.ImageData(attn_file) template_image = template_acq_data.create_uniform_image(1.0) # define space matrices tm_fwd = numpy.loadtxt(py_path + '/UKL_data/tm_epi/reg_NAC_EPI.txt') tm_inv = numpy.loadtxt(py_path + '/UKL_data/tm_epi/reg_NAC_EPI_inv.txt') # settings for attn resampler resamplers_attn = Reg.NiftyResample() resamplers_attn.set_reference_image(template_image) resamplers_attn.set_floating_image(attn_image) resamplers_attn.set_padding_value(0) resamplers_attn.set_interpolation_type_to_linear() i = 0 for num in num_tm:
def main():
    """Simulate motion-resolved brainweb PET sinograms and MR k-space data."""
    # Make output folder if necessary
    if not os.path.isdir(data_path):
        os.makedirs(data_path)
    os.chdir(data_path)

    # Download the data
    print("downloading brainweb data...")
    [FDG_arr, uMap_arr, T1_arr] = download_data()

    # Get template PET image from template raw
    template_PET_raw = pet.AcquisitionData(template_PET_raw_path)
    template_PET_im = pet.ImageData(template_PET_raw)

    # Get template MR image from template raw
    template_MR_raw = mr.AcquisitionData(template_MR_raw_path)
    template_MR_raw.sort_by_time()
    template_MR_raw = mr.preprocess_acquisition_data(template_MR_raw)
    template_MR_im = simple_mr_recon(template_MR_raw)

    # Number voxels in (x,y) directions - nxy (dictated by MR image)
    nxy = template_MR_im.get_geometrical_info().get_size()[0]
    if nxy != template_MR_im.get_geometrical_info().get_size()[1]:
        raise AssertionError("Expected square image in (x,y) direction")
    if template_MR_im.get_geometrical_info().get_size()[2] > 1:
        raise AssertionError("Only currently designed for 2D image")

    # Create PET image: a single slice of nxy x nxy voxels
    dim = (1, nxy, nxy)
    size = FDG_arr.shape
    z_slice = size[0] // 2
    # centred crop window in (x, y)
    xy_min = (size[1] - nxy) // 2
    xy_max = xy_min + nxy
    voxel_size = template_PET_im.voxel_sizes()
    template_PET_im.initialise(dim, voxel_size)

    # Reorient template MR image with template PET image such that
    # it's compatible with both
    template_MR_im.reorient(template_PET_im.get_geometrical_info())

    ########################################################################
    # Crop brainweb image to right size
    ########################################################################
    # Convert brainweb's (127,344,344) to desired size
    print("Cropping brainweb images to size...")
    [FDG, uMap, T1] = [crop_brainweb(template_MR_im, im_arr, z_slice,
                                     xy_min, xy_max)
                       for im_arr in [FDG_arr, uMap_arr, T1_arr]]

    ########################################################################
    # Apply motion
    ########################################################################
    print("Resampling images to different motion states...")
    FDGs = [0] * num_ms
    uMaps = [0] * num_ms
    T1s = [0] * num_ms
    for ind in range(num_ms):
        # Get TM for given motion state
        tm = get_and_save_tm(ind)
        # Get resampler
        res = get_resampler_from_tm(tm, template_MR_im)
        # Resample each modality into this motion state
        for im, modality in zip([FDG, uMap, T1], ['FDG', 'uMap', 'T1']):
            resampled = res.forward(im)
            if modality == 'FDG':
                FDGs[ind] = resampled
            elif modality == 'uMap':
                uMaps[ind] = resampled
            elif modality == 'T1':
                T1s[ind] = resampled
            else:
                raise AssertionError("Unknown modality")
            reg.NiftiImageData(resampled).write(modality + '_ms' + str(ind))

    ########################################################################
    # MR: create k-space data for motion states
    ########################################################################
    # Create coil sensitivity data
    print("Calculating coil sensitivity map...")
    csm = mr.CoilSensitivityData()
    csm.smoothness = 500
    csm.calculate(template_MR_raw)

    # Create interleaved sampling: every num_ms-th readout per motion state
    print("Creating raw k-space data for MR motion states...")
    mvec = []
    for ind in range(num_ms):
        mvec.append(np.arange(ind, template_MR_raw.number(), num_ms))

    # Go through motion states and create k-space
    for ind in range(num_ms):
        acq_ms = template_MR_raw.new_acquisition_data(empty=True)
        # Set first two acquisitions explicitly
        # (presumably calibration/header readouts — confirm)
        acq_ms.append_acquisition(template_MR_raw.acquisition(0))
        acq_ms.append_acquisition(template_MR_raw.acquisition(1))
        # Add motion resolved data
        for jnd in range(len(mvec[ind])):
            if mvec[ind][jnd] < template_MR_raw.number() - 1 and mvec[ind][
                    jnd] > 1:
                # Ensure first and last are not added twice
                cacq = template_MR_raw.acquisition(mvec[ind][jnd])
                acq_ms.append_acquisition(cacq)
        # Set last acquisition
        acq_ms.append_acquisition(
            template_MR_raw.acquisition(template_MR_raw.number() - 1))
        # Create acquisition model
        AcqMod = mr.AcquisitionModel(acq_ms, T1s[ind])
        AcqMod.set_coil_sensitivity_maps(csm)
        # Forward project!
        acq_ms_sim = AcqMod.forward(T1s[ind])
        # Save
        print("writing: " + 'raw_T1_ms' + str(ind) + '.h5')
        acq_ms_sim.write('raw_T1_ms' + str(ind) + '.h5')

    ########################################################################
    # PET: create sinograms
    ########################################################################
    print("Creating singorams for PET motion states...")
    stir_uMap = template_PET_im.clone()
    stir_FDG = template_PET_im.clone()
    for ind in range(num_ms):
        stir_uMap.fill(uMaps[ind].as_array())
        stir_FDG.fill(FDGs[ind].as_array())
        am = get_acquisition_model(stir_uMap, template_PET_raw)
        FDG_sino = am.forward(stir_FDG)
        FDG_sino = add_noise(0.25, FDG_sino)
        FDG_sino.write('raw_FDG_ms' + str(ind))
def main():
    """Generate a small training dataset from a blank template image."""
    stir_math_path = (
        "/home/alex/Documents/SIRF-SuperBuild_install/bin/stir_math")
    reg_resample_path = (
        "/home/alex/Documents/SIRF-SuperBuild_install/bin/reg_resample")
    template = PET.ImageData("blank_image.hv")
    generate_data(template, 10, 10, stir_math_path, reg_resample_path)
def main():
    """Simulate single-scatter data for a cylindrical phantom and compare
    it to the unscattered projection of the same activity."""
    ## PET.AcquisitionData.set_storage_scheme('memory')
    # no info printing from the engine, warnings and errors sent to stdout
    msg_red = PET.MessageRedirector()

    # Create a template Acquisition Model
    #acq_template = AcquisitionData('Siemens mMR', 1, 0, 1)
    acq_template = PET.AcquisitionData(
        acq_template_filename)  #q.get_uniform_copy()

    # create the attenuation image sized to match the template
    atten_image = PET.ImageData(acq_template)
    image_size = atten_image.dimensions()
    voxel_size = atten_image.voxel_sizes()

    # create a cylindrical water phantom
    water_cyl = PET.EllipticCylinder()
    water_cyl.set_length(image_size[0] * voxel_size[0])
    water_cyl.set_radii((image_size[1]*voxel_size[1]*0.25, \
        image_size[2]*voxel_size[2]*0.25))
    water_cyl.set_origin((image_size[0] * voxel_size[0] * 0.5, 0, 0))

    # add the shape to the image (scale is water's mu in 1/mm — confirm units)
    atten_image.add_shape(water_cyl, scale=9.687E-02)

    # z-pixel coordinate of the xy-crossection to show
    z = int(image_size[0] * 0.5)

    # show the phantom image
    atten_image_array = atten_image.as_array()
    show_2D_array('Attenuation image', atten_image_array[z, :, :])

    # Create the activity image
    act_image = atten_image.clone()
    act_image.fill(0.0)

    # create the activity cylinder, smaller and off-centre
    act_cyl = PET.EllipticCylinder()
    act_cyl.set_length(image_size[0] * voxel_size[0])
    act_cyl.set_radii((image_size[1] * voxel_size[1] * 0.125, \
        image_size[2] * voxel_size[2] * 0.125))
    act_cyl.set_origin((0, image_size[1] * voxel_size[1] * 0.06, \
        image_size[2] * voxel_size[2] * 0.06))

    # add the shape to the image
    act_image.add_shape(act_cyl, scale=1)

    # z-pixel coordinate of the xy-crossection to show
    z = int(image_size[0] * 0.5)

    # show the phantom image
    act_image_array = act_image.as_array()
    show_2D_array('Activity image', act_image_array[z, :, :])

    # Create the Single Scatter Simulation model
    sss = PET.SingleScatterSimulator()

    # Set the attenuation image
    sss.set_attenuation_image(atten_image)

    # set-up the scatter simulator
    sss.set_up(acq_template, act_image)

    # Simulate!
    sss_data = sss.forward(act_image)

    # show simulated scatter data
    simulated_scatter_as_array = sss_data.as_array()
    show_2D_array('scatter simulation',
                  simulated_scatter_as_array[0, 0, :, :])

    sss_data.write(output_file)

    ## let's also compute the unscattered counts (at the same low
    ## resolution) and compare
    acq_model = PET.AcquisitionModelUsingRayTracingMatrix()
    asm = PET.AcquisitionSensitivityModel(atten_image, acq_model)
    acq_model.set_acquisition_sensitivity(asm)
    acq_model.set_up(acq_template, act_image)
    #unscattered_data = acq_template.get_uniform_copy()
    unscattered_data = acq_model.forward(act_image)
    simulated_unscatter_as_array = unscattered_data.as_array()
    show_2D_array('unscattered simulation',
                  simulated_unscatter_as_array[0, 0, :, :])

    # overlay one profile of each for visual comparison
    plt.figure()
    ax = plt.subplot(111)
    plt.plot(simulated_unscatter_as_array[0, 4, 0, :], label='unscattered')
    plt.plot(simulated_scatter_as_array[0, 4, 0, :], label='scattered')
    ax.legend()
    plt.show()
def main():
    """Create a small training dataset from a blank template image."""
    template = pet.ImageData('blank_image.hv')
    generate_data(template, 10, 10)
def main():
    """Motion-corrected PET reconstruction with (optionally preconditioned)
    PDHG.

    Reads sinograms, per-gate transformations and optional attenuation,
    randoms and normalisation data via the module-level CLI arguments,
    builds one acquisition model per motion state composed with a
    resampler, and solves the regularised problem with PDHG, saving
    intermediate images via a callback.
    """
    ###########################################################################
    # Parse input files
    ###########################################################################
    if trans_pattern is None:
        raise AssertionError("--trans missing")
    if sino_pattern is None:
        raise AssertionError("--sino missing")
    trans_files = sorted(glob(trans_pattern))
    sino_files = sorted(glob(sino_pattern))
    attn_files = sorted(glob(attn_pattern))
    rand_files = sorted(glob(rand_pattern))

    num_ms = len(sino_files)
    # Check some sinograms found
    if num_ms == 0:
        raise AssertionError("No sinograms found!")
    # Should have as many trans as sinos
    if num_ms != len(trans_files):
        raise AssertionError("#trans should match #sinos. "
                             "#sinos = " + str(num_ms) +
                             ", #trans = " + str(len(trans_files)))
    # If any rand, check num == num_ms
    if len(rand_files) > 0 and len(rand_files) != num_ms:
        raise AssertionError("#rand should match #sinos. "
                             "#sinos = " + str(num_ms) +
                             ", #rand = " + str(len(rand_files)))
    # For attn, there should be 0, 1 or num_ms images
    if len(attn_files) > 1 and len(attn_files) != num_ms:
        raise AssertionError("#attn should be 0, 1 or #sinos")

    ###########################################################################
    # Read input
    ###########################################################################
    if trans_type == "tm":
        trans = [reg.AffineTransformation(file) for file in trans_files]
    elif trans_type == "disp":
        trans = [
            reg.NiftiImageData3DDisplacement(file) for file in trans_files
        ]
    elif trans_type == "def":
        trans = [reg.NiftiImageData3DDeformation(file) for file in trans_files]
    else:
        raise error("Unknown transformation type")

    sinos_raw = [pet.AcquisitionData(file) for file in sino_files]
    attns = [pet.ImageData(file) for file in attn_files]
    rands = [pet.AcquisitionData(file) for file in rand_files]

    # Loop over all sinograms
    sinos = [0] * num_ms
    for ind in range(num_ms):
        # If any sinograms contain negative values
        # (shouldn't be the case), set them to 0
        sino_arr = sinos_raw[ind].as_array()
        if (sino_arr < 0).any():
            print("Input sinogram " + str(ind) +
                  " contains -ve elements. Setting to 0...")
            sinos[ind] = sinos_raw[ind].clone()
            sino_arr[sino_arr < 0] = 0
            sinos[ind].fill(sino_arr)
        else:
            sinos[ind] = sinos_raw[ind]
        # If rebinning is desired
        segs_to_combine = 1
        if args['--numSegsToCombine']:
            segs_to_combine = int(args['--numSegsToCombine'])
        views_to_combine = 1
        if args['--numViewsToCombine']:
            views_to_combine = int(args['--numViewsToCombine'])
        if segs_to_combine * views_to_combine > 1:
            sinos[ind] = sinos[ind].rebin(segs_to_combine, views_to_combine)
            # only print first time
            if ind == 0:
                print(f"Rebinned sino dimensions: {sinos[ind].dimensions()}")

    ###########################################################################
    # Initialise recon image
    ###########################################################################
    if initial_estimate:
        image = pet.ImageData(initial_estimate)
    else:
        # Create image based on ProjData
        image = sinos[0].create_uniform_image(0.0, (nxny, nxny))
        # If using GPU, need to make sure that image is right size.
        if use_gpu:
            dim = (127, 320, 320)
            spacing = (2.03125, 2.08626, 2.08626)
        # elif non-default spacing desired
        elif args['--dxdy']:
            dim = image.dimensions()
            dxdy = float(args['--dxdy'])
            spacing = (image.voxel_sizes()[0], dxdy, dxdy)
        if use_gpu or args['--dxdy']:
            image.initialise(dim=dim, vsize=spacing)
            image.fill(0.0)

    ###########################################################################
    # Set up resamplers
    ###########################################################################
    resamplers = [get_resampler(image, trans=tran) for tran in trans]

    ###########################################################################
    # Resample attenuation images (if necessary)
    ###########################################################################
    resampled_attns = None
    if len(attns) > 0:
        resampled_attns = [0] * num_ms
        # if using GPU, dimensions of attn and recon images have to match
        ref = image if use_gpu else None
        # BUGFIX: loop over all motion states. The original iterated
        # range(len(attns)), which left resampled_attns[1:] unset whenever a
        # single attenuation image was supplied (contradicting the comments
        # below).
        for i in range(num_ms):
            # if we only have 1 attn image, then we need to resample into
            # space of each gate. However, if we have num_ms attn images,
            # then assume they are already in the correct position, so use
            # None as transformation.
            tran = trans[i] if len(attns) == 1 else None
            # If only 1 attn image, then resample that. If we have num_ms
            # attn images, then use each attn image of each frame.
            attn = attns[0] if len(attns) == 1 else attns[i]
            resam = get_resampler(attn, ref=ref, trans=tran)
            resampled_attns[i] = resam.forward(attn)

    ###########################################################################
    # Set up acquisition models
    ###########################################################################
    print("Setting up acquisition models...")
    # BUGFIX: build one model object per motion state. The original used
    # `num_ms * [Model()]`, which aliases a single model num_ms times, so
    # every set_up/set_acquisition_sensitivity overwrote the previous gate.
    if not use_gpu:
        acq_models = [pet.AcquisitionModelUsingRayTracingMatrix()
                      for _ in range(num_ms)]
    else:
        acq_models = [pet.AcquisitionModelUsingNiftyPET()
                      for _ in range(num_ms)]
        # NiftyPET-specific settings
        for acq_model in acq_models:
            acq_model.set_use_truncation(True)
            acq_model.set_cuda_verbosity(verbosity)

    # If present, create ASM from ECAT8 normalisation data
    asm_norm = None
    if norm_file:
        asm_norm = pet.AcquisitionSensitivityModel(norm_file)

    # Loop over each motion state
    for ind in range(num_ms):
        # Create attn ASM if necessary
        asm_attn = None
        if resampled_attns:
            # BUGFIX: index with `ind`; the original used the stale loop
            # variable `i` left over from the resampling loop above.
            asm_attn = get_asm_attn(sinos[ind], resampled_attns[ind],
                                    acq_models[ind])
        # Get ASM dependent on attn and/or norm
        asm = None
        if asm_norm and asm_attn:
            if ind == 0:
                print("ASM contains norm and attenuation...")
            asm = pet.AcquisitionSensitivityModel(asm_norm, asm_attn)
        elif asm_norm:
            if ind == 0:
                print("ASM contains norm...")
            asm = asm_norm
        elif asm_attn:
            if ind == 0:
                print("ASM contains attenuation...")
            asm = asm_attn
        if asm:
            acq_models[ind].set_acquisition_sensitivity(asm)
        if len(rands) > 0:
            acq_models[ind].set_background_term(rands[ind])
        # Set up
        acq_models[ind].set_up(sinos[ind], image)

    ###########################################################################
    # Set up reconstructor
    ###########################################################################
    print("Setting up reconstructor...")

    # Create composition operators containing acquisition models and
    # resamplers
    C = [
        CompositionOperator(am, res, preallocate=True)
        for am, res in zip(*(acq_models, resamplers))
    ]

    # Configure the PDHG algorithm
    if args['--normK'] and not args['--onlyNormK']:
        normK = float(args['--normK'])
    else:
        kl = [KullbackLeibler(b=sino, eta=(sino * 0 + 1e-5)) for sino in sinos]
        f = BlockFunction(*kl)
        K = BlockOperator(*C)
        # Calculate normK
        print("Calculating norm of the block operator...")
        normK = K.norm(iterations=10)
        print("Norm of the BlockOperator ", normK)
        if args['--onlyNormK']:
            exit(0)

    # Optionally rescale sinograms and BlockOperator using normK
    scale_factor = 1. / normK if args['--normaliseDataAndBlock'] else 1.0
    kl = [
        KullbackLeibler(b=sino * scale_factor, eta=(sino * 0 + 1e-5))
        for sino in sinos
    ]
    f = BlockFunction(*kl)
    K = BlockOperator(*C) * scale_factor

    # If preconditioned
    if precond:

        def get_nonzero_recip(data):
            """Get the reciprocal of a datacontainer. Voxels where input == 0
            will have their reciprocal set to 1 (instead of infinity)"""
            inv_np = data.as_array()
            inv_np[inv_np == 0] = 1
            inv_np = 1. / inv_np
            data.fill(inv_np)

        tau = K.adjoint(K.range_geometry().allocate(1))
        get_nonzero_recip(tau)

        tmp_sigma = K.direct(K.domain_geometry().allocate(1))
        sigma = 0. * tmp_sigma
        get_nonzero_recip(sigma[0])

        def precond_proximal(self, x, tau, out=None):
            """Modify proximal method to work with preconditioned tau"""
            pars = {
                'algorithm': FGP_TV,
                'input': np.asarray(x.as_array() / tau.as_array(),
                                    dtype=np.float32),
                'regularization_parameter': self.lambdaReg,
                'number_of_iterations': self.iterationsTV,
                'tolerance_constant': self.tolerance,
                'methodTV': self.methodTV,
                'nonneg': self.nonnegativity,
                'printingOut': self.printing
            }
            res, info = regularisers.FGP_TV(pars['input'],
                                            pars['regularization_parameter'],
                                            pars['number_of_iterations'],
                                            pars['tolerance_constant'],
                                            pars['methodTV'], pars['nonneg'],
                                            self.device)
            if out is not None:
                out.fill(res)
            else:
                out = x.copy()
                out.fill(res)
            out *= tau
            return out

        FGP_TV.proximal = precond_proximal
        print("Will run proximal with preconditioned tau...")

    # If not preconditioned
    else:
        sigma = float(args['--sigma'])
        # If we need to calculate default tau
        if args['--tau']:
            tau = float(args['--tau'])
        else:
            tau = 1 / (sigma * normK**2)

    if regularisation == 'none':
        G = IndicatorBox(lower=0)
    elif regularisation == 'FGP_TV':
        r_iterations = float(args['--reg_iters'])
        r_tolerance = 1e-7
        r_iso = 0
        r_nonneg = 1
        r_printing = 0
        device = 'gpu' if use_gpu else 'cpu'
        G = FGP_TV(r_alpha, r_iterations, r_tolerance, r_iso, r_nonneg,
                   r_printing, device)
    else:
        raise error("Unknown regularisation")

    if precond:

        def PDHG_new_update(self):
            """Modify the PDHG update to allow preconditioning"""
            # save previous iteration
            self.x_old.fill(self.x)
            self.y_old.fill(self.y)
            # Gradient ascent for the dual variable
            self.operator.direct(self.xbar, out=self.y_tmp)
            self.y_tmp *= self.sigma
            self.y_tmp += self.y_old
            self.f.proximal_conjugate(self.y_tmp, self.sigma, out=self.y)
            # Gradient descent for the primal variable
            self.operator.adjoint(self.y, out=self.x_tmp)
            self.x_tmp *= -1 * self.tau
            self.x_tmp += self.x_old
            self.g.proximal(self.x_tmp, self.tau, out=self.x)
            # Update
            self.x.subtract(self.x_old, out=self.xbar)
            self.xbar *= self.theta
            self.xbar += self.x

        PDHG.update = PDHG_new_update

    # Get filename
    outp_file = outp_prefix
    if descriptive_fname:
        if len(attn_files) > 0:
            outp_file += "_wAC"
        if norm_file:
            outp_file += "_wNorm"
        if use_gpu:
            outp_file += "_wGPU"
        outp_file += "_Reg-" + regularisation
        if regularisation == 'FGP_TV':
            outp_file += "-alpha" + str(r_alpha)
            outp_file += "-riters" + str(r_iterations)
        if args['--normK']:
            outp_file += '_userNormK' + str(normK)
        else:
            outp_file += '_calcNormK' + str(normK)
        if args['--normaliseDataAndBlock']:
            outp_file += '_wDataScale'
        else:
            outp_file += '_noDataScale'
        if not precond:
            outp_file += "_sigma" + str(sigma)
            outp_file += "_tau" + str(tau)
        else:
            outp_file += "_wPrecond"
        outp_file += "_nGates" + str(len(sino_files))
        # NOTE(review): resamplers is always a list here, never None, so
        # this suffix is currently unreachable — confirm intent.
        if resamplers is None:
            outp_file += "_noMotion"

    pdhg = PDHG(f=f,
                g=G,
                operator=K,
                sigma=sigma,
                tau=tau,
                max_iteration=num_iters,
                update_objective_interval=update_obj_fn_interval,
                x_init=image,
                log_file=outp_file + ".log")

    def callback_save(iteration, objective_value, solution):
        """Callback function to save images"""
        if (iteration + 1) % save_interval == 0:
            out = solution if not nifti else reg.NiftiImageData(solution)
            out.write(outp_file + "_iters" + str(iteration + 1))

    pdhg.run(iterations=num_iters,
             callback=callback_save,
             verbose=True,
             very_verbose=True)

    if visualisations:
        # show reconstructed image
        out = pdhg.get_output()
        out_arr = out.as_array()
        z = out_arr.shape[0] // 2
        show_2D_array('Reconstructed image', out.as_array()[z, :, :])
        pylab.show()
# Re-create a clean working folder for this run's output files.
if os.path.exists(working_folder):
    shutil.rmtree(working_folder)
if not os.path.exists(working_folder):
    os.makedirs(working_folder, mode=0o770)

# change the current working directory to the given path
os.chdir(working_folder)

# input files (prepend the data directory to the bare file names)
list_file = data_path + list_file
norm_file = data_path + norm_file
print('LM data: {}'.format(list_file))
print('Norm data: {}'.format(norm_file))
print('mu-Map: {}'.format(attn_file))
attn_image = Pet.ImageData(attn_file)

# output filename prefixes
sino_file = 'sino'

#%% Create folders for results
path_sino = working_folder + '/sino/'
path_rando = working_folder + '/rando/'
path_NAC = working_folder + '/recon/NAC/'
path_smooth = working_folder + '/recon/SMOOTH/'
path_tm = working_folder + '/tm/'
path_mu = working_folder + '/mu/'
path_AC = working_folder + '/recon/AC/'
path_moco = working_folder + '/moco/'
tm_nacs = [0] * num_motion_steps # transform new TM matrices into PET space and save as file and in list n = 0 for item in num_tm: tm_epi = numpy.loadtxt(path_EPI + sorted_alphanumeric(os.listdir(path_EPI))[item]) tm_nac = tm_epi #tm_inv * tm_epi * tm_fwd numpy.savetxt(path_tm + 'tm_' + str(item), tm_nac) tm_nacs[n] = tm_nac n += 1 #%% resample mu-Map into correct space and transform via invers tm tprint('Start Resampling mu-Maps') attn_image = Pet.ImageData(attn_file) template_image = template_acq_data.create_uniform_image(1.0) i = 0 for num in num_tm: print('Begin resampling mu-Maps: {}'.format(path_EPI + 'tm_epi_' + str(num) + '.txt')) # read matrix and calculate invers matrix = numpy.loadtxt(path_EPI + 'tm_epi_' + str(num) + '.txt') matrix2 = numpy.linalg.inv(matrix) # create affine transformation from numpy array tm = Reg.AffineTransformation(matrix2) resampler = Reg.NiftyResample()
# NOTE(review): this is the tail of a generate_image(input_image) function
# whose `def` line is outside this chunk; it adds 2-10 random cylinders,
# then noise and blur, and returns the modified image.
    image_shape = input_image.as_array().shape
    for i in range(random.randint(2, 10)):
        shape = PET.EllipticCylinder()
        shape.set_length(1)
        # radii bounded by an eighth of the image extent in each direction
        shape.set_radii((random.uniform(1, image_shape[1] / 8),
                         random.uniform(1, image_shape[2] / 8)))
        radii = shape.get_radii()
        # keep the cylinder's origin far enough from the edge that the
        # whole shape stays inside the central half of the image
        shape.set_origin((0,
                          random.uniform(-(image_shape[1] / 4) + radii[1],
                                         image_shape[1] / 4 - radii[1]),
                          random.uniform(-(image_shape[2] / 4) + radii[0],
                                         image_shape[2] / 4 - radii[0])))
        input_image.add_shape(shape, scale=random.uniform(0, 1))
    input_image = add_noise(input_image)
    input_image = blur_image(input_image, 1)
    return input_image


if __name__ == "__main__":
    # demo: generate one random image and display its first slice
    image = generate_image(PET.ImageData("blank_image.hv"))
    plt.imshow(image.as_array()[0, :, :])
    plt.show()
# Acquisition duration (s) used in the NEMA file names below.
seconds = 600
data_path = '/home/edo/scratch/code/PETMR/install/share/sirf/NEMA'
os.chdir(os.path.abspath(data_path))
acq_data = pet.AcquisitionData('NEMA_sino_0-{}s.hs'.format(seconds))
# fix a problem with the header which doesn't allow
# to do algebra with randoms and sinogram
# rand_arr = pet.AcquisitionData('{}/sino_randoms_f1g1d0b0.hs'.format(data_path)).as_array()
# NOTE(review): despite the `_arr` name this is an AcquisitionData object,
# not a numpy array — it is passed to fill() below, confirm that is valid.
rand_arr = pet.AcquisitionData('NEMA_randoms_0-{}s.hs'.format(seconds))
# copy of the prompts with the (fixed) header, filled with the randoms
rand = acq_data * 0
rand.fill(rand_arr)
image = acq_data.create_uniform_image(1., (127, 220, 220))
image.initialise(dim=(127, 220, 220),
                 vsize=(2.03125, 1.7080754, 1.7080754))
attns = pet.ImageData('mu_map.hv')
asm_norm = pet.AcquisitionSensitivityModel('norm.n.hdr')


def get_asm_attn(sino, attn, acq_model):
    """Get attn ASM from sino, attn image and acq model."""
    asm_attn = pet.AcquisitionSensitivityModel(attn, acq_model)
    # temporary fix pending attenuation offset fix in STIR:
    # converting attenuation into 'bin efficiency'
    asm_attn.set_up(sino)
    bin_eff = pet.AcquisitionData(sino)
    bin_eff.fill(1.0)
    asm_attn.unnormalise(bin_eff)
    asm_attn = pet.AcquisitionSensitivityModel(bin_eff)
    return asm_attn