Example #1
    def setUp(self):
        data_path = os.path.join(examples_data_path('PET'),
                                 'thorax_single_slice')

        image = pet.ImageData(os.path.join(data_path, 'emission.hv'))

        # acquisition model using the ray-tracing projector, 5 tangential LORs per bin
        am = pet.AcquisitionModelUsingRayTracingMatrix()
        am.set_num_tangential_LORs(5)
        templ = pet.AcquisitionData(
            os.path.join(data_path, 'template_sinogram.hs'))
        am.set_up(templ, image)
        # simulate noiseless acquisition data by forward-projecting the emission image
        acquired_data = am.forward(image)

        # Poisson log-likelihood objective function for the simulated data
        obj_fun = pet.make_Poisson_loglikelihood(acquired_data)
        obj_fun.set_acquisition_model(am)
        obj_fun.set_up(image)

        self.obj_fun = obj_fun
        self.image = image
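
A test built on this fixture would typically probe the stored objective function at self.image. A minimal sketch, assuming the SIRF objective function exposes value() and gradient(image, subset) as used in the SIRF demos (the subset index and checks below are illustrative, not part of the original example):

    def test_value_and_gradient(self):
        # objective value at the true image should be a finite number
        v = self.obj_fun.value(self.image)
        self.assertEqual(v, v)  # rules out NaN

        # gradient for subset 0 should live in the same image space
        g = self.obj_fun.gradient(self.image, 0)
        self.assertEqual(g.dimensions(), self.image.dimensions())
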
Example #2
    def setUp(self):

        os.chdir(examples_data_path('PET'))
        #%% copy files to working folder and change directory to where the output files are
        shutil.rmtree('working_folder/thorax_single_slice', ignore_errors=True)
        shutil.copytree('thorax_single_slice', 'working_folder/thorax_single_slice')
        os.chdir('working_folder/thorax_single_slice')


        image = pet.ImageData('emission.hv')

        am = pet.AcquisitionModelUsingRayTracingMatrix()
        am.set_num_tangential_LORs(5)
        templ = pet.AcquisitionData('template_sinogram.hs')
        am.set_up(templ, image)
        acquired_data = am.forward(image)

        obj_fun = pet.make_Poisson_loglikelihood(acquired_data)
        obj_fun.set_acquisition_model(am)
        obj_fun.set_up(image)

        self.obj_fun = obj_fun
        self.image = image
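
Because this variant copies the example data into a scratch folder and changes the working directory, a matching clean-up step keeps repeated runs reproducible. A possible tearDown, sketched under the assumption that no other test relies on the copied files:

    def tearDown(self):
        # step back out of the scratch copy before deleting it
        os.chdir(examples_data_path('PET'))
        shutil.rmtree('working_folder/thorax_single_slice', ignore_errors=True)
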
Example #3
def test_main(rec=False, verb=False, throw=True):

    # Reduce STIR verbosity; the original level is restored at the end of the test
    original_verb = pet.get_verbosity()
    pet.set_verbosity(1)

    time.sleep(0.5)
    sys.stderr.write("Testing NiftyPET projector...")
    time.sleep(0.5)

    # Get image
    image = get_image()

    # Get the acquisition model; bail out early if NiftyPET support is not available
    try:
        acq_model = pet.AcquisitionModelUsingNiftyPET()
    except Exception:
        return 1, 1
    acq_model.set_cuda_verbosity(verb)

    data_path = examples_data_path('PET')
    # raw_data_path = pet.existing_filepath(os.path.join(data_path, 'mMR'), 'mMR_template_span11.hs')
    raw_data_path = os.path.join(data_path, 'mMR')
    template_acq_data = pet.AcquisitionData(
        os.path.join(raw_data_path, 'mMR_template_span11.hs'))

    acq_model.set_up(template_acq_data, image)

    # Test operator adjointness
    if verb:
        print('testing adjointness')
    if not is_operator_adjoint(acq_model, num_tests=1, verbose=True):
        raise AssertionError('NiftyPet AcquisitionModel is not adjoint')

    # Generate test data
    simulated_acq_data = acq_model.forward(image)
    simulated_acq_data_w_noise = add_noise(simulated_acq_data, 10)

    obj_fun = pet.make_Poisson_loglikelihood(template_acq_data)
    obj_fun.set_acquisition_model(acq_model)

    recon = pet.OSMAPOSLReconstructor()
    recon.set_objective_function(obj_fun)
    recon.set_num_subsets(1)
    recon.set_num_subiterations(1)
    recon.set_input(simulated_acq_data_w_noise)
    if verb:
        print('setting up, please wait...')
    initial_estimate = image.get_uniform_copy()
    recon.set_up(initial_estimate)

    if verb:
        print('reconstructing...')
    recon.set_current_estimate(initial_estimate)
    recon.process()
    reconstructed_im = recon.get_output()
    if not reconstructed_im:
        raise AssertionError('reconstruction produced no output image')

    # Restore the original verbosity
    pet.set_verbosity(original_verb)

    return 0, 1
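
When the file is run directly rather than through a test harness, a small entry point can drive the function; a minimal sketch that reuses the (failures, total) return convention above:

if __name__ == '__main__':
    failures, total = test_main(verb=True)
    print('%d of %d tests failed' % (failures, total))
    sys.exit(failures)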
#%% NAC reconstruction

tprint('Start NAC Recon')

for i, (sino, random) in enumerate(zip(sorted_alphanumeric(list_sino),
                                       sorted_alphanumeric(list_rando))):

    sino_pet = Pet.AcquisitionData(path_sino + sino)
    print(sino)
    randoms_pet = Pet.AcquisitionData(path_rando + random)
    print(random)

    # reconstruct the data (without mu-map)
    obj_fun = Pet.make_Poisson_loglikelihood(sino_pet)
    acq_model.set_background_term(randoms_pet)
    obj_fun.set_acquisition_model(acq_model)  # attach the model so the randoms term is used
    recon.set_objective_function(obj_fun)
    initial_image = sino_pet.create_uniform_image(1.0)
    image = initial_image
    recon.set_up(image)

    recon.set_current_estimate(image)
    recon.process()

    # save recon images
    recon_image = recon.get_output()
    recon_image.write(path_NAC + 'NAC_' + str(i))

    # save Image as .nii
    recon_image = Reg.NiftiImageData(recon.get_output())
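    # write out the NIfTI copy as well (the '.nii' filename below is an
    # assumption for illustration; NiftiImageData provides write())
    recon_image.write(path_NAC + 'NAC_' + str(i) + '.nii')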
for i in range(len(time_intervals)-1):
    print('Begin reconstruction: Frame {}'.format(i))

    # listmode-to-sinogram
    lm2sino.set_time_interval(time_intervals[i], time_intervals[i+1])
    lm2sino.set_up()
    lm2sino.process()
    acq_data = lm2sino.get_output()
    acq_data.write(working_folder + '/sino/sino' + str(i))

    # randoms estimate
    randoms = lm2sino.estimate_randoms()
    randoms.write(working_folder + '/rando/rando' + str(i))

    # reconstruct the data (with attenuation, normalisation and randoms included)
    obj_fun = Pet.make_Poisson_loglikelihood(acq_data)
    asm_attn.set_up(acq_data)
    attn_factors = Pet.AcquisitionData(acq_data)
    attn_factors.fill(1.0)
    asm_attn.unnormalise(attn_factors)
    asm_attn = Pet.AcquisitionSensitivityModel(attn_factors)
    asm = Pet.AcquisitionSensitivityModel(asm_norm, asm_attn)
    acq_model.set_acquisition_sensitivity(asm)
    acq_model.set_background_term(randoms)
    obj_fun.set_acquisition_model(acq_model)
    recon.set_objective_function(obj_fun)
    initial_image = acq_data.create_uniform_image(1.0)
    image = initial_image
    recon.set_up(image)

    recon.set_current_estimate(image)