Example #1
def main():

    # redirect all of the engine's messages (info, warnings, errors) to files
    msg_red = MessageRedirector('info.txt', 'warn.txt', 'errr.txt')

    # select acquisition data storage scheme
    AcquisitionData.set_storage_scheme(storage)

    # create listmode-to-sinograms converter object
    lm2sino = ListmodeToSinograms()

    # set input, output and template files
    lm2sino.set_input(list_file)
    lm2sino.set_output_prefix(sino_file)
    lm2sino.set_template(tmpl_file)

    # set interval
    lm2sino.set_time_interval(interval[0], interval[1])

    # set flags such that we only get the delayed coincidences
    lm2sino.flag_on('store_delayeds')
    lm2sino.flag_off('store_prompts')

    # set up the converter
    lm2sino.set_up()

    # convert
    lm2sino.process()

    # get access to the sinograms
    delayeds_acq_data = lm2sino.get_output()

    # estimate the randoms from the delayeds via Maximum Likelihood estimation
    # This will take at least a few seconds
    randoms_estimate_acq_data = lm2sino.estimate_randoms()

    # copy the acquisition data into Python arrays
    delayeds_acq_array = delayeds_acq_data.as_array()
    randoms_estimate_acq_array = randoms_estimate_acq_data.as_array()
    acq_dim = delayeds_acq_array.shape
    print('acquisition data dimensions: %dx%dx%dx%d' % acq_dim)
    print(
        'The total numbers of delayed coincidences and estimated randoms should be very similar.'
    )
    print('Let us check this:')
    print('total delayeds: %.1f, total estimated randoms: %.1f' %
          (delayeds_acq_array.sum(), randoms_estimate_acq_array.sum()))
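
    # a quick programmatic version of the check above (a sketch; the 1%
    # tolerance is an assumption, not part of the demo)
    total_delayeds = delayeds_acq_array.sum()
    total_randoms = randoms_estimate_acq_array.sum()
    if abs(total_delayeds - total_randoms) > 0.01 * total_delayeds:
        print('warning: totals differ by more than 1%')
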
    print(
        'Max values should be somewhat similar, but this depends on statistics of course.'
    )
    print('max delayeds: %f, max estimated randoms: %f' %
          (delayeds_acq_array.max(), randoms_estimate_acq_array.max()))

    print('A single sinogram (this will look very different for noisy data)')
    z = acq_dim[1] // 2
    if show_plot:
        show_3D_array(np.stack((delayeds_acq_array[0, z, :, :],
                                randoms_estimate_acq_array[0, z, :, :])),
                      titles=('raw delayeds', 'estimated randoms'))
        pylab.show()
Example #2
def main():

    # MR
    # specify the MR raw data source
    input_data = MR.AcquisitionData(input_file)
    # pre-process acquisitions
    processed_data = MR.preprocess_acquisition_data(input_data)
    # perform reconstruction
    recon = MR.FullySampledReconstructor()
    recon.set_input(processed_data)
    recon.process()
    complex_image = recon.get_output()

    # PET
    # convert MR image into PET image
    image_arr = abs(complex_image.as_array())  # image as Python array
    image = PET.ImageData()  # empty PET ImageData object
    image.initialise(image_arr.shape[::-1])  # set image shape
    image.fill(image_arr)  # fill image with values
    # apply filter that zeroes the image outside a cylinder of the same
    # diameter as the image xy-section size
    cyl_filter = PET.TruncateToCylinderProcessor()  # named to avoid shadowing the built-in 'filter'
    cyl_filter.set_input(image)
    cyl_filter.process()
    processed_image = cyl_filter.get_output()
    # shortcuts for the above 3 lines
    # image is left intact
    ##    processed_image = cyl_filter.process(image)
    # image is modified in place
    ##    cyl_filter.apply(image)
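
    # for intuition, a plain-NumPy sketch of what the truncation does
    # (an illustration only; the (z, y, x) axis order of as_array() and the
    # centring convention are assumptions, not taken from this demo)
    import numpy as np
    arr = image.as_array()
    nz, ny, nx = arr.shape
    yy, xx = np.mgrid[:ny, :nx]
    radius = min(nx, ny) / 2.0  # cylinder diameter = xy-section size
    mask = (xx - (nx - 1) / 2.0)**2 + (yy - (ny - 1) / 2.0)**2 <= radius**2
    arr = arr * mask[np.newaxis, :, :]  # zero voxels outside the cylinder
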
    # display image
    pUtil.show_3D_array(image.as_array(), \
                        suptitle = 'MR Image', label = 'slice', show = False)
    pUtil.show_3D_array(processed_image.as_array(), \
                        suptitle = 'PET Processed Image', label = 'slice')
Example #3
# set a portion of bin efficiencies to zero;
bin_efficiencies_array = bin_efficiencies.as_array()
bin_efficiencies_array[0, :, 5:20, :] = 0
bin_efficiencies.fill(bin_efficiencies_array)
#%% Create a new acquisition model
am2 = pet.AcquisitionModelUsingRayTracingMatrix()
am2.set_num_tangential_LORs(5)
am2.set_up(templ, image)
# now include the bin efficiencies in our acquisition model
asm = pet.AcquisitionSensitivityModel(bin_efficiencies)
am2.set_acquisition_sensitivity(asm)
am2.set_up(templ, image)
#%% forward project the image again with this acquisition model and display
acquired_data = am2.forward(image)
acquisition_array = acquired_data.as_array()
show_3D_array(acquisition_array[0, :, :, :])
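
# bins whose efficiency was set to zero should contribute no counts; a quick
# sanity check (a sketch; it assumes the sensitivity model is applied
# multiplicatively by forward())
import numpy
assert numpy.all(acquisition_array[0, :, 5:20, :] == 0), \
    'expected zero counts in the bins with zero efficiency'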

#%% Let us reconstruct this data with the original acquisition model (without bin efficiencies)
obj_fun.set_acquisition_data(acquired_data)
obj_fun.set_acquisition_model(am)
reconstructed_image.fill(1)
recon.set_up(reconstructed_image)
recon.set_num_subiterations(10)
recon.reconstruct(reconstructed_image)
#%% display
# fix the colour-scale maximum relative to the true maximum of the original image
cmax = image.as_array().max() * 1.2
reconstructed_array = reconstructed_image.as_array()
plt.figure()
imshow(reconstructed_array[slice_num, :, :], [0, cmax],
       'reconstructed image with original acquisition model')
Example #4
def main():

    # locate the k-space raw data file
    input_file = existing_filepath(data_path, data_file)

    # acquisition data will be read from an HDF file input_file
    acq_data = AcquisitionData(input_file)
    print('---\n acquisition data norm: %e' % acq_data.norm())

    # pre-process acquisition data
    print('---\n pre-processing acquisition data...')
    processed_data = preprocess_acquisition_data(acq_data)
    print('---\n processed acquisition data norm: %e' % processed_data.norm())

    # perform reconstruction to obtain a meaningful ImageData object
    # (cannot be obtained in any other way at present)
    recon = FullySampledReconstructor()
    recon.set_input(processed_data)
    recon.process()
    complex_images = recon.get_output()
    print('---\n reconstructed images norm: %e' % complex_images.norm())

    for i in range(complex_images.number()):
        complex_image = complex_images.image(i)
        print('--- image %d' % i)
        for p in ['version', 'flags', 'data_type', 'channels',
                  'slice', 'repetition',
                  'image_type', 'image_index', 'image_series_index']:
            form = p + ' %d'
            print(form % complex_image.info(p))
        print('matrix size:', complex_image.matrix_size())
        print('patient_table_position:', complex_image.patient_table_position())

    ind = complex_images.get_info('image_index')
    print('image indices:')
    print(ind)
    ptp = complex_images.get_info('patient_table_position')
    print('patient table positions:')
    print(ptp)

    # sort processed acquisition data;
    # sorting currently performed with respect to (in this order):
    #    - repetition
    #    - slice
    #    - kspace encode step 1
    print('---\n sorting acquisition data...')
    processed_data.sort()
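
    # conceptually this is a lexicographic sort; a plain-NumPy sketch of the
    # same ordering on toy key arrays (an illustration only, not how SIRF
    # stores or sorts its acquisition headers)
    import numpy
    rep = numpy.array([1, 0, 1, 0])  # repetition (primary key)
    slc = numpy.array([0, 1, 1, 0])  # slice (secondary key)
    enc = numpy.array([2, 0, 1, 3])  # kspace encode step 1 (tertiary key)
    order = numpy.lexsort((enc, slc, rep))  # lexsort sorts by the last key first
    print('toy sort order:', order)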

    # compute coil sensitivity maps
    print('---\n computing coil sensitivity maps...')
    csms = CoilSensitivityData()
    csms.calculate(processed_data)
    # alternatively, coil sensitivity maps can be computed from
    # CoilImageData - see coil_sensitivity_maps.py

    # create acquisition model based on the acquisition parameters
    # stored in processed_data and image parameters stored in complex_images
    ##    acq_model = AcquisitionModel(processed_data, complex_images)
    acq_model = AcquisitionModel()
    acq_model.set_up(processed_data, complex_images)
    acq_model.set_coil_sensitivity_maps(csms)

    # use the acquisition model (forward projection) to produce simulated
    # acquisition data
    simulated_acq_data = acq_model.forward(complex_images)
    print('---\n reconstructed images forward projection norm: %e'
          % simulated_acq_data.norm())
    if output_file is not None:
        simulated_acq_data.write(output_file)

    # get simulated acquisition data as a Python ndarray
    simulated_acq_array = simulated_acq_data.as_array()
    # display simulated acquisition data
    #    simulated_acq_array = numpy.transpose(simulated_acq_array,(1,2,0))
    simulated_acq_array = numpy.transpose(simulated_acq_array, (1, 0, 2))
    title = 'Simulated acquisition data (magnitude)'
    show_3D_array(simulated_acq_array, power = 0.2, suptitle = title, \
                  xlabel = 'samples', ylabel = 'readouts', label = 'coil')

    # backproject simulated acquisition data
    backprojected_data = acq_model.backward(simulated_acq_data)
    # show backprojected data
    backprojected_array = backprojected_data.as_array()
    title = 'Backprojected data (magnitude)'
    show_3D_array(abs(backprojected_array), suptitle = title, \
                  xlabel = 'samples', ylabel = 'readouts', label = 'slice')
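
    # forward() and backward() act as a linear operator E and its (approximate)
    # adjoint E^H; a minimal numerical check of the relation
    # <E x, E x> = <x, E^H E x> (a sketch only; close agreement rather than
    # exact equality is what one can reasonably expect here)
    x = complex_images.as_array().flatten()
    Ex = simulated_acq_data.as_array().flatten()
    EhEx = backprojected_data.as_array().flatten()
    print('adjoint check: %e vs %e' %
          (abs(numpy.vdot(Ex, Ex)), abs(numpy.vdot(x, EhEx))))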
Example #5
#%% create a shape
shape = pet.EllipticCylinder()
# define its size (in mm)
shape.set_length(50)
shape.set_radii((30, 40))
# centre of shape in (x,y,z) coordinates where (0,0,0) is centre of first plane
shape.set_origin((60, -30, 20))

#%% add the shape to the image
# first set the image values to 0
image.fill(0)
image.add_shape(shape, scale=1)

#%% add same shape at different location and with different intensity
shape.set_origin((-60, -30, 40))
image.add_shape(shape, scale=0.75)

#%% show the phantom image as a sequence of transverse images
show_3D_array(image.as_array())

#%% forward project this image and display all sinograms
acquired_data = am.forward(image)
acquisition_array = acquired_data.as_array()
show_3D_array(acquisition_array)
#%% Show every 8th view
# Doing this here with a complicated one-liner...
show_3D_array(
    acquisition_array[:, range(0, acquisition_array.shape[1], 8), :].transpose(
        1, 0, 2))
# You could now of course try the animation of the previous demo...
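
# the same selection written out step by step (equivalent to the one-liner
# above; 'views' and 'subset' are names introduced here just for clarity)
views = range(0, acquisition_array.shape[1], 8)  # every 8th view
subset = acquisition_array[:, views, :]          # pick out those views
show_3D_array(subset.transpose(1, 0, 2))         # view index first: one image per view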
Example #6
def main():

    # locate the k-space raw data file
    input_file = existing_filepath(data_path, data_file)

    # acquisition data will be read from an HDF file input_file
    acq_data = AcquisitionData(input_file)
    print('---\n acquisition data norm: %e' % acq_data.norm())

    # pre-process acquisition data
    print('---\n pre-processing acquisition data...')
    processed_data = preprocess_acquisition_data(acq_data)
    print('---\n processed acquisition data norm: %e' % processed_data.norm())

    # perform reconstruction to obtain a meaningful ImageData object
    # (cannot be obtained in any other way at present)
    recon = FullySampledReconstructor()
    recon.set_input(processed_data)
    recon.process()
    complex_images = recon.get_output()
    print('---\n reconstructed images norm: %e' % complex_images.norm())

    # sort processed acquisition data;
    # sorting currently performed with respect to (in this order):
    #    - repetition
    #    - slice
    #    - kspace encode step 1
    print('---\n sorting acquisition data...')
    processed_data.sort()

    # compute coil sensitivity maps
    print('---\n computing coil sensitivity maps...')
    csms = CoilSensitivityData()
    csms.calculate(processed_data)
    # alternatively, coil sensitivity maps can be computed from
    # CoilImageData - see coil_sensitivity_maps.py

    # create acquisition model based on the acquisition parameters
    # stored in processed_data and image parameters stored in complex_images
    acq_model = AcquisitionModel(processed_data, complex_images)
    acq_model.set_coil_sensitivity_maps(csms)

    # use the acquisition model (forward projection) to produce simulated
    # acquisition data
    simulated_acq_data = acq_model.forward(complex_images)
    print('---\n reconstructed images forward projection norm: %e'
          % simulated_acq_data.norm())

    # get simulated acquisition data as a Python ndarray
    simulated_acq_array = simulated_acq_data.as_array()
    # display simulated acquisition data
    #    simulated_acq_array = numpy.transpose(simulated_acq_array,(1,2,0))
    simulated_acq_array = numpy.transpose(simulated_acq_array, (1, 0, 2))
    title = 'Simulated acquisition data (magnitude)'
    show_3D_array(simulated_acq_array, power = 0.2, suptitle = title, \
                  xlabel = 'samples', ylabel = 'readouts', label = 'coil')

    # backproject simulated acquisition data
    backprojected_data = acq_model.backward(simulated_acq_data)
    # show backprojected data
    backprojected_array = backprojected_data.as_array()
    title = 'Backprojected data (magnitude)'
    show_3D_array(abs(backprojected_array), suptitle = title, \
                  xlabel = 'samples', ylabel = 'readouts', label = 'slice')
Example #7
def main():

    # Acquisitions will be read from this HDF file
    input_file = existing_filepath(data_path, data_file)

    # Initially we create a container that points to the HDF5 file. Data is
    # not read from the file until Gadgetron is called via the
    # 'process' method.

    # Create an acquisition container of type pGadgetron.AcquisitionData
    print('---\n reading in file %s...' % input_file)
    acq_data = AcquisitionData(input_file)

    # Pre-process this input data using three preparation gadgets
    # from gadgetron.
    # List gadgets to use (not all may be required for this test data).
    prep_gadgets = ['NoiseAdjustGadget', 'AsymmetricEchoAdjustROGadget',
                    'RemoveROOversamplingGadget']

    # Call gadgetron by using the 'process' method. This runs the gadgets
    # specified in prep_gadgets, returning an instance
    # of pGadgetron.AcquisitionData
    preprocessed_data = acq_data.process(prep_gadgets)

    # Extract sorted k-space, permute dimensions and display
    acq_array = preprocessed_data.as_array(0)
    [ns, nc, nro] = preprocessed_data.dimensions()  # [nx ncoil ny]
    acq_array = numpy.transpose(acq_array, (1, 0, 2))
    title = 'Acquisition data (magnitude)'
    show_3D_array(acq_array, power = 0.2, \
                  suptitle = title, title_size = 16, \
                  xlabel = 'samples', ylabel = 'readouts', label = 'coil')

    # Perform reconstruction of the preprocessed data.

    # 1) Create a recon object for the desired reconstruction.

    # In this demo, the recon object is created using the class
    # Reconstructor(). A simpler class is available in the SIRF code
    # for a GRAPPA reconstruction:
    #   recon = CartesianGRAPPAReconstructor()

    recon_gadgets = [
        'AcquisitionAccumulateTriggerGadget', 'BucketToBufferGadget',
        'GenericReconCartesianReferencePrepGadget',
        'GRAPPA:GenericReconCartesianGrappaGadget',
        'GenericReconFieldOfViewAdjustmentGadget',
        'GenericReconImageArrayScalingGadget', 'ImageArraySplitGadget'
    ]

    recon = Reconstructor(recon_gadgets)

    # 2) The GRAPPA gadget can compute G-factors in addition to
    # reconstructed images. We can set a gadget property as below if the gadget
    # has been identified with a label. In the above list of recon_gadgets,
    # the 4th is labelled 'GRAPPA' and we can use this label as below:
    recon.set_gadget_property('GRAPPA', 'send_out_gfactor', True)

    # If the chain had been set using
    # recon = CartesianGRAPPAReconstructor(), an alternative method
    # would be available:
    #  recon.compute_gfactors(True)

    # 3) Set the reconstruction input to be the data we just preprocessed.
    recon.set_input(preprocessed_data)

    # 4) Run the reconstruction using 'process' to call gadgetron.
    print('---\n reconstructing...\n')
    recon.process()

    # Output

    # Reconstructed data sits in memory. We first need to get handles to
    # both the reconstructed images and the g-factors, before extracting
    # the data as Python arrays.

    # Get image and gfactor data as objects of type pGadgetron.ImageData
    # (Note this syntax may change in the future with the addition of a
    #  method '.get_gfactor'.)
    image_data = recon.get_output('image')
    gfact_data = recon.get_output('gfactor')

    # Extract the data pointed to by the containers as Python arrays.
    # Note that the image data is complex.
    image_as_3D_array = image_data.as_array()
    maxv = numpy.amax(abs(image_as_3D_array))
    title = 'Reconstructed image data (magnitude)'
    show_3D_array(abs(image_as_3D_array), \
                  suptitle = title, title_size = 16, \
                  xlabel = 'samples', ylabel = 'readouts', label = 'slice', \
                  scale = (0, maxv))

    gfactor_as_3D_array = gfact_data.as_array()
    maxv = numpy.amax(abs(gfactor_as_3D_array))
    title = 'G-factor data (magnitude)'
    show_3D_array(abs(gfactor_as_3D_array),
                  suptitle = title, title_size = 16, \
                  xlabel = 'samples', ylabel = 'readouts', label = 'slice', \
                  scale = (0, maxv))
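
    # the g-factor map quantifies the spatially varying noise amplification of
    # the parallel-imaging reconstruction: up to the global sqrt(R) undersampling
    # penalty, the relative SNR retained at each voxel is 1/g (a sketch only;
    # the epsilon guarding against division by zero is an assumption)
    snr_retained = 1.0 / numpy.maximum(abs(gfactor_as_3D_array), 1e-6)
    show_3D_array(snr_retained, suptitle = 'Relative SNR (1/g)', label = 'slice')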
Example #8
# set a portion of bin efficiencies to zero;
bin_efficiencies_array = bin_efficiencies.as_array()
bin_efficiencies_array[:, 5:20, :] = 0
bin_efficiencies.fill(bin_efficiencies_array)
#%% Create a new acquisition model
am2 = pet.AcquisitionModelUsingRayTracingMatrix()
am2.set_num_tangential_LORs(5)
am2.set_up(templ, image)
# now include the bin efficiencies in our acquisition model
asm = pet.AcquisitionSensitivityModel(bin_efficiencies)
am2.set_acquisition_sensitivity(asm)
am2.set_up(templ, image)
#%% forward project the image again with this acquisition model and display
acquired_data = am2.forward(image)
acquisition_array = acquired_data.as_array()
show_3D_array(acquisition_array)

#%% Let us reconstruct this data with the original acquisition model (without bin efficiencies)
obj_fun.set_acquisition_data(acquired_data)
obj_fun.set_acquisition_model(am)
reconstructed_image.fill(1)
recon.set_up(reconstructed_image)
recon.set_num_subiterations(10)
recon.reconstruct(reconstructed_image)
#%% display
# fix the colour-scale maximum relative to the true maximum of the original image
cmax = image.as_array().max() * 1.2
reconstructed_array = reconstructed_image.as_array()
plt.figure()
imshow(reconstructed_array[slice, :, :], [0, cmax],
       'reconstructed image with original acquisition model')