def setUp(self):
    """Load the same single-slice PET emission image twice.

    Two independently constructed ImageData objects are kept so tests can
    compare/operate on distinct containers with identical content.
    """
    emission_file = os.path.join(
        examples_data_path('PET'), 'thorax_single_slice', 'emission.hv')
    self.image1 = pet.ImageData(emission_file)
    self.image2 = pet.ImageData(emission_file)
def setUp(self):
    """Load the same NIfTI test image twice via the Registration engine."""
    nii_file = os.path.join(examples_data_path('Registration'), 'test2.nii.gz')
    self.image1 = reg.ImageData(nii_file)
    self.image2 = reg.ImageData(nii_file)
def setUp(self):
    """Load identical MR acquisition data twice and sort both copies."""
    h5_file = os.path.join(
        examples_data_path('MR'), 'simulated_MR_2D_cartesian.h5')
    acq_a = mr.AcquisitionData(h5_file)
    acq_b = mr.AcquisitionData(h5_file)
    acq_a.sort()
    acq_b.sort()
    self.image1 = acq_a
    self.image2 = acq_b
def setUp(self):
    """Create two zero-filled acquisition-data containers from the optional
    mMR span-11 template.

    The template file is not always shipped with the example data; when it
    is absent, ``image1``/``image2`` are simply not set (tests relying on
    them are expected to guard against that).
    """
    # Compute the path once instead of repeating the join in the exists()
    # test and the constructor — matches the sibling setUp that uses the
    # single-slice template.
    path = os.path.join(examples_data_path('PET'), 'mMR',
                        'mMR_template_span11_small.hs')
    if os.path.exists(path):
        template = pet.AcquisitionData(path)
        self.image1 = template.get_uniform_copy(0)
        self.image2 = template.get_uniform_copy(0)
    # NOTE(review): assumed to run regardless of whether the template file
    # exists, mirroring the single-slice sibling setUp — confirm indentation
    # against the original file.
    self.set_storage_scheme()
def setUp(self):
    """Create two zero-filled copies of the single-slice template sinogram.

    When the template file is missing, ``image1``/``image2`` are left unset.
    """
    template_file = os.path.join(
        examples_data_path('PET'), 'thorax_single_slice',
        'template_sinogram.hs')
    if os.path.exists(template_file):
        sinogram = pet.AcquisitionData(template_file)
        self.image1 = sinogram.get_uniform_copy(0)
        self.image2 = sinogram.get_uniform_copy(0)
    self.set_storage_scheme()
def setUp(self):
    """Stage the brain example data in a disposable working folder.

    Files are copied so the original SIRF example data is never touched;
    deleting ``working_folder`` restarts from scratch.
    """
    if not has_sirf:
        return
    os.chdir(examples_data_path('PET'))
    # A stale copy from a previous run is removed first (errors ignored).
    shutil.rmtree('working_folder/brain', True)
    shutil.copytree('brain', 'working_folder/brain')
    os.chdir('working_folder/brain')
    self.cwd = os.getcwd()
def setUp(self):
    """Reconstruct the fully-sampled cartesian MR example and keep two images."""
    raw = mr.AcquisitionData(os.path.join(
        examples_data_path('MR'), 'simulated_MR_2D_cartesian.h5'))
    preprocessed = mr.preprocess_acquisition_data(raw)
    reconstructor = mr.FullySampledReconstructor()
    reconstructor.set_input(preprocessed)
    reconstructor.process()
    result = reconstructor.get_output()
    self.image1 = result
    # Multiplying by 1 presumably yields a distinct container with the same
    # values — confirm against the SIRF DataContainer semantics.
    self.image2 = result * 1
def setUp(self):
    """Build a Poisson log-likelihood objective for the single-slice phantom."""
    folder = os.path.join(examples_data_path('PET'), 'thorax_single_slice')
    img = pet.ImageData(os.path.join(folder, 'emission.hv'))

    model = pet.AcquisitionModelUsingRayTracingMatrix()
    model.set_num_tangential_LORs(5)
    template = pet.AcquisitionData(
        os.path.join(folder, 'template_sinogram.hs'))
    model.set_up(template, img)

    # Forward-project the emission image to obtain simulated data, then use
    # it to construct and initialise the objective function.
    simulated = model.forward(img)
    objective = pet.make_Poisson_loglikelihood(simulated)
    objective.set_acquisition_model(model)
    objective.set_up(img)

    self.obj_fun = objective
    self.image = img
def setUp(self):
    """Build the objective function inside a scratch copy of the example data.

    The single-slice data is copied to a working folder first so output
    files do not clutter the SIRF example data.
    """
    os.chdir(examples_data_path('PET'))
    # Remove any stale copy (errors ignored), then re-copy and enter it.
    shutil.rmtree('working_folder/thorax_single_slice', True)
    shutil.copytree('thorax_single_slice',
                    'working_folder/thorax_single_slice')
    os.chdir('working_folder/thorax_single_slice')

    img = pet.ImageData('emission.hv')
    model = pet.AcquisitionModelUsingRayTracingMatrix()
    model.set_num_tangential_LORs(5)
    template = pet.AcquisitionData('template_sinogram.hs')
    model.set_up(template, img)

    # Simulated data from a forward projection feeds the objective function.
    simulated = model.forward(img)
    objective = pet.make_Poisson_loglikelihood(simulated)
    objective.set_acquisition_model(model)
    objective.set_up(img)

    self.obj_fun = objective
    self.image = img
def test_main(rec=False, verb=False, throw=True):
    """Exercise the NiftyPET projector.

    Checks operator adjointness, then runs a single OSMAPOSL sub-iteration
    on noisy simulated data.

    Returns
    -------
    (failures, total): (0, 1) on success; (1, 1) when the NiftyPET
    acquisition model is unavailable in this build.
    """
    # Reduce STIR verbosity for the duration of the test (restored at the
    # end). NOTE(review): 1 is not fully "off" if 0 means silent — confirm
    # the intended level against the STIR verbosity scale.
    original_verb = pet.get_verbosity()
    pet.set_verbosity(1)
    time.sleep(0.5)
    sys.stderr.write("Testing NiftyPET projector...")
    time.sleep(0.5)

    # Get image
    image = get_image()

    # The NiftyPET acquisition model is an optional component; treat its
    # absence as "not applicable". Was a bare `except:`, which also caught
    # SystemExit/KeyboardInterrupt — narrowed to Exception.
    try:
        acq_model = pet.AcquisitionModelUsingNiftyPET()
    except Exception:
        return 1, 1
    acq_model.set_cuda_verbosity(verb)

    data_path = examples_data_path('PET')
    raw_data_path = os.path.join(data_path, 'mMR')
    template_acq_data = pet.AcquisitionData(
        os.path.join(raw_data_path, 'mMR_template_span11.hs'))
    acq_model.set_up(template_acq_data, image)

    # Test operator adjointness
    if verb:
        print('testing adjointness')
    if not is_operator_adjoint(acq_model, num_tests=1, verbose=True):
        raise AssertionError('NiftyPet AcquisitionModel is not adjoint')

    # Generate noisy test data.
    simulated_acq_data = acq_model.forward(image)
    simulated_acq_data_w_noise = add_noise(simulated_acq_data, 10)

    obj_fun = pet.make_Poisson_loglikelihood(template_acq_data)
    obj_fun.set_acquisition_model(acq_model)

    # One subset, one sub-iteration: just enough to prove the pipeline runs.
    recon = pet.OSMAPOSLReconstructor()
    recon.set_objective_function(obj_fun)
    recon.set_num_subsets(1)
    recon.set_num_subiterations(1)
    recon.set_input(simulated_acq_data_w_noise)
    if verb:
        print('setting up, please wait...')
    initial_estimate = image.get_uniform_copy()
    recon.set_up(initial_estimate)

    if verb:
        print('reconstructing...')
    recon.set_current_estimate(initial_estimate)
    recon.process()
    reconstructed_im = recon.get_output()
    if not reconstructed_im:
        raise AssertionError()

    # Restore the caller's STIR verbosity.
    pet.set_verbosity(original_verb)
    return 0, 1
__version__ = '0.1.0'

from docopt import docopt
# Parse command-line options against the module docstring (docopt usage text).
args = docopt(__doc__, version=__version__)

import time

# import SIRF utilities
from sirf.Utilities import examples_data_path, existing_filepath, error
# import MR engine types
from sirf.Gadgetron import AcquisitionData, Reconstructor

# process command-line options
data_file = args['--file']
data_path = args['--path']
if data_path is None:
    # default to the SIRF MR example data when no path was supplied
    data_path = examples_data_path('MR')
output_file = args['--output']


def main():
    # locate the input data
    input_file = existing_filepath(data_path, data_file)
    acq_data = AcquisitionData(input_file)

    # create reconstruction object
    # Rather than using a predefined image reconstruction object, here a new
    # image reconstruction object is created by concatenating multiple gadgets
    # (for more information on Gadgetron and its gadgets please see:
    # https://github.com/gadgetron/.).
    # Parameters for individual gadgets can be defined either during the
    # creation of the reconstruction object:
# import engine module import sirf.Reg from sirf.Utilities import examples_data_path exec('import p' + args['--eng_ref'] + ' as eng_ref') exec('import p' + args['--eng_flo'] + ' as eng_flo') # process command-line options ref_file = args['--ref'] flo_file = args['--flo'] algo = args['--algo'] pad = args['--pad'] # if using the default for any, need to get the examples folder if (ref_file or flo_file) is None: examples_path = examples_data_path('Registration') # reference if ref_file is None: ref_file = examples_path + "/test.nii.gz" # floating if flo_file is None: flo_file = examples_path + "/test2.nii.gz" # parse the transformations trans_filenames_str = args['--trans_filenames'] trans_types_str = args['--trans_types'] trans_filenames = list() trans_types = list() if trans_filenames_str:
def test_main(rec=False, verb=False, throw=True):
    # Reconstruct the simulated 2D cartesian MR example with a Gadgetron
    # gadget chain and return (failures, total) = (0, 1) on success.
    data_file = 'simulated_MR_2D_cartesian.h5'
    data_path = examples_data_path('MR')
    output_file = None          # no file output by default
    type_to_save = 'all'        # save both magnitude and imaginary images
    show_plot = False
    algorithm = 'SimpleReconGadget'

    # locate the input data
    input_file = existing_filepath(data_path, data_file)
    acq_data = AcquisitionData(input_file)

    # SimpleReconGadget needs no field-of-view adjustment; other algorithms do.
    if algorithm == 'SimpleReconGadget':
        extra_gadgets = [algorithm]
    else:
        extra_gadgets = [algorithm, 'GenericReconFieldOfViewAdjustmentGadget']

    # create reconstruction object
    # Rather than using a predefined image reconstruction object, here a new
    # image reconstruction object is created by concatenating multiple gadgets
    # (for more information on Gadgetron and its gadgets please see:
    # https://github.com/gadgetron/.).
    # Parameters for individual gadgets can be defined either during the
    # creation of the reconstruction object:
    # e.g. AcquisitionAccumulateTriggerGadget(trigger_dimension=repetition)
    # or by giving a gadget a label (cf. label ex: for the last gadget)
    # and using set_gadget_property(label, propery, value).
    # The gadgets will be concatenated and will be executed as soon as
    # process() is called.
    recon_gadgets = ['NoiseAdjustGadget',
                     'AsymmetricEchoAdjustROGadget',
                     'RemoveROOversamplingGadget',
                     'AcquisitionAccumulateTriggerGadget'
                     '(trigger_dimension=repetition)',
                     'BucketToBufferGadget(split_slices=true, verbose=false)'] \
        + extra_gadgets + \
        ['ImageArraySplitGadget',
         'ex:ExtractGadget']
    recon = Reconstructor(recon_gadgets)

    # ExtractGadget defines which type of image should be returned:
    # none      0
    # magnitude 1
    # real      2
    # imag      4
    # phase     8
    # in this example '5' returns both magnitude and imaginary part
    ## recon.set_gadget_property('ex', 'extract_mask', 5)
    # === THE ABOVE IS OBSOLETE, NOW SHOULD USE ===>
    if type_to_save == 'mag' or type_to_save == 'all':
        recon.set_gadget_property('ex', 'extract_magnitude', True)
    if type_to_save == 'imag' or type_to_save == 'all':
        recon.set_gadget_property('ex', 'extract_imag', True)

    # provide raw k-space data as input
    recon.set_input(acq_data)

    # optionally set Gadgetron server host and port
    recon.set_host('localhost')
    # On VM you can try a port other than the default 9002, e.g. 9003, by
    # taking the following steps:
    # 1) in ~/devel/install/share/gadgetron/config/gadgetron.xml replace
    #    <port>9002</port> with <port>9003</port>
    # 2) go to Settings->Network->Advanced->Port Forwarding and add new rule
    #    (click on green + in the upper right corner) with Host and Guest
    #    ports set to 9003
    # 3) uncomment the next line
    #recon.set_port('9003')
    # Note: each gadget chain can run on a different VM - to try, start two
    # VMs and do the above steps 1 and 2 on one of them, then add
    # recon.set_port('9003') before recon.process in grappa_detail.py
    # (where preprocessing will still run on default port 9002).

    # perform reconstruction
    recon.process()

    # retrieve reconstructed image data
    image_data = recon.get_output()

    # show reconstructed image data
    if show_plot:
        for im in range(image_data.number()):
            image = image_data.image(im)
            # image types   series
            # magnitude 1       0
            # phase     2    3000
            # real      3    1000
            # imag      4    2000
            im_type = image.image_type()
            im_series = image.image_series_index()
            print('image: %d, type: %d, series: %d' % (im, im_type, im_series))
        image_data.show(title='Images magnitude and imaginary part')

    if output_file is not None:
        filename = output_file
        # Split on the FIRST dot: "out.nii.gz" -> ("out", "nii.gz");
        # no dot means default to the HDF5 extension.
        i = filename.find('.')
        if i < 0:
            ext = 'h5'
        else:
            ext = filename[i + 1:]
            filename = filename[:i]
        print('writing to %s' % (filename + '.' + ext))
        image_data.write(filename, ext=ext)

    return 0, 1
args = docopt(__doc__, version=__version__)


def check_file_exists(fname):
    """Raise an error if *fname* does not name an existing file."""
    if not path.isfile(fname):
        raise error('File not found: %s' % fname)


# process command-line options
data_path = args['--path']
if data_path is None:
    # default to data/examples/PET/mMR
    # Note: seem to need / even on Windows
    data_path = examples_data_path('PET') + '/mMR'
print('Finding files in %s' % data_path)

# sinogram; if not found, fall back to the one in the example data
sino_file = existing_filepath(data_path, args['--sino'])
# attenuation image
attn_im_file = existing_filepath(data_path, args['--attn'])
# ECAT8 norm file
norm_e8_file = existing_filepath(data_path, args['--norm'])

# optional transformation applied to the attenuation image
trans = args['--trans']
if trans:
    check_file_exists(trans)
import numpy as np
from random import randint
from sirf.Utilities import error
from docopt import docopt

__version__ = '0.1.0'
# Parse command-line options against the module docstring (docopt usage text).
args = docopt(__doc__, version=__version__)

# path
data_path = args['--path']

# PET sino: use the supplied template or fall back to the mMR example data
if args['--PET']:
    template_PET_raw_path = args['--PET']
else:
    template_PET_raw_path = os.path.join(
        examples_data_path('PET'), 'mMR/mMR_template_single_slice.hs')

# MR sino: use the supplied raw data or fall back to the GRAPPA example
if args['--MR']:
    template_MR_raw_path = args['--MR']
else:
    template_MR_raw_path = os.path.join(
        examples_data_path('MR'), 'grappa2_1rep.h5')

# Num motion states
num_ms = int(args['--num_ms'])


def get_resampler_from_tm(tm, image):
    """returns a NiftyResampler object for the specified transform matrix
    and image"""
def tearDown(self):
    """Delete the scratch copy of the single-slice data after each test."""
    # Leave the working folder before removing it (errors are ignored).
    os.chdir(examples_data_path('PET'))
    shutil.rmtree('working_folder/thorax_single_slice', True)