def test_pipeline_warpping(self):
    """ Method to test the on-the-fly wrapping of an XML description
    into a pipeline.
    """
    pipeline = get_process_instance("capsul.utils.test.xml_pipeline.xml")
    self.assertTrue(isinstance(pipeline, Pipeline))
    for node_name in ["", "p1", "p2"]:
        self.assertTrue(node_name in pipeline.nodes)
def __init__(self, process, iterative_parameters):
    super(ProcessIteration, self).__init__()
    self.process = get_process_instance(process)
    self.regular_parameters = set()
    self.iterative_parameters = set(iterative_parameters)

    # Check that all iterative parameters are valid process parameters
    user_traits = self.process.user_traits()
    for parameter in self.iterative_parameters:
        if parameter not in user_traits:
            raise ValueError('Cannot iterate on parameter %s '
                             'that is not a parameter of process %s'
                             % (parameter, self.process.id))

    # Create the iterative process parameters by copying the underlying
    # process parameters and turning iterative parameters into lists
    for name, trait in user_traits.iteritems():
        if name in iterative_parameters:
            self.add_trait(name, List(trait, output=trait.output,
                                      optional=trait.optional))
        else:
            self.regular_parameters.add(name)
            self.add_trait(name, trait)
            # Copy the initial value of the underlying process to self.
            # Note: should this be done via a links system?
            setattr(self, name, getattr(self.process, name))
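# A minimal usage sketch of the constructor above. The process id
# ("mypackage.SmoothProcess") and the parameter names ("input_image", "fwhm")
# are hypothetical placeholders; how the iteration is actually executed is
# handled elsewhere in the ProcessIteration class.
iteration = ProcessIteration(
    "mypackage.SmoothProcess", iterative_parameters=["input_image"])
# The iterative parameter is now a List trait: one value per iteration.
iteration.input_image = ["/tmp/scan_1.nii", "/tmp/scan_2.nii"]
# Regular parameters keep the wrapped process defaults and are set normally.
iteration.fwhm = 5.0  # hypothetical regular parameter of the wrapped process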
def test_simple_run(self):
    """ Method to test a simple 1 cpu call with the scheduler.
    """
    # Configure the environment
    study_config = StudyConfig(
        modules=[],
        use_smart_caching=True,
        output_directory=self.outdir,
        number_of_cpus=1,
        generate_logging=True,
        use_scheduler=True)

    # Create pipeline
    pipeline = get_process_instance(self.pipeline_name)
    pipeline.date_in_filename = True

    # Set pipeline input parameters
    dicom_dataset = get_sample_data("dicom")
    dcmfolder = os.path.join(self.outdir, "dicom")
    if not os.path.isdir(dcmfolder):
        os.makedirs(dcmfolder)
    shutil.copy(dicom_dataset.barre, os.path.join(dcmfolder, "heart.dcm"))
    pipeline.source_dir = dcmfolder

    # View pipeline
    if 0:
        from capsul.qt_gui.widgets import PipelineDevelopperView
        from PySide import QtGui
        app = QtGui.QApplication(sys.argv)
        view1 = PipelineDevelopperView(pipeline)
        view1.show()
        app.exec_()

    # Execute the pipeline in the configured study
    study_config.run(pipeline)
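# The test method above relies on attributes defined by its TestCase fixture
# (self.pipeline_name, self.outdir). A minimal sketch of such a fixture,
# assuming the DICOM-to-NIfTI pipeline id used elsewhere in this document;
# the class name is hypothetical.
import shutil
import tempfile
import unittest


class TestSchedulerRun(unittest.TestCase):
    """ Hypothetical fixture for the 'test_simple_run' method above. """
    def setUp(self):
        self.pipeline_name = "dcmio.dcmconverter.dcm_to_nii.xml"  # assumed
        self.outdir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.outdir)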
def pilot_gdti_estimation():
    """
    Generalized diffusion tensor estimation
    =======================================
    """
    # System import
    import os
    import sys
    import datetime
    import PySide.QtGui as QtGui

    # CAPSUL import
    from capsul.qt_gui.widgets import PipelineDevelopperView
    from capsul.study_config.study_config import StudyConfig
    from capsul.process.loader import get_process_instance

    """
    Study configuration
    -------------------

    We first define the working directory and guarantee that this folder
    exists on the file system:
    """
    working_dir = "/volatile/nsap/clindmri/gdti"
    if not os.path.isdir(working_dir):
        os.makedirs(working_dir)

    """
    We then define the study configuration (here we activate the smart
    caching module that is able to remember which processes have already
    been executed):
    """
    study_config = StudyConfig(
        modules=["SmartCachingConfig"],
        use_smart_caching=True,
        output_directory=working_dir)

    # Create pipeline
    start_time = datetime.datetime.now()
    print "Start Pipeline Creation", start_time
    pipeline = get_process_instance("clindmri.estimation.gdti.xml")
    print "Done in {0} seconds.".format(datetime.datetime.now() - start_time)

    # View pipeline
    if 0:
        app = QtGui.QApplication(sys.argv)
        view1 = PipelineDevelopperView(pipeline)
        view1.show()
        app.exec_()
        del view1

    # Set pipeline input parameters
    pipeline.dfile = "/volatile/imagen/dmritest/000000022453/DTI/000000022453s011a1001.nii.gz"
    pipeline.bvalfile = "/volatile/imagen/dmritest/000000022453/DTI/000000022453s011a1001.bval"
    pipeline.bvecfile = "/volatile/imagen/dmritest/000000022453/DTI/000000022453s011a1001.bvec"
    pipeline.order = 2
    pipeline.odf = False
    print "Done in {0} seconds.".format(datetime.datetime.now() - start_time)

    # Execute the pipeline in the configured study
    study_config.run(pipeline, verbose=1)
def test_process_warpping(self):
    """ Method to test the on-the-fly wrapping of a function into a process.
    """
    process = get_process_instance(
        "capsul.process.test.test_load_from_description.to_warp_func")
    self.assertTrue(isinstance(process, Process))
    for input_name in ["parameter1", "parameter2", "parameter3"]:
        self.assertTrue(input_name in process.traits(output=False))
    for output_name in ["output1", "output2"]:
        self.assertTrue(output_name in process.traits(output=True))
    process()
    self.assertEqual(process.output1, 1)
    self.assertEqual(process.output2, "done")
modules=["MatlabConfig", "SPMConfig", "NipypeConfig", "FSLConfig", "FreeSurferConfig", "SmartCachingConfig"], matlab_exec="/neurospin/local/bin/matlab", spm_directory="/i2bm/local/spm8-6313", use_matlab=True, use_spm=True, use_nipype=True, use_smart_caching=True, output_directory="/volatile/nsap/catalogue/spm_first_level/") print "Done in {0} seconds".format(datetime.datetime.now() - start_time) # Create pipeline start_time = datetime.datetime.now() print "Start Pipeline Creation", start_time pipeline = get_process_instance( "caps.nsap.functional_statistic.pipeline.spm_first_level_pipeline.xml") print "Done in {0} seconds.".format(datetime.datetime.now() - start_time) # Set pipeline input parameters start_time = datetime.datetime.now() print "Start Parametrization", start_time localizer_dataset = get_sample_data("localizer") pipeline.behavioral_data = [localizer_dataset.onsets] pipeline.fmri_sessions = [localizer_dataset.preproc_fmri] pipeline.time_repetition = localizer_dataset.TR pipeline.realignment_parameters = localizer_dataset.mouvment_parameters pipeline.condition_name = "Conditions" pipeline.onset_name = "Onsets" pipeline.duration_name = "Durations" pipeline.delimiter = ";"
def pilot_qa_fmri():
    """
    Imports
    -------

    This code needs the 'capsul' and 'mmutils' packages in order to
    instantiate and execute the pipeline and to get a toy dataset.
    These packages are available in the 'neurospin' source list or on pypi.
    """
    # System import
    import os
    import sys
    import json
    import tempfile

    # Capsul import
    from capsul.study_config.study_config import StudyConfig
    from capsul.process.loader import get_process_instance

    # Mmutils import
    from mmutils.toy_datasets import get_sample_data

    """
    Parameters
    ----------

    The 'pipeline_name' parameter contains the location of the pipeline XML
    description that will perform the functional MRI quality assurance, and
    'outdir' the location of the pipeline's results: in this case a
    temporary directory.
    """
    pipeline_name = "mmqa.fmri.fmri_quality_assurance_bbox.xml"
    outdir = tempfile.mkdtemp()

    """
    Capsul configuration
    --------------------

    A 'StudyConfig' has to be instantiated in order to execute the pipeline
    properly. It enables us to define the results directory through the
    'output_directory' attribute, the number of CPUs to be used through the
    'number_of_cpus' attribute, and to request a log of the processing steps
    through 'generate_logging'. 'use_scheduler' must be set to True if more
    than one CPU is used.
    """
    study_config = StudyConfig(
        number_of_cpus=1,
        generate_logging=True,
        use_scheduler=True,
        output_directory=outdir)

    """
    Get the toy dataset
    -------------------

    The toy dataset is composed of a functional image that is downloaded if
    necessary through the 'get_sample_data' function and exported locally.
    """
    localizer_dataset = get_sample_data("localizer_extra")

    """
    Pipeline definition
    -------------------

    The pipeline XML description is first imported through the
    'get_process_instance' method, and the resulting pipeline instance is
    parametrized: the functional image, the repetition time, the ROI size
    and the output score file are set.
    """
    pipeline = get_process_instance(pipeline_name)
    pipeline.image_file = localizer_dataset.fmri
    pipeline.repetition_time = 2.0
    pipeline.exclude_volume = []
    pipeline.roi_size = 21
    pipeline.score_file = os.path.join(outdir, "scores.json")

    """
    Pipeline representation
    -----------------------

    By executing this block of code, a pipeline representation can be
    displayed. This representation is composed of boxes connected to each
    other.
    """
    if 0:
        from capsul.qt_gui.widgets import PipelineDevelopperView
        from PySide import QtGui
        app = QtGui.QApplication(sys.argv)
        view1 = PipelineDevelopperView(pipeline)
        view1.show()
        app.exec_()

    """
    Pipeline execution
    ------------------

    Finally the pipeline is executed in the defined 'study_config'.
    """
    study_config.run(pipeline)

    """
    Access the result
    -----------------

    Display the computed scores.
    """
    scores_file = pipeline.scores_file
    with open(scores_file, "r") as _file:
        scores = json.load(_file)
    for key, value in scores.iteritems():
        print "{0} = {1}".format(key, value)
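# The pilot above is a self-contained example; a minimal sketch of how it can
# be launched, assuming this module is executed directly as a script.
if __name__ == "__main__":
    pilot_qa_fmri()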
# Configure the environment
start_time = datetime.datetime.now()
print "Start Configuration", start_time
study_config = StudyConfig(
    modules=["SmartCachingConfig"],
    use_smart_caching=True,
    output_directory="/volatile/nsap/catalogue/quality_assurance/")
print "Done in {0} seconds".format(datetime.datetime.now() - start_time)

# Create pipeline
start_time = datetime.datetime.now()
print "Start Pipeline Creation", start_time
pipeline = get_process_instance("mmqa.fmri.fmri_quality_assurance.xml")
print "Done in {0} seconds.".format(datetime.datetime.now() - start_time)

# Set pipeline input parameters
start_time = datetime.datetime.now()
print "Start Parametrization", start_time
localizer_dataset = get_sample_data("localizer")
pipeline.image_file = localizer_dataset.fmri
pipeline.repetition_time = localizer_dataset.TR
print "Done in {0} seconds.".format(datetime.datetime.now() - start_time)

# View pipeline
app = QtGui.QApplication(sys.argv)
view1 = PipelineDevelopperView(pipeline)
study_config = StudyConfig(
    modules=["SmartCachingConfig"],
    # matlab_exec="/neurospin/local/bin/matlab",
    # spm_directory="/i2bm/local/spm8-6313",
    # use_matlab=True,
    # use_spm=True,
    # use_nipype=True,
    use_smart_caching=False,
    output_directory="/volatile/nsap/frouin/my_caps_bank")
print "Done in {0} seconds".format(datetime.datetime.now() - start_time)

# Create pipeline
start_time = datetime.datetime.now()
print "Start Pipeline Creation", start_time
pipeline = get_process_instance("my_caps_bank.baby.two_nodes_pipeline.xml")
print "Done in {0} seconds.".format(datetime.datetime.now() - start_time)

# View pipeline
# app = QtGui.QApplication(sys.argv)
# view1 = PipelineDevelopperView(pipeline)
# view1.show()
# app.exec_()
# del view1

# Set pipeline input parameters
# start_time = datetime.datetime.now()
# print "Start Parametrization", start_time
pipeline.thefilein = '/tmp/clone/tuyaux/data/toto.txt.gz'
# pipeline.offset = -1000
def pilot_dcm2nii():
    """
    Imports
    -------

    This code needs the 'capsul' and 'mmutils' packages in order to
    instantiate and execute the pipeline and to get a toy dataset.
    These packages are available in the 'neurospin' source list or on pypi.
    """
    import os
    import sys
    import shutil
    import tempfile
    from capsul.study_config.study_config import StudyConfig
    from capsul.process.loader import get_process_instance
    from mmutils.toy_datasets import get_sample_data

    """
    Parameters
    ----------

    The 'pipeline_name' parameter contains the location of the pipeline XML
    description that will perform the DICOMs conversion, and 'outdir' the
    location of the pipeline's results: in this case a temporary directory.
    """
    pipeline_name = "dcmio.dcmconverter.dcm_to_nii.xml"
    outdir = tempfile.mkdtemp()

    """
    Capsul configuration
    --------------------

    A 'StudyConfig' has to be instantiated in order to execute the pipeline
    properly. It enables us to define the results directory through the
    'output_directory' attribute, the number of CPUs to be used through the
    'number_of_cpus' attribute, and to request a log of the processing steps
    through 'generate_logging'. 'use_scheduler' must be set to True if more
    than one CPU is used.
    """
    study_config = StudyConfig(
        modules=[],
        output_directory=outdir,
        number_of_cpus=1,
        generate_logging=True,
        use_scheduler=True)

    """
    Get the toy dataset
    -------------------

    The toy dataset is composed of a 3D heart DICOM image that is downloaded
    if necessary through the 'get_sample_data' function and exported locally
    in a 'heart.dcm' file.
    """
    dicom_dataset = get_sample_data("dicom")
    dcmfolder = os.path.join(outdir, "dicom")
    if not os.path.isdir(dcmfolder):
        os.makedirs(dcmfolder)
    shutil.copy(dicom_dataset.barre, os.path.join(dcmfolder, "heart.dcm"))

    """
    Pipeline definition
    -------------------

    The pipeline XML description is first imported through the
    'get_process_instance' method, and the resulting pipeline instance is
    parametrized: in this example we decided to set the date in the converted
    file name and we set two DICOM directories to be converted to Nifti
    format.
    """
    pipeline = get_process_instance(pipeline_name)
    pipeline.date_in_filename = True
    pipeline.dicom_directories = [dcmfolder, dcmfolder]
    pipeline.additional_informations = [
        [("Provided by", "Neurospin@2015")],
        [("Provided by", "Neurospin@2015"), ("TR", "1500")]]
    pipeline.dcm_tags = [("TR", [("0x0018", "0x0080")]),
                         ("TE", [("0x0018", "0x0081")])]

    """
    Pipeline representation
    -----------------------

    By executing this block of code, a pipeline representation can be
    displayed. This representation is composed of boxes connected to each
    other.
    """
    if 0:
        from capsul.qt_gui.widgets import PipelineDevelopperView
        from PySide import QtGui
        app = QtGui.QApplication(sys.argv)
        view1 = PipelineDevelopperView(pipeline)
        view1.show()
        app.exec_()

    """
    Pipeline execution
    ------------------

    Finally the pipeline is executed in the defined 'study_config'.
    """
    study_config.run(pipeline)

    """
    Access the result
    -----------------

    The 'nibabel' package is used to load the generated images. We display
    the numpy array shape and the stored repetition and echo times: in order
    to load the 'descrip' image field we use the 'json' package.
    """
    import json
    import copy
    import nibabel

    generated_images = pipeline.filled_converted_files

    for fnames in generated_images:
        print(">>>", fnames, "...")
        im = nibabel.load(fnames[0])
        print("shape=", im.get_data().shape)
        header = im.get_header()
        a = str(header["descrip"])
        a = a.strip()
        description = json.loads(copy.deepcopy(a))
        print("TE=", description["TE"])
        print("TR=", description["TR"])
        print("Provided by=", description["Provided by"])
def pilot_fsl_preproc():
    """
    FSL preprocessings
    ==================
    """
    # System import
    import os
    import sys
    import datetime
    import PySide.QtGui as QtGui

    # CAPSUL import
    from capsul.qt_gui.widgets import PipelineDevelopperView
    from capsul.study_config.study_config import StudyConfig
    from capsul.process.loader import get_process_instance

    """
    Study configuration
    -------------------

    We first define the working directory and guarantee that this folder
    exists on the file system:
    """
    working_dir = "/volatile/nsap/clindmri/fslpreproc"
    if not os.path.isdir(working_dir):
        os.makedirs(working_dir)

    """
    We then define the study configuration (here we activate the smart
    caching module that is able to remember which processes have already
    been executed, and the FSL module that exposes the FSL environment):
    """
    study_config = StudyConfig(
        modules=["SmartCachingConfig", "FSLConfig", "MatlabConfig",
                 "SPMConfig", "NipypeConfig"],
        use_smart_caching=True,
        fsl_config="/etc/fsl/4.1/fsl.sh",
        use_fsl=True,
        output_directory=working_dir)

    # Create pipeline
    start_time = datetime.datetime.now()
    print "Start Pipeline Creation", start_time
    pipeline = get_process_instance("clindmri.preproc.fsl_preproc.xml")
    print "Done in {0} seconds.".format(datetime.datetime.now() - start_time)

    # View pipeline
    if 0:
        app = QtGui.QApplication(sys.argv)
        view1 = PipelineDevelopperView(pipeline)
        view1.show()
        app.exec_()
        del view1

    # Set pipeline input parameters
    pipeline.dfile = "/volatile/imagen/dmritest/000000022453/DTI/000000022453s011a1001.nii.gz"
    pipeline.bvalfile = "/volatile/imagen/dmritest/000000022453/DTI/000000022453s011a1001.bval"
    pipeline.bvecfile = "/volatile/imagen/dmritest/000000022453/DTI/000000022453s011a1001.bvec"
    print "Done in {0} seconds.".format(datetime.datetime.now() - start_time)

    # Display the underlying nipype FSL command line of the 'eddy' node
    # print pipeline.nodes["eddy"].process._nipype_interface.inputs
    print pipeline.nodes["eddy"].process._nipype_interface.cmdline

    # Execute the pipeline in the configured study
    study_config.run(pipeline, verbose=1)