Example #1
def spm_tissue_probability_maps(fsl_dir="/usr/share/fsl/4.1",
                                spm_dir="/i2bm/local/spm8/"):
    """ SPM tissue probability maps.

    
    <process capsul_xml="2.0">
      <input name="fsl_dir" type="string" doc="the fsl repository"/>
      <input name="spm_dir" type="string" doc="the spm repository"/>
      <return name="tpm_struct" type="list_any" doc="a struct containing the spm tissue probability map descriptions."/>
    </process>
    
    """
    import os

    # Try to import the resource
    try:
        from mmutils.toy_datasets import get_sample_data
    except ImportError:
        raise ImportError("Can't import 'mmutils'.")

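    # SPM8 does not ship a combined 4D TPM.nii under <spm_dir>/tpm (its
    # New Segment priors live in the Seg toolbox), so the maps are fetched
    # from the sample data; SPM12 provides <spm_dir>/tpm/TPM.nii directly.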
    if "spm8" in spm_dir:
        tmp_file = get_sample_data("tpm", fsl_dir=fsl_dir, spm_dir=spm_dir).all
    else:
        tmp_file = os.path.join(spm_dir, "tpm", "TPM.nii")

    # Format the tpm for spm
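    # Each tissue tuple below follows the layout expected by nipype's
    # spm.NewSegment 'tissues' input: ((tpm_file, 1-based volume index),
    # number of Gaussians, (save native, save DARTEL imported),
    # (save warped unmodulated, save warped modulated)).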
    tissue1 = ((tmp_file, 1), 2, (True, True), (False, True))
    tissue2 = ((tmp_file, 2), 2, (True, True), (False, True))
    tissue3 = ((tmp_file, 3), 2, (True, False), (False, False))
    tissue4 = ((tmp_file, 4), 3, (False, False), (False, False))
    tissue5 = ((tmp_file, 5), 4, (False, False), (False, False))
    tissue6 = ((tmp_file, 6), 2, (False, False), (False, False))

    tpm_struct = [tissue1, tissue2, tissue3, tissue4, tissue5, tissue6]
    return tpm_struct
Example #2
    def test_simple_run(self):
        """ Method to test a simple 1 cpu call with the scheduler.
        """
        # Configure the environment
        study_config = StudyConfig(
            modules=[],
            use_smart_caching=True,
            output_directory=self.outdir,
            number_of_cpus=1,
            generate_logging=True,
            use_scheduler=True)

        # Create pipeline
        pipeline = get_process_instance(self.pipeline_name)
        pipeline.date_in_filename = True

        # Set pipeline input parameters
        dicom_dataset = get_sample_data("dicom")
        dcmfolder = os.path.join(self.outdir, "dicom")
        if not os.path.isdir(dcmfolder):
            os.makedirs(dcmfolder)
        shutil.copy(dicom_dataset.barre, os.path.join(dcmfolder, "heart.dcm"))
        pipeline.source_dir = dcmfolder

        # View pipeline
        if 0:
            from capsul.qt_gui.widgets import PipelineDevelopperView
            from PySide import QtGui
            app = QtGui.QApplication(sys.argv)
            view1 = PipelineDevelopperView(pipeline)
            view1.show()
            app.exec_()

        # Execute the pipeline in the configured study
        study_config.run(pipeline)
Example #3
def spm_tissue_probability_maps():
    """ SPM tissue probability maps.

    <unit>
        <output name="tpm_struct" type="List" content="Any" desc="a struct
            containing the spm tissue probability map descriptions."/>
    </unit>
    """
    # Try to import the resource
    try:
        from mmutils.toy_datasets import get_sample_data
    except ImportError:
        raise ImportError("Can't import 'mmutils'.")
    tmp_file = get_sample_data("tpm").all

    # Format the tpm for spm
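    # Tissue tuple layout (as expected by nipype's spm.NewSegment 'tissues'
    # input): ((tpm_file, 1-based volume index), number of Gaussians,
    # (save native, save DARTEL imported),
    # (save warped unmodulated, save warped modulated)).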
    tissue1 = ((tmp_file, 1), 2, (True, True), (False, True))
    tissue2 = ((tmp_file, 2), 2, (True, True), (False, True))
    tissue3 = ((tmp_file, 3), 2, (True, False), (False, False))
    tissue4 = ((tmp_file, 4), 3, (False, False), (False, False))
    tissue5 = ((tmp_file, 5), 4, (False, False), (False, False))

    tpm_struct = [tissue1, tissue2, tissue3, tissue4, tissue5]
    return tpm_struct
Example #4
def pilot_bet(enable_display=False):
    """ 
    BET
    ===

    Brain extraction with FSL. 

    Start to import required modules:
    """
    import os
    from mmutils.toy_datasets import get_sample_data
    from capsul.study_config import StudyConfig
    from capsul.process import get_process_instance

    """
    Study configuration
    -------------------

    We first define the working directory and guarantee this folder exists on
    the file system:
    """
    working_dir = "/volatile/nsap/catalogue/pclinfmri/fsl_bet"
    if not os.path.isdir(working_dir):
        os.makedirs(working_dir)

    """
    And then define the study configuration:
    """
    study_config = StudyConfig(
        modules=["MatlabConfig", "SPMConfig", "FSLConfig", "NipypeConfig"],
        use_smart_caching=False,
        fsl_config="/etc/fsl/4.1/fsl.sh",
        use_fsl=True,
        output_directory=working_dir,
        number_of_cpus=1,
        generate_logging=True,
        use_scheduler=True)

    """
    Load the toy dataset
    --------------------

    To do so, we use the get_sample_data function to download the toy
    dataset to the local file system (here the localizer data):
    """
    toy_dataset = get_sample_data("localizer")

    """
    The toy_dataset is an Enum structure with some specific elements of
    interest:

        * fmri: the functional volume.
        * anat: the structural volume.
        * TR: the repetition time.

    Processing definition
    ---------------------

    First create the
    :ref:`brain extraction pipeline <clinfmri.preproc.FslBet>` that
    defines the different steps of the processing:
    pipeline = get_process_instance("clinfmri.utils.fsl_bet.xml")
    print(pipeline.get_input_spec())

    """
    It is possible to display the pipeline.
    """
    if enable_display:
        import sys
        from PySide import QtGui
        from capsul.qt_gui.widgets import PipelineDevelopperView

        app = QtGui.QApplication(sys.argv)
        view = PipelineDevelopperView(pipeline)
        view.show()
        app.exec_()

    """
    Now we need to parametrize this pipeline:
    """
    pipeline.input_image_file = toy_dataset.anat
    pipeline.generate_binary_mask = True
    pipeline.bet_threshold = 0.5

    """
    The pipeline is now ready to be run:
    """
    study_config.run(pipeline, executer_qc_nodes=False, verbose=1)

    """
    Results
    -------

    Finally, we print the pipeline outputs:
    """
    print("\nOUTPUTS\n")
    for trait_name, trait_value in pipeline.get_outputs().items():
        print("{0}: {1}".format(trait_name, trait_value))
Example #5
def pilot_new_segment(enable_display=False):
    """ 
    New Segment
    ===========

    Unified SPM segmentation: segments, bias corrects and spatially normalises.

    Start to import required modules:
    """
    import os
    from mmutils.toy_datasets import get_sample_data
    from capsul.study_config import StudyConfig
    from capsul.process import get_process_instance

    """
    Study configuration
    -------------------

    We first define the working directory and guarantee this folder exists on
    the file system:
    """
    working_dir = "/volatile/nsap/catalogue/pclinfmri/spm_newsegment"
    if not os.path.isdir(working_dir):
        os.makedirs(working_dir)

    """
    And then define the study configuration:
    """
    study_config = StudyConfig(
        modules=["MatlabConfig", "SPMConfig", "FSLConfig", "NipypeConfig"],
        use_smart_caching=False,
        matlab_exec="/neurospin/local/bin/matlab",
        use_matlab=True,
        spm_directory="/i2bm/local/spm8",
        use_spm=True,
        output_directory=working_dir,
        number_of_cpus=1,
        generate_logging=True,
        use_scheduler=True)

    """
    Load the toy dataset
    --------------------

    To do so, we use the get_sample_data function to download the toy
    dataset to the local file system (here the localizer data):
    """
    toy_dataset = get_sample_data("localizer")
    template_dataset = get_sample_data("mni_1mm")

    """
    The toy_dataset is an Enum structure with some specific elements of
    interest:

        * fmri: the functional volume.
        * anat: the structural volume.
        * TR: the repetition time.

    Processing definition
    ---------------------

    First create the
    :ref:`new segment pipeline <clinfmri.utils.SpmNewSegment>`
    that defines the different steps of the processing:
    """
    pipeline = get_process_instance("clinfmri.utils.spm_new_segment.xml")
    print(pipeline.get_input_spec())

    """
    It is possible to display the pipeline.
    """
    if enable_display:
        import sys
        from PySide import QtGui
        from capsul.qt_gui.widgets import PipelineDevelopperView

        app = QtGui.QApplication(sys.argv)
        view = PipelineDevelopperView(pipeline)
        view.show()
        app.exec_()

    """
    Now we need to parametrize this pipeline:
    """
    pipeline.channel_files = [toy_dataset.mean]
    pipeline.reference_volume = template_dataset.brain

    """
    The pipeline is now ready to be run:
    """
    study_config.run(pipeline, executer_qc_nodes=False, verbose=1)

    """
    Results
    -------

    Finally, we print the pipeline outputs:
    """
    print("\nOUTPUTS\n")
    for trait_name, trait_value in pipeline.get_outputs().items():
        print("{0}: {1}".format(trait_name, trait_value))
Example #6
def pilot_qa_fmri():
    """
    Imports
    -------

    This code needs the 'capsul' and 'mmutils' packages in order to
    instantiate and execute the pipeline and to get a toy dataset.
    These packages are available in the 'neurospin' source list or on PyPI.
    """
    # Standard library imports
    import os
    import sys
    import json
    import tempfile

    # Capsul import
    from capsul.study_config.study_config import StudyConfig
    from capsul.process.loader import get_process_instance

    # Mmutils import
    from mmutils.toy_datasets import get_sample_data
    """
    Parameters
    ----------

    The 'pipeline_name' parameter contains the location of the pipeline XML
    description that will perform the fMRI quality assurance, and 'outdir'
    the location of the pipeline's results: in this case a temporary
    directory.
    """

    pipeline_name = "mmqa.fmri.fmri_quality_assurance_bbox.xml"
    outdir = tempfile.mkdtemp()
    """
    Capsul configuration
    --------------------

    A 'StudyConfig' has to be instantiated in order to execute the pipeline
    properly. It enables us to define the results directory through the
    'output_directory' attribute and the number of CPUs to be used through
    the 'number_of_cpus' attribute, and to request a log of the processing
    steps through the 'generate_logging' attribute. 'use_scheduler' must be
    set to True if more than one CPU is used.
    """
    study_config = StudyConfig(number_of_cpus=1,
                               generate_logging=True,
                               use_scheduler=True,
                               output_directory=outdir)
    """
    Get the toy dataset
    -------------------

    The toy dataset is composed of a functional image that is downloaded
    if necessary through the 'get_sample_data' function and exported
    locally.
    """

    localizer_dataset = get_sample_data("localizer_extra")
    """
    Pipeline definition
    -------------------

    The pipeline XML description is first imported through the
    'get_process_instance' method, and the resulting pipeline instance is
    parametrized: in this example we set the functional image, the
    repetition time, the ROI size and the location of the output scores
    file.
    """

    pipeline = get_process_instance(pipeline_name)
    pipeline.image_file = localizer_dataset.fmri
    pipeline.repetition_time = 2.0
    pipeline.exclude_volume = []
    pipeline.roi_size = 21
    pipeline.score_file = os.path.join(outdir, "scores.json")
    """
    Pipeline representation
    -----------------------

    By executing this block of code, a pipeline representation can be
    displayed. This representation is composed of boxes connected to each
    other.
    """
    if 0:
        from capsul.qt_gui.widgets import PipelineDevelopperView
        from PySide import QtGui
        app = QtGui.QApplication(sys.argv)
        view1 = PipelineDevelopperView(pipeline)
        view1.show()
        app.exec_()
    """
    Pipeline execution
    ------------------

    Finally the pipeline is executed in the defined 'study_config'.
    """
    study_config.run(pipeline)
    """
    Access the result
    -----------------

    Display the computed scores
    """

    scores_file = pipeline.scores_file

    with open(scores_file, "r") as _file:
        scores = json.load(_file)

    for key, value in scores.items():
        print("{0} = {1}".format(key, value))
Example #7
def pilot_dcm2nii():
    """
    Imports
    -------

    This code needs the 'capsul' and 'mmutils' packages in order to
    instantiate and execute the pipeline and to get a toy dataset.
    These packages are available in the 'neurospin' source list or on PyPI.
    """
    import os
    import sys
    import shutil
    import tempfile
    from capsul.study_config.study_config import StudyConfig
    from capsul.process.loader import get_process_instance
    from mmutils.toy_datasets import get_sample_data

    """
    Parameters
    ----------

    The 'pipeline_name' parameter contains the location of the pipeline XML
    description that will perform the DICOM conversion, and 'outdir' the
    location of the pipeline's results: in this case a temporary directory.
    """
    pipeline_name = "dcmio.dcmconverter.dcm_to_nii.xml"
    outdir = tempfile.mkdtemp()

    """
    Capsul configuration
    --------------------

    A 'StudyConfig' has to be instantiated in order to execute the pipeline
    properly. It enables us to define the results directory through the
    'output_directory' attribute and the number of CPUs to be used through
    the 'number_of_cpus' attribute, and to request a log of the processing
    steps through the 'generate_logging' attribute. 'use_scheduler' must be
    set to True if more than one CPU is used.
    """
    study_config = StudyConfig(
        modules=[],
        output_directory=outdir,
        number_of_cpus=1,
        generate_logging=True,
        use_scheduler=True)

    """
    Get the toy dataset
    -------------------

    The toy dataset is composed of a 3D heart dicom image that is downloaded
    if necessary through the 'get_sample_data' function and exported
    locally as a 'heart.dcm' file.
    """
    dicom_dataset = get_sample_data("dicom")
    dcmfolder = os.path.join(outdir, "dicom")
    if not os.path.isdir(dcmfolder):
        os.makedirs(dcmfolder)
    shutil.copy(dicom_dataset.barre, os.path.join(dcmfolder, "heart.dcm"))

    """
    Pipeline definition
    -------------------

    The pipeline XML description is first imported through the
    'get_process_instance' method, and the resulting pipeline instance is
    parametrized: in this example we decided to set the date in the converted
    file name and we set two DICOM directories to be converted in Nifti
    format.
    """
    pipeline = get_process_instance(pipeline_name)
    pipeline.date_in_filename = True
    pipeline.dicom_directories = [dcmfolder, dcmfolder]
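    # One list of (key, value) annotations per DICOM directory; these
    # presumably end up in the generated Nifti 'descrip' header field,
    # which is read back as JSON at the end of this example.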
    pipeline.additional_informations = [[("Provided by", "Neurospin@2015")],
                                        [("Provided by", "Neurospin@2015"),
                                         ("TR", "1500")]]

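    # Extract the repetition and echo times using the standard DICOM tags:
    # (0018,0080) is Repetition Time and (0018,0081) is Echo Time.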
    pipeline.dcm_tags = [("TR", [("0x0018", "0x0080")]),
                         ("TE", [("0x0018", "0x0081")])]

    """
    Pipeline representation
    -----------------------

    By executing this block of code, a pipeline representation can be
    displayed. This representation is composed of boxes connected to each
    other.
    """
    if 0:
        from capsul.qt_gui.widgets import PipelineDevelopperView
        from PySide import QtGui
        app = QtGui.QApplication(sys.argv)
        view1 = PipelineDevelopperView(pipeline)
        view1.show()
        app.exec_()

    """
    Pipeline execution
    ------------------

    Finally the pipeline is executed in the defined 'study_config'.
    """
    study_config.run(pipeline)

    """
    Access the result
    -----------------

    The 'nibabel' package is used to load the generated images. We display
    the numpy array shape and the stored repetition and echo times: in order
    to load the 'descrip' image field we use the 'json' package.
    """
    import json
    import nibabel

    generated_images = pipeline.filled_converted_files

    for fnames in generated_images:
        print(">>>", fnames, "...")
        im = nibabel.load(fnames[0])
        print("shape=", im.get_data().shape)
        header = im.get_header()
        a = str(header["descrip"]).strip()
        description = json.loads(a)
        print("TE=", description["TE"])
        print("TR=", description["TR"])
        print("Provided by=", description["Provided by"])
Example #8
def pilot_preproc_spm_fmri(enable_display=False):
    """
    FMRI preprocessings
    ===================

    Preprocessing with the SPM slice timing and a normalization to a given
    template.

    Start to import required modules:
    """
    import os
    from mmutils.toy_datasets import get_sample_data
    from capsul.study_config import StudyConfig
    from capsul.api import get_process_instance

    """
    Study configuration
    -------------------

    We first define the working directory and guarantee this folder exists on
    the file system:
    """
    working_dir = "/volatile/nsap/catalogue/pclinfmri/fmri_preproc_spm_fmri"
    if not os.path.isdir(working_dir):
        os.makedirs(working_dir)

    """
    Then define the study configuration:
    """
    study_config = StudyConfig(
        modules=["MatlabConfig", "SPMConfig", "FSLConfig", "NipypeConfig"],
        use_smart_caching=False,
        fsl_config="/etc/fsl/4.1/fsl.sh",
        use_fsl=True,
        matlab_exec="/neurospin/local/bin/matlab",
        use_matlab=True,
        spm_directory="/i2bm/local/spm8",
        use_spm=True,
        output_directory=working_dir,
        number_of_cpus=1,
        generate_logging=True,
        use_scheduler=True,)

    """
    Load the toy dataset
    --------------------

    To do so, we use the get_sample_data function to download the toy
    dataset to the local file system (here the localizer data):
    """
    toy_dataset = get_sample_data("localizer")
    template_dataset = get_sample_data("mni_1mm")

    """
    The toy_dataset is an Enum structure with some specific elements of
    interest:

        * fmri: the functional volume.
        * anat: the structural volume.
        * TR: the repetition time.

    Processing definition
    ---------------------

    First create the
    :ref:`preprocessing pipeline <clinfmri.preproc.FmriPreproc>` that
    defines the different steps of the processing:
    """
    pipeline = get_process_instance("clinfmri.preproc.converted_fmri_preproc")
    print(pipeline.get_input_spec())

    """
    Now we need to parametrize this pipeline:
    """
    pipeline.fmri_file = toy_dataset.fmri
    pipeline.structural_file = toy_dataset.anat
    pipeline.realign_register_to_mean = True
    pipeline.select_slicer = "spm"
    pipeline.select_normalization = "fmri"
    pipeline.template_file = template_dataset.brain
    pipeline.force_repetition_time = toy_dataset.TR
    pipeline.force_slice_orders = [index + 1 for index in range(40)]

    """
    It is possible to display the pipeline.
    """
    if enable_display:
        import sys
        from PySide import QtGui
        from capsul.qt_gui.widgets import PipelineDevelopperView

        app = QtGui.QApplication(sys.argv)
        view = PipelineDevelopperView(pipeline)
        view.show()
        app.exec_()

    """
    The pipeline is now ready to be run:
    """
    study_config.run(pipeline, executer_qc_nodes=False, verbose=1)

    """
    Results
    -------

    Finally, we print the pipeline outputs:
    """
    print("\nOUTPUTS\n")
    for trait_name, trait_value in pipeline.get_outputs().items():
        print("{0}: {1}".format(trait_name, trait_value))
Example #9
def pilot_bet(enable_display=False):
    """
    Brain Extraction Tool
    =====================
    """
    import os
    from mmutils.toy_datasets import get_sample_data
    from capsul.study_config import StudyConfig
    from capsul.api import get_process_instance

    working_dir = "/volatile/nsap/catalogue/pclinfmri/fmri_bet"
    if not os.path.isdir(working_dir):
        os.makedirs(working_dir)

    """
    Then define the study configuration:
    """
    study_config = StudyConfig(
        modules=["MatlabConfig", "SPMConfig", "FSLConfig", "NipypeConfig"],
        use_smart_caching=False,
        fsl_config="/etc/fsl/4.1/fsl.sh",
        use_fsl=True,
        matlab_exec="/neurospin/local/bin/matlab",
        use_matlab=False,
        spm_directory="/i2bm/local/spm8",
        use_spm=False,
        output_directory=working_dir)

    """
    Load the toy dataset
    --------------------

    To do so, we use the get_sample_data function to download the toy
    dataset to the local file system (here the MNI template):
    """
    template_dataset = get_sample_data("mni_1mm")

    """
    Processing definition
    ---------------------
    """
    pipeline = get_process_instance("clinfmri.utils.converted_fsl_bet")
    print(pipeline.get_input_spec())

    """
    Now we need to parametrize this pipeline:
    """
    pipeline.input_image_file = template_dataset.brain
    pipeline.generate_binary_mask = True

    """
    It is possible to display the pipeline.
    """
    if enable_display:
        import sys
        from PySide import QtGui
        from capsul.qt_gui.widgets import PipelineDevelopperView

        app = QtGui.QApplication(sys.argv)
        view = PipelineDevelopperView(pipeline)
        view.show()
        app.exec_()

    """
    The pipeline is now ready to be run:
    """
    study_config.run(pipeline, executer_qc_nodes=False, verbose=1)

    """
    Results
    -------

    Finally, we print the pipeline outputs:
    """
    print("\nOUTPUTS\n")
    for trait_name, trait_value in pipeline.get_outputs().items():
        print("{0}: {1}".format(trait_name, trait_value))