Example #1
def spm_tissue_probability_maps():
    """ SPM tissue probability maps.

    <unit>
        <output name="tpm_struct" type="List" content="Any" desc="a struct
            containing the spm tissue probability map descriptions."/>
    </unit>
    """
    # Try to import the resource
    try:
        from caps.toy_datasets import get_sample_data
    except ImportError:
        raise ImportError("Can't import 'caps'.")
    tmp_file = get_sample_data("tpm").all

    # Format the tpm for spm
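    # Each tuple below is assumed to follow the SPM NewSegment tissue
    # convention (e.g. nipype's spm.NewSegment "tissues" input):
    # ((tpm_file, 1-based tissue index), number of gaussians,
    #  (save native, save DARTEL), (save unmodulated, save modulated)).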
    tissue1 = ((tmp_file, 1), 2, (True, True), (False, True))
    tissue2 = ((tmp_file, 2), 2, (True, True), (False, True))
    tissue3 = ((tmp_file, 3), 2, (True, False), (False, False))
    tissue4 = ((tmp_file, 4), 3, (False, False), (False, False))
    tissue5 = ((tmp_file, 5), 4, (False, False), (False, False))

    tpm_struct = [tissue1, tissue2, tissue3, tissue4, tissue5]
    return tpm_struct
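
A minimal usage sketch (assuming the 'caps' package and its sample data are
available locally): call the function above and print each tissue description.

if __name__ == "__main__":
    for (tpm_file, index), ngaus, native_flags, warped_flags in \
            spm_tissue_probability_maps():
        print("tissue {0}: {1} gaussian(s), map file {2}".format(
            index, ngaus, tpm_file))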
Example #2
def pilot(working_dir="/volatile/nsap/caps", **kwargs):
    """
    ===============================
    Diffusion Brain Extraction Tool
    ===============================
    .. topic:: Objective

        We propose to extract the brain mask from a diffusion sequence.

    Import
    ------

    First we load the function that enables us to access the toy datasets
    """
    from caps.toy_datasets import get_sample_data

    """
    From capsul we then load the class to configure the study we want to
    perform
    """
    from capsul.study_config import StudyConfig

    """
    Here two utility tools are loaded. The first one enables the creation
    of an ordered dictionary and the second ensures that a directory exists.
    Note that the directory will be created if necessary.
    """
    from capsul.utils.sorted_dictionary import SortedDictionary
    from nsap.lib.base import ensure_is_dir

    """
    Load the toy dataset
    --------------------

    We want to perform BET on a diffusion sequence.
    To do so, we use the *get_sample_data* function to load this
    dataset.

    .. seealso::

        For a complete description of the *get_sample_data* function, see the
        :ref:`Toy Datasets documentation <toy_datasets_guide>`
    """
    toy_dataset = get_sample_data("dwi")

    """
    The *toy_dataset* is an Enum structure with some specific
    elements of interest *dwi*, *bvals* that contain the nifti diffusion
    image and the b-values respectively.
    """
    print(toy_dataset.dwi, toy_dataset.bvals)

    """
    Will return:

    .. code-block:: python

        /home/ag239446/git/nsap-src/nsap/data/DTI30s010.nii
        /home/ag239446/git/nsap-src/nsap/data/DTI30s010.bval

    We can see that the image has been found in a local directory.

    Processing definition
    ---------------------

    Now we need to define the processing step that will perform BET on
    the diffusion sequence.
    """
    bet_pipeline = dBET()

    """
    It is possible to access the pipeline input specification.
    """
    print(bet_pipeline.get_input_spec())

    """
    Will return the input parameters the user can set:

    .. code-block:: python

        INPUT SPECIFICATIONS

        dw_image: ['File']
        bvals: ['File']
        specified_index_of_ref_image: ['Int']
        terminal_output: ['Enum']
        generate_binary_mask: ['Bool']
        use_4d_input: ['Bool']
        generate_mesh: ['Bool']
        generate_skull: ['Bool']
        bet_threshold: ['Float']

    We can now tune the pipeline parameters.
    We first set the input dwi file:
    """
    bet_pipeline.dw_image = toy_dataset.dwi

    """
    And set the b-values file
    """
    bet_pipeline.bvals = toy_dataset.bvals

    """
    Study Configuration
    -------------------

    The pipeline is now set up and ready to be executed.
    For a complete description of a study execution, see the
    :ref:`Study Configuration description <study_configuration_guide>`
    """
    import os  # standard library import used to build the working directory path
    bet_working_dir = os.path.join(working_dir, "diffusion_bet")
    ensure_is_dir(bet_working_dir)
    default_config = SortedDictionary(
        ("output_directory", bet_working_dir),
        ("fsl_config", "/etc/fsl/4.1/fsl.sh"),
        ("use_fsl", True),
        ("use_smart_caching", True),
        ("generate_logging", True)
    )
    study = StudyConfig(default_config)
    study.run(bet_pipeline)

    """
    Results
    -------

    Finally, we print the pipeline outputs
    """
    print("\nOUTPUTS\n")
    for trait_name, trait_value in bet_pipeline.get_outputs().items():
        print("{0}: {1}".format(trait_name, trait_value))

    """
    output_directory="/volatile/nsap/catalogue/spm_first_level/")
print "Done in {0} seconds".format(datetime.datetime.now() - start_time)


# Create pipeline
start_time = datetime.datetime.now()
print "Start Pipeline Creation", start_time
pipeline = get_process_instance(
    "caps.nsap.functional_statistic.pipeline.spm_first_level_pipeline.xml")
print "Done in {0} seconds.".format(datetime.datetime.now() - start_time)


# Set pipeline input parameters
start_time = datetime.datetime.now()
print "Start Parametrization", start_time
localizer_dataset = get_sample_data("localizer")
pipeline.behavioral_data = [localizer_dataset.onsets]
pipeline.fmri_sessions = [localizer_dataset.preproc_fmri]
pipeline.time_repetition = localizer_dataset.TR
pipeline.realignment_parameters = localizer_dataset.mouvment_parameters
pipeline.condition_name = "Conditions"
pipeline.onset_name = "Onsets"
pipeline.duration_name = "Durations"
pipeline.delimiter = ";"
pipeline.start = 0
pipeline.contrasts = [
    ("Horizontal Checkerboard","T",['damier_H',],[1,]),
    ("Vertical Checkerboard","T",['damier_V',],[1,]),
    ("Right Audio Click","T",['clicDaudio',],[1,]),
    ("Left Audio Click","T",['clicGaudio',],[1,]),
    ("Right Video Click","T",['clicDvideo',],[1,]),
Example #4
def pilot_preproc():
    """
    fMRI preprocessing
    ==================
    """
    # Pilot imports
    import os
    from caps.toy_datasets import get_sample_data
    from capsul.study_config import StudyConfig
    from capsul.process import get_process_instance

    """
    Study configuration
    -------------------

    We first define the working directory and guarantee this folder exists on
    the file system:
    """
    working_dir = "/volatile/nsap/catalogue/pclinfmri/fmri_preproc"
    if not os.path.isdir(working_dir):
        os.makedirs(working_dir)

    """
    Now we get the pipeline from its definition (xml file)
    """
    pipeline = get_process_instance(
        "clinfmri.preproc.pipeline.fmri_preproc.xml")

    """
    And then define the study configuration (here we activate the smart
    caching module, which remembers which processes have already been
    executed):
    """
    study_config = StudyConfig(
        modules=["SmartCachingConfig", "MatlabConfig", "SPMConfig",
                 "FSLConfig",
                 "NipypeConfig"],
        use_smart_caching=True,
        fsl_config="/etc/fsl/4.1/fsl.sh",
        use_fsl=True,
        matlab_exec="/neurospin/local/bin/matlab",
        use_matlab=True,
        spm_directory="/i2bm/local/spm8",
        use_spm=True,
        output_directory=working_dir)

    """
    Load the toy dataset
    --------------------

    To do so, we use the get_sample_data function to download the toy
    dataset to the local file system (here the localizer data):
    """
    toy_dataset = get_sample_data("localizer")
    template_dataset = get_sample_data("mni_1mm")

    """
    The toy_dataset is an Enum structure with some specific elements of
    interest:

        * **??**: ??.

    Processing definition
    ---------------------

    First create the
    :ref:`slice timing pipeline <pclinfmri.preproc.pipeline.SliceTiming>` that
    defines the different steps of the processing:
    """
    pipeline = get_process_instance("pclinfmri.preproc.fmri_preproc.xml")
    print(pipeline.get_input_spec())

    """
    Now we need to parametrize this pipeline:
    """
    pipeline.fmri_file = toy_dataset.fmri
    pipeline.structural_file = toy_dataset.anat
    pipeline.realign_register_to_mean = True
    pipeline.select_slicer = "none"
    pipeline.select_registration = "template"
    pipeline.template_file = template_dataset.brain
    pipeline.force_repetition_time = toy_dataset.TR
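    # Slice order is forced to a simple ascending 1..40 sequence (assuming a
    # 40-slice ascending acquisition for this toy dataset).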
    pipeline.force_slice_orders = [index + 1 for index in range(40)]

    """
    The pipeline is now ready to be run:
    """
    study_config.run(pipeline, executer_qc_nodes=True, verbose=1)

    """
    Results
    -------

    Finally, we print the pipeline outputs:
    """
    print("\nOUTPUTS\n")
    for trait_name, trait_value in pipeline.get_outputs().items():
        print("{0}: {1}".format(trait_name, trait_value))
Example #5
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################

# System import
import datetime

# CAPS import
from caps.toy_datasets import get_sample_data

# Lib import
from dcmreader import get_sequence_name, get_all_sop_instance_uids

start_time = datetime.datetime.now()

localizer_dataset = get_sample_data("qt1")
#
print "Start Procedure", start_time
print "***************"
print ""
print "Extracting sequence name (first value returned only)..."
seqname = get_sequence_name(localizer_dataset.gre5dcm)
print "sequence name: {0}".format(seqname)
print ""
print(
    "Extracting Referenced SOP Instance UID (all values "
    "returned in a list)...")
uids = get_all_sop_instance_uids(localizer_dataset.gre20dcm)
print "Referenced SOP Instance UID: {0}".format(uids)
print ""
Example #6
def pilot(working_dir="/volatile/nsap/caps", **kwargs):
    """
    ===============================
    Diffusion Brain Extraction Tool
    ===============================
    .. topic:: Objective

        We propose to extract the brain mask from a diffusion sequence.

    Import
    ------

    First we load the function that enables us to access the toy datasets
    """
    from caps.toy_datasets import get_sample_data
    """
    From capsul we then load the class to configure the study we want to
    perform
    """
    from capsul.study_config import StudyConfig
    """
    Here two utility tools are loaded. The first one enables the creation
    of an ordered dictionary and the second ensures that a directory exists.
    Note that the directory will be created if necessary.
    """
    from capsul.utils.sorted_dictionary import SortedDictionary
    from nsap.lib.base import ensure_is_dir
    """
    Load the toy dataset
    --------------------

    We want to perform BET on a diffusion sequence.
    To do so, we use the *get_sample_data* function to load this
    dataset.

    .. seealso::

        For a complete description of the *get_sample_data* function, see the
        :ref:`Toy Datasets documentation <toy_datasets_guide>`
    """
    toy_dataset = get_sample_data("dwi")
    """
    The *toy_dataset* is an Enum structure with some specific
    elements of interest *dwi*, *bvals* that contain the nifti diffusion
    image and the b-values respectively.
    """
    print(toy_dataset.dwi, toy_dataset.bvals)
    """
    Will return:

    .. code-block:: python

        /home/ag239446/git/nsap-src/nsap/data/DTI30s010.nii
        /home/ag239446/git/nsap-src/nsap/data/DTI30s010.bval

    We can see that the image has been found in a local directory.

    Processing definition
    ---------------------

    Now we need to define the processing step that will perform BET on
    the diffusion sequence.
    """
    bet_pipeline = dBET()
    """
    It is possible to access the pipeline input specification.
    """
    print(bet_pipeline.get_input_spec())
    """
    Will return the input parameters the user can set:

    .. code-block:: python

        INPUT SPECIFICATIONS

        dw_image: ['File']
        bvals: ['File']
        specified_index_of_ref_image: ['Int']
        terminal_output: ['Enum']
        generate_binary_mask: ['Bool']
        use_4d_input: ['Bool']
        generate_mesh: ['Bool']
        generate_skull: ['Bool']
        bet_threshold: ['Float']

    We can now tune the pipeline parameters.
    We first set the input dwi file:
    """
    bet_pipeline.dw_image = toy_dataset.dwi
    """
    And set the b-values file
    """
    bet_pipeline.bvals = toy_dataset.bvals
    """
    Study Configuration
    -------------------

    The pipeline is now set up and ready to be executed.
    For a complete description of a study execution, see the
    :ref:`Study Configuration description <study_configuration_guide>`
    """
    import os  # standard library import used to build the working directory path
    bet_working_dir = os.path.join(working_dir, "diffusion_bet")
    ensure_is_dir(bet_working_dir)
    default_config = SortedDictionary(
        ("output_directory", bet_working_dir),
        ("fsl_config", "/etc/fsl/4.1/fsl.sh"), ("use_fsl", True),
        ("use_smart_caching", True), ("generate_logging", True))
    study = StudyConfig(default_config)
    study.run(bet_pipeline)
    """
    Results
    -------

    Finally, we print the pipeline outputs
    """
    print("\nOUTPUTS\n")
    for trait_name, trait_value in bet_pipeline.get_outputs().items():
        print("{0}: {1}".format(trait_name, trait_value))
    """
Example #7
def pilot_newsegment():
    """ 
    New Segment
    ===========
    """
    # Pilot imports
    import os
    from caps.toy_datasets import get_sample_data
    from capsul.study_config import StudyConfig
    from pclinfmri.utils.pipeline import SpmNewSegment

    """
    Study configuration
    -------------------

    We first define the working directory and guarantee this folder exists on
    the file system:
    """
    working_dir = "/volatile/nsap/pclinfmri/spmnewsegment"
    if not os.path.isdir(working_dir):
        os.makedirs(working_dir)

    """
    And then define the study configuration (here we activate the smart
    caching module, which remembers which processes have already been
    executed):
    """
    study_config = StudyConfig(
        modules=["SmartCachingConfig", "MatlabConfig", "SPMConfig",
                 "NipypeConfig"],
        use_smart_caching=True,
        matlab_exec="/neurospin/local/bin/matlab",
        use_matlab=True,
        spm_directory="/i2bm/local/spm8",
        use_spm=True,
        output_directory=working_dir)

    """
    Load the toy dataset
    --------------------

    To do so, we use the get_sample_data function to download the toy
    dataset to the local file system (here the localizer data):
    """
    toy_dataset = get_sample_data("localizer")

    """
    The toy_dataset is an Enum structure with some specific elements of
    interest:

        * **??**: ??.

    Processing definition
    ---------------------

    First create the
    :ref:`slice timing pipeline <pclinfmri.preproc.pipeline.SliceTiming>` that
    defines the different steps of the processing:
    """
    pipeline = SpmNewSegment()
    print(pipeline.get_input_spec())

    """
    Now we need to parametrize this pipeline:
    """
    pipeline.coregistered_struct_file = toy_dataset.mean

    """
    The pipeline is now ready to be run:
    """
    study_config.run(pipeline, executer_qc_nodes=True, verbose=1)

    """
    Results
    -------

    Finally, we print the pipeline outputs:
    """
    print("\nOUTPUTS\n")
    for trait_name, trait_value in pipeline.get_outputs().items():
        print("{0}: {1}".format(trait_name, trait_value))
Example #8
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################

# System import
import datetime

# CAPS import
from caps.toy_datasets import get_sample_data

# Lib import
from dcmreader import get_sequence_name, get_all_sop_instance_uids

start_time = datetime.datetime.now()

localizer_dataset = get_sample_data("qt1")
#
print "Start Procedure", start_time
print "***************"
print ""
print "Extracting sequence name (first value returned only)..."
seqname = get_sequence_name(localizer_dataset.gre5dcm)
print "sequence name: {0}".format(seqname)
print ""
print ("Extracting Referenced SOP Instance UID (all values "
       "returned in a list)...")
uids = get_all_sop_instance_uids(localizer_dataset.gre20dcm)
print "Referenced SOP Instance UID: {0}".format(uids)
print ""

print "End of pilots"