Example #1
    def set_analysis_parameters(self):
        subjectname = "sujet02"
        groupname = "group1"

        test_dir = os.environ.get("BRAINVISA_TESTS_DIR")
        if not test_dir:
            raise RuntimeError("BRAINVISA_TESTS_DIR is not set")
        test_dir = os.path.join(test_dir, "tmp_tests_brainvisa")

        filename = os.path.join(test_dir, "data_unprocessed", subjectname, "anatomy", subjectname + ".ima")

        subject = Subject(subjectname, groupname, filename)
        self.analysis.set_parameters(subject=subject)

        from capsul.process import get_process_instance

        import_step = get_process_instance("morphologist.capsul.import_t1_mri.ImportT1Mri")

        import_step.input = subject.filename
        import_step.output = self.analysis.pipeline.process.t1mri
        import_step.referential = (
            self.analysis.pipeline.process.PrepareSubject_TalairachFromNormalization_source_referential
        )
        pipeline_tools.create_output_directories(import_step)

        self.analysis.clear_results()
Example #2
    def build_pipeline(self):
        pipeline = get_process_instance(
            'capsul.pipeline.test.test_pipeline.MyPipeline')
        pipeline.add_pipeline_step('step1', ['constant'])
        pipeline.add_pipeline_step('step2', ['node1'])
        pipeline.add_pipeline_step('step3', ['node2'])
        return pipeline
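
The steps declared above become runtime switches on the pipeline. As a minimal sketch (an assumption based on CAPSUL's pipeline_steps mechanism, not taken from the original source), a declared step could then be disabled before execution:

from capsul.process import get_process_instance

# build the same test pipeline and declare its steps as above
pipeline = get_process_instance('capsul.pipeline.test.test_pipeline.MyPipeline')
pipeline.add_pipeline_step('step1', ['constant'])
pipeline.add_pipeline_step('step2', ['node1'])
pipeline.add_pipeline_step('step3', ['node2'])

# each declared step is expected to appear as a boolean on the
# pipeline_steps controller; turning it off skips the grouped nodes
pipeline.pipeline_steps.step2 = False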
Example #3
    def import_data(self, subject):
        from capsul.process import get_process_instance
        import_step = get_process_instance(
            'morphologist.capsul.import_t1_mri.ImportT1Mri')

        import_step.input = subject.filename
        import_step.output \
            = self.pipeline.process.t1mri
        import_step.referential = self.pipeline.process. \
            PrepareSubject_TalairachFromNormalization_source_referential
        pipeline_tools.create_output_directories(import_step)
        import_step() # run
        return import_step.output
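
A hypothetical call of import_data (not in the original source): `analysis` stands for an object exposing the method above, and the Subject constructor follows Example #1 with made-up values.

# hypothetical usage sketch; `analysis` and the file path are assumptions
subject = Subject("sujet02", "group1", "/somewhere/sujet02/anatomy/sujet02.ima")
t1mri_path = analysis.import_data(subject)   # runs the ImportT1Mri step
print("Imported T1 written to:", t1mri_path)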
Example #4
    def onLoadClicked(self):
        """ Event to load and display a pipeline.
        """
        # Get the pipeline instance from its string description
        item = self.ui.menu_treectrl.currentItem()
        description_list = [str(x) for x in [item.text(1), item.text(0)]
                            if x != ""]
        process_description = ".".join(description_list)
        self.pipeline = get_process_instance(process_description)

        # Create the controller widget associated to the pipeline
        # controller
        pipeline_widget = ScrollControllerWidget(
            self.pipeline, live=True, select_controls="inputs")
        self.ui.dockWidgetParameters.setWidget(pipeline_widget)

        # Add observer to refresh the run button
        controller_widget = pipeline_widget.controller_widget
        for control_name, control in controller_widget._controls.items():

            # Unpack the control item
            trait, control_class, control_instance, control_label = control

            # Add the new callback
            control_class.add_callback(self.onRunStatus, control_instance)

        # Refresh manually the run button status the first time
        self.onRunStatus()

        # Store the pipeline documentation root path
        self.path_to_pipeline_doc[self.pipeline.id] = item.text(2)

        # Store the pipeline instance
        self.pipelines[self.pipeline.name] = (
            self.pipeline, pipeline_widget)

        # Create the widget
        widget = PipelineDevelopperView(self.pipeline)
        self._insert_widget_in_tab(widget)

        # Connect the subpipeline clicked signal to the
        # onLoadSubPipelineClicked slot
        widget.subpipeline_clicked.connect(self.onLoadSubPipelineClicked)
Example #5
    def __init__(self, process, iterative_parameters):
        super(ProcessIteration, self).__init__()
        self.process = get_process_instance(process)
        self.regular_parameters = set()
        self.iterative_parameters = set(iterative_parameters)
        
        # Check that all iterative parameters are valid process parameters
        user_traits = self.process.user_traits()
        for parameter in self.iterative_parameters:
            if parameter not in user_traits:
                raise ValueError('Cannot iterate on parameter %s that is not a parameter of process %s' % (parameter, self.process.id))

        # Create iterative process parameters by copying process parameter
        # and changing iterative parameters to list
        for name, trait in user_traits.items():
            if name in iterative_parameters:
                self.add_trait(name, List(trait, output=trait.output, optional=trait.optional))
            else:
                self.regular_parameters.add(name)
                self.add_trait(name, trait)
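
For illustration, a hypothetical instantiation of this class (the import path is assumed to be capsul.pipeline.process_iteration, and the smoothing pipeline with its image_file parameter is borrowed from Example #14; the file names are made up). The iterative parameter becomes a List trait while the other parameters stay scalar.

# hypothetical sketch, assuming ProcessIteration is importable from
# capsul.pipeline.process_iteration as in CAPSUL 2.x
from capsul.pipeline.process_iteration import ProcessIteration

iteration = ProcessIteration("pclinfmri.utils.spm_smoothing.xml",
                             iterative_parameters=["image_file"])
iteration.image_file = ["/tmp/run1.nii", "/tmp/run2.nii"]  # now a list trait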
Example #6
    def __init__(self, pipeline_path, record_file=None, *args, **kwargs):
        """ Method to initialize the ActivationInspectorApp class.

        Parameters
        ----------
        pipeline_path: str (mandatory)
            the name of the pipeline we want to load.
        record_file: str (optional)
            a file where the pipeline activation steps are stored.
        """
        # Inheritance
        super(ActivationInspectorApp, self).__init__(*args, **kwargs)

        # Load the pipeline
        self.pipeline = get_process_instance(pipeline_path)

        # Initialize the application
        self.record_file = record_file
        self.window = None
        self.init_window()
Example #7
def pilot_bet(enable_display=False):
    """ 
    BET
    ===

    Brain extraction with FSL. 

    Start to import required modules:
    """
    import os
    from mmutils.toy_datasets import get_sample_data
    from capsul.study_config import StudyConfig
    from capsul.process import get_process_instance

    """
    Study configuration
    -------------------

    We first define the working directory and guarantee this folder exists on
    the file system:
    """
    working_dir = "/volatile/nsap/catalogue/pclinfmri/fsl_bet"
    if not os.path.isdir(working_dir):
        os.makedirs(working_dir)

    """
    And then define the study configuration:
    """
    study_config = StudyConfig(
        modules=["MatlabConfig", "SPMConfig", "FSLConfig", "NipypeConfig"],
        use_smart_caching=False,
        fsl_config="/etc/fsl/4.1/fsl.sh",
        use_fsl=True,
        output_directory=working_dir,
        number_of_cpus=1,
        generate_logging=True,
        use_scheduler=True)

    """
    Load the toy dataset
    --------------------

    To do so, we use the get_sample_data function to download the toy
    dataset to the local file system (here the localizer data):
    """
    toy_dataset = get_sample_data("localizer")

    """
    The toy_dataset is an Enum structure with some specific elements of
    interest:

        * fmri: the functional volume.
        * anat: the structural volume.
        * TR: the repetition time.

    Processing definition
    ---------------------

    First create the
    :ref:`BET pipeline <clinfmri.preproc.FslBet>` that
    defines the different steps of the processing:
    """
    pipeline = get_process_instance("clinfmri.utils.fsl_bet.xml")
    print(pipeline.get_input_spec())

    """
    It is possible to display the pipeline.
    """
    if enable_display:
        import sys
        from PySide import QtGui
        from capsul.qt_gui.widgets import PipelineDevelopperView

        app = QtGui.QApplication(sys.argv)
        view = PipelineDevelopperView(pipeline)
        view.show()
        app.exec_()

    """
    Now we need to parametrize this pipeline:
    """
    pipeline.input_image_file = toy_dataset.anat
    pipeline.generate_binary_mask = True
    pipeline.bet_threshold = 0.5

    """
    The pipeline is now ready to be run:
    """
    study_config.run(pipeline, executer_qc_nodes=False, verbose=1)

    """
    Results
    -------

    Finally, we print the pipeline outputs:
    """
    print("\nOUTPUTS\n")
    for trait_name, trait_value in pipeline.get_outputs().items():
        print("{0}: {1}".format(trait_name, trait_value))
Example #8
def pilot_new_segment(enable_display=False):
    """ 
    New Segment
    ===========

    Unified SPM segmentation: segments, bias-corrects and spatially normalises.

    Start to import required modules:
    """
    import os
    from mmutils.toy_datasets import get_sample_data
    from capsul.study_config import StudyConfig
    from capsul.process import get_process_instance

    """
    Study configuration
    -------------------

    We first define the working directory and guarantee this folder exists on
    the file system:
    """
    working_dir = "/volatile/nsap/catalogue/pclinfmri/spm_newsegment"
    if not os.path.isdir(working_dir):
        os.makedirs(working_dir)

    """
    And then define the study configuration:
    """
    study_config = StudyConfig(
        modules=["MatlabConfig", "SPMConfig", "FSLConfig", "NipypeConfig"],
        use_smart_caching=False,
        matlab_exec="/neurospin/local/bin/matlab",
        use_matlab=True,
        spm_directory="/i2bm/local/spm8",
        use_spm=True,
        output_directory=working_dir,
        number_of_cpus=1,
        generate_logging=True,
        use_scheduler=True)

    """
    Load the toy dataset
    --------------------

    To do so, we use the get_sample_data function to download the toy
    dataset to the local file system (here the localizer data):
    """
    toy_dataset = get_sample_data("localizer")
    template_dataset = get_sample_data("mni_1mm")

    """
    The toy_dataset is an Enum structure with some specific elements of
    interest:

        * fmri: the functional volume.
        * anat: the structural volume.
        * TR: the repetition time.

    Processing definition
    ---------------------

    First create the
    :ref:`New Segment pipeline <clinfmri.utils.SpmNewSegment>`
    that defines the different steps of the processing:
    """
    pipeline = get_process_instance("clinfmri.utils.spm_new_segment.xml")
    print(pipeline.get_input_spec())

    """
    It is possible to display the pipeline.
    """
    if enable_display:
        import sys
        from PySide import QtGui
        from capsul.qt_gui.widgets import PipelineDevelopperView

        app = QtGui.QApplication(sys.argv)
        view = PipelineDevelopperView(pipeline)
        view.show()
        app.exec_()

    """
    Now we need to parametrize this pipeline:
    """
    pipeline.channel_files = [toy_dataset.mean]
    pipeline.reference_volume = template_dataset.brain

    """
    The pipeline is now ready to be run:
    """
    study_config.run(pipeline, executer_qc_nodes=False, verbose=1)

    """
    Results
    -------

    Finally, we print the pipeline outputs:
    """
    print("\nOUTPUTS\n")
    for trait_name, trait_value in pipeline.get_outputs().items():
        print("{0}: {1}".format(trait_name, trait_value))
Example #9
    if 0:
        def write_state():
            state_file_name = '/tmp/state.json'
            json.dump(pipeline.pipeline_state(), open(state_file_name,'w'))
            print('Wrote', state_file_name)

        import sys
        #from PySide import QtGui
        from soma.qt_gui import qt_backend
        qt_backend.set_qt_backend('PyQt4')
        from soma.qt_gui.qt_backend import QtGui
        from capsul.qt_gui.widgets import PipelineDevelopperView
        #from capsul.qt_gui.widgets import PipelineUserView
        from capsul.process import get_process_instance

        app = QtGui.QApplication(sys.argv)
        pipeline = get_process_instance(MainTestPipeline)
        pipeline.on_trait_change(write_state,'selection_changed')
        view1 = PipelineDevelopperView(pipeline, show_sub_pipelines=True, allow_open_controller=True)
        view1.add_embedded_subpipeline('switch_pipeline', scale=0.7)
        view1.add_embedded_subpipeline('way1_1', scale=0.4)
        view1.add_embedded_subpipeline('way2_1', scale=0.4)
        view1.show()
        #view2 = PipelineUserView(pipeline)
        #view2.show()
        app.exec_()
        del view1
        #del view2

Example #10
def morphologist_all(t1file, sid, outdir, study="morphologist", waittime=10,
                     somaworkflow=False,
                     spmexec="/i2bm/local/spm8-standalone/run_spm8.sh",
                     spmdir="/i2bm/local/spm8-standalone"):
    """ Performs all the Morphologist steps.

    Steps:

    1- Ensure image orientation and reorient it if needed (Prepare Subject for
       Anatomical Pipeline).
    2- Computation of a brain mask (Brain Mask Segmentation).
    3- Computation of a mask for each hemisphere (Split Brain Mask).
    4- A grey/white classification of each hemisphere to perform "Voxel Based
       Morphometry" (Grey White Classification) and spherical triangulation of
       cortical hemispheres (Grey White Surface).
    5- Spherical triangulation of the external interface of the cortex of one
       or two hemispheres (Get Spherical Hemi Surface).
    6- Computation of a graph representing the cortical fold topography
       (Cortical Fold Graph).
    7- Automatic identification of the cortical sulci (Automatic Sulci
       Recognition), located in the "sulci" toolbox.

    The execution is performed with soma_workflow, which has to be installed
    in the bv_env environment.

    To check the workflow submission, use the 'soma_workflow_gui' command.

    If the input 't1file' does not have the expected extension, an Exception
    will be raised.
    If $outdir/$study/$sid has already been created, an Exception will
    be raised.

    Parameters
    ----------
    t1file: str (mandatory)
        the path to a ".nii.gz" anatomical T1 weighted file.
    sid: str (mandatory)
        a subject identifier.
    outdir: str (mandatory)
        the morphologist output files will be written in $outdir/$study/$sid.
    study: str (optional, default 'morphologist')
        the name of the study.
    waittime: float (optional, default 10)
        a delay (in seconds) used to check the workflow status.
    somaworkflow: bool (optional, default False)
        if True, use soma_workflow for the execution.
    spmexec: str (optional)
        the path to the standalone SPM execution file.
    spmdir: str (optional)
        the standalone SPM directory.

    Returns
    -------
    wffile: str
        a file containing the submitted workflow.
    wfid: int
        the submitted workflow identifier.
    wfstatus: str
        the submitted workflow status after 'waittime' seconds.
    """
    # Check roughly the input file extension
    if not t1file.endswith(".nii.gz"):
        raise Exception("'{0}' is not a COMPRESSED NIFTI file.".format(t1file))

    # Create a configuration for the morphologist study
    study_config = StudyConfig(
        modules=StudyConfig.default_modules + ["FomConfig", "BrainVISAConfig"])
    study_dict = {
        "name": "morphologist_fom",
        "input_directory": outdir,
        "output_directory": outdir,
        "input_fom": "morphologist-auto-nonoverlap-1.0",
        "output_fom": "morphologist-auto-nonoverlap-1.0",
        "shared_fom": "shared-brainvisa-1.0",
        "spm_directory": spmdir,
        "use_soma_workflow": True,
        "use_fom": True,
        "spm_standalone": True,
        "use_matlab": False,
        "volumes_format": "NIFTI gz",
        "meshes_format": "GIFTI",
        "use_spm": True,
        "spm_exec": spmexec,
        "study_config.somaworkflow_computing_resource": "localhost",
        "somaworkflow_computing_resources_config": {
            "localhost": {
            }
        }
    }
    study_config.set_study_configuration(study_dict)

    # Create the morphologist pipeline
    pipeline = get_process_instance(
        "morphologist.capsul.morphologist.Morphologist")
    morphologist_pipeline = process_with_fom.ProcessWithFom(
        pipeline, study_config)
    morphologist_pipeline.attributes = dict(
        (trait_name, getattr(morphologist_pipeline, trait_name))
        for trait_name in morphologist_pipeline.user_traits())
    morphologist_pipeline.attributes["center"] = "morphologist"
    morphologist_pipeline.attributes["subject"] = sid
    morphologist_pipeline.create_completion()

    # Create morphologist expected tree
    # ToDo: use ImportT1 from axon
    subjectdir = os.path.join(outdir, study, sid)
    if os.path.isdir(subjectdir):
        raise Exception("Folder '{0}' already created.".format(subjectdir))
    os.makedirs(os.path.join(
        subjectdir, "t1mri", "default_acquisition",
        "default_analysis", "folds", "3.1", "default_session_auto"))
    os.makedirs(os.path.join(
        subjectdir, "t1mri", "default_acquisition",
        "registration"))
    os.makedirs(os.path.join(
        subjectdir, "t1mri", "default_acquisition",
        "segmentation", "mesh"))
    os.makedirs(os.path.join(
        subjectdir, "t1mri", "default_acquisition",
        "tmp"))

    # Copy T1 file in the morphologist expected location
    destfile = os.path.join(subjectdir, "t1mri",
                            "default_acquisition", sid + ".nii.gz")
    shutil.copy(t1file, destfile)

    # Create source_referential morphologist expected file
    source_referential = {"uuid": str(soma.uuid.Uuid())}
    referential_file = os.path.join(
        subjectdir, "t1mri", "default_acquisition", "registration",
        "RawT1-{0}_default_acquisition.referential".format(sid))
    attributes = "attributes = {0}".format(json.dumps(source_referential))
    with open(referential_file, "w") as openfile:
        openfile.write(attributes)

    # Create a workflow from the morphologist pipeline
    workflow = Workflow(name="{0} {1}".format(study, sid),
                        jobs=[])
    workflow.root_group = []

    # Create the workflow
    wf = pipeline_workflow.workflow_from_pipeline(
        morphologist_pipeline.process, study_config=study_config)
    workflow.add_workflow(wf, as_group="{0}_{1}".format(study, sid))
    wffile = os.path.join(subjectdir, "{0}.wf".format(study))
    pickle.dump(workflow, open(wffile, "wb"))

    # Execute the workflow with somaworkflow
    if somaworkflow:
        controller = WorkflowController()
        wfid = controller.submit_workflow(
            workflow=workflow, name="{0}_{1}".format(study, sid))

        # Return the workflow status after execution
        while True:
            time.sleep(waittime)
            wfstatus = controller.workflow_status(wfid)
            if wfstatus not in [
                    "worklflow_not_started", "workflow_in_progress"]:
                break

    # Execute the workflow with subprocess
    else:
        # -> construct the ordered list of commands to be executed
        workflow_repr = workflow.to_dict()
        graph = Graph()
        for job in workflow_repr["jobs"]:
            graph.add_node(GraphNode(job, None))
        for link in workflow_repr["dependencies"]:
            graph.add_link(link[0], link[1])
        ordered_nodes = [str(node[0]) for node in graph.topological_sort()]
        commands = []
        jobs = workflow_repr["serialized_jobs"]
        temporaries = workflow_repr["serialized_temporary_paths"]
        barriers = workflow_repr["serialized_barriers"]
        for index in ordered_nodes:
            if index in jobs:
                commands.append(jobs[index]["command"])
            elif index in barriers:
                continue
            else:
                raise Exception("Unexpected node in workflow.")

        # -> Go through all commands
        tmpmap = {}
        for cmd in commands:
            # -> deal with temporary files
            for index, item in enumerate(cmd):
                if not isinstance(item, str):
                    if str(item) not in tmpmap:
                        if str(item) in temporaries:
                            struct = temporaries[str(item)]
                            name = cmd[2].split(";")[1].split()[-1]
                            tmppath = os.path.join(
                                subjectdir, "t1mri", "default_acquisition",
                                "tmp", str(item) + name + struct["suffix"])
                            tmpmap[str(item)] = tmppath
                        else:
                            raise MorphologistError(
                                "Can't complete command '{0}'.".format(
                                    cmd))
                    cmd[index] = tmpmap[str(item)]

            # -> execute the command
            worker = MorphologistWrapper(cmd)
            worker()
            if worker.exitcode != 0:
                raise MorphologistRuntimeError(
                    " ".join(worker.cmd), worker.stderr)

        wfstatus = "Done"
        wfid = "subprocess"

    return wffile, wfid, wfstatus
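
For reference, a hypothetical call of this helper (the subject identifier and paths are made up):

# hypothetical usage sketch, not part of the original source
wffile, wfid, wfstatus = morphologist_all(
    t1file="/data/raw/sub01.nii.gz",     # must be a compressed NIfTI file
    sid="sub01",
    outdir="/data/morphologist_out",
    somaworkflow=True)                   # submit through soma_workflow
print(wffile, wfid, wfstatus)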
Example #11
for module_name, module_pipelines in sorted_pipelines.items():

    # this docwriter is just used to manage short names
    docwriter = PipelineHelpWriter([], short_names=short_names)

    # Where the documentation will be written: a relative path from the
    # makefile
    short_name = docwriter.get_short_name(module_name)
    outdir = os.path.join(base_outdir, short_name,  "schema")
    if not os.path.isdir(outdir):
        os.makedirs(outdir)

    # Go through all pipelines
    for module_pipeline in module_pipelines:

        # Get pipeline instance
        pipeline_instance = get_process_instance(module_pipeline)

        # Get output files
        short_pipeline = docwriter.get_short_name(module_pipeline)
        image_name = os.path.join(outdir, short_pipeline + ".png")
        pipeline_tools.save_dot_image(
            pipeline_instance, image_name, nodesep=0.1, include_io=False,
            rankdir='TB')
        logger.info("Pipeline '{0}' representation has been written at "
                    "location '{1}'.".format(module_pipeline,
                                             os.path.abspath(image_name)))

    # Just print a summary
    logger.info("Summary: '{0}' files written for module '{1}'.".format(
        len(module_pipelines), module_name))
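
The variables this loop relies on (sorted_pipelines, short_names, base_outdir, logger) are not part of the excerpt; a plausible minimal setup, given purely as an assumption, could look like this:

# assumed setup for the loop above; names and values are illustrative only
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

base_outdir = "generated_docs"
short_names = {}    # shared across writers so short names stay consistent
sorted_pipelines = {
    "clinfmri.preproc": ["clinfmri.preproc.fmri_preproc.xml"],
    "clinfmri.utils": ["clinfmri.utils.fsl_bet.xml"],
}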
Example #12
    use_matlab=False,
    use_spm=False,
    spm_exec=args.spmbin,
    spm_standalone=True,
    use_nipype=True,
    output_directory=tmp_capsul,
    number_of_cpus=1,
    generate_logging=True,
    use_scheduler=True,
)

"""
Processing definition: create the <clinfmri.preproc.FmriPreproc> pipeline that
defines the different steps of the processing.
"""
pipeline = get_process_instance("clinfmri.preproc.fmri_preproc.xml")

"""
It is possible to display the pipeline.
"""
if args.display:
    import sys
    from PySide import QtGui
    from capsul.qt_gui.widgets import PipelineDevelopperView

    app = QtGui.QApplication(sys.argv)
    view = PipelineDevelopperView(pipeline)
    view.show()
    app.exec_()

"""
Example #13
def pilot_preproc():
    """
    FMRI preprocessings
    ===================
    """
    # Pilot imports
    import os
    from caps.toy_datasets import get_sample_data
    from capsul.study_config import StudyConfig
    from capsul.process import get_process_instance

    """
    Study configuration
    -------------------

    We first define the working directory and guarantee this folder exists on
    the file system:
    """
    working_dir = "/volatile/nsap/catalogue/pclinfmri/fmri_preproc"
    if not os.path.isdir(working_dir):
        os.makedirs(working_dir)

    """
    Now we get the pipeline from its definition (xml file)
    """
    pipeline = get_process_instance(
        "clinfmri.preproc.pipeline.fmri_preproc.xml")

    """
    And then define the study configuration (here we activate the smart
    caching module, which remembers which processes have already been
    executed):
    """
    study_config = StudyConfig(
        modules=["SmartCachingConfig", "MatlabConfig", "SPMConfig",
                 "FSLConfig",
                 "NipypeConfig"],
        use_smart_caching=True,
        fsl_config="/etc/fsl/4.1/fsl.sh",
        use_fsl=True,
        matlab_exec="/neurospin/local/bin/matlab",
        use_matlab=True,
        spm_directory="/i2bm/local/spm8",
        use_spm=True,
        output_directory=working_dir)

    """
    Load the toy dataset
    --------------------

    To do so, we use the get_sample_data function to download the toy
    dataset to the local file system (here the localizer data):
    """
    toy_dataset = get_sample_data("localizer")
    template_dataset = get_sample_data("mni_1mm")

    """
    The toy_dataset is an Enum structure with some specific elements of
    interest:

        * **??**: ??.

    Processing definition
    ---------------------

    First create the
    :ref:`slice timing pipeline <pclinfmri.preproc.pipeline.SliceTiming>` that
    defines the different steps of the processing:
    """
    pipeline = get_process_instance("pclinfmri.preproc.fmri_preproc.xml")
    print(pipeline.get_input_spec())

    """
    Now we need to parametrize this pipeline:
    """
    pipeline.fmri_file = toy_dataset.fmri
    pipeline.structural_file = toy_dataset.anat
    pipeline.realign_register_to_mean = True
    pipeline.select_slicer = "none"
    pipeline.select_registration = "template"
    pipeline.template_file = template_dataset.brain
    pipeline.force_repetition_time = toy_dataset.TR
    pipeline.force_slice_orders = [index + 1 for index in range(40)]

    """
    The pipeline is now ready to be run:
    """
    study_config.run(pipeline, executer_qc_nodes=True, verbose=1)

    """
    Results
    -------

    Finally, we print the pipeline outputs:
    """
    print("\nOUTPUTS\n")
    for trait_name, trait_value in pipeline.get_outputs().items():
        print("{0}: {1}".format(trait_name, trait_value))
Example #14
def pilot_smoothing():
    """ 
    Smoothing
    =========
    """
    # Pilot imports
    import os
    from caps.toy_datasets import get_sample_data
    from capsul.study_config import StudyConfig
    from capsul.process import get_process_instance

    """
    Study configuration
    -------------------

    We first define the working directory and guarantee this folder exists on
    the file system:
    """
    working_dir = "/volatile/nsap/pclinfmri/spmsmoothing"
    if not os.path.isdir(working_dir):
        os.makedirs(working_dir)

    """
    And then define the study configuration (here we activate the smart
    caching module, which remembers which processes have already been
    executed):
    """
    study_config = StudyConfig(
        modules=["SmartCachingConfig", "MatlabConfig", "SPMConfig",
                 "NipypeConfig"],
        use_smart_caching=True,
        matlab_exec="/neurospin/local/bin/matlab",
        use_matlab=True,
        spm_directory="/i2bm/local/spm8",
        use_spm=True,
        output_directory=working_dir)

    """
    Load the toy dataset
    --------------------

    To do so, we use the get_sample_data function to download the toy
    dataset to the local file system (here the localizer data):
    """
    toy_dataset = get_sample_data("localizer")

    """
    The toy_dataset is an Enum structure with some specific elements of
    interest:

        * **??**: ??.

    Processing definition
    ---------------------

    First create the
    :ref:`slice timing pipeline <pclinfmri.preproc.pipeline.SliceTiming>` that
    defines the different steps of the processing:
    """
    pipeline = get_process_instance("pclinfmri.utils.spm_smoothing.xml")
    print(pipeline.get_input_spec())

    """
    Now we need to parametrize this pipeline:
    """
    pipeline.image_file = toy_dataset.fmri

    """
    The pipeline is now ready to be run:
    """
    study_config.run(pipeline, executer_qc_nodes=True, verbose=1)

    """
    Results
    -------

    Finally, we print the pipeline outputs:
    """
    print("\nOUTPUTS\n")
    for trait_name, trait_value in pipeline.get_outputs().items():
        print("{0}: {1}".format(trait_name, trait_value))
Example #15
def pilot_preproc_spm_fmri(enable_display=False):
    """
    FMRI preprocessings
    ===================

    Preprocessing with the SPM slice timing and a normalization to a given
    template.

    Start to import required modules:
    """
    import os
    from mmutils.toy_datasets import get_sample_data
    from capsul.study_config import StudyConfig
    from capsul.process import get_process_instance

    """
    Study configuration
    -------------------

    We first define the working directory and guarantee this folder exists on
    the file system:
    """
    working_dir = "/volatile/nsap/catalogue/pclinfmri/fmri_preproc_spm_fmri"
    if not os.path.isdir(working_dir):
        os.makedirs(working_dir)

    """
    Then define the study configuration:
    """
    study_config = StudyConfig(
        modules=["MatlabConfig", "SPMConfig", "FSLConfig", "NipypeConfig"],
        use_smart_caching=False,
        fsl_config="/etc/fsl/4.1/fsl.sh",
        use_fsl=True,
        matlab_exec="/neurospin/local/bin/matlab",
        use_matlab=True,
        spm_directory="/i2bm/local/spm8",
        use_spm=True,
        output_directory=working_dir,
        number_of_cpus=1,
        generate_logging=True,
        use_scheduler=True,)

    """
    Load the toy dataset
    --------------------

    To do so, we use the get_sample_data function to download the toy
    dataset to the local file system (here the localizer data):
    """
    toy_dataset = get_sample_data("localizer")
    template_dataset = get_sample_data("mni_1mm")

    """
    The toy_dataset is an Enum structure with some specific elements of
    interest:

        * fmri: the functional volume.
        * anat: the structural volume.
        * TR: the repetition time.

    Processing definition
    ---------------------

    First create the
    :ref:`fMRI preprocessing pipeline <clinfmri.preproc.FmriPreproc>` that
    defines the different steps of the processing:
    """
    pipeline = get_process_instance("clinfmri.preproc.fmri_preproc.xml")
    print(pipeline.get_input_spec())

    """
    It is possible to display the pipeline.
    """
    if enable_display:
        import sys
        from PySide import QtGui
        from capsul.qt_gui.widgets import PipelineDevelopperView

        app = QtGui.QApplication(sys.argv)
        view = PipelineDevelopperView(pipeline)
        view.show()
        app.exec_()

    """
    Now we need to parametrize this pipeline:
    """
    pipeline.fmri_file = toy_dataset.fmri
    pipeline.structural_file = toy_dataset.anat
    pipeline.realign_register_to_mean = True
    pipeline.select_slicer = "spm"
    pipeline.select_normalization = "fmri"
    pipeline.template_file = template_dataset.brain
    pipeline.force_repetition_time = toy_dataset.TR
    pipeline.force_slice_orders = [index + 1 for index in range(40)]

    """
    The pipeline is now ready to be run:
    """
    study_config.run(pipeline, executer_qc_nodes=False, verbose=1)

    """
    Results
    -------

    Finally, we print the pipeline outputs:
    """
    print("\nOUTPUTS\n")
    for trait_name, trait_value in pipeline.get_outputs().items():
        print("{0}: {1}".format(trait_name, trait_value))
Example #16
    def generate_api_doc(self, pipeline, schema):
        """ Make autodoc documentation for a pipeline python module

        Parameters
        ----------
        pipeline : string
            python location of pipeline - e.g. 'caps.fmri.PIPELINE'
        schema : string
            path to the pipeline representation image

        Returns
        -------
        ad : string
            contents of API doc
        title : string
            the first line of the docstring
        """
        # First get the pipeline instance from its string description
        pipeline_instance = get_process_instance(pipeline)

        # Get the header, i.e. the first line of the docstring
        # Default title is ''
        header = pipeline_instance.__doc__
        title = ""
        if header:
            title = pipeline_instance.__doc__.splitlines()[0]

        # Add header to tell us that this documentation must not be edited
        ad = ".. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n"

        # Generate the page title: name of the pipeline
        ad += ":orphan:\n\n"
        chap_title = pipeline
        ad += (chap_title + "\n" +
               self.rst_section_levels[1] * len(chap_title) + "\n\n")

        # Generate a bookmark (for cross references)
        pipeline_name = pipeline_instance.__class__.__name__
        label = pipeline + ":"
        ad += "\n.. _{0}\n\n".format(label)
        # ad += "\n.. index:: {0}\n\n".format(pipeline_name)

        # Add a subtitle
        ad += (pipeline_name + "\n" +
               self.rst_section_levels[2] * len(pipeline_name) + "\n\n")

        # Set the current module
        currentmodule = ".".join(pipeline_instance.id.split(".")[:-1])
        ad += ".. currentmodule:: {0}\n\n".format(currentmodule)

        # Then add the trait description
        # It will generate two sections: input and output
        ad += pipeline_instance.get_help(returnhelp=True)

        # Add schema if generated
        if schema:
            schama_title = "Pipeline schema"
            ad += ("\n" + schama_title + "\n" +
                   "~" * len(schama_title) + "\n\n")
            ad += ".. image:: {0}\n".format(schema)
            ad += "    :height: 400px\n"
            ad += "    :align: center\n\n"

        return ad, title
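
A hypothetical way to drive this method (assuming it belongs to the PipelineHelpWriter class used in the docwriter snippets; the pipeline name comes from the other examples, the file names are made up):

# hypothetical usage sketch, not part of the original source
writer = PipelineHelpWriter([], short_names={})
ad, title = writer.generate_api_doc(
    "clinfmri.preproc.fmri_preproc.xml",   # pipeline shown in other examples
    "schema/fmri_preproc.png")             # assumed schema image path
with open("fmri_preproc.rst", "w") as rst_file:
    rst_file.write(ad)
print(title)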
Example #17
        # Test the cache mechanism
        proxy_process(f=2.5, i=__file__, l=[__file__])
        copied_file = os.path.join(self.workspace_dir,
                                   os.path.basename(__file__))
        self.assertEqual(
            proxy_process.s,
            "{{'i': '{0}', 'l': ['{0}'], 'f': 2.5}}".format(copied_file))

if 0:
    # Configure the environment
    study_config = StudyConfig(modules=["FSLConfig"],
                               fsl_config="/etc/fsl/4.1/fsl.sh")

    # Create a process instance
    ifname = "/home/ag239446/.local/share/nsap/t1_localizer.nii.gz"
    instance = get_process_instance("nipype.interfaces.fsl.Merge")

    # Create a decorated instance
    dec_instance = mem.cache(instance)
    print(dec_instance)
    print(dec_instance.__doc__)

    # Set parameters
    dec_instance.in_files = [ifname, ifname]
    dec_instance.dimension = "t"
    dec_instance.output_type = "NIFTI_GZ"
    dec_instance.set_output_directory("/home/ag239446/tmp/")

    # Test the cache mechanism
    result = dec_instance()
    print(dec_instance._merged_file)
Example #18
    # this docwriter is just used to manage short names
    docwriter = PipelineHelpWriter([], short_names=short_names)

    # Where the documentation will be written: a relative path from the
    # makefile
    short_name = docwriter.get_short_name(module_name)
    outdir = os.path.join(base_outdir, short_name, "schema")
    if not os.path.isdir(outdir):
        os.makedirs(outdir)

    # Go through all pipelines
    for module_pipeline in module_pipelines:

        # Get pipeline instance
        pipeline_instance = get_process_instance(module_pipeline)

        # Get output files
        short_pipeline = docwriter.get_short_name(module_pipeline)
        image_name = os.path.join(outdir, short_pipeline + ".png")
        pipeline_tools.save_dot_image(pipeline_instance,
                                      image_name,
                                      nodesep=0.1,
                                      include_io=False,
                                      rankdir='TB')
        logger.info("Pipeline '{0}' representation has been written at "
                    "location '{1}'.".format(module_pipeline,
                                             os.path.abspath(image_name)))

    # Just print a summary
    logger.info("Summary: '{0}' files written for module '{1}'.".format(
Example #19
    if 0:
        def write_state():
            state_file_name = '/tmp/state.json'
            json.dump(pipeline.pipeline_state(), open(state_file_name,'w'))
            print('Wrote', state_file_name)

        import sys
        #from PySide import QtGui
        from soma.qt_gui import qt_backend
        qt_backend.set_qt_backend('PyQt4')
        from soma.qt_gui.qt_backend import QtGui
        from capsul.qt_gui.widgets import PipelineDevelopperView
        #from capsul.qt_gui.widgets import PipelineUserView
        from capsul.process import get_process_instance

        app = QtGui.QApplication(sys.argv)
        pipeline = get_process_instance(MainTestPipeline)
        pipeline.on_trait_change(write_state,'selection_changed')
        view1 = PipelineDevelopperView(pipeline, show_sub_pipelines=True, allow_open_controller=True)
        view1.add_embedded_subpipeline('switch_pipeline', scale=0.7)
        view1.add_embedded_subpipeline('way1_1', scale=0.4)
        view1.add_embedded_subpipeline('way2_1', scale=0.4)
        view1.show()
        #view2 = PipelineUserView(pipeline)
        #view2.show()
        app.exec_()
        del view1
        #del view2