Example #1
    def test_simple_run(self):
        """ Method to test a simple 1 cpu call with the scheduler.
        """
        # Configure the environment
        study_config = StudyConfig(
            modules=[],
            use_smart_caching=True,
            output_directory=self.outdir,
            number_of_cpus=1,
            generate_logging=True,
            use_scheduler=True)

        # Create pipeline
        pipeline = get_process_instance(self.pipeline_name)
        pipeline.date_in_filename = True

        # Set pipeline input parameters
        dicom_dataset = get_sample_data("dicom")
        dcmfolder = os.path.join(self.outdir, "dicom")
        if not os.path.isdir(dcmfolder):
            os.makedirs(dcmfolder)
        shutil.copy(dicom_dataset.barre, os.path.join(dcmfolder, "heart.dcm"))
        pipeline.source_dir = dcmfolder

        # View pipeline
        if 0:
            from capsul.qt_gui.widgets import PipelineDevelopperView
            from PySide import QtGui
            app = QtGui.QApplication(sys.argv)
            view1 = PipelineDevelopperView(pipeline)
            view1.show()
            app.exec_()

        # Execute the pipeline in the configured study
        study_config.run(pipeline)
Example #2
    def setUp(self):
        self.pipeline = DummyPipeline()

        tmpout = tempfile.mkstemp('.txt', prefix='capsul_test_')
        os.close(tmpout[0])
        os.unlink(tmpout[1])

        # use a custom temporary soma-workflow dir to avoid concurrent
        # access problems
        tmpdb = tempfile.mkstemp('', prefix='soma_workflow')
        os.close(tmpdb[0])
        os.unlink(tmpdb[1])
        self.soma_workflow_temp_dir = tmpdb[1]
        os.mkdir(self.soma_workflow_temp_dir)
        swf_conf = '[%s]\nSOMA_WORKFLOW_DIR = %s\n' \
            % (socket.gethostname(), tmpdb[1])
        swconfig.Configuration.search_config_path \
            = staticmethod(lambda : StringIO.StringIO(swf_conf))

        self.output = tmpout[1]
        self.pipeline.input = '/tmp/file_in.nii'
        self.pipeline.output = self.output
        study_config = StudyConfig(modules=['SomaWorkflowConfig'])
        study_config.input_directory = '/tmp'
        study_config.somaworkflow_computing_resource = 'localhost'
        study_config.somaworkflow_computing_resources_config.localhost = {
            'transfer_paths': [],
        }
        self.study_config = study_config
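
Because this setUp patches swconfig.Configuration.search_config_path globally, a matching tearDown should undo the patch and remove the temporary soma-workflow directory. A minimal sketch, assuming the original attribute was saved before patching (the self._original_search_config_path name is hypothetical):

    def tearDown(self):
        # Restore the class attribute patched in setUp (assumes setUp saved
        # the original value as self._original_search_config_path).
        swconfig.Configuration.search_config_path = \
            self._original_search_config_path
        # Remove the temporary soma-workflow directory created in setUp.
        shutil.rmtree(self.soma_workflow_temp_dir, ignore_errors=True)
        # Remove the temporary output file if the test recreated it.
        if os.path.exists(self.output):
            os.unlink(self.output)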
Example #3
 def test_study_config_fsl(self):
     if not sys.platform.startswith('win'):
         try:
             study_config = StudyConfig(use_fsl=True)
         except EnvironmentError as e:
             # If FSL cannot be configured automatically, skip the test
             print(
                 'WARNING: Skip FSL test because it cannot be configured automatically:',
                 str(e),
                 file=sys.stderr)
             return
         test_image = '/usr/share/data/fsl-mni152-templates/MNI152_T1_1mm_brain.nii.gz'
         if not osp.exists(test_image):
             fsl_dir = os.environ.get('FSLDIR')
             test_image = None
             if not fsl_dir and study_config.fsl_config is not Undefined:
                 fsl_dir = osp.dirname(
                     osp.dirname(osp.dirname(study_config.fsl_config)))
             if fsl_dir:
                 test_image = glob(
                     osp.join(
                         fsl_dir,
                         'fslpython/envs/fslpython/lib/python*/site-packages/nibabel/tests/data/anatomical.nii'
                     ))
                 if test_image:
                     test_image = test_image[0]
             if not test_image:
                 print(
                     'WARNING: Skip FSL test because test data cannot be found',
                     file=sys.stderr)
                 return
         bet = study_config.get_process_instance(Bet)
         with tempfile.NamedTemporaryFile(suffix='.nii.gz') as tmp:
             bet.run(input_image=test_image, output_image=tmp.name)
             self.assertTrue(os.stat(tmp.name).st_size != 0)
Example #4
def pilot_gdti_estimation():
    """
    Generalized diffusion tensor estimation
    =======================================
    """
    # System import
    import os
    import sys
    import datetime
    import PySide.QtGui as QtGui

    # CAPSUL import
    from capsul.qt_gui.widgets import PipelineDevelopperView
    from capsul.study_config.study_config import StudyConfig
    from capsul.process.loader import get_process_instance
    """
    Study configuration
    -------------------

    We first define the working directory and guarantee this folder exists on
    the file system:
    """
    working_dir = "/volatile/nsap/clindmri/gdti"
    if not os.path.isdir(working_dir):
        os.makedirs(working_dir)
    """
    And then define the study configuration (here we activate the smart
    caching module that will be able to remember which process has already been
    processed):
    """
    study_config = StudyConfig(modules=["SmartCachingConfig"],
                               use_smart_caching=True,
                               output_directory=working_dir)

    # Create pipeline
    start_time = datetime.datetime.now()
    print "Start Pipeline Creation", start_time
    pipeline = get_process_instance("clindmri.estimation.gdti.xml")
    print "Done in {0} seconds.".format(datetime.datetime.now() - start_time)

    # View pipeline
    if 0:
        app = QtGui.QApplication(sys.argv)
        view1 = PipelineDevelopperView(pipeline)
        view1.show()
        app.exec_()
        del view1

    # Set pipeline input parameters
    pipeline.dfile = "/volatile/imagen/dmritest/000000022453/DTI/000000022453s011a1001.nii.gz"
    pipeline.bvalfile = "/volatile/imagen/dmritest/000000022453/DTI/000000022453s011a1001.bval"
    pipeline.bvecfile = "/volatile/imagen/dmritest/000000022453/DTI/000000022453s011a1001.bvec"
    pipeline.order = 2
    pipeline.odf = False
    print "Done in {0} seconds.".format(datetime.datetime.now() - start_time)

    # Execute the pipeline in the configured study
    study_config.run(pipeline, verbose=1)
Example #5
class TestRunProcess(unittest.TestCase):
    """ Execute a process.
    """
    def test_execution_with_cache(self):
        """ Execute a process with cache.
        """
        # Create a study configuration
        self.output_directory = tempfile.mkdtemp()
        self.study_config = StudyConfig(
            modules=["SmartCachingConfig"],
            use_smart_caching=True,
            output_directory=self.output_directory)

        # Call the test
        self.execution_dummy()

        # Rm temporary folder
        shutil.rmtree(self.output_directory)

    def test_execution_without_cache(self):
        """ Execute a process without cache.
        """
        # Create a study configuration
        self.output_directory = tempfile.mkdtemp()
        self.study_config = StudyConfig(
            modules=["SmartCachingConfig"],
            use_smart_caching=False,
            output_directory=self.output_directory)

        # Call the test
        self.execution_dummy()

        # Rm temporary folder
        shutil.rmtree(self.output_directory)

    def execution_dummy(self):
        """ Test to execute DummyProcess.
        """
        # Create a process instance
        process = DummyProcess()

        # Test the cache mechanism
        for param in [(1., 2.3), (2., 2.), (1., 2.3)]:
            self.study_config.run(process, executer_qc_nodes=False, verbose=1,
                                  f1=param[0], f2=param[1])
            self.assertEqual(process.res, param[0] * param[1])
            self.assertEqual(
                process.output_directory,
                os.path.join(self.output_directory, "{0}-{1}".format(
                    self.study_config.process_counter - 1, process.name)))
Example #6
 def setUp(self):
     self.pipeline = DummyPipeline()
     self.pipeline.input = '/tmp/file_in.nii'
     self.pipeline.output1 = '/tmp/file_out1.nii'
     self.pipeline.output2 = '/tmp/file_out2.nii'
     self.pipeline.output3 = '/tmp/file_out3.nii'
     study_config = StudyConfig() #modules=StudyConfig.default_modules \
                                #+ ['FomConfig'])
     study_config.input_directory = '/tmp'
     study_config.somaworkflow_computing_resource = 'localhost'
     study_config.somaworkflow_computing_resources_config.localhost = {
         'transfer_paths': [study_config.input_directory],
     }
     self.study_config = study_config
Example #9
class TestRunProcess(unittest.TestCase):
    """ Execute a process.
    """
    def test_execution_with_cache(self):
        """ Execute a process with cache.
        """
        # Create a study configuration
        self.output_directory = tempfile.mkdtemp()
        self.study_config = StudyConfig(modules=["SmartCachingConfig"],
                                        use_smart_caching=True,
                                        output_directory=self.output_directory)

        # Call the test
        self.execution_dummy()

        # Rm temporary folder
        shutil.rmtree(self.output_directory)

    def test_execution_without_cache(self):
        """ Execute a process without cache.
        """
        # Create a study configuration
        self.output_directory = tempfile.mkdtemp()
        self.study_config = StudyConfig(modules=["SmartCachingConfig"],
                                        use_smart_caching=False,
                                        output_directory=self.output_directory)

        # Call the test
        self.execution_dummy()

        # Rm temporary folder
        shutil.rmtree(self.output_directory)

    def execution_dummy(self):
        """ Test to execute DummyProcess.
        """
        # Create a process instance
        process = get_process_instance(DummyProcess,
                                       output_directory=self.output_directory)

        # Test the cache mechanism
        for param in [(1., 2.3), (2., 2.), (1., 2.3)]:
            self.study_config.run(process,
                                  executer_qc_nodes=False,
                                  verbose=1,
                                  f1=param[0],
                                  f2=param[1])
            self.assertEqual(process.res, param[0] * param[1])
            self.assertEqual(process.output_directory, self.output_directory)
Example #10
 def test_study_config_fs(self):
     freesurfer_config = "/i2bm/local/freesurfer/SetUpFreeSurfer.sh"
     if not os.path.exists(freesurfer_config) \
             or not sys.platform.startswith('linux'):
         # skip this test if FS is not available, or not running
         # on linux (other systems may see this directory but cannot use it)
         return
     study_config = StudyConfig(modules=['FreeSurferConfig'],
                                freesurfer_config = freesurfer_config)
     study_config.use_fs = True
     for varname in ["FREESURFER_HOME", "FSF_OUTPUT_FORMAT", "MNI_DIR",
                     "FSFAST_HOME", "FMRI_ANALYSIS_DIR", "FUNCTIONALS_DIR",
                     "MINC_BIN_DIR", "MNI_DATAPATH"]:
         self.assertTrue(os.environ.get(varname) is not None,
                         msg='%s environment variable not set' % varname)
Example #12
 def run_study_config_instanciation(self, tests, test_description,
                                    user_config_directory):
     for arguments, results in tests:
         args, kwargs = arguments
         sargs = ', '.join(repr(i) for i in args)
         if kwargs:
             sargs += ', '.join('%s=%s' % (repr(i), repr(j))
                                for i, j in six.iteritems(kwargs))
         sc = StudyConfig(*args, **kwargs)
         (expected_config, expected_modules, global_config_file,
          study_config_file) = results
         if global_config_file:
             global_config_file = os.path.join(user_config_directory,
                                               global_config_file)
         if study_config_file:
             study_config_file = os.path.join(user_config_directory,
                                              study_config_file)
         config = sc.get_configuration_dict()
         modules = sorted(sc.modules.keys())
         try:
             self.assertEqual(set(config), set(expected_config))
             for name, value in six.iteritems(expected_config):
                 self.assertEqual(
                     config[name], value,
                     'StudyConfig(%s) %s attribute %s should be %s but is '
                     '%s' % (sargs, test_description, name, repr(value),
                             repr(getattr(sc, name))))
             self.assertEqual(
                 modules, expected_modules,
                 'StudyConfig(%s) %s modules are %s but expected value is '
                 '%s' % (sargs, test_description, repr(modules),
                         repr(expected_modules)))
             self.assertEqual(
                 sc.global_config_file, global_config_file,
                 'StudyConfig(%s) %s global_config_file should be %s but '
                 'is %s' %
                 (sargs, test_description, repr(global_config_file),
                  repr(sc.global_config_file)))
             self.assertEqual(
                 sc.study_config_file, study_config_file,
                 'StudyConfig(%s) %s study_config_file should be %s but is '
                 '%s' % (sargs, test_description, repr(study_config_file),
                         repr(sc.study_config_file)))
         except Exception as e:
             raise EnvironmentError(
                 'When testing StudyConfig(*{0}, **{1}), got the following error: {2}'
                 .format(args, kwargs, e))
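
The helper above unpacks each test as ((args, kwargs), (expected_config, expected_modules, global_config_file, study_config_file)), so a caller could drive it like this (all values below are illustrative, not taken from a real test suite):

    # inside a unittest.TestCase method:
    tests = [
        # StudyConfig() with no arguments: the expected configuration dict,
        # module list and config file names here are hypothetical.
        (((), {}),
         ({'user_level': 0}, [], 'config.json', None)),
    ]
    self.run_study_config_instanciation(
        tests, 'default construction', '/tmp/user_config_dir')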
Example #13
    def setUp(self):
        self.pipeline = DummyPipeline()

        tmpout = tempfile.mkstemp('.txt', prefix='capsul_test_')
        os.close(tmpout[0])
        os.unlink(tmpout[1])

        self.output = tmpout[1]
        self.pipeline.input = '/tmp/file_in.nii'
        self.pipeline.output = self.output
        study_config = StudyConfig(modules=['SomaWorkflowConfig'])
        study_config.input_directory = '/tmp'
        study_config.somaworkflow_computing_resource = 'localhost'
        study_config.somaworkflow_computing_resources_config.localhost = {
            'transfer_paths': [],
        }
        self.study_config = study_config
Example #15
 def setUp(self):
     study_config = StudyConfig() #modules=StudyConfig.default_modules \
                                #+ ['FomConfig'])
     self.pipeline = DummyPipeline()
     self.pipeline.set_study_config(study_config)
     self.tmpdir = tempfile.mkdtemp()
     self.pipeline.input = osp.join(self.tmpdir, 'file_in.nii')
     self.pipeline.output1 = osp.join(self.tmpdir, 'file_out1.nii')
     self.pipeline.output2 = osp.join(self.tmpdir, 'file_out2.nii')
     self.pipeline.output3 = osp.join(self.tmpdir, 'file_out3.nii')
     study_config.input_directory = self.tmpdir
     study_config.somaworkflow_computing_resource = 'localhost'
     study_config.somaworkflow_computing_resources_config.localhost = {
         'transfer_paths': [study_config.input_directory],
     }
     self.study_config = study_config
     engine = self.study_config.engine
     engine.load_module('spm')
     #with engine.settings as session:
         #ids = [c.config_id for c in session.configs('spm', 'global')]
         #for id in ids:
             #session.remove_config('spm', 'global', {'config_id': id})
         #session.new_config('spm', 'global',
                            #{'version': '12', 'standalone': True})
     study_config.spm_standalone = True
     study_config.spm_version = '12'
     study_config.somaworkflow_keep_succeeded_workflows = False
     self.exec_ids = []
Example #16
    def setUp(self):
        self.pipeline = DummyPipeline()

        tmpdir = tempfile.mkdtemp('capsul_output_test')
        tmpout = os.path.join(tmpdir, 'capsul_test_node3_out.txt')

        self.tmpdir = tmpdir
        self.pipeline.input = os.path.join(tmpdir, 'file_in.nii')
        with open(self.pipeline.input, 'w') as f:
            print('Initial file content.', file=f)
        self.pipeline.output = tmpout
        study_config = StudyConfig(modules=['SomaWorkflowConfig'])
        study_config.input_directory = tmpdir
        study_config.somaworkflow_computing_resource = 'localhost'
        study_config.somaworkflow_computing_resources_config.localhost = {
            'transfer_paths': [],
        }
        self.study_config = study_config
Example #18
    def run_study_config_instanciation(self, tests, test_description,
                                       user_config_directory):
        for arguments, results in tests:
            args, kwargs = arguments
            sargs = ', '.join(repr(i) for i in args)
            if kwargs:
                sargs += ', '.join('%s=%s' % (repr(i),repr(j)) for i,j
                                   in six.iteritems(kwargs))
            sc = StudyConfig(*args, **kwargs)
            (expected_config, expected_modules, global_config_file,
             study_config_file) = results
            if global_config_file:
                global_config_file = os.path.join(user_config_directory,
                                                  global_config_file)
            if study_config_file:
                study_config_file = os.path.join(user_config_directory,
                                                  study_config_file)
            config = sc.get_configuration_dict()
            modules = sorted(sc.modules.keys())

            self.assertEqual(set(config), set(expected_config))
            for name, value in six.iteritems(expected_config):
                self.assertEqual(config[name], value,
                    'StudyConfig(%s) %s attribute %s should be %s but is '
                    '%s' % (sargs, test_description, name, repr(value),
                            repr(getattr(sc, name))))
            self.assertEqual(modules, expected_modules,
                'StudyConfig(%s) %s modules are %s but expected value is '
                '%s' % (sargs, test_description, repr(modules), 
                        repr(expected_modules)))
            self.assertEqual(sc.global_config_file, global_config_file,
                'StudyConfig(%s) %s global_config_file should be %s but '
                'is %s' % (sargs,test_description, 
                            repr(global_config_file), 
                            repr(sc.global_config_file)))
            self.assertEqual(sc.study_config_file, study_config_file,
                'StudyConfig(%s) %s study_config_file should be %s but is '
                '%s' % (sargs, test_description, repr(study_config_file), 
                        repr(sc.study_config_file)))
Example #19
    def __init__(self, database_location, database, require):
        '''
        CapsulEngine.__init__(self, database_location, database, require)

        The CapsulEngine constructor should not be called directly.
        Use the :func:`capsul_engine` factory function instead.
        '''
        super(CapsulEngine, self).__init__()

        self._settings = None

        self._database_location = database_location
        self._database = database

        self._loaded_modules = set()
        self.load_modules(require)

        from capsul.study_config.study_config import StudyConfig
        self.study_config = StudyConfig(engine=self)

        self._metadata_engine = from_json(
            database.json_value('metadata_engine'))

        self._connected_resource = ''
Example #20
 def test_study_config_fsl(self):
     if not sys.platform.startswith('win'):
         fsl_h = "/etc/fsl/4.1/fsl.sh"
         
         if os.path.exists(fsl_h):
             study_config = StudyConfig(modules=['FSLConfig'],
                 fsl_config = fsl_h)
             if not study_config.use_fsl:
                 return # skip this test if FSL is not available
             for varname in ["FSLDIR", "FSLOUTPUTTYPE", "FSLTCLSH", 
                             "FSLWISH", "FSLREMOTECALL", "FSLLOCKDIR", 
                             "FSLMACHINELIST", "FSLBROWSER"]:
                 self.assertTrue(os.environ.get(varname) is not None, 
                                 msg='%s environment variable not set' 
                                     % varname)
Example #22
    def test_study_config_fom(self):
        initial_config = {
            "input_directory": "/blop/basetests",
            "output_directory": "/blop/basetests",
            "input_fom": "",
            "output_fom": "",
            "shared_fom": "",
            "spm_directory": "/i2bm/local/spm8-standalone",
            "use_soma_workflow": False,
            "use_fom": True,
        }

        #soma_app = Application('soma.fom', '1.0')
        #soma_app.plugin_modules.append('soma.fom')
        #soma_app.initialize()
        study_config = StudyConfig(init_config=initial_config,
                                   modules=StudyConfig.default_modules +
                                   ['BrainVISAConfig', 'FomConfig'])
        self.assertTrue(hasattr(study_config.modules_data, 'foms'))
        self.assertTrue(hasattr(study_config.modules_data, 'fom_atp'))
        self.assertTrue(hasattr(study_config.modules_data, 'fom_pta'))
Example #23
import datetime

from capsul.study_config.study_config import StudyConfig

# CAPS import
from caps.nsap.functional_statistic.pipeline import SpmFirstLevelPipeline
from capsul.process.loader import get_process_instance
from caps.toy_datasets import get_sample_data


# Configure the environment
start_time = datetime.datetime.now()
print "Start Configuration", start_time
study_config = StudyConfig(
    modules=["MatlabConfig", "SPMConfig", "NipypeConfig", "FSLConfig",
             "FreeSurferConfig", "SmartCachingConfig"],
    matlab_exec="/neurospin/local/bin/matlab",
    spm_directory="/i2bm/local/spm8-6313",
    use_matlab=True,
    use_spm=True,
    use_nipype=True,
    use_smart_caching=True,
    output_directory="/volatile/nsap/catalogue/spm_first_level/")
print "Done in {0} seconds".format(datetime.datetime.now() - start_time)


# Create pipeline
start_time = datetime.datetime.now()
print "Start Pipeline Creation", start_time
pipeline = get_process_instance(
    "caps.nsap.functional_statistic.pipeline.spm_first_level_pipeline.xml")
print "Done in {0} seconds.".format(datetime.datetime.now() - start_time)

Example #24
class CapsulEngine(Controller):
    '''
    A CapsulEngine is the mandatory entry point of all software using Capsul.
    It contains objects to store configuration and metadata, defines the
    execution environment (possibly remote) and performs pipeline execution.

    A CapsulEngine must be created using the capsul.engine.capsul_engine
    factory function. For instance:

    from capsul.engine import capsul_engine
    ce = capsul_engine()

    By default, CapsulEngine only stores the necessary configuration. But it
    may be necessary to modify the Python environment globally to apply this
    configuration. For instance, Nipype must be configured globally. If SPM
    is configured in CapsulEngine, one must explicitly activate the
    configuration in order to modify the global configuration of Nipype for
    SPM. This is done by explicitly activating the execution context of the
    capsul engine with the following code:

    from capsul.engine import capsul_engine
    ce = capsul_engine()
    # Nipype is not configured here
    with ce.execution_context():
        # Nipype is configured here
    # Nipype may not be configured here
    '''
    
    default_modules = ['capsul.engine.module.spm',
                       'capsul.engine.module.fsl']
        
    def __init__(self, 
                 database_location,
                 database,
                 config=None):
        '''
        The CapsulEngine constructor should not be called directly.
        Use the capsul_engine() factory function instead.
        '''
        super(CapsulEngine, self).__init__()
        
        self._database_location = database_location
        self._database = database

        db_config = database.json_value('config')

        self._loaded_modules = {}
        self.modules = database.json_value('modules')
        if self.modules is None:
            self.modules = self.default_modules
        self.load_modules()
        
        execution_context = from_json(database.json_value('execution_context'))
        if execution_context is None:
            execution_context = ExecutionContext()
        self._execution_context = execution_context
            
        self._processing_engine = from_json(database.json_value('processing_engine'))        
        self._metadata_engine = from_json(database.json_value('metadata_engine'))
        
        for cfg in (db_config, config):
            if cfg:
                for n, v in cfg.items():
                    if isinstance(v, dict):
                        o = getattr(self, n)
                        if isinstance(o, Controller):
                            o.import_from_dict(v)
                            continue
                    setattr(self, n, v)

        self.init_modules()

        self.study_config = StudyConfig(engine=self)

    @property
    def database(self):
        return self._database

    @property
    def database_location(self):
        return self._database_location
    
    @property
    def execution_context(self):
        return self._execution_context

    @execution_context.setter
    def execution_context(self, execution_context):
        self._execution_context = execution_context
    
    @property
    def processing_engine(self):
        return self._processing_engine
    
    
    @property
    def metadata_engine(self):
        return self._metadata_engine
    
    @metadata_engine.setter
    def metadata_engine(self, metadata_engine):
        self._metadata_engine = metadata_engine
        self.database.set_json_value('metadata_engine', 
                                     to_json(self._metadata_engine))
    
    def load_modules(self):
        '''
        Call self.load_module for each required module. The list of modules
        to load is located in self.modules (if it is None,
        self.default_modules is used).
        '''
        if self.modules is None:
            modules = self.default_modules
        else:
            modules = self.modules
        
        for module in modules:
            self.load_module(module)
            
    def load_module(self, module):
        '''
        Load a module if it has not already been loaded (in that case,
        nothing is done).

        A module is a fully qualified name of a Python module (as accepted
        by the Python import statement). Such a module must define the two
        following functions (and may define two others, see below):

        def load_module(capsul_engine, module_name)
        def init_module(capsul_engine, module_name, loaded_module)

        load_module of each module is called once before reading and applying
        the configuration. It can be used to add traits to the CapsulEngine
        in order to define the configuration options that are used by the
        module. Values of these traits are automatically stored in the
        configuration database when self.save() is used, and they are
        retrieved from the database before initializing modules.

        init_module of each module is called once after the configuration has
        been read and the corresponding capsul engine attributes have been
        set.

        A module may also define the following functions:

        def enter_execution_context(execution_context)
        def exit_execution_context(execution_context)

        enter_execution_context (resp. exit_execution_context) is called each
        time the capsul engine's execution context is activated (resp.
        deactivated).
        '''
        if module not in self._loaded_modules:
            __import__(module)
            python_module = sys.modules.get(module)
            if python_module is None:
                raise ValueError('Cannot find %s in Python modules' % module)
            loader = getattr(python_module, 'load_module', None)
            if loader is None:
                raise ValueError('No function load_module() defined in %s' % module)
            self._loaded_modules[module] = loader(self, module)
            return True
        return False
    
    def init_modules(self):
        '''
        Call self.init_module for each required module. The list of modules
        to initialize is located in self.modules (if it is None,
        self.default_modules is used).
        '''
        if self.modules is None:
            modules = self.default_modules
        else:
            modules = self.modules
        for module in modules:
            self.init_module(module)
    
    def init_module(self, module):
        '''
        Initialize a module by calling its init_module function.
        '''
        python_module = sys.modules.get(module)
        if python_module is None:
            raise ValueError('Cannot find %s in Python modules' % module)
        initializer = getattr(python_module, 'init_module', None)
        if initializer is None:
            raise ValueError('No function init_module() defined in %s' % module)
        initializer(self, module, self._loaded_modules[module])
    
    def save(self):
        '''
        Save the full status of the CapsulEngine in the database.
        The following items are set in the database:
        
          'execution_context': a JSON serialization of self.execution_context
          'processing_engine': a JSON serialization of self.processing_engine
          'metadata_engine': a JSON serialization of self.metadata_engine
          'config': a dictionary containing configuration. This dictionary is
              obtained using traits defined on capsul engine (ignoring values
              that are undefined).
        '''
        self.database.set_json_value('execution_context', 
                                     to_json(self._execution_context))
        if self._processing_engine:
            self.database.set_json_value('processing_engine', 
                                        to_json(self._processing_engine))
        if self._metadata_engine:
            self.database.set_json_value('metadata_engine', 
                                        to_json(self._metadata_engine))
        config = {}
        for n in self.user_traits().keys():
            v = getattr(self, n)
            if v is Undefined:
                continue
            if isinstance(v, Controller):
                v = v.export_to_dict(exclude_undefined=True)
                if not v:
                    continue
            config[n] = v
        self.database.set_json_value('config', config)
        self.database.commit()
    
    
    #
    # Method imported from self.database
    #
    def set_named_directory(self, name, path):
        return self.database.set_named_directory(name, path)
    
    def named_directory(self, name):
        return self.database.named_directory(name)
    
    def named_directories(self):
        return self.database.named_directories()
    
    
    def set_json_value(self, name, json_value):
        return self.database.set_json_value(name, json_value)

    def json_value(self, name):
        return self.database.json_value(name)
        
    
    def set_path_metadata(self, path, metadata, named_directory=None):
        return self.database.set_path_metadata(path, metadata, named_directory)

    def path_metadata(self, path, named_directory=None):
        return self.database.path_metadata(path, named_directory)


    #
    # Processes and pipelines related methods
    #
    def get_process_instance(self, process_or_id, **kwargs):
        '''
        The only official way to get a process instance is to use this method.
        For now, it simply calls self.study_config.get_process_instance
        but it will change in the future.
        '''
        instance = self.study_config.get_process_instance(process_or_id,
                                                          **kwargs)
        return instance

    def start(self, process, history=True):
        '''
        Asynchronously start the execution of a process in the environment
        defined by self.processing_engine. Returns a string that is a uuid
        of the process execution and can be used to get the status of the
        execution or wait for its termination.

        If history is True, an entry of the process execution is stored in
        the database. The content of this entry is to be defined, but it will
        contain the process parameters (to restart the process) and will be
        updated on process termination (for instance to store execution time
        if possible).
        '''
        raise NotImplementedError()

    def executions(self):
        raise NotImplementedError()

    def interrupt(self, execution_id):
        '''
        Try to stop the execution of a process. Does not wait for the process
        to be terminated.
        '''
        raise NotImplementedError()
    
    def wait(self, execution_id):
        '''
        Wait for the end of a process execution (either normal termination,
        interruption or error).
        '''
        raise NotImplementedError()
    
    def status(self, execution_id):
        '''
        Return information about a process execution. The content of this
        information is still to be defined.
        '''
        raise NotImplementedError()

    def detailed_information(self, execution_id):
        raise NotImplementedError()
    
    def call(self, process, history=True):
        eid = self.start(process, history)
        return self.wait(eid)
    
    def check_call(self, process, history=True):
        eid = self.start(process, history)
        status = self.wait(eid)
        self.raise_for_status(status, eid)

    def raise_for_status(self, status, execution_id=None):
        raise NotImplementedError()
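
The load_module docstring above spells out the contract a configuration module must honour: define load_module() and init_module(), and optionally the two execution-context hooks. A minimal sketch of such a module, with made-up module and trait names (my_tool_path is purely illustrative):

# my_engine_module.py -- a hypothetical CapsulEngine configuration module
import traits.api as traits

def load_module(capsul_engine, module_name):
    # Called once, before the configuration is read: declare the traits
    # holding this module's configuration options.
    capsul_engine.add_trait(
        'my_tool_path',
        traits.Str(traits.Undefined, desc='path to a hypothetical tool'))
    # The returned value is kept in capsul_engine._loaded_modules.
    return None

def init_module(capsul_engine, module_name, loaded_module):
    # Called once, after configuration values have been applied to the
    # capsul engine attributes; nothing to do in this sketch.
    pass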
Example #25
def pilot_fsl_preproc():
    """
    FSL preprocessings
    ==================
    """
    # System import
    import os
    import sys
    import datetime
    import PySide.QtGui as QtGui

    # CAPSUL import
    from capsul.qt_gui.widgets import PipelineDevelopperView
    from capsul.study_config.study_config import StudyConfig
    from capsul.process.loader import get_process_instance

    """
    Study configuration
    -------------------

    We first define the working directory and guarantee this folder exists on
    the file system:
    """
    working_dir = "/volatile/nsap/clindmri/fslpreproc"
    if not os.path.isdir(working_dir):
        os.makedirs(working_dir)

    """
    And then define the study configuration (here we activate the smart
    caching module that will be able to remember which process has already been
    processed):
    """
    study_config = StudyConfig(
        modules=["SmartCachingConfig", "FSLConfig", "MatlabConfig",
                 "SPMConfig", "NipypeConfig"],
        use_smart_caching=True,
        fsl_config="/etc/fsl/4.1/fsl.sh",
        use_fsl=True,        
        output_directory=working_dir)

    # Create pipeline
    start_time = datetime.datetime.now()
    print "Start Pipeline Creation", start_time
    pipeline = get_process_instance("clindmri.preproc.fsl_preproc.xml")
    print "Done in {0} seconds.".format(datetime.datetime.now() - start_time)

    # View pipeline
    if 0:
        app = QtGui.QApplication(sys.argv)
        view1 = PipelineDevelopperView(pipeline)
        view1.show()
        app.exec_()
        del view1

    # Set pipeline input parameters
    pipeline.dfile = "/volatile/imagen/dmritest/000000022453/DTI/000000022453s011a1001.nii.gz"
    pipeline.bvalfile = "/volatile/imagen/dmritest/000000022453/DTI/000000022453s011a1001.bval"
    pipeline.bvecfile = "/volatile/imagen/dmritest/000000022453/DTI/000000022453s011a1001.bvec"
    print "Done in {0} seconds.".format(datetime.datetime.now() - start_time)

    # print(pipeline.nodes["eddy"].process._nipype_interface.inputs)
    print(pipeline.nodes["eddy"].process._nipype_interface.cmdline)

    # Execute the pipeline in the configured study
    study_config.run(pipeline, verbose=1)
Example #26
def pilot_qa_fmri():
    """
    Imports
    -------

    This code needs the 'capsul' and 'mmutils' packages in order to
    instantiate and execute the pipeline and to get a toy dataset.
    These packages are available in the 'neurospin' source list or on PyPI.
    """
    # Capsul import
    from capsul.study_config.study_config import StudyConfig
    from capsul.process.loader import get_process_instance

    # Mmutils import
    from mmutils.toy_datasets import get_sample_data
    """
    Parameters
    ----------

    The 'pipeline_name' parameter contains the location of the pipeline XML
    description that will perform the fMRI quality assurance, and 'outdir' the
    location of the pipeline's results: in this case a temporary directory.
    """

    pipeline_name = "mmqa.fmri.fmri_quality_assurance_bbox.xml"
    outdir = tempfile.mkdtemp()
    """
    Capsul configuration
    --------------------

    A 'StudyConfig' has to be instantiated in order to execute the pipeline
    properly. It enables us to define the results directory through the
    'output_directory' attribute, the number of CPUs to be used through the
    'number_of_cpus' attribute, and to specify that we want a log of the
    processing steps through the 'generate_logging' attribute. The
    'use_scheduler' attribute must be set to True if more than 1 CPU is used.
    """
    study_config = StudyConfig(number_of_cpus=1,
                               generate_logging=True,
                               use_scheduler=True,
                               output_directory=outdir)
    """
    Get the toy dataset
    -------------------

    The toy dataset is composed of a functional image that is downloaded
    if necessary through the 'get_sample_data' function and exported
    locally.
    """

    localizer_dataset = get_sample_data("localizer_extra")
    """
    Pipeline definition
    -------------------

    The pipeline XML description is first imported through the
    'get_process_instance' method, and the resulting pipeline instance is
    parametrized: in this example we set the functional image, the
    repetition time, the ROI size and the output score file.
    """

    pipeline = get_process_instance(pipeline_name)
    pipeline.image_file = localizer_dataset.fmri
    pipeline.repetition_time = 2.0
    pipeline.exclude_volume = []
    pipeline.roi_size = 21
    pipeline.score_file = os.path.join(outdir, "scores.json")
    """
    Pipeline representation
    -----------------------

    By executing this block of code, a pipeline representation can be
    displayed. This representation is composed of boxes connected to each
    other.
    """
    if 0:
        from capsul.qt_gui.widgets import PipelineDevelopperView
        from PySide import QtGui
        app = QtGui.QApplication(sys.argv)
        view1 = PipelineDevelopperView(pipeline)
        view1.show()
        app.exec_()
    """
    Pipeline execution
    ------------------

    Finally the pipeline is executed in the defined 'study_config'.
    """
    study_config.run(pipeline)
    """
    Access the result
    -----------------

    Display the computed scores
    """

    scores_file = pipeline.score_file

    with open(scores_file, "r") as _file:
        scores = json.load(_file)

    for key, value in scores.items():
        print("{0} = {1}".format(key, value))
Example #27
def morphologist_all(t1file, sid, outdir, study="morphologist", waittime=10,
                     somaworkflow=False,
                     spmexec="/i2bm/local/spm8-standalone/run_spm8.sh",
                     spmdir="/i2bm/local/spm8-standalone"):
    """ Performs all the Morphologist steps.

    Steps:

    1- Ensure image orientation and reorient it if needed (Prepare Subject for
       Anatomical Pipeline).
    2- Computation of a brain mask (Brain Mask Segmentation).
    3- Computation of a mask for each hemisphere (Split Brain Mask).
    4- A grey/white classification of each hemisphere to perform "Voxel Based
       Morphometry" (Grey White Classification) and spherical triangulation of
       cortical hemispheres (Grey White Surface).
    5- Spherical triangulation of the external interface of the cortex of one
       or two hemispheres (Get Spherical Hemi Surface).
    6- Computation of a graph representing the cortical fold topography
       (Cortical Fold Graph).
    7- Automatic identification of the cortical sulci (Automatic Sulci
       Recognition), located in the "sulci" toolbox.

    The execution is performed with soma_workflow that has to be installed in
    the bv_env environment.

    To check the workflow submission, use the 'soma_workflow_gui' command.

    If the input 't1file' does not have the expected extension, an Exception
    will be raised.
    If $outdir/$study/$sid has already been created, an Exception will
    be raised.

    Parameters
    ----------
    t1file: str (mandatory)
        the path to a ".nii.gz" anatomical T1 weighted file.
    sid: str (mandatory)
        a subject identifier.
    outdir: str (mandatory)
        the morphologist output files will be written in $outdir/$study/$sid.
    study: str (mandatory)
        the name of the study.
    waittime: float (optional, default 10)
        a delay (in seconds) used to check the workflow status.
    somaworkflow: bool (optional, default False)
        if True use somaworkflow for the execution.
    spmexec: str (optional)
        the path to the standalone SPM execution file.
    spmdir: str (optional)
        the standalone SPM directory.

    Returns
    -------
    wffile: str
        a file containing the submitted workflow.
    wfid: int
        the submitted workflow identifier.
    wfstatus: str
        the submitted workflow status after 'waittime' seconds.
    """
    # Check roughly the input file extension
    if not t1file.endswith(".nii.gz"):
        raise Exception("'{0}' is not a COMPRESSED NIFTI file.".format(t1file))

    # Create a configuration for the morphologist study
    study_config = StudyConfig(
        modules=StudyConfig.default_modules + ["FomConfig", "BrainVISAConfig"])
    study_dict = {
        "name": "morphologist_fom",
        "input_directory": outdir,
        "output_directory": outdir,
        "input_fom": "morphologist-auto-nonoverlap-1.0",
        "output_fom": "morphologist-auto-nonoverlap-1.0",
        "shared_fom": "shared-brainvisa-1.0",
        "spm_directory": spmdir,
        "use_soma_workflow": True,
        "use_fom": True,
        "spm_standalone": True,
        "use_matlab": False,
        "volumes_format": "NIFTI gz",
        "meshes_format": "GIFTI",
        "use_spm": True,
        "spm_exec": spmexec,
        "study_config.somaworkflow_computing_resource": "localhost",
        "somaworkflow_computing_resources_config": {
            "localhost": {
            }
        }
    }
    study_config.set_study_configuration(study_dict)

    # Create the morphologist pipeline
    pipeline = get_process_instance(
        "morphologist.capsul.morphologist.Morphologist")
    morphologist_pipeline = process_with_fom.ProcessWithFom(
        pipeline, study_config)
    morphologist_pipeline.attributes = dict(
        (trait_name, getattr(morphologist_pipeline, trait_name))
        for trait_name in morphologist_pipeline.user_traits())
    morphologist_pipeline.attributes["center"] = "morphologist"
    morphologist_pipeline.attributes["subject"] = sid
    morphologist_pipeline.create_completion()

    # Create morphologist expected tree
    # ToDo: use ImportT1 from axon
    subjectdir = os.path.join(outdir, study, sid)
    if os.path.isdir(subjectdir):
        raise Exception("Folder '{0}' already created.".format(subjectdir))
    os.makedirs(os.path.join(
        subjectdir, "t1mri", "default_acquisition",
        "default_analysis", "folds", "3.1", "default_session_auto"))
    os.makedirs(os.path.join(
        subjectdir, "t1mri", "default_acquisition",
        "registration"))
    os.makedirs(os.path.join(
        subjectdir, "t1mri", "default_acquisition",
        "segmentation", "mesh"))
    os.makedirs(os.path.join(
        subjectdir, "t1mri", "default_acquisition",
        "tmp"))

    # Copy T1 file in the morphologist expected location
    destfile = os.path.join(subjectdir, "t1mri",
                            "default_acquisition", sid + ".nii.gz")
    shutil.copy(t1file, destfile)

    # Create source_referential morphologist expected file
    source_referential = {"uuid": str(soma.uuid.Uuid())}
    referential_file = os.path.join(
        subjectdir, "t1mri", "default_acquisition", "registration",
        "RawT1-{0}_default_acquisition.referential".format(sid))
    attributes = "attributes = {0}".format(json.dumps(source_referential))
    with open(referential_file, "w") as openfile:
        openfile.write(attributes)

    # Create a workflow from the morphologist pipeline
    workflow = Workflow(name="{0} {1}".format(study, sid),
                        jobs=[])
    workflow.root_group = []

    # Create the workflow
    wf = pipeline_workflow.workflow_from_pipeline(
        morphologist_pipeline.process, study_config=study_config)
    workflow.add_workflow(wf, as_group="{0}_{1}".format(study, sid))
    wffile = os.path.join(subjectdir, "{0}.wf".format(study))
    with open(wffile, "wb") as openfile:
        # pickle requires a binary-mode file object
        pickle.dump(workflow, openfile)

    # Execute the workflow with somaworkflow
    if somaworkflow:
        controller = WorkflowController()
        wfid = controller.submit_workflow(
            workflow=workflow, name="{0}_{1}".format(study, sid))

        # Return the workflow status after execution
        while True:
            time.sleep(waittime)
            wfstatus = controller.workflow_status(wfid)
            if wfstatus not in [
                    "worklflow_not_started", "workflow_in_progress"]:
                break

    # Execute the workflow with subprocess
    else:
        # -> construct the ordered list of commands to be executed
        workflow_repr = workflow.to_dict()
        graph = Graph()
        for job in workflow_repr["jobs"]:
            graph.add_node(GraphNode(job, None))
        for link in workflow_repr["dependencies"]:
            graph.add_link(link[0], link[1])
        ordered_nodes = [str(node[0]) for node in graph.topological_sort()]
        commands = []
        jobs = workflow_repr["serialized_jobs"]
        temporaries = workflow_repr["serialized_temporary_paths"]
        barriers = workflow_repr["serialized_barriers"]
        for index in ordered_nodes:
            if index in jobs:
                commands.append(jobs[index]["command"])
            elif index in barriers:
                continue
            else:
                raise Exception("Unexpected node in workflow.")

        # -> Go through all commands
        tmpmap = {}
        for cmd in commands:
            # -> deal with temporary files
            for index, item in enumerate(cmd):
                if not isinstance(item, str):
                    if str(item) not in tmpmap:
                        if str(item) in temporaries:
                            struct = temporaries[str(item)]
                            name = cmd[2].split(";")[1].split()[-1]
                            tmppath = os.path.join(
                                subjectdir, "t1mri", "default_acquisition",
                                "tmp", str(item) + name + struct["suffix"])
                            tmpmap[str(item)] = tmppath
                        else:
                            raise MorphologistError(
                                "Can't complete command '{0}'.".format(
                                    cmd))
                    cmd[index] = tmpmap[str(item)]

            # -> execute the command
            worker = MorphologistWrapper(cmd)
            worker()
            if worker.exitcode != 0:
                raise MorphologistRuntimeError(
                    " ".join(worker.cmd), worker.stderr)

        wfstatus = "Done"
        wfid = "subprocess"

    return wffile, wfid, wfstatus
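
For reference, a hedged usage sketch of morphologist_all (the subject path and identifiers below are illustrative only):

wffile, wfid, wfstatus = morphologist_all(
    t1file="/data/subjects/subj01/anat_subj01.nii.gz",  # hypothetical path
    sid="subj01",
    outdir="/data/morphologist_out",
    somaworkflow=True)
print(wffile, wfid, wfstatus)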
Example #29
class CapsulMainWindow(MyQUiLoader):
    """ Capsul main window.
    """
    def __init__(self, pipeline_menu, ui_file, default_study_config=None):
        """ Method to initialize the Capsul main window class.

        Parameters
        ----------
        pipeline_menu: hierarchic dict
            each key is a sub-module of the module. Leaves contain a list
            with the url to the documentation.
        ui_file: str (mandatory)
            a filename containing the user interface description
        default_study_config: ordered dict (mandatory)
            some parameters for the study configuration
        """
        # Inheritance: load user interface window
        MyQUiLoader.__init__(self, ui_file)

        # Class parameters
        self.pipeline_menu = pipeline_menu
        self.pipelines = {}
        self.pipeline = None
        self.path_to_pipeline_doc = {}

        # Define dynamic controls
        self.controls = {
            QtGui.QAction: ["actionHelp", "actionQuit", "actionBrowse",
                            "actionLoad", "actionChangeView",
                            "actionParameters", "actionRun",
                            "actionStudyConfig", "actionQualityControl"],
            QtGui.QTabWidget: ["display", ],
            QtGui.QDockWidget: ["dockWidgetBrowse", "dockWidgetParameters",
                                "dockWidgetStudyConfig", "dockWidgetBoard"],
            QtGui.QWidget: ["dock_browse", "dock_parameters",
                            "dock_study_config", "dock_board"],
            QtGui.QTreeWidget: ["menu_treectrl", ],
            QtGui.QLineEdit: ["search", ],
        }

        # Add ui class parameter with the dynamic controls and initialize
        # default values
        self.add_controls_to_ui()
        self.ui.display.setTabsClosable(True)

        # Create the study configuration
        self.study_config = StudyConfig(default_study_config)

        # Create the controller widget associated to the study
        # configuration controller
        self.study_config_widget = ScrollControllerWidget(
            self.study_config, live=True)
        self.ui.dockWidgetStudyConfig.setWidget(self.study_config_widget)

        # Create the pipeline menu
        fill_treectrl(self.ui.menu_treectrl, self.pipeline_menu)

        # Signal for window interface
        self.ui.actionHelp.triggered.connect(self.onHelpClicked)
        self.ui.actionChangeView.triggered.connect(self.onChangeViewClicked)

        # Signal for tab widget
        self.ui.display.currentChanged.connect(self.onCurrentTabChanged)
        self.ui.display.tabCloseRequested.connect(self.onCloseTabClicked)

        # Signal for dock widget
        self.ui.actionBrowse.triggered.connect(self.onBrowseClicked)
        self.ui.actionParameters.triggered.connect(self.onParametersClicked)
        self.ui.actionStudyConfig.triggered.connect(self.onStudyConfigClicked)
        self.ui.actionQualityControl.triggered.connect(self.onQualityControlClicked)

        # Initialize properly the visibility of each dock widget
        self.onBrowseClicked()
        self.onParametersClicked()
        self.onStudyConfigClicked()
        self.onQualityControlClicked()

        # Signal for the pipeline creation
        self.ui.search.textChanged.connect(self.onSearchClicked)
        self.ui.menu_treectrl.currentItemChanged.connect(
            self.onTreeSelectionChanged)
        self.ui.actionLoad.triggered.connect(self.onLoadClicked)

        # Signal for the execution
        self.ui.actionRun.triggered.connect(self.onRunClicked)

        # Set default values

        # Set some tooltips

    def show(self):
        """ Shows the widget and its child widgets.
        """
        self.ui.show()

    def add_controls_to_ui(self):
        """ Method to find dynamic controls
        """
        # Error message template
        error_message = "{0} has no attribute '{1}'"

        # Go through the class dynamic controls
        for control_type, control_item in six.iteritems(self.controls):

            # Get the dynamic control name
            for control_name in control_item:

                # Try to set the control value to the ui class parameter
                try:
                    value = self.ui.findChild(control_type, control_name)
                    if value is None:
                        logger.error(error_message.format(
                            type(self.ui), control_name))
                    setattr(self.ui, control_name, value)
                except Exception:
                    logger.error(error_message.format(
                        type(self.ui), control_name))

    ###########################################################################
    # Slots   
    ###########################################################################

    def onRunClicked(self):
        """ Event to execute the process/pipeline.
        """
        self.study_config.run(self.pipeline, executer_qc_nodes=True, verbose=1)

    def onBrowseClicked(self):
        """ Event to show / hide the browse dock widget.
        """
        # Show browse dock widget
        if self.ui.actionBrowse.isChecked():
            self.ui.dockWidgetBrowse.show()

        # Hide browse dock widget
        else:
            self.ui.dockWidgetBrowse.hide()

    def onParametersClicked(self):
        """ Event to show / hide the parameters dock widget.
        """
        # Show parameters dock widget
        if self.ui.actionParameters.isChecked():
            self.ui.dockWidgetParameters.show()

        # Hide parameters dock widget
        else:
            self.ui.dockWidgetParameters.hide()

    def onStudyConfigClicked(self):
        """ Event to show / hide the study config dock widget.
        """
        # Show study configuration dock widget
        if self.ui.actionStudyConfig.isChecked():
            self.ui.dockWidgetStudyConfig.show()

        # Hide study configuration dock widget
        else:
            self.ui.dockWidgetStudyConfig.hide()

    def onQualityControlClicked(self):
        """ Event to show / hide the board dock widget.
        """
        # Create and show board dock widget
        if self.ui.actionQualityControl.isChecked():

            # Create the board widget associated to the pipeline controller
            # Create on the fly in order to get the last status
            # ToDo: add callbacks
            if self.pipeline is not None:
                # board_widget = BoardWidget(
                #     self.pipeline, parent=self.ui.dockWidgetParameters,
                #     name="board")
                board_widget = ScrollControllerWidget(
                    self.pipeline, name="outputs", live=True,
                    hide_labels=False, select_controls="outputs",
                    disable_controller_widget=True)
                #board_widget.setEnabled(False)
                self.ui.dockWidgetBoard.setWidget(board_widget)

            # Show the board widget
            self.ui.dockWidgetBoard.show()

        # Hide board dock widget
        else:
            self.ui.dockWidgetBoard.hide()

    def onSearchClicked(self):
        """ Event to refresh the menu tree control that contains the pipeline
        modules.
        """
        # Clear the current tree control
        self.ui.menu_treectrl.clear()

        # Build the new filtered tree control
        fill_treectrl(self.ui.menu_treectrl, self.pipeline_menu,
                      self.ui.search.text().lower())

    def onTreeSelectionChanged(self):
        """ Event to refresh the pipeline load button status.
        """
        # Get the current item
        item = self.ui.menu_treectrl.currentItem()
        if item is None:
            return

        # Check if we have selected a pipeline in the tree and enable / disable
        # the load button
        url = item.text(2)
        if url == "None":
            self.ui.actionLoad.setEnabled(False)
        else:
            self.ui.actionLoad.setEnabled(True)

    def onRunStatus(self):
        """ Event to refresh the run button status.

        When all the controller widget controls are correctly filled, enable
        the user to execute the pipeline.
        """
        # Get the controller widget
        controller_widget = self.ui.dockWidgetParameters.widget().controller_widget

        # Get the controller widget status
        is_valid = controller_widget.is_valid()

        # Depending on the controller widget status enable / disable
        # the run button
        self.ui.actionRun.setEnabled(is_valid)

    def onLoadClicked(self):
        """ Event to load and display a pipeline.
        """
        # Get the pipeline instance from its string description
        item = self.ui.menu_treectrl.currentItem()
        description_list = [str(x) for x in [item.text(1), item.text(0)]
                            if x != ""]
        process_description = ".".join(description_list)
        self.pipeline = get_process_instance(process_description)

        # Create the controller widget associated to the pipeline
        # controller
        pipeline_widget = ScrollControllerWidget(
            self.pipeline, live=True, select_controls="inputs")
        self.ui.dockWidgetParameters.setWidget(pipeline_widget)

        # Add observer to refresh the run button
        controller_widget = pipeline_widget.controller_widget
        for control_name, control \
                in six.iteritems(controller_widget._controls):

            # Unpack the control item
            trait, control_class, control_instance, control_label = control

            # Add the new callback
            control_class.add_callback(self.onRunStatus, control_instance)

        # Refresh manually the run button status the first time
        self.onRunStatus()

        # Store the pipeline documentation root path
        self.path_to_pipeline_doc[self.pipeline.id] = item.text(2)

        # Store the pipeline instance
        self.pipelines[self.pipeline.name] = (
            self.pipeline, pipeline_widget)

        # Create the widget
        widget = PipelineDevelopperView(self.pipeline)
        self._insert_widget_in_tab(widget)

        # Connect the subpipeline clicked signal to the
        # onLoadSubPipelineClicked slot
        widget.subpipeline_clicked.connect(self.onLoadSubPipelineClicked)

    def onLoadSubPipelineClicked(self, name, sub_pipeline, modifiers):
        """ Event to load and display a sub pipeline.
        """
        # Store the pipeline instance in class parameters
        self.pipeline = self.pipeline.nodes[name].process

        # Create the controller widget associated to the sub pipeline
        # controller: if the sub pipeline is a ProcessIteration, disable
        # the corresponding controller widget since this pipeline is generated
        # on the fly and is not directly synchronized with the rest of the
        # pipeline.
        is_iterative_pipeline = False
        if isinstance(self.pipeline, ProcessIteration):
            is_iterative_pipeline = True
        pipeline_widget = ScrollControllerWidget(
            self.pipeline, live=True, select_controls="inputs",
            disable_controller_widget=is_iterative_pipeline)
        self.ui.dockWidgetParameters.setWidget(pipeline_widget)

        # Store the sub pipeline instance
        self.pipelines[self.pipeline.name] = (
            self.pipeline, pipeline_widget)

        # Create the widget
        widget = PipelineDevelopperView(self.pipeline)
        self._insert_widget_in_tab(widget)

        # Connect the subpipeline clicked signal to the
        # onLoadSubPipelineClicked slot
        widget.subpipeline_clicked.connect(self.onLoadSubPipelineClicked)

    def onCloseTabClicked(self, index):
        """ Event to close a pipeline view.
        """
        # Remove the pipeline from the intern pipeline list
        pipeline, pipeline_widget = self.pipelines[
            self.ui.display.tabText(index)]
        pipeline_widget.close()
        pipeline_widget.deleteLater()
        del self.pipelines[self.ui.display.tabText(index)]

        # Remove the table that contains the pipeline
        self.ui.display.removeTab(index)

    def onCurrentTabChanged(self, index):
        """ Event to refresh the controller widget when a new tab is
        selected.
        """
        # If no valid tab index has been passed
        if index < 0:
            self.ui.actionRun.setEnabled(False)

        # A new valid tab is selected
        else:
            # Get the selected pipeline widget
            self.pipeline, pipeline_widget = self.pipelines[
                self.ui.display.tabText(index)]

            # Set the controller widget associated to the pipeline
            # controller
            self.ui.dockWidgetParameters.setWidget(pipeline_widget)

            # Refresh manually the run button status the first time
            self.onRunStatus()

    def onHelpClicked(self):
        """ Event to display the documentation of the active pipeline.
        """
        # Create a dialog box to display the html documentation
        win = QtGui.QDialog()
        win.setWindowTitle("Pipeline Help")

        # Build the pipeline documentation location
        # Possible since common tools generate the sphinx documentation
        if self.pipeline:

            # Generate the url to the active pipeline documentation
            path_to_active_pipeline_doc = os.path.join(
                self.path_to_pipeline_doc[self.pipeline.id], "generated",
                self.pipeline.id.split(".")[1], "pipeline",
                self.pipeline.id + ".html")

            # Create and fill a QWebView
            help = QtWebKit.QWebView()
            help.load(QtCore.QUrl(path_to_active_pipeline_doc))
            help.show()

            # Create and set a layout with the web view
            layout = QtGui.QHBoxLayout()
            layout.addWidget(help)
            win.setLayout(layout)

            # Display the window
            win.exec_()

        # No pipeline loaded, can't show the documentation message
        # Display a message box
        else:
            QtGui.QMessageBox.information(
                self.ui, "Information", "First load a pipeline!")

    def onChangeViewClicked(self):
        """ Event to switch between simple and full pipeline views.
        """
        # Check if a pipeline has been loaded
        if self._is_active_pipeline_valid():

            # Check the current display mode
            # Case PipelineDevelopperView
            if isinstance(self.ui.display.currentWidget(),
                          PipelineDevelopperView):

                # Switch to PipelineUserView display mode
                widget = PipelineUserView(self.pipeline)
                self._insert_widget_in_tab(widget)

            # Case PipelineUserView
            else:

                # Switch to PipelineDevelopperView display mode
                widget = PipelineDevelopperView(self.pipeline)
                self._insert_widget_in_tab(widget)

        # No pipeline loaded error
        else:
            logger.error("No active pipeline selected. "
                          "Have you forgotten to click the load pipeline "
                          "button?")

    #####################
    # Private interface #
    #####################

    def _insert_widget_in_tab(self, widget):
        """ Insert a new widget or replace an existing widget.

        Parameters
        ----------
        widget: a widget (mandatory)
            the widget we want to draw
        """
        # Search if the tab corresponding to the widget has already been created
        already_created = False
        index = 0

        # Go through all the tabs
        for index in range(self.ui.display.count()):

            # Check if we have a match: the tab name is equal to the current
            # pipeline name
            if (self.ui.display.tabText(index) == self.pipeline.name):
                already_created = True
                break

        # If no match found, add a new tab with the widget
        if not already_created:
            self.ui.display.addTab(
                widget, six.text_type(self.pipeline.name))
            self.ui.display.setCurrentIndex(
                self.ui.display.count() - 1)

        # Otherwise, replace the widget from the match tab
        else:
            # Delete the tab
            self.ui.display.removeTab(index)

            # Insert the new tab
            self.ui.display.insertTab(
                index, widget, six.text_type(self.pipeline.name))

            # Set the corresponding index
            self.ui.display.setCurrentIndex(index)



    def _is_active_pipeline_valid(self):
        """ Method to check that the active pipeline is valid.

        Returns
        -------
        is_valid: bool
            True if the active pipeline is valid
        """
        return self.pipeline is not None
Exemplo n.º 30
0
# System import
import datetime
import logging

logging.basicConfig(level=logging.INFO)

# CAPSUL import
from capsul.qt_gui.widgets import PipelineDevelopperView
from capsul.study_config.study_config import StudyConfig
from capsul.process.loader import get_process_instance

# CAPS import
from caps.toy_datasets import get_sample_data


# Configure the environment
start_time = datetime.datetime.now()
print("Start Configuration", start_time)
study_config = StudyConfig(
    modules=["SmartCachingConfig"],
    use_smart_caching=True,
    output_directory="/volatile/nsap/catalogue/quality_assurance/")
print("Done in {0} seconds".format(datetime.datetime.now() - start_time))


# Create pipeline
start_time = datetime.datetime.now()
print("Start Pipeline Creation", start_time)
pipeline = get_process_instance("mmqa.fmri.fmri_quality_assurance.xml")
print("Done in {0} seconds.".format(datetime.datetime.now() - start_time))


# Set pipeline input parameters
start_time = datetime.datetime.now()
print("Start Parametrization", start_time)
localizer_dataset = get_sample_data("localizer")
Exemplo n.º 31
0
    def __init__(self, pipeline_menu, ui_file, default_study_config=None):
        """ Method to initialize the Capsul main window class.

        Parameters
        ----------
        pipeline_menu: hierarchic dict
            each key is a sub-module of the module. Leaves contain a list
            with the url to the documentation.
        ui_file: str (mandatory)
            a filename containing the user interface description
        default_study_config: ordered dict (mandatory)
            some parameters for the study configuration
        """
        # Inheritance: load user interface window
        MyQUiLoader.__init__(self, ui_file)

        # Class parameters
        self.pipeline_menu = pipeline_menu
        self.pipelines = {}
        self.pipeline = None
        self.path_to_pipeline_doc = {}

        # Define dynamic controls
        self.controls = {
            QtGui.QAction: [
                "actionHelp", "actionQuit", "actionBrowse", "actionLoad",
                "actionChangeView", "actionParameters", "actionRun",
                "actionStudyConfig", "actionQualityControl"
            ],
            QtGui.QTabWidget: [
                "display",
            ],
            QtGui.QDockWidget: [
                "dockWidgetBrowse", "dockWidgetParameters",
                "dockWidgetStudyConfig", "dockWidgetBoard"
            ],
            QtGui.QWidget: [
                "dock_browse", "dock_parameters", "dock_study_config",
                "dock_board"
            ],
            QtGui.QTreeWidget: [
                "menu_treectrl",
            ],
            QtGui.QLineEdit: [
                "search",
            ],
        }

        # Add ui class parameter with the dynamic controls and initialize
        # default values
        self.add_controls_to_ui()
        self.ui.display.setTabsClosable(True)

        # Create the study configuration
        self.study_config = StudyConfig(default_study_config)

        # Create the controller widget associated to the study
        # configuration controller
        self.study_config_widget = ScrollControllerWidget(self.study_config,
                                                          live=True)
        self.ui.dockWidgetStudyConfig.setWidget(self.study_config_widget)

        # Create the pipeline menu
        fill_treectrl(self.ui.menu_treectrl, self.pipeline_menu)

        # Signal for window interface
        self.ui.actionHelp.triggered.connect(self.onHelpClicked)
        self.ui.actionChangeView.triggered.connect(self.onChangeViewClicked)

        # Signal for tab widget
        self.ui.display.currentChanged.connect(self.onCurrentTabChanged)
        self.ui.display.tabCloseRequested.connect(self.onCloseTabClicked)

        # Signal for dock widget
        self.ui.actionBrowse.triggered.connect(self.onBrowseClicked)
        self.ui.actionParameters.triggered.connect(self.onParametersClicked)
        self.ui.actionStudyConfig.triggered.connect(self.onStudyConfigClicked)
        self.ui.actionQualityControl.triggered.connect(
            self.onQualityControlClicked)

        # Initialize properly the visibility of each dock widget
        self.onBrowseClicked()
        self.onParametersClicked()
        self.onStudyConfigClicked()
        self.onQualityControlClicked()

        # Signal for the pipeline creation
        self.ui.search.textChanged.connect(self.onSearchClicked)
        self.ui.menu_treectrl.currentItemChanged.connect(
            self.onTreeSelectionChanged)
        self.ui.actionLoad.triggered.connect(self.onLoadClicked)

        # Signal for the execution
        self.ui.actionRun.triggered.connect(self.onRunClicked)
Exemplo n.º 32
0
class CapsulMainWindow(MyQUiLoader):
    """ Capsul main window.
    """
    def __init__(self, pipeline_menu, ui_file, default_study_config=None):
        """ Method to initialize the Capsul main window class.

        Parameters
        ----------
        pipeline_menu: hierarchic dict
            each key is a sub-module of the module. Leaves contain a list
            with the url to the documentation.
        ui_file: str (mandatory)
            a filename containing the user interface description
        default_study_config: ordered dict (mandatory)
            some parameters for the study configuration
        """
        # Inheritance: load user interface window
        MyQUiLoader.__init__(self, ui_file)

        # Class parameters
        self.pipeline_menu = pipeline_menu
        self.pipelines = {}
        self.pipeline = None
        self.path_to_pipeline_doc = {}

        # Define dynamic controls
        self.controls = {
            QtGui.QAction: [
                "actionHelp", "actionQuit", "actionBrowse", "actionLoad",
                "actionChangeView", "actionParameters", "actionRun",
                "actionStudyConfig", "actionQualityControl"
            ],
            QtGui.QTabWidget: [
                "display",
            ],
            QtGui.QDockWidget: [
                "dockWidgetBrowse", "dockWidgetParameters",
                "dockWidgetStudyConfig", "dockWidgetBoard"
            ],
            QtGui.QWidget: [
                "dock_browse", "dock_parameters", "dock_study_config",
                "dock_board"
            ],
            QtGui.QTreeWidget: [
                "menu_treectrl",
            ],
            QtGui.QLineEdit: [
                "search",
            ],
        }

        # Add ui class parameter with the dynamic controls and initialize
        # default values
        self.add_controls_to_ui()
        self.ui.display.setTabsClosable(True)

        # Create the study configuration
        self.study_config = StudyConfig(default_study_config)

        # Create the controller widget associated to the study
        # configuration controller
        self.study_config_widget = ScrollControllerWidget(self.study_config,
                                                          live=True)
        self.ui.dockWidgetStudyConfig.setWidget(self.study_config_widget)

        # Create the pipeline menu
        fill_treectrl(self.ui.menu_treectrl, self.pipeline_menu)

        # Signal for window interface
        self.ui.actionHelp.triggered.connect(self.onHelpClicked)
        self.ui.actionChangeView.triggered.connect(self.onChangeViewClicked)

        # Signal for tab widget
        self.ui.display.currentChanged.connect(self.onCurrentTabChanged)
        self.ui.display.tabCloseRequested.connect(self.onCloseTabClicked)

        # Signal for dock widget
        self.ui.actionBrowse.triggered.connect(self.onBrowseClicked)
        self.ui.actionParameters.triggered.connect(self.onParametersClicked)
        self.ui.actionStudyConfig.triggered.connect(self.onStudyConfigClicked)
        self.ui.actionQualityControl.triggered.connect(
            self.onQualityControlClicked)

        # Initialize properly the visibility of each dock widget
        self.onBrowseClicked()
        self.onParametersClicked()
        self.onStudyConfigClicked()
        self.onQualityControlClicked()

        # Signal for the pipeline creation
        self.ui.search.textChanged.connect(self.onSearchClicked)
        self.ui.menu_treectrl.currentItemChanged.connect(
            self.onTreeSelectionChanged)
        self.ui.actionLoad.triggered.connect(self.onLoadClicked)

        # Signal for the execution
        self.ui.actionRun.triggered.connect(self.onRunClicked)

        # Set default values

        # Set some tooltips

    def show(self):
        """ Shows the widget and its child widgets.
        """
        self.ui.show()

    def add_controls_to_ui(self):
        """ Method to find dynamic controls
        """
        # Error message template
        error_message = "{0} has no attribute '{1}'"

        # Go through the class dynamic controls
        for control_type, control_item in six.iteritems(self.controls):

            # Get the dynamic control name
            for control_name in control_item:

                # Try to set the control value to the ui class parameter
                try:
                    value = self.ui.findChild(control_type, control_name)
                    if value is None:
                        logger.error(
                            error_message.format(type(self.ui), control_name))
                    setattr(self.ui, control_name, value)
                except Exception:
                    logger.error(
                        error_message.format(type(self.ui), control_name))

    ###########################################################################
    # Slots
    ###########################################################################

    def onRunClicked(self):
        """ Event to execute the process/pipeline.
        """
        self.study_config.run(self.pipeline, executer_qc_nodes=True, verbose=1)

    def onBrowseClicked(self):
        """ Event to show / hide the browse dock widget.
        """
        # Show browse dock widget
        if self.ui.actionBrowse.isChecked():
            self.ui.dockWidgetBrowse.show()

        # Hide browse dock widget
        else:
            self.ui.dockWidgetBrowse.hide()

    def onParametersClicked(self):
        """ Event to show / hide the parameters dock widget.
        """
        # Show parameters dock widget
        if self.ui.actionParameters.isChecked():
            self.ui.dockWidgetParameters.show()

        # Hide parameters dock widget
        else:
            self.ui.dockWidgetParameters.hide()

    def onStudyConfigClicked(self):
        """ Event to show / hide the study config dock widget.
        """
        # Show study configuration dock widget
        if self.ui.actionStudyConfig.isChecked():
            self.ui.dockWidgetStudyConfig.show()

        # Hide study configuration dock widget
        else:
            self.ui.dockWidgetStudyConfig.hide()

    def onQualityControlClicked(self):
        """ Event to show / hide the board dock widget.
        """
        # Create and show board dock widget
        if self.ui.actionQualityControl.isChecked():

            # Create the board widget associated to the pipeline controller
            # Create on the fly in order to get the last status
            # ToDo: add callbacks
            if self.pipeline is not None:
                # board_widget = BoardWidget(
                #     self.pipeline, parent=self.ui.dockWidgetParameters,
                #     name="board")
                board_widget = ScrollControllerWidget(
                    self.pipeline,
                    name="outputs",
                    live=True,
                    hide_labels=False,
                    select_controls="outputs",
                    disable_controller_widget=True)
                #board_widget.setEnabled(False)
                self.ui.dockWidgetBoard.setWidget(board_widget)

            # Show the board widget
            self.ui.dockWidgetBoard.show()

        # Hide board dock widget
        else:
            self.ui.dockWidgetBoard.hide()

    def onSearchClicked(self):
        """ Event to refresh the menu tree control that contains the pipeline
        modules.
        """
        # Clear the current tree control
        self.ui.menu_treectrl.clear()

        # Build the new filtered tree control
        fill_treectrl(self.ui.menu_treectrl, self.pipeline_menu,
                      self.ui.search.text().lower())

    def onTreeSelectionChanged(self):
        """ Event to refresh the pipeline load button status.
        """
        # Get the current item
        item = self.ui.menu_treectrl.currentItem()
        if item is None:
            return

        # Check if we have selected a pipeline in the tree and enable / disable
        # the load button
        url = item.text(2)
        if url == "None":
            self.ui.actionLoad.setEnabled(False)
        else:
            self.ui.actionLoad.setEnabled(True)

    def onRunStatus(self):
        """ Event to refresh the run button status.

        When all the controller widget controls are correctly filled, enable
        the user to execute the pipeline.
        """
        # Get the controller widget
        controller_widget = self.ui.dockWidgetParameters.widget(
        ).controller_widget

        # Get the controller widget status
        is_valid = controller_widget.is_valid()

        # Depending on the controller widget status enable / disable
        # the run button
        self.ui.actionRun.setEnabled(is_valid)

    def onLoadClicked(self):
        """ Event to load and display a pipeline.
        """
        # Get the pipeline instance from its string description
        item = self.ui.menu_treectrl.currentItem()
        description_list = [
            str(x) for x in [item.text(1), item.text(0)] if x != ""
        ]
        process_description = ".".join(description_list)
        self.pipeline = get_process_instance(process_description)

        # Create the controller widget associated to the pipeline
        # controller
        pipeline_widget = ScrollControllerWidget(self.pipeline,
                                                 live=True,
                                                 select_controls="inputs")
        self.ui.dockWidgetParameters.setWidget(pipeline_widget)

        # Add observer to refresh the run button
        controller_widget = pipeline_widget.controller_widget
        for control_name, control \
                in six.iteritems(controller_widget._controls):

            # Unpack the control item
            trait, control_class, control_instance, control_label = control

            # Add the new callback
            control_class.add_callback(self.onRunStatus, control_instance)

        # Refresh manually the run button status the first time
        self.onRunStatus()

        # Store the pipeline documentation root path
        self.path_to_pipeline_doc[self.pipeline.id] = item.text(2)

        # Store the pipeline instance
        self.pipelines[self.pipeline.name] = (self.pipeline, pipeline_widget)

        # Create the widget
        widget = PipelineDevelopperView(self.pipeline)
        self._insert_widget_in_tab(widget)

        # Connect the subpipeline clicked signal to the
        # onLoadSubPipelineClicked slot
        widget.subpipeline_clicked.connect(self.onLoadSubPipelineClicked)

    def onLoadSubPipelineClicked(self, name, sub_pipeline, modifiers):
        """ Event to load and display a sub pipeline.
        """
        # Store the pipeline instance in class parameters
        self.pipeline = self.pipeline.nodes[name].process

        # Create the controller widget associated to the sub pipeline
        # controller: if the sub pipeline is a ProcessIteration, disable
        # the corresponding controller widget since this pipeline is generated
        # on the fly and is not directly synchronized with the rest of the
        # pipeline.
        is_iterative_pipeline = False
        if isinstance(self.pipeline, ProcessIteration):
            is_iterative_pipeline = True
        pipeline_widget = ScrollControllerWidget(
            self.pipeline,
            live=True,
            select_controls="inputs",
            disable_controller_widget=is_iterative_pipeline)
        self.ui.dockWidgetParameters.setWidget(pipeline_widget)

        # Store the sub pipeline instance
        self.pipelines[self.pipeline.name] = (self.pipeline, pipeline_widget)

        # Create the widget
        widget = PipelineDevelopperView(self.pipeline)
        self._insert_widget_in_tab(widget)

        # Connect the subpipeline clicked signal to the
        # onLoadSubPipelineClicked slot
        widget.subpipeline_clicked.connect(self.onLoadSubPipelineClicked)

    def onCloseTabClicked(self, index):
        """ Event to close a pipeline view.
        """
        # Remove the pipeline from the intern pipeline list
        pipeline, pipeline_widget = self.pipelines[self.ui.display.tabText(
            index)]
        pipeline_widget.close()
        pipeline_widget.deleteLater()
        del self.pipelines[self.ui.display.tabText(index)]

        # Remove the table that contains the pipeline
        self.ui.display.removeTab(index)

    def onCurrentTabChanged(self, index):
        """ Event to refresh the controller widget when a new tab is
        selected.
        """
        # If no valid tab index has been passed
        if index < 0:
            self.ui.actionRun.setEnabled(False)

        # A new valid tab is selected
        else:
            # Get the selected pipeline widget
            self.pipeline, pipeline_widget = self.pipelines[
                self.ui.display.tabText(index)]

            # Set the controller widget associated to the pipeline
            # controller
            self.ui.dockWidgetParameters.setWidget(pipeline_widget)

            # Refresh manually the run button status the first time
            self.onRunStatus()

    def onHelpClicked(self):
        """ Event to display the documentation of the active pipeline.
        """
        # Create a dialog box to display the html documentation
        win = QtGui.QDialog()
        win.setWindowTitle("Pipeline Help")

        # Build the pipeline documentation location
        # Possible since common tools generate the sphinx documentation
        if self.pipeline:

            # Generate the url to the active pipeline documentation
            path_to_active_pipeline_doc = os.path.join(
                self.path_to_pipeline_doc[self.pipeline.id], "generated",
                self.pipeline.id.split(".")[1], "pipeline",
                self.pipeline.id + ".html")

            # Create and fill a QWebView
            help = QtWebKit.QWebView()
            help.load(QtCore.QUrl(path_to_active_pipeline_doc))
            help.show()

            # Create and set a layout with the web view
            layout = QtGui.QHBoxLayout()
            layout.addWidget(help)
            win.setLayout(layout)

            # Display the window
            win.exec_()

        # No pipeline loaded, can't show the documentation message
        # Display a message box
        else:
            QtGui.QMessageBox.information(self.ui, "Information",
                                          "First load a pipeline!")

    def onChangeViewClicked(self):
        """ Event to switch between simple and full pipeline views.
        """
        # Check if a pipeline has been loaded
        if self._is_active_pipeline_valid():

            # Check the current display mode
            # Case PipelineDevelopperView
            if isinstance(self.ui.display.currentWidget(),
                          PipelineDevelopperView):

                # Switch to PipelineUserView display mode
                widget = PipelineUserView(self.pipeline)
                self._insert_widget_in_tab(widget)

            # Case PipelineUserView
            else:

                # Switch to PipelineDevelopperView display mode
                widget = PipelineDevelopperView(self.pipeline)
                self._insert_widget_in_tab(widget)

        # No pipeline loaded error
        else:
            logger.error("No active pipeline selected. "
                         "Have you forgotten to click the load pipeline "
                         "button?")

    #####################
    # Private interface #
    #####################

    def _insert_widget_in_tab(self, widget):
        """ Insert a new widget or replace an existing widget.

        Parameters
        ----------
        widget: a widget (mandatory)
            the widget we want to draw
        """
        # Search if the tab corresponding to the widget has already been created
        already_created = False
        index = 0

        # Go through all the tabs
        for index in range(self.ui.display.count()):

            # Check if we have a match: the tab name is equal to the current
            # pipeline name
            if (self.ui.display.tabText(index) == self.pipeline.name):
                already_created = True
                break

        # If no match found, add a new tab with the widget
        if not already_created:
            self.ui.display.addTab(widget, six.text_type(self.pipeline.name))
            self.ui.display.setCurrentIndex(self.ui.display.count() - 1)

        # Otherwise, replace the widget from the match tab
        else:
            # Delete the tab
            self.ui.display.removeTab(index)

            # Insert the new tab
            self.ui.display.insertTab(index, widget,
                                      six.text_type(self.pipeline.name))

            # Set the corresponding index
            self.ui.display.setCurrentIndex(index)

    def _is_active_pipeline_valid(self):
        """ Method to check that the active pipeline is valid.

        Returns
        -------
        is_valid: bool
            True if the active pipeline is valid
        """
        return self.pipeline is not None
Exemplo n.º 33
0
def pilot_gdti_estimation():
    """
    Generalized diffusion tensor estimation
    =======================================
    """
    # System import
    import os
    import sys
    import datetime
    import PySide.QtGui as QtGui

    # CAPSUL import
    from capsul.qt_gui.widgets import PipelineDevelopperView
    from capsul.study_config.study_config import StudyConfig
    from capsul.process.loader import get_process_instance

    """
    Study configuration
    -------------------

    We first define the working directory and guarantee this folder exists on
    the file system:
    """
    working_dir = "/volatile/nsap/clindmri/gdti"
    if not os.path.isdir(working_dir):
        os.makedirs(working_dir)

    """
    And then define the study configuration (here we activate the smart
    caching module, which remembers which processes have already been
    executed):
    """
    study_config = StudyConfig(
        modules=["SmartCachingConfig"],
        use_smart_caching=True,
        output_directory=working_dir)

    # Create pipeline
    start_time = datetime.datetime.now()
    print("Start Pipeline Creation", start_time)
    pipeline = get_process_instance("clindmri.estimation.gdti.xml")
    print("Done in {0} seconds.".format(
        datetime.datetime.now() - start_time))

    # View pipeline
    if 0:
        app = QtGui.QApplication(sys.argv)
        view1 = PipelineDevelopperView(pipeline)
        view1.show()
        app.exec_()
        del view1

    # Set pipeline input parameters
    pipeline.dfile = "/volatile/imagen/dmritest/000000022453/DTI/000000022453s011a1001.nii.gz"
    pipeline.bvalfile = "/volatile/imagen/dmritest/000000022453/DTI/000000022453s011a1001.bval"
    pipeline.bvecfile = "/volatile/imagen/dmritest/000000022453/DTI/000000022453s011a1001.bvec"
    pipeline.order = 2
    pipeline.odf = False
    print("Done in {0} seconds.".format(
        datetime.datetime.now() - start_time))

    # Execute the pipeline in the configured study
    study_config.run(pipeline, verbose=1)
Exemplo n.º 34
0
    def __init__(self, pipeline_menu, ui_file, default_study_config=None):
        """ Method to initialize the Capsul main window class.

        Parameters
        ----------
        pipeline_menu: hierarchic dict
            each key is a sub-module of the module. Leaves contain a list
            with the url to the documentation.
        ui_file: str (mandatory)
            a filename containing the user interface description
        default_study_config: ordered dict (mandatory)
            some parameters for the study configuration
        """
        # Inheritance: load user interface window
        MyQUiLoader.__init__(self, ui_file)

        # Class parameters
        self.pipeline_menu = pipeline_menu
        self.pipelines = {}
        self.pipeline = None
        self.path_to_pipeline_doc = {}

        # Define dynamic controls
        self.controls = {
            QtGui.QAction: ["actionHelp", "actionQuit", "actionBrowse",
                            "actionLoad", "actionChangeView",
                            "actionParameters", "actionRun",
                            "actionStudyConfig", "actionQualityControl"],
            QtGui.QTabWidget: ["display", ],
            QtGui.QDockWidget: ["dockWidgetBrowse", "dockWidgetParameters",
                                "dockWidgetStudyConfig", "dockWidgetBoard"],
            QtGui.QWidget: ["dock_browse", "dock_parameters",
                            "dock_study_config", "dock_board"],
            QtGui.QTreeWidget: ["menu_treectrl", ],
            QtGui.QLineEdit: ["search", ],
        }

        # Add ui class parameter with the dynamic controls and initialize
        # default values
        self.add_controls_to_ui()
        self.ui.display.setTabsClosable(True)

        # Create the study configuration
        self.study_config = StudyConfig(default_study_config)

        # Create the controller widget associated to the study
        # configuration controller
        self.study_config_widget = ScrollControllerWidget(
            self.study_config, live=True)
        self.ui.dockWidgetStudyConfig.setWidget(self.study_config_widget)

        # Create the pipeline menu
        fill_treectrl(self.ui.menu_treectrl, self.pipeline_menu)

        # Signal for window interface
        self.ui.actionHelp.triggered.connect(self.onHelpClicked)
        self.ui.actionChangeView.triggered.connect(self.onChangeViewClicked)

        # Signal for tab widget
        self.ui.display.currentChanged.connect(self.onCurrentTabChanged)
        self.ui.display.tabCloseRequested.connect(self.onCloseTabClicked)

        # Signal for dock widget
        self.ui.actionBrowse.triggered.connect(self.onBrowseClicked)
        self.ui.actionParameters.triggered.connect(self.onParametersClicked)
        self.ui.actionStudyConfig.triggered.connect(self.onStudyConfigClicked)
        self.ui.actionQualityControl.triggered.connect(self.onQualityControlClicked)

        # Initialize properly the visibility of each dock widget
        self.onBrowseClicked()
        self.onParametersClicked()
        self.onStudyConfigClicked()
        self.onQualityControlClicked()

        # Signal for the pipeline creation
        self.ui.search.textChanged.connect(self.onSearchClicked)
        self.ui.menu_treectrl.currentItemChanged.connect(
            self.onTreeSelectionChanged)
        self.ui.actionLoad.triggered.connect(self.onLoadClicked)

        # Signal for the execution
        self.ui.actionRun.triggered.connect(self.onRunClicked)
Exemplo n.º 35
0
def pilot_dcm2nii():
    """
    Imports
    -------

    This code needs the 'capsul' and 'mmutils' packages in order to
    instantiate and execute the pipeline and to get a toy dataset.
    These packages are available in the 'neurospin' source list or on pypi.
    """
    import os
    import sys
    import shutil
    import tempfile
    from capsul.study_config.study_config import StudyConfig
    from capsul.process.loader import get_process_instance
    from mmutils.toy_datasets import get_sample_data

    """
    Parameters
    ----------

    The 'pipeline_name' parameter contains the location of the pipeline XML
    description that will perform the DICOM conversion, and 'outdir' the
    location of the pipeline's results: in this case a temporary directory.
    """
    pipeline_name = "dcmio.dcmconverter.dcm_to_nii.xml"
    outdir = tempfile.mkdtemp()

    """
    Capsul configuration
    --------------------

    A 'StudyConfig' has to be instantiated in order to execute the pipeline
    properly. It enables us to define the results directory through the
    'output_directory' attribute, the number of CPUs to be used through the
    'number_of_cpus' attribute, and to request a log of the processing steps
    through the 'generate_logging' attribute. The 'use_scheduler' attribute
    must be set to True if more than 1 CPU is used.
    """
    study_config = StudyConfig(
        modules=[],
        output_directory=outdir,
        number_of_cpus=1,
        generate_logging=True,
        use_scheduler=True)

    """
    Get the toy dataset
    -------------------

    The toy dataset is composed of a 3D heart DICOM image that is downloaded
    if necessary through the 'get_sample_data' function and exported locally
    in a 'heart.dcm' file.
    """
    dicom_dataset = get_sample_data("dicom")
    dcmfolder = os.path.join(outdir, "dicom")
    if not os.path.isdir(dcmfolder):
        os.makedirs(dcmfolder)
    shutil.copy(dicom_dataset.barre, os.path.join(dcmfolder, "heart.dcm"))

    """
    Pipeline definition
    -------------------

    The pipeline XML description is first imported through the
    'get_process_instance' method, and the resulting pipeline instance is
    parametrized: in this example we decided to set the date in the converted
    file name and we set two DICOM directories to be converted in NIfTI
    format.
    """
    pipeline = get_process_instance(pipeline_name)
    pipeline.date_in_filename = True
    pipeline.dicom_directories = [dcmfolder, dcmfolder]
    pipeline.additional_informations = [[("Provided by", "Neurospin@2015")],
                                        [("Provided by", "Neurospin@2015"),
                                         ("TR", "1500")]]

    pipeline.dcm_tags = [("TR", [("0x0018", "0x0080")]),
                         ("TE", [("0x0018", "0x0081")])]

    """
    Pipeline representation
    -----------------------

    By executing this block of code, a pipeline representation can be
    displayed. This representation is composed of boxes connected to each
    other.
    """
    if 0:
        from capsul.qt_gui.widgets import PipelineDevelopperView
        from PySide import QtGui
        app = QtGui.QApplication(sys.argv)
        view1 = PipelineDevelopperView(pipeline)
        view1.show()
        app.exec_()

    """
    Pipeline execution
    ------------------

    Finally the pipeline is executed in the defined 'study_config'.
    """
    study_config.run(pipeline)

    """
    Access the result
    -----------------

    The 'nibabel' package is used to load the generated images. We display the
    numpy array shape and the stored repetition and echo times: in order
    to load the 'descrip' image field we use the 'json' package.
    """
    import json
    import copy
    import nibabel

    generated_images = pipeline.filled_converted_files

    for fnames in generated_images:
        print(">>>", fnames, "...")
        im = nibabel.load(fnames[0])
        print("shape=", im.get_data().shape)
        header = im.get_header()
        a = str(header["descrip"])
        a = a.strip()
        description = json.loads(copy.deepcopy(a))
        print("TE=", description["TE"])
        print("TR=", description["TR"])
        print("Provided by=", description["Provided by"])
Exemplo n.º 36
0
def pilot_dcm2nii():
    """
    Imports
    -------

    This code needs the 'capsul' and 'mmutils' packages in order to
    instantiate and execute the pipeline and to get a toy dataset.
    These packages are available in the 'neurospin' source list or on pypi.
    """
    import os
    import sys
    import shutil
    import tempfile
    from capsul.study_config.study_config import StudyConfig
    from capsul.process.loader import get_process_instance
    from mmutils.toy_datasets import get_sample_data
    """
    Parameters
    ----------

    The 'pipeline_name' parameter contains the location of the pipeline XML
    description that will perform the DICOM conversion, and 'outdir' the
    location of the pipeline's results: in this case a temporary directory.
    """
    pipeline_name = "dcmio.dcmconverter.dcm_to_nii.xml"
    outdir = tempfile.mkdtemp()
    """
    Capsul configuration
    --------------------

    A 'StudyConfig' has to be instantiated in order to execute the pipeline
    properly. It enables us to define the results directory through the
    'output_directory' attribute, the number of CPUs to be used through the
    'number_of_cpus' attribute, and to request a log of the processing steps
    through the 'generate_logging' attribute. The 'use_scheduler' attribute
    must be set to True if more than 1 CPU is used.
    """
    study_config = StudyConfig(modules=[],
                               output_directory=outdir,
                               number_of_cpus=1,
                               generate_logging=True,
                               use_scheduler=True)
    """
    Get the toy dataset
    -------------------

    The toy dataset is composed of a 3D heart DICOM image that is downloaded
    if necessary through the 'get_sample_data' function and exported locally
    in a 'heart.dcm' file.
    """
    dicom_dataset = get_sample_data("dicom")
    dcmfolder = os.path.join(outdir, "dicom")
    if not os.path.isdir(dcmfolder):
        os.makedirs(dcmfolder)
    shutil.copy(dicom_dataset.barre, os.path.join(dcmfolder, "heart.dcm"))
    """
    Pipeline definition
    -------------------

    The pipeline XML description is first imported through the
    'get_process_instance' method, and the resulting pipeline instance is
    parametrized: in this example we decided to set the date in the converted
    file name and we set two DICOM directories to be converted in NIfTI
    format.
    """
    pipeline = get_process_instance(pipeline_name)
    pipeline.date_in_filename = True
    pipeline.dicom_directories = [dcmfolder, dcmfolder]
    pipeline.additional_informations = [[("Provided by", "Neurospin@2015")],
                                        [("Provided by", "Neurospin@2015"),
                                         ("TR", "1500")]]

    pipeline.dcm_tags = [("TR", [("0x0018", "0x0080")]),
                         ("TE", [("0x0018", "0x0081")])]
    """
    Pipeline representation
    -----------------------

    By executing this block of code, a pipeline representation can be
    displayed. This representation is composed of boxes connected to each
    other.
    """
    if 0:
        from capsul.qt_gui.widgets import PipelineDevelopperView
        from PySide import QtGui
        app = QtGui.QApplication(sys.argv)
        view1 = PipelineDevelopperView(pipeline)
        view1.show()
        app.exec_()
    """
    Pipeline execution
    ------------------

    Finally the pipeline is executed in the defined 'study_config'.
    """
    study_config.run(pipeline)
    """
    Access the result
    -----------------

    The 'nibabel' package is used to load the generated images. We display the
    numpy array shape and the stored repetition and echo times: in order
    to load the 'descrip' image field we use the 'json' package.
    """
    import json
    import copy
    import nibabel

    generated_images = pipeline.filled_converted_files

    for fnames in generated_images:
        print(">>>", fnames, "...")
        im = nibabel.load(fnames[0])
        print("shape=", im.get_data().shape)
        header = im.get_header()
        a = str(header["descrip"])
        a = a.strip()
        description = json.loads(copy.deepcopy(a))
        print("TE=", description["TE"])
        print("TR=", description["TR"])
        print("Provided by=", description["Provided by"])
Exemplo n.º 37
0
class CapsulEngine(Controller):
    '''
    A CapsulEngine is the mandatory entry point of all software using Capsul.
    It contains objects to store configuration and metadata, defines execution
    environment(s) (possibly remote) and performs pipelines execution.

    A CapsulEngine must be created using the capsul.engine.capsul_engine
    function. For instance::

        from capsul.engine import capsul_engine
        ce = capsul_engine()

    Or::

        from capsul.api import capsul_engine
        ce = capsul_engine()

    By default, CapsulEngine only stores necessary configuration. But it may be
    necessary to modify the Python environment globally to apply this
    configuration. For instance, Nipype must be configured globally. If SPM is
    configured in CapsulEngine, it is necessary to explicitly activate the
    configuration in order to modify the global configuration of Nipype for
    SPM. This activation is done by explicitly activating the execution
    context of the capsul engine with the following code, inside a running
    process::

        from capsul.engine import capsul_engine, activate_configuration
        ce = capsul_engine()
        # Nipype is not configured here
        config = ce.settings.select_configurations(
            'global', {'nipype': 'any'})
        activate_configuration(config)
        # Nipype is configured here

    .. note::

        CapsulEngine is the replacement of the older
        :class:`~capsul.study_config.study_config.StudyConfig`, which is still
        present in Capsul 2.2 for backward compatibility, but will disappear in
        later versions. In Capsul 2.2 both objects exist, and are synchronized
        internally, which means that a StudyConfig object will also create a
        CapsulEngine, and the other way, and modifications in the StudyConfig
        object will change the corresponding item in CapsulEngine and vice
        versa. Functionalities of StudyConfig are moving internally to
        CapsulEngine, StudyConfig being merely a wrapper.

    **Using CapsulEngine**

    It is used to store configuration variables, and to handle execution within
    the configured context. The configuration has 2 independent axes:
    configuration modules, which provide additional configuration variables,
    and "environments" which typically represent computing resources.

    *Computing resources*

    Capsul is using :somaworkflow:`Soma-Workflow <index.html>` to run
    processes, and is thus able to connect and execute on a remote computing
    server. The remote computing resource may have a different configuration
    from the client one (paths for software or data, available external
    software etc). So configurations specific to different computing resources
    should be handled in CapsulEngine. For this, the configuration section is
    split into several configuration entries, one for each computing resource.

    As this is a little bit complex to handle at first, a "global"
    configuration (what we call "environment") is used to maintain all common
    configuration options. It is typically used to work on the local machine,
    especially for users who only work locally.

    Configuration is stored in a database (either internal or persistent),
    through the :class:`~capsul.engine.settings.Settings` object found in
    ``CapsulEngine.settings``.
    Access and modification of settings should occur within a session block
    using ``with capsul_engine.settings as session``. See the
    :class:`~capsul.engine.settings.Settings` class for details.
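
    For instance, adding a configuration entry within a session might look
    like this (a sketch: the module name and field values are illustrative
    and depend on the module)::

        with ce.settings as session:
            session.new_config('spm', 'global',
                               {'directory': '/usr/local/spm12',
                                'version': '12'})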

    ::

        >>> from capsul.api import capsul_engine
        >>> ce = capsul_engine()
        >>> config = ce.settings.select_configurations('global')
        >>> print(config)
        {'capsul_engine': {'uses': {'capsul.engine.module.fsl': 'ALL',
          'capsul.engine.module.matlab': 'ALL',
          'capsul.engine.module.spm': 'ALL'}}}

    Whenever a new computing resource is used, it can be added as a new
    environment key to all configuration operations.

    Note that the settings store all possible configurations for all
    environments (or computing resources), but these are not "activated":
    activation only happens at runtime, in specific process execution
    functions, since each process may need to select a different
    configuration from the other ones and activate it individually.

    :class:`~capsul.process.process.Process` subclasses or instances may
    provide their configuration requirements via their
    :meth:`~capsul.process.process.Process.requirements` method. This method
    returns a dictionary of request strings (one element per needed module)
    that will be used to select one configuration amongst the available
    settings entries of each required module.
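
    For instance, a process needing FSL might declare (a sketch)::

        class MyProcess(Process):
            def requirements(self):
                # request any available FSL configuration
                return {'fsl': 'any'}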

    *Configuration modules*

    The configuration is handled through a set of configuration modules. Each
    is dedicated to one topic (for instance handling the paths of a specific
    external software, or managing process parameters completion, etc.). A
    module adds a settings table in the database, with its own variables, and
    is able to manage the runtime configuration of programs, if needed,
    through its ``activate_configurations`` function. Capsul comes with a set
    of predefined modules:
    :class:`~capsul.engine.module.attributes`,
    :class:`~capsul.engine.module.axon`,
    :class:`~capsul.engine.module.fom`,
    :class:`~capsul.engine.module.fsl`,
    :class:`~capsul.engine.module.matlab`,
    :class:`~capsul.engine.module.spm`

    **Methods**
    '''
    def __init__(self, database_location, database, require):
        '''
        CapsulEngine.__init__(self, database_location, database, require)

        The CapsulEngine constructor should not be called directly.
        Use :func:`capsul_engine` factory function instead.
        '''
        super(CapsulEngine, self).__init__()

        self._settings = None

        self._database_location = database_location
        self._database = database

        self._loaded_modules = set()
        self.load_modules(require)

        from capsul.study_config.study_config import StudyConfig
        self.study_config = StudyConfig(engine=self)

        self._metadata_engine = from_json(
            database.json_value('metadata_engine'))

        self._connected_resource = ''

    @property
    def settings(self):
        if self._settings is None:
            self._settings = Settings(self.database.db)
        return self._settings

    @property
    def database(self):
        return self._database

    @property
    def database_location(self):
        return self._database_location

    @property
    def metadata_engine(self):
        return self._metadata_engine

    @metadata_engine.setter
    def metadata_engine(self, metadata_engine):
        self._metadata_engine = metadata_engine
        self.database.set_json_value('metadata_engine',
                                     to_json(self._metadata_engine))

    def load_modules(self, require):
        '''
        Call self.load_module for each required module. The list of modules
        to load is given by the ``require`` parameter (if it is None,
        ``default_modules`` is used).
        '''
        if require is None:
            require = default_modules

        for module in require:
            self.load_module(module)

    def load_module(self, module_name):
        '''
        Load a module if it has not already been loaded (in that case,
        nothing is done).

        A module is a fully qualified name of a Python module (as accepted
        by the Python import statement). Such a module must define the two
        following functions (and may optionally define others):

        def load_module(capsul_engine, module_name):
        def set_environ(config, environ):

        load_module of each module is called once before reading and applying
        the configuration. It can be used to add traits to the CapsulEngine
        in order to define the configuration options that are used by the
        module. Values of these traits are automatically stored in the
        configuration database when self.save() is used, and they are
        retrieved from the database before initializing modules. If the
        module defines an ``init_settings(capsul_engine)`` function, it is
        called once when the module is loaded.

        set_environ is called in the context of the processing (i.e. on the,
        possibly remote, machine that runs the pipelines). It receives the
        configuration as a JSON-compatible dictionary (for instance a
        CapsulEngine attribute ``capsul_engine.spm.directory`` would be
        ``config['spm']['directory']``). The function must modify the
        ``environ`` dictionary to set the environment variables that must be
        defined for pipeline configuration. These variables are typically
        used by modules in capsul.in_context to run external software with
        the appropriate configuration.
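
        A minimal module skeleton might look as follows (a sketch only;
        the tool name and variables are illustrative)::

            def load_module(capsul_engine, module_name):
                # declare configuration traits for the module here
                pass

            def set_environ(config, environ):
                # export the environment variables the tool needs
                environ['MY_TOOL_DIR'] = config['my_tool']['directory']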
        '''
        module_name = self.settings.module_name(module_name)
        if module_name not in self._loaded_modules:
            self._loaded_modules.add(module_name)
            python_module = importlib.import_module(module_name)
            init_settings = getattr(python_module, 'init_settings', None)
            if init_settings is not None:
                init_settings(self)
            return True
        return False

    #
    # Method imported from self.database
    #

    # TODO: take computing resource in account in the following methods

    def set_named_directory(self, name, path):
        return self.database.set_named_directory(name, path)

    def named_directory(self, name):
        return self.database.named_directory(name)

    def named_directories(self):
        return self.database.named_directories()

    def set_json_value(self, name, json_value):
        return self.database.set_json_value(name, json_value)

    def json_value(self, name):
        return self.database.json_value(name)

    def set_path_metadata(self, path, metadata, named_directory=None):
        return self.database.set_path_metadata(path, metadata, named_directory)

    def path_metadata(self, path, named_directory=None):
        return self.database.path_metadata(path, named_directory)

    def import_configs(self, environment, config_dict):
        '''
        Import config values from a dictionary as given by
        :meth:`Settings.select_configurations`.

        Compared to :meth:`Settings.import_configs` this method (at
        :class:`CapsulEngine` level) also loads the required modules.
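
        For instance (a sketch; ``other_ce`` is a second, hypothetical
        engine)::

            config = ce.settings.select_configurations('global')
            other_ce.import_configs('global', config)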
        '''
        modules = config_dict.get('capsul_engine', {}).get('uses', {})
        for module in modules:
            self.load_module(module)
        self.settings.import_configs(environment, config_dict)

    #
    # Processes and pipelines related methods
    #
    def get_process_instance(self, process_or_id, **kwargs):
        '''
        The only official way to get a process instance is to use this method.
        For now, it simply calls self.study_config.get_process_instance
        but it will change in the future.
        '''
        instance = self.study_config.get_process_instance(
            process_or_id, **kwargs)
        return instance

    def get_iteration_pipeline(self,
                               pipeline_name,
                               node_name,
                               process_or_id,
                               iterative_plugs=None,
                               do_not_export=None,
                               make_optional=None,
                               **kwargs):
        """ Create a pipeline with an iteration node iterating the given
        process.

        Parameters
        ----------
        pipeline_name: str
            pipeline name
        node_name: str
            iteration node name in the pipeline
        process_or_id: process description
            as in :meth:`get_process_instance`
        iterative_plugs: list (optional)
            passed to :meth:`Pipeline.add_iterative_process`
        do_not_export: list
            passed to :meth:`Pipeline.add_iterative_process`
        make_optional: list
            passed to :meth:`Pipeline.add_iterative_process`

        Returns
        -------
        pipeline: :class:`Pipeline` instance
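
        Example (a sketch; ``mymodule.MyProcess`` is a hypothetical process
        identifier)::

            pipeline = ce.get_iteration_pipeline(
                'iteration_pipeline', 'iter_node', 'mymodule.MyProcess',
                iterative_plugs=['input_image'])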
        """
        from capsul.pipeline.pipeline import Pipeline

        pipeline = Pipeline()
        pipeline.name = pipeline_name
        pipeline.set_study_config(get_ref(self.study_config))
        pipeline.add_iterative_process(node_name, process_or_id,
                                       iterative_plugs, do_not_export,
                                       make_optional, **kwargs)
        pipeline.autoexport_nodes_parameters(include_optional=True)
        return pipeline

    def start(self,
              process,
              workflow=None,
              history=True,
              get_pipeline=False,
              **kwargs):
        '''
        Asynchronously start the execution of a process or pipeline in the
        connected computing environment. Returns an identifier of
        the process execution and can be used to get the status of the
        execution or wait for its termination.

        TODO:
        if history is True, an entry of the process execution is stored in
        the database. The content of this entry is to be defined but it will
        contain the process parameters (to restart the process) and will be
        updated on process termination (for instance to store execution time
        if possible).

        Parameters
        ----------
        process: Process or Pipeline instance
        workflow: Workflow instance (optional - if already defined before call)
        history: bool (optional)
            TODO: not implemented yet.
        get_pipeline: bool (optional)
            if True, start() will return a tuple (execution_id, pipeline). The
            pipeline is normally the input pipeline (process) if it is actually
            a pipeline. But if the input process is a "single process", it will
            be inserted into a small pipeline for execution. This pipeline will
            be the one actually run, and may be passed to :meth:`wait` to set
            output parameters.

        Returns
        -------
        execution_id: int
            execution identifier (actually a soma-workflow id)
        pipeline: Pipeline instance (optional)
            only returned if get_pipeline is True.
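
        A typical usage sketch, using only methods of this class::

            execution_id = ce.start(process)
            ce.wait(execution_id)
            status = ce.status(execution_id)
            ce.raise_for_status(status, execution_id)
            ce.dispose(execution_id)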
        '''
        return run.start(self, process, workflow, history, get_pipeline,
                         **kwargs)

    def connect(self, computing_resource):
        '''
        Connect the capsul engine to a computing resource
        '''
        self._connected_resource = computing_resource

    def connected_to(self):
        '''
        Return the name of the computing resource this capsul engine is
        connected to or None if it is not connected.
        '''
        return self._connected_resource

    def disconnect(self):
        '''
        Disconnect from a computing resource.
        '''
        self._connected_resource = None

    def executions(self):
        '''
        List the execution identifiers of all processes that have been started
        but not disposed in the connected computing resource. Raises an
        exception if the computing resource is not connected.
        '''
        raise NotImplementedError()

    def dispose(self, execution_id, conditional=False):
        '''
        Update the database with the current state of a process execution and
        free the resources used in the computing resource (i.e. remove the 
        workflow from SomaWorkflow).

        If ``conditional`` is set to True, then dispose is only done if the
        configuration does not specify to keep succeeded / failed workflows.
        '''
        run.dispose(self, execution_id, conditional=conditional)

    def interrupt(self, execution_id):
        '''
        Try to stop the execution of a process. Does not wait for the process
        to be terminated.
        '''
        return run.interrupt(self, execution_id)

    def wait(self, execution_id, timeout=-1, pipeline=None):
        '''
        Wait for the end of a process execution (either normal termination,
        interruption or error).
        '''
        return run.wait(self, execution_id, timeout=timeout, pipeline=pipeline)

    def status(self, execution_id):
        '''
        Return a simple value with the status of an execution (queued, 
        running, terminated, error, etc.)
        '''
        return run.status(self, execution_id)

    def detailed_information(self, execution_id):
        '''
        Return complete (and possibly big) information about a process
        execution.
        '''
        return run.detailed_information(self, execution_id)

    def call(self, process, history=True, **kwargs):
        return run.call(self, process, history=history, **kwargs)

    def check_call(self, process, history=True, **kwargs):
        return run.check_call(self, process, history=history, **kwargs)

    def raise_for_status(self, status, execution_id=None):
        '''
        Raise an exception if a process execution failed
        '''
        run.raise_for_status(self, status, execution_id)
Exemplo n.º 38
0
class CapsulEngine(Controller):
    default_modules = ['capsul.engine.module.spm',
                       'capsul.engine.module.fsl']
        
    def __init__(self, 
                 database_location,
                 database,
                 config=None):
        '''
        The CapsulEngine constructor should not be called directly.
        Use the engine() factory function instead.
        '''
        super(CapsulEngine, self).__init__()
        
        self._database_location = database_location
        self._database = database

        self.study_config = StudyConfig()
        
        db_config = database.json_value('config')
        self.modules = database.json_value('modules')
        if self.modules is None:
            self.modules = self.default_modules
        self.load_modules()
        
        execution_context = from_json(database.json_value('execution_context'))
        if execution_context is None:
            execution_context = ExecutionContext()
        self._execution_context = execution_context
            
        self._processing_engine = from_json(database.json_value('processing_engine'))        
        self._metadata_engine = from_json(database.json_value('metadata_engine'))
        
        for cfg in (db_config, config):
            if cfg:
                for n, v in cfg.items():
                    if isinstance(v, dict):
                        o = getattr(self, n)
                        if isinstance(o, Controller):
                            o.import_from_dict(v)
                            continue
                    setattr(self, n, v)

        self.init_modules()

    @property
    def database(self):
        return self._database

    @property
    def database_location(self):
        return self._database_location
    
    @property
    def execution_context(self):
        return self._execution_context

    @execution_context.setter
    def execution_context(self, execution_context):
        self._execution_context = execution_context
    
    @property
    def processing_engine(self):
        return self._processing_engine
    
    
    @property
    def metadata_engine(self):
        return self._metadata_engine
    
    @metadata_engine.setter
    def metadata_engine(self, metadata_engine):
        self._metadata_engine = metadata_engine
        self.database.set_json_value('metadata_engine', 
                                     to_json(self._metadata_engine))
    
    def load_modules(self):
        if self.modules is None:
            modules = self.default_modules
        else:
            modules = self.modules
        
        self._loaded_modules = {}
        for module in modules:
            self.load_module(module)
            
    def load_module(self, module):
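        # Modules follow a two-phase protocol: load_module() is called when
        # the engine is created, then init_module() is called after the saved
        # configuration has been applied (see init_modules below).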
        if module not in self._loaded_modules:
            __import__(module)
            python_module = sys.modules.get(module)
            if python_module is None:
                raise ValueError('Cannot find %s in Python modules' % module)
            loader = getattr(python_module, 'load_module', None)
            if loader is None:
                raise ValueError('No function load_module() defined in %s' % module)
            self._loaded_modules[module] = loader(self, module)
            return True
        return False
    
    def init_modules(self):
        if self.modules is None:
            modules = self.default_modules
        else:
            modules = self.modules
        for module in modules:
            self.init_module(module)
    
    def init_module(self, module):
        python_module = sys.modules.get(module)
        if python_module is None:
            raise ValueError('Cannot find %s in Python modules' % module)
        initializer = getattr(python_module, 'init_module', None)
        if initializer is None:
            raise ValueError('No function init_module() defined in %s' % module)
        initializer(self, module, self._loaded_modules[module])
    
    def save(self):
        self.database.set_json_value('execution_context', 
                                     to_json(self._execution_context))
        if self._processing_engine:
            self.database.set_json_value('processing_engine', 
                                        to_json(self._processing_engine))
        if self._metadata_engine:
            self.database.set_json_value('metadata_engine', 
                                        to_json(self._metadata_engine))
        config = {}
        for n in self.user_traits().keys():
            v = getattr(self, n)
            if v is Undefined:
                continue
            if isinstance(v, Controller):
                v = v.export_to_dict(exclude_undefined=True)
                if not v:
                    continue
            config[n] = v
        self.database.set_json_value('config', config)
        self.database.commit()
    
    
    #
    # Method imported from self.database
    #
    def set_named_directory(self, name, path):
        return self.database.set_named_directory(name, path)
    
    def named_directory(self, name):
        return self.database.named_directory(name)
    
    def named_directories(self):
        return self.database.named_directories()
    
    
    def set_json_value(self, name, json_value):
        return self.database.set_json_value(name, json_value)

    def json_value(self, name):
        return self.database.json_value(name)
        
    
    def set_path_metadata(self, path, metadata, named_directory=None):
        return self.database.set_path_metadata(path, metadata, named_directory)
    
    def path_metadata(self, path, named_directory=None):
        return self.database.path_metadata(path, named_directory)



    def get_process_instance(self, process_or_id, **kwargs):
        '''
        The supported way to get a process instance is to use this method.
        For now, it simply calls self.study_config.get_process_instance
        but it will change in the future.
        '''
        instance = self.study_config.get_process_instance(process_or_id,
                                                          **kwargs)
        return instance