Example #1
    def setUp(self):
        """ In the setup construct the pipeline and set some input parameters.
        """
        self.directory = tempfile.mkdtemp(prefix="capsul_test")

        self.study_config = StudyConfig()

        # Construct the pipeline
        self.pipeline = self.study_config.get_process_instance(MyPipeline)

        # Set some input parameters
        self.pipeline.input_image = [
            os.path.join(self.directory, "toto"),
            os.path.join(self.directory, "tutu")
        ]
        self.pipeline.dynamic_parameter = [3, 1]
        self.pipeline.other_input = 5

        # build a pipeline with dependencies
        self.small_pipeline \
            = self.study_config.get_process_instance(MySmallPipeline)
        self.small_pipeline.files_to_create = [
            os.path.join(self.directory, "toto"),
            os.path.join(self.directory, "tutu")
        ]
        self.small_pipeline.dynamic_parameter = [3, 1]
        self.small_pipeline.other_input = 5

        # build a bigger pipeline with several levels
        self.big_pipeline \
            = self.study_config.get_process_instance(MyBigPipeline)
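The setUp above allocates a temporary directory with tempfile.mkdtemp() but the matching cleanup is not shown. A minimal tearDown sketch, assuming the test only needs to delete that directory (shutil is an extra import, not present in the excerpt):

    def tearDown(self):
        """ Remove the temporary directory created in setUp.
        """
        import shutil
        # ignore_errors avoids a failure if a test already removed the
        # directory or part of its contents
        shutil.rmtree(self.directory, ignore_errors=True)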
Example #2
    def display_parameters(self, item):
        """ This method was used to display the parameters of a process.
        It will be useful to generate processes in the near future.
        """

        study_config = StudyConfig(modules=StudyConfig.default_modules +
                                   ['NipypeConfig'])

        process_name = item.text(0)
        list_path = []
        while item is not self.process_library.topLevelItem(0):
            item = item.parent()
            list_path.append(item.text(0))

        list_path = list(reversed(list_path))
        package_name = '.'.join(list_path)

        __import__(package_name)
        pkg = sys.modules[package_name]

        for k, v in sorted(list(pkg.__dict__.items())):
            if k == process_name:
                try:
                    process = get_process_instance(v)
                except Exception:
                    print('could not instantiate process %s' % process_name)
                else:
                    print(process.get_inputs())
                txt = "Inputs: \n" + str(v.input_spec())
                txt2 = "\nOutputs: \n" + str(v.output_spec())
                self.label_test.setText(txt + txt2)
Example #3
 def setUp(self):
     default_config = SortedDictionary(
         ("use_soma_workflow", True)
     )
     self.study_config = StudyConfig(init_config=default_config)
     self.atomic_pipeline = MyAtomicPipeline()
     self.composite_pipeline = MyCompositePipeline()
Example #4
 def setUp(self):
     """ Initialize the TestQCNodes class
     """
     self.pipeline = MyPipeline()
     self.pipeline.input = 'dummy_input'
     self.pipeline.output = 'dummy_output'
     self.output_directory = tempfile.mkdtemp()
     self.study_config = StudyConfig(output_directory=self.output_directory)
Example #5
 def test_loo_xml_io(self):
     sc = StudyConfig()
     pipeline = sc.get_process_instance(PipelineLOO)
     xml_file = tempfile.mkstemp(suffix='_capsul.xml')
     xmlfname = xml_file[1]
     os.close(xml_file[0])
     self.temp_files.append(xmlfname)
     xml.save_xml_pipeline(pipeline, xmlfname)
     pipeline2 = sc.get_process_instance(xmlfname)
     self._test_loo_pipeline(pipeline2)
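This test registers the generated XML file in self.temp_files (as does the Python export test in the next example), which implies a cleanup step elsewhere in the test class. A minimal sketch of that counterpart, assuming self.temp_files only holds plain file paths:

 def tearDown(self):
     # remove every temporary file registered by the tests; missing files
     # are skipped so a failed test does not hide the original error
     for fname in self.temp_files:
         if os.path.exists(fname):
             os.unlink(fname)
     self.temp_files = []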
Example #6
 def test_loo_py_io(self):
     sc = StudyConfig()
     pipeline = sc.get_process_instance(PipelineLOO)
     py_file = tempfile.mkstemp(suffix='_capsul.py')
     pyfname = py_file[1]
     os.close(py_file[0])
     self.temp_files.append(pyfname)
     python_export.save_py_pipeline(pipeline, pyfname)
     pipeline2 = sc.get_process_instance(pyfname)
     self._test_loo_pipeline(pipeline2)
Example #7
def init_study_config(init_config={}):
    study_config = StudyConfig('test_study',
                               modules=['FomConfig', 'SomaWorkflowConfig'],
                               init_config=init_config)
    study_config.input_directory = '/tmp/in'
    study_config.output_directory = '/tmp/out'
    study_config.attributes_schema_paths.append(
        'capsul.attributes.test.test_attributed_process')

    return study_config
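init_study_config() returns a StudyConfig with the FOM and Soma-Workflow modules enabled and an extra attributes schema path. A sketch of how it might be consumed, reusing the ProcessCompletionEngine calls shown in example #18 below; the process name and the import path are assumptions, not taken from this excerpt:

# assumed import path for ProcessCompletionEngine; adjust to your capsul version
from capsul.attributes.completion_engine import ProcessCompletionEngine

study_config = init_study_config()
# 'DummyProcess' is a hypothetical process assumed to be declared in the
# test_attributed_process module added to attributes_schema_paths
process = study_config.get_process_instance(
    'capsul.attributes.test.test_attributed_process.DummyProcess')
completion_engine = ProcessCompletionEngine.get_completion_engine(process)
attributes = completion_engine.get_attribute_values()
print(attributes.export_to_dict())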
Example #8
 def test_custom_nodes_py_io(self):
     sc = StudyConfig()
     pipeline = sc.get_process_instance(Pipeline1)
     py_file = tempfile.mkstemp(suffix='_capsul.py')
     pyfname = py_file[1]
     os.close(py_file[0])
     self.add_py_tmpfile(pyfname)
     python_export.save_py_pipeline(pipeline, pyfname)
     pipeline2 = sc.get_process_instance(pyfname)
     self._test_custom_nodes(pipeline2)
Example #9
 def test_custom_nodes_workflow(self):
     sc = StudyConfig()
     pipeline = sc.get_process_instance(Pipeline1)
     pipeline.main_input = '/dir/file'
     pipeline.output_directory = '/dir/out_dir'
     wf = pipeline_workflow.workflow_from_pipeline(pipeline,
                                                   create_directories=False)
     self.assertEqual(len(wf.jobs), 3)
     self.assertEqual(len(wf.dependencies), 2)
     self.assertEqual(
         sorted([[x.name for x in d] for d in wf.dependencies]),
         sorted([['train1', 'train2'], ['train2', 'test']]))
Example #10
 def test_custom_nodes_workflow(self):
     sc = StudyConfig()
     pipeline = sc.get_process_instance(Pipeline1)
     pipeline.main_input = os.path.join(self.temp_dir, 'file')
     pipeline.output_directory = os.path.join(self.temp_dir, 'out_dir')
     wf = pipeline_workflow.workflow_from_pipeline(pipeline,
                                                   create_directories=False)
     self.assertEqual(len(wf.jobs), 7)
     self.assertEqual(len(wf.dependencies), 6)
     self.assertEqual(
         sorted([[x.name for x in d] for d in wf.dependencies]),
         sorted([['LOO', 'train1'], ['train1', 'train2'],
                 ['train1', 'intermediate_output'], ['train2', 'test'],
                 ['train2', 'output_file'], ['test', 'test_output']]))
Example #11
def init_study_config(init_config={}):
    study_config = StudyConfig(
        'test_study',
        modules=['AttributesConfig', 'SomaWorkflowConfig'],
        init_config=init_config)
    study_config.input_directory = '/tmp/in'
    study_config.output_directory = '/tmp/out'
    study_config.attributes_schema_paths.append(
        'capsul.attributes.test.test_attributed_process')
    study_config.attributes_schemas['input'] = 'custom_ex'
    study_config.attributes_schemas['output'] = 'custom_ex'
    study_config.path_completion = 'custom_ex'

    return study_config
Example #12
 def setUp(self):
     default_config = SortedDictionary(("use_soma_workflow", True))
     # use a custom temporary soma-workflow dir to avoid concurrent
     # access problems
     tmpdb = tempfile.mkstemp('', prefix='soma_workflow')
     os.close(tmpdb[0])
     os.unlink(tmpdb[1])
     self.soma_workflow_temp_dir = tmpdb[1]
     os.mkdir(self.soma_workflow_temp_dir)
     swf_conf = '[%s]\nSOMA_WORKFLOW_DIR = %s\n' \
         % (socket.gethostname(), tmpdb[1])
     swconfig.Configuration.search_config_path \
         = staticmethod(lambda : StringIO.StringIO(swf_conf))
     self.study_config = StudyConfig(init_config=default_config)
     self.atomic_pipeline = MyAtomicPipeline()
     self.composite_pipeline = MyCompositePipeline()
Example #13
 def test_mapreduce(self):
     sc = StudyConfig()
     pipeline = sc.get_process_instance(PipelineMapReduce)
     pipeline.main_inputs = [
         os.path.join(self.temp_dir, 'file%d' % i) for i in range(4)
     ]
     pipeline.subjects = ['Robert', 'Gustave']
     pipeline.output_directory = os.path.join(self.temp_dir, 'out_dir')
     self.assertEqual(pipeline.nodes['cat'].process.files, [
         os.path.join(pipeline.output_directory,
                      '%s_test_output' % pipeline.subjects[0]),
         os.path.join(pipeline.output_directory,
                      '%s_test_output' % pipeline.subjects[1])
     ])
     wf = pipeline_workflow.workflow_from_pipeline(pipeline,
                                                   create_directories=False)
     self.assertEqual(len(wf.jobs), 19)
     #print(sorted([(d[0].name, d[1].name) for d in wf.dependencies]))
     self.assertEqual(len(wf.dependencies), 28)
Example #14
 def test_leave_one_out_pipeline(self):
     sc = StudyConfig()
     pipeline = sc.get_process_instance(PipelineLOO)
     self._test_loo_pipeline(pipeline)
Example #15
 def test_custom_nodes(self):
     sc = StudyConfig()
     pipeline = sc.get_process_instance(Pipeline1)
     self._test_custom_nodes(pipeline)
Example #16
pipelines = find_pipeline_and_process(
    os.path.basename(options.module))["pipeline_descs"]
logger.info("Found '{0}' pipeline(s) in '{1}'.".format(
    len(pipelines), options.module))

# Sort pipelines by module
# From each pipeline's full path 'm1.m2.pipeline' get its module name 'm2'
module_names = set([x.split(".")[1] for x in pipelines])
# Sort each pipeline according to its module name.
# The result is a dict of the form 'd[m2] = [pipeline1, pipeline2, ...]'.
sorted_pipelines = dict((x, []) for x in module_names)
for pipeline in pipelines:
    module_name = pipeline.split(".")[1]
    sorted_pipelines[module_name].append(pipeline)

study_config = StudyConfig(modules=StudyConfig.default_modules + ['FomConfig'])

# Generate a png representation of each pipeline.
for module_name, module_pipelines in sorted_pipelines.items():

    # this docwriter is just used to manage short names
    docwriter = PipelineHelpWriter([], short_names=short_names)

    # Where the documentation will be written: a relative path from the
    # makefile
    short_name = docwriter.get_short_name(module_name)
    outdir = os.path.join(base_outdir, short_name, "schema")
    if not os.path.isdir(outdir):
        os.makedirs(outdir)

    # Go through all pipelines
Example #17

def check_call(study_config, batch_file, **kwargs):
    '''
    Equivalent to Python soma.subprocess.check_call for SPM batch
    '''
    check_spm_configuration(study_config)
    cmd = spm_command(study_config, batch_file)
    return soma.subprocess.check_call(cmd, **kwargs)


def check_output(study_config, batch_file, **kwargs):
    '''
    Equivalent to Python soma.subprocess.check_output for SPM batch
    '''
    check_spm_configuration(study_config)
    cmd = spm_command(study_config, batch_file)
    return soma.subprocess.check_output(cmd, **kwargs)


if __name__ == '__main__':
    from capsul.api import StudyConfig
    from capsul.soma.subprocess.spm import check_call as call_spm
    import tempfile

    sc = StudyConfig(spm_directory='/home/yc176684/spm12-standalone-7219')
    batch = tempfile.NamedTemporaryFile(mode='w', suffix='.m')
    batch.write("fprintf(1, '%s', spm('dir'));")
    batch.flush()
    call_spm(sc, batch.name)
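Under the same assumptions as the __main__ block above (it reuses sc and batch), check_output can capture what the batch writes to stdout, here the SPM installation directory printed by fprintf. This is a sketch, not part of the original script:

    # returns the batch's stdout; decode it if a str is needed on Python 3
    spm_dir = check_output(sc, batch.name)
    print(spm_dir)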
Example #18
def main():
    ''' Run the :mod:`capsul.process.runprocess` module as a commandline
    '''

    usage = '''Usage: python -m capsul [options] processname [arg1] [arg2] ...
    [argx=valuex] [argy=valuey] ...

    Example:
    python -m capsul threshold ~/data/irm.ima /tmp/th.nii threshold1=80

    Named arguments (in the form argx=valuex) may address sub-processes of a
    pipeline, using the dot separator:

    PrepareSubject.t1mri=/home/myself/mymri.nii

    For a more precise description, please look at the web documentation:
    http://brainvisa.info/capsul/user_doc/user_guide_tree/index.html
    '''

    # Set up logging on stderr. This must be called before any logging takes
    # place, to avoid "No handlers could be found for logger" errors.
    logging.basicConfig()

    parser = OptionParser(description='Run a single CAPSUL process',
                          usage=usage)
    group1 = OptionGroup(
        parser,
        'Config',
        description='Processing configuration, database options')
    group1.add_option(
        '--studyconfig',
        dest='studyconfig',
        help='load StudyConfig configuration from the given file (JSON)')
    group1.add_option('-i',
                      '--input',
                      dest='input_directory',
                      help='input data directory (if not specified in the '
                      'studyconfig file). If specified neither on the '
                      'command line nor in the study config file, it is '
                      'taken to be the same as the output directory.')
    group1.add_option('-o',
                      '--output',
                      dest='output_directory',
                      help='output data directory (if not specified in the '
                      'studyconfig file). If specified neither on the '
                      'command line nor in the study config file, it is '
                      'taken to be the same as the input directory.')
    parser.add_option_group(group1)

    group2 = OptionGroup(
        parser,
        'Processing',
        description='Processing options, distributed execution')
    group2.add_option('--swf',
                      '--soma_workflow',
                      dest='soma_workflow',
                      default=False,
                      action='store_true',
                      help='use soma_workflow. The Soma-Workflow '
                      'configuration has to be set up and valid for non-local '
                      'execution, and additional file transfer options '
                      'may be used. The default is *not* to use SWF, and to '
                      'run sequentially in a single process.')
    group2.add_option('-r',
                      '--resource_id',
                      dest='resource_id',
                      default=None,
                      help='soma-workflow resource ID, defaults to localhost')
    group2.add_option('-p',
                      '--password',
                      dest='password',
                      default=None,
                      help='password to access the remote computing resource. '
                      'Do not specify it if using an ssh key')
    group2.add_option('--rsa-pass',
                      dest='rsa_key_pass',
                      default=None,
                      help='RSA key password, for ssh key access')
    group2.add_option('--queue',
                      dest='queue',
                      default=None,
                      help='Queue to use on the computing resource. If not '
                      'specified, use the default queue.')
    #group2.add_option('--input-processing', dest='input_file_processing',
    #default=None, help='Input files processing: local_path, '
    #'transfer, translate, or translate_shared. The default is '
    #'local_path if the computing resource is the localhost, or '
    #'translate_shared otherwise.')
    #group2.add_option('--output-processing', dest='output_file_processing',
    #default=None, help='Output files processing: local_path, '
    #'transfer, or translate. The default is local_path.')
    group2.add_option('--keep-succeeded-workflow',
                      dest='keep_succeeded_workflow',
                      action='store_true',
                      default=False,
                      help='keep the workflow in the computing resource '
                      'database after execution. By default it is removed.')
    group2.add_option('--delete-failed-workflow',
                      dest='delete_failed_workflow',
                      action='store_true',
                      default=False,
                      help='delete the workflow in the computing resource '
                      'database after execution, if it has failed. By default '
                      'it is kept.')
    parser.add_option_group(group2)

    group3 = OptionGroup(parser, 'Iteration', description='Iteration')
    group3.add_option('-I',
                      '--iterate',
                      dest='iterate_on',
                      action='append',
                      help='Iterate the given process, iterating over the '
                      'given parameter(s). Multiple parameters may be '
                      'iterated jointly using several -I options. In the '
                      'process parameters, values are replaced by lists; all '
                      'iterated lists should have the same size.\n'
                      'Ex:\n'
                      'python -m capsul -I par_a -I par_c a_process '
                      'par_a="[1, 2]" par_b="something" '
                      'par_c="[\\"one\\", \\"two\\"]"')
    parser.add_option_group(group3)

    group4 = OptionGroup(parser, 'Attributes completion')
    group4.add_option('-a',
                      '--attribute',
                      dest='attributes',
                      action='append',
                      default=[],
                      help='set completion (including FOM) attribute. '
                      'Syntax: attribute=value, where value uses the same '
                      'syntax as process parameters (python syntax for '
                      'lists, for instance), with proper quotes if needed '
                      'for shell escaping.\n'
                      'Ex: -a acquisition="default" '
                      '-a subject=\'["s1", "s2"]\'')
    parser.add_option_group(group4)

    group5 = OptionGroup(parser,
                         'Help',
                         description='Help and documentation options')
    group5.add_option('--process-help',
                      dest='process_help',
                      action='store_true',
                      default=False,
                      help='display specified process help')
    parser.add_option_group(group5)

    parser.disable_interspersed_args()
    (options, args) = parser.parse_args()

    if options.studyconfig:
        study_config = StudyConfig(modules=StudyConfig.default_modules +
                                   ['FomConfig', 'BrainVISAConfig'])
        if yaml:
            scdict = yaml.load(open(options.studyconfig))
        else:
            scdict = json.load(open(options.studyconfig))
        study_config.set_study_configuration(scdict)
    else:
        study_config = StudyConfig()
        study_config.read_configuration()

    if options.input_directory:
        study_config.input_directory = options.input_directory
    if options.output_directory:
        study_config.output_directory = options.output_directory
    if study_config.output_directory in (None, Undefined) \
            and study_config.input_directory not in (None, Undefined):
        study_config.output_directory = study_config.input_directory
    if study_config.input_directory in (None, Undefined) \
            and study_config.output_directory not in (None, Undefined):
        study_config.input_directory = study_config.output_directory
    study_config.somaworkflow_keep_succeeded_workflows \
        = options.keep_succeeded_workflow
    study_config.somaworkflow_keep_failed_workflows \
        = not options.delete_failed_workflow

    kwre = re.compile(r'([a-zA-Z_](\.?[a-zA-Z0-9_])*)\s*=\s*(.*)$')

    attributes = {}
    for att in options.attributes:
        m = kwre.match(att)
        if m is None:
            raise SyntaxError('syntax error in attribute definition: %s' % att)
        attributes[m.group(1)] = convert_commandline_parameter(m.group(3))

    args = tuple((convert_commandline_parameter(i) for i in args))
    kwargs = {}
    todel = []
    for arg in args:
        if isinstance(arg, six.string_types):
            m = kwre.match(arg)
            if m is not None:
                kwargs[m.group(1)] = convert_commandline_parameter(m.group(3))
                todel.append(arg)
    args = [arg for arg in args if arg not in todel]

    if not args:
        parser.print_usage()
        sys.exit(2)

    # get the main process
    process_name = args[0]
    args = args[1:]

    iterated = options.iterate_on
    try:
        process = get_process_with_params(process_name, study_config, iterated,
                                          attributes, *args, **kwargs)
    except ProcessParamError as e:
        print("error: {0}".format(e), file=sys.stderr)
        sys.exit(1)

    if options.process_help:
        process.help()

        print()

        completion_engine \
            = ProcessCompletionEngine.get_completion_engine(process)
        attribs = completion_engine.get_attribute_values()
        aval = attribs.export_to_dict()
        print('Completion attributes:')
        print('----------------------')
        print()
        print('(note: may differ depending on study config file contents, '
              'completion rules (FOM)...)')
        print()

        skipped = set(['generated_by_parameter', 'generated_by_process'])
        for name, value in six.iteritems(aval):
            if name in skipped:
                continue
            ttype = attribs.trait(name).trait_type.__class__.__name__
            if isinstance(attribs.trait(name).trait_type, List):
                ttype += '(%s)' \
                    % attribs.trait(name).inner_traits[
                        0].trait_type.__class__.__name__
            print('%s:' % name, ttype)
            if value not in (None, Undefined):
                print('   ', value)

        print()
        del aval, attribs, completion_engine, process
        sys.exit(0)

    resource_id = options.resource_id
    password = options.password
    rsa_key_pass = options.rsa_key_pass
    queue = options.queue
    study_config.use_soma_workflow = options.soma_workflow

    # input/output file processing options are not exposed on the command
    # line yet (see the commented-out options above), so use the defaults
    file_processing = [None, None]

    res = run_process_with_distribution(
        study_config,
        process,
        options.soma_workflow,
        resource_id=resource_id,
        password=password,
        rsa_key_pass=rsa_key_pass,
        queue=queue,
        input_file_processing=file_processing[0],
        output_file_processing=file_processing[1])

    sys.exit(0)