Example #1
        work_dir = os.path.join(os.getcwd(), 'working')
    wf.base_dir = work_dir
    datasink.inputs.base_directory = os.path.join(work_dir, 'output')
    wf.config['execution'] = dict(crashdump_dir=os.path.join(work_dir,
                                                             'crashdumps'),
                                  stop_on_first_crash=True)
    #wf.run('MultiProc', plugin_args={'n_procs': 4})
    eg = wf.run('Linear')
    wf.export('openfmri.py')
    wf.write_graph(dotfilename='hgraph.dot', graph2use='hierarchical')
    wf.write_graph(dotfilename='egraph.dot', graph2use='exec')
    wf.write_graph(dotfilename='fgraph.dot', graph2use='flat')
    wf.write_graph(dotfilename='ograph.dot', graph2use='orig')
    return eg

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(prog='fmri_openfmri.py',
                                     description=__doc__)
    parser.add_argument('-d', '--datasetdir', required=True)
    parser.add_argument('-s', '--subject', default=None)
    parser.add_argument('-m', '--model', default=1)
    parser.add_argument('-t', '--task', default=1)
    args = parser.parse_args()
    eg = analyze_openfmri_dataset(data_dir=os.path.abspath(args.datasetdir),
                                  subject=args.subject,
                                  model_id=int(args.model),
                                  task_id=int(args.task))
    from nipype.pipeline.utils import write_prov
    g = write_prov(eg, format='turtle')
Example #2
                return
            if testspec.get('shouldfail', False):
                # the workflow ran, but the test spec expected a failure
                self.fail("an expected failure did not occur in test '%s'"
                          % testid)
        finally:
            execinfo['stdout'] = capture_stdout.getvalue()
            execinfo['stderr'] = capture_stderr.getvalue()
            sys.stdout = rescue_stdout
            sys.stderr = rescue_stderr

        # try dumping provenance info
        try:
            from nipype.pipeline.utils import write_prov
            write_prov(exec_graph,
                       filename=opj(workflow.base_dir, 'provenance.json'))
        except ImportError:
            lgr.debug("local nipype version doesn't support provenance capture")


    def _check_output_presence(self, spec):
        outspec = spec.get('outputs', {})
        unmatched_output = []
        for ospec_id in outspec:
            ospec = outspec[ospec_id]
            ospectype = ospec['type']
            if ospectype == 'file':
                self.assertThat(
                    ospec['value'],
                    Annotate('expected output file missing', FileExists()))
            elif ospectype == 'directory':
Example #3
    def _execute_nipype_test(self, testid, testspec):
        # TODO merge/refactor this one with the plain python code method
        from cStringIO import StringIO
        import sys
        execinfo = self._details['exec_info'][testid]
        try:
            import nipype
        except ImportError:
            self.skipTest("Nipype not found, skipping test")
        # where is the workflow
        if 'file' in testspec:
            testwffilepath = testspec['file']
            lgr.debug("using custom workflow file name '%s'" % testwffilepath)
        else:
            testwffilepath = 'workflow.py'
            lgr.debug("using default workflow file name '%s'" % testwffilepath)
        # execute the script and extract the workflow
        script_locals = dict()
        try:
            execfile(testwffilepath, dict(), script_locals)
        except Exception as e:
            lgr.error("%s: %s" % (e.__class__.__name__, str(e)))
            self.assertThat(e,
                Annotate("test workflow setup failed: %s (%s)"
                         % (e.__class__.__name__, str(e)), Equals(None)))
        self.assertThat(script_locals,
            Annotate("test workflow script did not create the expected "
                     "'test_workflow' object",
                     Contains('test_workflow')))
        workflow = script_locals['test_workflow']
        # make sure nipype executes it in the right place
        workflow.base_dir = os.path.abspath(opj(os.curdir, '_workflow_exec'))
        # we want content, not time based hashing
        if 'execution' in workflow.config:
            workflow.config['execution']['hash_method'] = "content"
        else:
            workflow.config['execution'] = dict(hash_method="content")
        # execution
        try:
            rescue_stdout = sys.stdout
            rescue_stderr = sys.stderr
            sys.stdout = capture_stdout = StringIO()
            sys.stderr = capture_stderr = StringIO()
            try:
                exec_graph = workflow.run()
            except Exception as e:
                execinfo['exception'] = dict(type=e.__class__.__name__,
                                             info=str(e))
                if not testspec.get('shouldfail', False):
                    lgr.error("%s: %s" % (e.__class__.__name__, str(e)))
                    self.assertThat(e,
                        Annotate("exception occurred while executing Nipype "
                                 "workflow in test '%s': %s (%s)"
                                 % (testid, str(e), e.__class__.__name__),
                                 Equals(None)))
                return
            if testspec.get('shouldfail', False):
                # the run succeeded although the spec expected a failure;
                # there is no exception object to report at this point
                self.fail("an expected failure did not occur in test '%s'"
                          % testid)
        finally:
            execinfo['stdout'] = capture_stdout.getvalue()
            execinfo['stderr'] = capture_stderr.getvalue()
            sys.stdout = rescue_stdout
            sys.stderr = rescue_stderr

        # try dumping provenance info
        try:
            from nipype.pipeline.utils import write_prov
            write_prov(exec_graph,
                       filename=opj(workflow.base_dir, 'provenance.json'))
        except ImportError:
            lgr.debug("local nipype version doesn't support provenance capture")
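
All of the examples above end the same way: the graph returned by workflow.run() is handed to write_prov, with the import guarded because older nipype releases do not provide provenance capture. Below is a minimal sketch of that shared pattern, wrapped in a hypothetical helper (dump_provenance is not part of nipype or of the examples above):

import os

try:
    # write_prov lives in nipype.pipeline.utils in versions that support
    # provenance capture; older releases raise ImportError here
    from nipype.pipeline.utils import write_prov
except ImportError:
    write_prov = None


def dump_provenance(exec_graph, base_dir):
    """Write provenance for an executed workflow graph, if supported.

    ``exec_graph`` is the graph returned by ``Workflow.run()`` and
    ``base_dir`` the workflow's base directory, as in the examples above.
    Returns None when the installed nipype has no provenance support.
    """
    if write_prov is None:
        return None
    return write_prov(exec_graph,
                      filename=os.path.join(base_dir, 'provenance.json'))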