Example #1
def test_display_config(monkeypatch, dispnum):
    """Check that the display_variable option is used ($DISPLAY not set)"""
    config._display = None
    dispstr = ':%d' % dispnum
    config.set('execution', 'display_variable', dispstr)
    monkeypatch.delitem(os.environ, 'DISPLAY', raising=False)
    assert config.get_display() == config.get('execution', 'display_variable')
    # Test that it was correctly cached
    assert config.get_display() == config.get('execution', 'display_variable')
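A minimal sketch (not part of the test above) of exercising the same option outside of pytest; it assumes nipype is installed, and ':1' is an arbitrary display string:

# Sketch only: drive get_display() through the 'display_variable' option.
import os
from nipype import config

os.environ.pop('DISPLAY', None)                    # make sure $DISPLAY is unset
config.set('execution', 'display_variable', ':1')  # config values are plain strings
print(config.get_display())                        # -> ':1'; the value is then cached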
Example #2
def test_provenance_exists(tmpdir):
    tmpdir.chdir()
    from nipype import config
    from nipype.interfaces.base import CommandLine
    provenance_state = config.get('execution', 'write_provenance')
    hash_state = config.get('execution', 'hash_method')
    config.enable_provenance()
    CommandLine('echo hello').run()
    config.set('execution', 'write_provenance', provenance_state)
    config.set('execution', 'hash_method', hash_state)
    assert tmpdir.join('provenance.provn').check()
Example #4
def test_provenance_exists(tmpdir):
    tmpdir.chdir()
    from nipype import config
    from nipype.interfaces.base import CommandLine

    provenance_state = config.get("execution", "write_provenance")
    hash_state = config.get("execution", "hash_method")
    config.enable_provenance()
    CommandLine("echo hello").run()
    config.set("execution", "write_provenance", provenance_state)
    config.set("execution", "hash_method", hash_state)
    assert tmpdir.join("provenance.provn").check()
Example #5
def test_provenance_exists(tmpdir):
    tempdir = str(tmpdir)
    os.chdir(tempdir)
    from nipype import config
    from nipype.interfaces.base import CommandLine
    provenance_state = config.get('execution', 'write_provenance')
    hash_state = config.get('execution', 'hash_method')
    config.enable_provenance()
    CommandLine('echo hello').run()
    config.set('execution', 'write_provenance', provenance_state)
    config.set('execution', 'hash_method', hash_state)
    provenance_exists = os.path.exists(os.path.join(tempdir, 'provenance.provn'))
    assert provenance_exists
Example #6
def RunConnectivityWorkflow(path, outDir, workdir, conf_file=None):

    if conf_file is not None:
        with open(conf_file, 'r') as fp:
            conf = json.loads(fp.read())
        config.update_config(conf)

    plugin = config.get("execution", "plugin")
    plugin_args = config.get("execution", "plugin_args")
    if plugin_args is not None:
        plugin_args = eval(plugin_args)
    wf = BuildConnectivityWorkflowSurface(path, outDir)
    wf.base_dir = workdir
    wf.run(plugin=plugin, plugin_args=plugin_args)
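For illustration, a hypothetical conf_file that RunConnectivityWorkflow() could load; the section and option names are inferred from the config.get() calls above, and the file name and values are placeholders:

# Hypothetical companion snippet: write a JSON config for the function above.
import json

conf = {
    "execution": {
        "plugin": "MultiProc",
        "plugin_args": "{'n_procs': 4}",  # kept as a string; the example eval()s it
    },
    "logging": {"log_directory": "/tmp/logs"},
}
with open("connectivity_conf.json", "w") as fp:
    json.dump(conf, fp, indent=2)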
Example #7
def test_provenance_exists(tmpdir):
    tempdir = str(tmpdir)
    os.chdir(tempdir)
    from nipype import config
    from nipype.interfaces.base import CommandLine
    provenance_state = config.get('execution', 'write_provenance')
    hash_state = config.get('execution', 'hash_method')
    config.enable_provenance()
    CommandLine('echo hello').run()
    config.set('execution', 'write_provenance', provenance_state)
    config.set('execution', 'hash_method', hash_state)
    provenance_exists = os.path.exists(
        os.path.join(tempdir, 'provenance.provn'))
    assert provenance_exists
Example #8
    def enable_file_logging(self, filename):
        """
        Hack to define a filename for the log file!  It overrides the
        'enable_file_logging' method in the 'nipype/utils/logger.py' file.
        """
        import logging
        from logging.handlers import RotatingFileHandler as RFHandler
        config = self._config
        LOG_FILENAME = os.path.join(config.get('logging', 'log_directory'),
                                    filename)
        hdlr = RFHandler(LOG_FILENAME,
                         maxBytes=int(config.get('logging', 'log_size')),
                         backupCount=int(config.get('logging', 'log_rotate')))
        formatter = logging.Formatter(fmt=self.fmt, datefmt=self.datefmt)
        hdlr.setFormatter(formatter)
        self._logger.addHandler(hdlr)
        self._fmlogger.addHandler(hdlr)
        self._iflogger.addHandler(hdlr)
        self._hdlr = hdlr
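The core of this override is plain logging plus RotatingFileHandler; a self-contained sketch of the same pattern without nipype (file name, size limit, and backup count are illustrative):

# Standalone sketch of the rotating-file pattern used above.
import logging
from logging.handlers import RotatingFileHandler

hdlr = RotatingFileHandler('pipeline.log', maxBytes=250000, backupCount=4)
hdlr.setFormatter(logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s'))

logger = logging.getLogger('nipype.workflow')
logger.setLevel(logging.INFO)
logger.addHandler(hdlr)
logger.info('file logging enabled')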
Example #9
    def _list_outputs(self):
        """Execute this module.
        """

        # Init variables
        outputs = self.output_spec().get()
        out_files = []
        # Use hardlink
        use_hardlink = str2bool(
            config.get('execution', 'try_hard_link_datasink'))

        outdir = os.path.abspath(self.inputs.base_directory)

        # Iterate through outputs attributes {key : path(s)}
        for key, files in list(self.inputs._outputs.items()):
            if not isdefined(files):
                continue
            files = ensure_list(files)

            # flattening list
            if isinstance(files, list):
                if isinstance(files[0], list):
                    files = [item for sublist in files for item in sublist]

            # Iterate through passed-in source files
            for src in ensure_list(files):
                # Format src and dst files
                src = os.path.abspath(src)
                if not os.path.isfile(src):
                    src = os.path.join(src, '')
                dst = self._get_dst(src)
                dst = os.path.join(outdir, dst)

                # If src is a file, copy it to dst
                if os.path.isfile(src):
                    copyfile(src,
                             dst,
                             copy=True,
                             hashmethod='content',
                             use_hardlink=use_hardlink)
                    out_files.append(dst)
                # If src is a directory, copy entire contents to dst dir
                elif os.path.isdir(src):
                    if os.path.exists(dst) and self.inputs.remove_dest_dir:
                        shutil.rmtree(dst)
                    copytree(src, dst)
                    out_files.append(dst)

        # Return outputs dictionary
        outputs['out_file'] = out_files

        return outputs
        sys.exit()

Example #10
result_dir = os.path.abspath(args.output)
if not os.path.exists(result_dir):
    os.mkdir(result_dir)

infosource = pe.Node(niu.IdentityInterface(
    fields=['projects', 'subjects', 'experiments', 'scans']),
                     name='infosource',
                     synchronize=True)
infosource.iterables = [('projects', args.project), ('subjects', args.subject),
                        ('experiments', args.experiment), ('scans', args.scan)]

r = pe.Workflow(name='xnat_downloader')
r.base_output_dir = 'xnat_grabber'
r.base_dir = config.get('logging', 'log_directory')

dg = pe.Node(interface=nio.XNATSource(
    infields=['project', 'subject', 'experiment', 'scan'],
    outfields=['output']),
             name='dg')

if args.dicom:
    resource = 'DICOM'
else:
    resource = 'NIFTI'

dg.inputs.query_template = '/projects/%s/subjects/%s/experiments/%s/scans/%s/resources/' + resource
dg.inputs.query_template_args['output'] = [[
    'project', 'subject', 'experiment', 'scan'
]]
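For clarity, this is how the query template above expands for one combination of iterables (the values are placeholders):

# Illustrative only: expanding the XNATSource query template by hand.
template = '/projects/%s/subjects/%s/experiments/%s/scans/%s/resources/NIFTI'
print(template % ('PROJ01', 'subj001', 'exp001', 'scan_1'))
# -> /projects/PROJ01/subjects/subj001/experiments/exp001/scans/scan_1/resources/NIFTI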
Example #11
def RunSubjectWorkflow(args):
    """
                           .-----------.
                       --- | Session 1 | ---> /project/subjectA/session1/phase/
                     /     *-----------*
    .-----------.   /
    | Subject A | <
    *-----------*   \
                     \     .-----------.
                       --- | Session 2 | ---> /project/subjectA/session2/phase/
                           *-----------*
    **** Replaces WorkflowT1T2.py ****
    """
    database, start_time, subject, master_config = args
    assert 'baseline' in master_config['components'] or 'longitudinal' in master_config['components'], "Baseline or Longitudinal is not in WORKFLOW_COMPONENTS!"
    # HACK:
    #    To avoid a "sqlite3.ProgrammingError: Base Cursor.__init__ not called" error
    #    using multiprocessing.map_async(), re-instantiate database
    # database.__init__(defaultDBName=database.dbName, subject_list=database.subjectList)
    #
    # END HACK
    import time

    from nipype import config, logging
    config.update_config(master_config)  # Set universal pipeline options
    assert config.get('execution', 'plugin') == master_config['execution']['plugin']
    # DEBUG
    # config.enable_debug_mode()
    # config.set('execution', 'stop_on_first_rerun', 'true')
    # END DEBUG
    logging.update_logging(config)

    import nipype.pipeline.engine as pe
    import nipype.interfaces.base as nbase
    import nipype.interfaces.io as nio
    from nipype.interfaces.utility import IdentityInterface, Function
    import traits

    from baw_exp import OpenSubjectDatabase
    from SessionDB import SessionDB
    from PipeLineFunctionHelpers import convertToList
    from atlasNode import MakeAtlasNode
    from utilities.misc import GenerateSubjectOutputPattern as outputPattern
    from utilities.misc import GenerateWFName

    while time.time() < start_time:
        time.sleep(start_time - time.time() + 1)
        print "Delaying start for {subject}".format(subject=subject)
    print("===================== SUBJECT: {0} ===========================".format(subject))

    subjectWorkflow = pe.Workflow(name="BAW_StandardWorkup_subject_{0}".format(subject))
    subjectWorkflow.base_dir = config.get('logging', 'log_directory')
    # subjectWorkflow.config['execution']['plugin'] = 'Linear'  # Hardcodeded in WorkupT1T2.py - why?
    # DEBUG
    # subjectWorkflow.config['execution']['stop_on_first_rerun'] = 'true'
    # END DEBUG
    atlasNode = MakeAtlasNode(master_config['atlascache'], 'BAtlas')

    sessionWorkflow = dict()
    inputsSpec = dict()
    sessions = database.getSessionsFromSubject(subject)
    # print "These are the sessions: ", sessions
    if 'baseline' in master_config['components']:
        current_phase = 'baseline'
        from baseline import create_baseline as create_wkfl
    elif 'longitudinal' in master_config['components']:
        current_phase = 'longitudinal'
        from longitudinal import create_longitudial as create_wkfl

    for session in sessions:  # TODO (future): Replace with iterable inputSpec node and add Function node for getAllFiles()
        project = database.getProjFromSession(session)
        pname = "{0}_{1}".format(session, current_phase)  # Long node names make graphs a pain to read/print
        # pname = GenerateWFName(project, subject, session, current_phase)
        print "Building session pipeline for {0}".format(session)
        inputsSpec[session] = pe.Node(name='inputspec_{0}'.format(session),
                                      interface=IdentityInterface(fields=['T1s', 'T2s', 'PDs', 'FLs', 'OTs']))
        inputsSpec[session].inputs.T1s = database.getFilenamesByScantype(session, ['T1-15', 'T1-30'])
        inputsSpec[session].inputs.T2s = database.getFilenamesByScantype(session, ['T2-15', 'T2-30'])
        inputsSpec[session].inputs.PDs = database.getFilenamesByScantype(session, ['PD-15', 'PD-30'])
        inputsSpec[session].inputs.FLs = database.getFilenamesByScantype(session, ['FL-15', 'FL-30'])
        inputsSpec[session].inputs.OTs = database.getFilenamesByScantype(session, ['OTHER-15', 'OTHER-30'])

        sessionWorkflow[session] = create_wkfl(project, subject, session, master_config,
                                               interpMode='Linear', pipeline_name=pname)

        subjectWorkflow.connect([(inputsSpec[session], sessionWorkflow[session], [('T1s', 'inputspec.T1s'),
                                                                                  ('T2s', 'inputspec.T2s'),
                                                                                  ('PDs', 'inputspec.PDs'),
                                                                                  ('FLs', 'inputspec.FLs'),
                                                                                  ('OTs', 'inputspec.OTHERs'),
                                                                                  ]),
                                 (atlasNode, sessionWorkflow[session], [('template_landmarks_50Lmks_fcsv',
                                                                         'inputspec.atlasLandmarkFilename'),
                                                                        ('template_weights_50Lmks_wts',
                                                                         'inputspec.atlasWeightFilename'),
                                                                        ('LLSModel_50Lmks_hdf5', 'inputspec.LLSModel'),
                                                                        ('T1_50Lmks_mdl', 'inputspec.inputTemplateModel')]),
                                ])
        if current_phase == 'baseline':
            subjectWorkflow.connect([(atlasNode, sessionWorkflow[session], [('template_t1', 'inputspec.template_t1'),
                                                                            ('ExtendedAtlasDefinition_xml',
                                                                             'inputspec.atlasDefinition')]),
                                 ])
        else:
            assert current_phase == 'longitudinal', "Phase value is unknown: {0}".format(current_phase)

    from utils import run_workflow, print_workflow
    if False:
        print_workflow(subjectWorkflow, plugin=master_config['execution']['plugin'], dotfilename='subjectWorkflow')
    return run_workflow(subjectWorkflow, plugin=master_config['execution']['plugin'], plugin_args=master_config['plugin_args'])
Example #12
def test_debug_mode():
    from ... import logging

    sofc_config = config.get("execution", "stop_on_first_crash")
    ruo_config = config.get("execution", "remove_unnecessary_outputs")
    ki_config = config.get("execution", "keep_inputs")
    wf_config = config.get("logging", "workflow_level")
    if_config = config.get("logging", "interface_level")
    ut_config = config.get("logging", "utils_level")

    wf_level = logging.getLogger("nipype.workflow").level
    if_level = logging.getLogger("nipype.interface").level
    ut_level = logging.getLogger("nipype.utils").level

    config.enable_debug_mode()

    # Check config is updated and logging levels, too
    assert config.get("execution", "stop_on_first_crash") == "true"
    assert config.get("execution", "remove_unnecessary_outputs") == "false"
    assert config.get("execution", "keep_inputs") == "true"
    assert config.get("logging", "workflow_level") == "DEBUG"
    assert config.get("logging", "interface_level") == "DEBUG"
    assert config.get("logging", "utils_level") == "DEBUG"

    assert logging.getLogger("nipype.workflow").level == 10
    assert logging.getLogger("nipype.interface").level == 10
    assert logging.getLogger("nipype.utils").level == 10

    # Restore config and levels
    config.set("execution", "stop_on_first_crash", sofc_config)
    config.set("execution", "remove_unnecessary_outputs", ruo_config)
    config.set("execution", "keep_inputs", ki_config)
    config.set("logging", "workflow_level", wf_config)
    config.set("logging", "interface_level", if_config)
    config.set("logging", "utils_level", ut_config)
    logging.update_logging(config)

    assert config.get("execution", "stop_on_first_crash") == sofc_config
    assert config.get("execution", "remove_unnecessary_outputs") == ruo_config
    assert config.get("execution", "keep_inputs") == ki_config
    assert config.get("logging", "workflow_level") == wf_config
    assert config.get("logging", "interface_level") == if_config
    assert config.get("logging", "utils_level") == ut_config

    assert logging.getLogger("nipype.workflow").level == wf_level
    assert logging.getLogger("nipype.interface").level == if_level
    assert logging.getLogger("nipype.utils").level == ut_level
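The save/restore pattern used by the test can be packaged in a small helper; a hedged sketch (the context manager below is hypothetical, not part of nipype):

# Hypothetical helper: temporarily override a nipype config option and
# restore the previous value afterwards, as test_debug_mode() does by hand.
from contextlib import contextmanager
from nipype import config

@contextmanager
def temporary_config(section, option, value):
    saved = config.get(section, option)
    config.set(section, option, value)
    try:
        yield
    finally:
        config.set(section, option, saved)

# usage: run something with stop_on_first_crash forced on
with temporary_config('execution', 'stop_on_first_crash', 'true'):
    pass  # e.g. workflow.run(...)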
Example #13
def main(args):
    subjects, master_config = args

    import os
    import sys
    import traceback

    # Set universal pipeline options
    from nipype import config
    config.update_config(master_config)
    assert config.get('execution',
                      'plugin') == master_config['execution']['plugin']

    import nipype.pipeline.engine as pe
    import nipype.interfaces.io as nio
    from nipype.interfaces.utility import IdentityInterface, Function
    import nipype.interfaces.ants as ants

    from template import MergeByExtendListElements, xml_filename
    from PipeLineFunctionHelpers import mapPosteriorList
    from atlasNode import GetAtlasNode, MakeNewAtlasTemplate
    from utilities.misc import GenerateSubjectOutputPattern as outputPattern
    from utilities.distributed import modify_qsub_args

    template = pe.Workflow(name='SubjectAtlas_Template')
    template.base_dir = master_config['logging']['log_directory']

    BAtlas = GetAtlasNode(master_config['previouscache'], 'BAtlas')

    inputspec = pe.Node(interface=IdentityInterface(fields=['subject']),
                        name='inputspec')
    inputspec.iterables = ('subject', subjects)

    baselineDG = pe.Node(nio.DataGrabber(infields=['subject'],
                                         outfields=[
                                             't1_average', 't2_average',
                                             'pd_average', 'fl_average',
                                             'outputLabels', 'posteriorImages'
                                         ]),
                         name='Baseline_DG')
    baselineDG.inputs.base_directory = master_config['previousresult']
    baselineDG.inputs.sort_filelist = True
    baselineDG.inputs.raise_on_empty = False
    baselineDG.inputs.template = '*/%s/*/Baseline/%s.nii.gz'
    baselineDG.inputs.template_args['t1_average'] = [[
        'subject', 't1_average_BRAINSABC'
    ]]
    baselineDG.inputs.template_args['t2_average'] = [[
        'subject', 't2_average_BRAINSABC'
    ]]
    baselineDG.inputs.template_args['pd_average'] = [[
        'subject', 'pd_average_BRAINSABC'
    ]]
    baselineDG.inputs.template_args['fl_average'] = [[
        'subject', 'fl_average_BRAINSABC'
    ]]
    baselineDG.inputs.template_args['outputLabels'] = [[
        'subject', 'brain_label_seg'
    ]]
    baselineDG.inputs.field_template = {
        'posteriorImages': '*/%s/*/TissueClassify/POSTERIOR_%s.nii.gz'
    }
    posterior_files = [
        'AIR', 'BASAL', 'CRBLGM', 'CRBLWM', 'CSF', 'GLOBUS', 'HIPPOCAMPUS',
        'NOTCSF', 'NOTGM', 'NOTVB', 'NOTWM', 'SURFGM', 'THALAMUS', 'VB', 'WM'
    ]
    baselineDG.inputs.template_args['posteriorImages'] = [[
        'subject', posterior_files
    ]]

    MergeByExtendListElementsNode = pe.Node(
        Function(
            function=MergeByExtendListElements,
            input_names=['t1s', 't2s', 'pds', 'fls', 'labels', 'posteriors'],
            output_names=[
                'ListOfImagesDictionaries', 'registrationImageTypes',
                'interpolationMapping'
            ]),
        run_without_submitting=True,
        name="99_MergeByExtendListElements")
    from PipeLineFunctionHelpers import WrapPosteriorImagesFromDictionaryFunction as wrapfunc
    template.connect([(inputspec, baselineDG, [('subject', 'subject')]),
                      (baselineDG, MergeByExtendListElementsNode,
                       [('t1_average', 't1s'), ('t2_average', 't2s'),
                        ('pd_average', 'pds'), ('fl_average', 'fls'),
                        ('outputLabels', 'labels'),
                        (('posteriorImages', wrapfunc), 'posteriors')])])

    myInitAvgWF = pe.Node(
        interface=ants.AverageImages(),
        name='Atlas_antsSimpleAverage')  # was 'Phase1_antsSimpleAverage'
    myInitAvgWF.inputs.dimension = 3
    myInitAvgWF.inputs.normalize = True
    template.connect(baselineDG, 't1_average', myInitAvgWF, "images")
    ####################################################################################################
    # TEMPLATE_BUILD_RUN_MODE = 'MULTI_IMAGE'
    # if numSessions == 1:
    #     TEMPLATE_BUILD_RUN_MODE = 'SINGLE_IMAGE'
    ####################################################################################################
    from BAWantsRegistrationBuildTemplate import BAWantsRegistrationTemplateBuildSingleIterationWF as registrationWF
    buildTemplateIteration1 = registrationWF('iteration01')
    # buildTemplateIteration2 = buildTemplateIteration1.clone(name='buildTemplateIteration2')
    buildTemplateIteration2 = registrationWF('Iteration02')

    MakeNewAtlasTemplateNode = pe.Node(
        interface=Function(
            function=MakeNewAtlasTemplate,
            input_names=[
                't1_image', 'deformed_list', 'AtlasTemplate', 'outDefinition'
            ],
            output_names=['outAtlasFullPath', 'clean_deformed_list']),
        # This is a lot of work, so submit it run_without_submitting=True
        run_without_submitting=True,  # HACK:  THIS NODE REALLY SHOULD RUN ON THE CLUSTER!
        name='99_MakeNewAtlasTemplate')

    if master_config['execution'][
            'plugin'] == 'SGE':  # for some nodes, the qsub call needs to be modified on the cluster

        MakeNewAtlasTemplateNode.plugin_args = {
            'template': master_config['plugin_args']['template'],
            'qsub_args': modify_qsub_args(master_config['queue'], '1000M', 1,
                                          1),
            'overwrite': True
        }
        for bt in [buildTemplateIteration1, buildTemplateIteration2]:
            ##################################################
            # *** Hans, is this TODO already addressed? ***  #
            # ---->  # TODO:  Change these parameters  <---- #
            ##################################################
            BeginANTS = bt.get_node("BeginANTS")
            BeginANTS.plugin_args = {
                'template':
                master_config['plugin_args']['template'],
                'overwrite':
                True,
                'qsub_args':
                modify_qsub_args(master_config['queue'],
                                 '9000M',
                                 4,
                                 hard=False)
            }
            wimtdeformed = bt.get_node("wimtdeformed")
            wimtdeformed.plugin_args = {
                'template':
                master_config['plugin_args']['template'],
                'overwrite':
                True,
                'qsub_args':
                modify_qsub_args(master_config['queue'], '2000M', 1, 2)
            }
            AvgAffineTransform = bt.get_node("AvgAffineTransform")
            AvgAffineTransform.plugin_args = {
                'template': master_config['plugin_args']['template'],
                'overwrite': True,
                'qsub_args': modify_qsub_args(master_config['queue'], '2000M',
                                              1)
            }
            wimtPassivedeformed = bt.get_node("wimtPassivedeformed")
            wimtPassivedeformed.plugin_args = {
                'template':
                master_config['plugin_args']['template'],
                'overwrite':
                True,
                'qsub_args':
                modify_qsub_args(master_config['queue'], '2000M', 1, 2)
            }

    template.connect([
        (myInitAvgWF, buildTemplateIteration1, [('output_average_image',
                                                 'inputspec.fixed_image')]),
        (MergeByExtendListElementsNode, buildTemplateIteration1,
         [('ListOfImagesDictionaries', 'inputspec.ListOfImagesDictionaries'),
          ('registrationImageTypes', 'inputspec.registrationImageTypes'),
          ('interpolationMapping', 'inputspec.interpolationMapping')]),
        (buildTemplateIteration1, buildTemplateIteration2,
         [('outputspec.template', 'inputspec.fixed_image')]),
        (MergeByExtendListElementsNode, buildTemplateIteration2,
         [('ListOfImagesDictionaries', 'inputspec.ListOfImagesDictionaries'),
          ('registrationImageTypes', 'inputspec.registrationImageTypes'),
          ('interpolationMapping', 'inputspec.interpolationMapping')]),
        (inputspec, MakeNewAtlasTemplateNode, [(('subject', xml_filename),
                                                'outDefinition')]),
        (BAtlas, MakeNewAtlasTemplateNode, [('ExtendedAtlasDefinition_xml_in',
                                             'AtlasTemplate')]),
        (buildTemplateIteration2, MakeNewAtlasTemplateNode,
         [('outputspec.template', 't1_image'),
          ('outputspec.passive_deformed_templates', 'deformed_list')]),
    ])

    # Create DataSinks
    Atlas_DataSink = pe.Node(nio.DataSink(), name="Atlas_DS")
    Atlas_DataSink.overwrite = master_config['ds_overwrite']
    Atlas_DataSink.inputs.base_directory = master_config['resultdir']

    Subject_DataSink = pe.Node(nio.DataSink(), name="Subject_DS")
    Subject_DataSink.overwrite = master_config['ds_overwrite']
    Subject_DataSink.inputs.base_directory = master_config['resultdir']

    template.connect([
        (inputspec, Atlas_DataSink, [('subject', 'container')]),
        (buildTemplateIteration1, Atlas_DataSink,
         [('outputspec.template', 'Atlas.iteration1')]),  # Unnecessary
        (MakeNewAtlasTemplateNode, Atlas_DataSink, [('outAtlasFullPath',
                                                     'Atlas.definitions')]),
        (BAtlas, Atlas_DataSink,
         [('template_landmarks_50Lmks_fcsv', 'Atlas.20111119_BCD.@fcsv'),
          ('template_weights_50Lmks_wts', 'Atlas.20111119_BCD.@wts'),
          ('LLSModel_50Lmks_hdf5', 'Atlas.20111119_BCD.@hdf5'),
          ('T1_50Lmks_mdl', 'Atlas.20111119_BCD.@mdl')]),
        (inputspec, Subject_DataSink, [(('subject', outputPattern),
                                        'regexp_substitutions')]),
        (buildTemplateIteration2, Subject_DataSink,
         [('outputspec.template', 'ANTSTemplate.@template')]),
        (MakeNewAtlasTemplateNode, Subject_DataSink, [
            ('clean_deformed_list', 'ANTSTemplate.@passive_deformed_templates')
        ]),
    ])

    from utils import run_workflow, print_workflow
    if False:
        print_workflow(template,
                       plugin=master_config['execution']['plugin'],
                       dotfilename='template')
    return run_workflow(template,
                        plugin=master_config['execution']['plugin'],
                        plugin_args=master_config['plugin_args'])
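As a quick illustration, the Baseline_DG templates above resolve to glob patterns under base_directory; expanding one of them with placeholder values:

# Illustrative only: how one DataGrabber template above expands.
template = '*/%s/*/Baseline/%s.nii.gz'
print(template % ('subj001', 't1_average_BRAINSABC'))
# -> */subj001/*/Baseline/t1_average_BRAINSABC.nii.gz  (globbed under 'previousresult')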
Example #14
def test_debug_mode():
    from ... import logging

    sofc_config = config.get('execution', 'stop_on_first_crash')
    ruo_config = config.get('execution', 'remove_unnecessary_outputs')
    ki_config = config.get('execution', 'keep_inputs')
    wf_config = config.get('logging', 'workflow_level')
    if_config = config.get('logging', 'interface_level')
    ut_config = config.get('logging', 'utils_level')

    wf_level = logging.getLogger('nipype.workflow').level
    if_level = logging.getLogger('nipype.interface').level
    ut_level = logging.getLogger('nipype.utils').level

    config.enable_debug_mode()

    # Check config is updated and logging levels, too
    assert config.get('execution', 'stop_on_first_crash') == 'true'
    assert config.get('execution', 'remove_unnecessary_outputs') == 'false'
    assert config.get('execution', 'keep_inputs') == 'true'
    assert config.get('logging', 'workflow_level') == 'DEBUG'
    assert config.get('logging', 'interface_level') == 'DEBUG'
    assert config.get('logging', 'utils_level') == 'DEBUG'

    assert logging.getLogger('nipype.workflow').level == 10
    assert logging.getLogger('nipype.interface').level == 10
    assert logging.getLogger('nipype.utils').level == 10

    # Restore config and levels
    config.set('execution', 'stop_on_first_crash', sofc_config)
    config.set('execution', 'remove_unnecessary_outputs', ruo_config)
    config.set('execution', 'keep_inputs', ki_config)
    config.set('logging', 'workflow_level', wf_config)
    config.set('logging', 'interface_level', if_config)
    config.set('logging', 'utils_level', ut_config)
    logging.update_logging(config)

    assert config.get('execution', 'stop_on_first_crash') == sofc_config
    assert config.get('execution', 'remove_unnecessary_outputs') == ruo_config
    assert config.get('execution', 'keep_inputs') == ki_config
    assert config.get('logging', 'workflow_level') == wf_config
    assert config.get('logging', 'interface_level') == if_config
    assert config.get('logging', 'utils_level') == ut_config

    assert logging.getLogger('nipype.workflow').level == wf_level
    assert logging.getLogger('nipype.interface').level == if_level
    assert logging.getLogger('nipype.utils').level == ut_level
Example #15
def create_singleSession(dataDict, master_config, interpMode, pipeline_name):
    """
    create singleSession workflow on a single session

    This is the main function to call when processing a data set with T1 & T2
    data.  ExperimentBaseDirectoryPrefix is the base of the directory to place results, T1Images & T2Images
    are the lists of images to be used in the auto-workup. atlas_fname_wpath is
    the path and filename of the atlas to use.
    """
    assert 'tissue_classify' in master_config['components'] or \
      'auxlmk' in master_config['components'] or \
      'segmentation' in master_config['components']

    from nipype import config, logging
    config.update_config(master_config)  # Set universal pipeline options
    assert config.get('execution', 'plugin') == master_config['execution']['plugin']
    logging.update_logging(config)

    import nipype.pipeline.engine as pe
    import nipype.interfaces.io as nio
    from nipype.interfaces.base import CommandLine, CommandLineInputSpec, TraitedSpec, Directory, traits, isdefined, BaseInterface
    from nipype.interfaces.utility import Split, Rename, IdentityInterface, Function

    from workflows.baseline import baseline_workflow as create_baseline
    from PipeLineFunctionHelpers import convertToList
    from utilities.misc import GenerateSubjectOutputPattern as outputPattern
    from utilities.misc import GenerateWFName
    from workflows.utils import run_workflow, print_workflow
    from workflows.atlasNode import MakeAtlasNode

    project = dataDict['project']
    subject = dataDict['subject']
    session = dataDict['session']

    pname = "{0}_{1}_{2}".format(master_config['workflow_type'], subject, session)
    sessionWorkflow = create_baseline(project, subject, session, master_config,
                             phase=master_config['workflow_type'],
                             interpMode=interpMode,
                             pipeline_name=pipeline_name)
    sessionWorkflow.base_dir = master_config['cachedir']

    inputsSpec = sessionWorkflow.get_node('inputspec')
    inputsSpec.inputs.T1s = dataDict['T1s']
    inputsSpec.inputs.T2s = dataDict['T2s']
    inputsSpec.inputs.PDs = dataDict['PDs']
    inputsSpec.inputs.FLs = dataDict['FLs']
    inputsSpec.inputs.OTHERs = dataDict['OTs']
    atlasNode = MakeAtlasNode(master_config['atlascache'], 'BAtlas_{0}'.format(session))  # TODO: input atlas csv
    sessionWorkflow.connect([(atlasNode, inputsSpec, [('template_landmarks_50Lmks_fcsv',
                                                                         'atlasLandmarkFilename'),
                                                                        ('template_weights_50Lmks_wts',
                                                                         'atlasWeightFilename'),
                                                                        ('LLSModel_50Lmks_hdf5', 'LLSModel'),
                                                                        ('T1_50Lmks_mdl', 'inputTemplateModel')]),
                                ])
    if True:  # FIXME: current_phase == 'baseline':
        sessionWorkflow.connect([(atlasNode, inputsSpec, [('template_t1', 'template_t1'),
                                                          ('ExtendedAtlasDefinition_xml',
                                                           'atlasDefinition')]),
                                 ])
    else:
        template_DG = pe.Node(interface=nio.DataGrabber(infields=['subject'],
                                                        outfields=['template_t1', 'outAtlasFullPath']),
                              name='Template_DG')
        template_DG.inputs.base_directory = master_config['previousresult']
        template_DG.inputs.subject = subject
        template_DG.inputs.template = 'SUBJECT_TEMPLATES/%s/AVG_%s.nii.gz'
        template_DG.inputs.template_args['template_t1'] = [['subject', 'T1']]
        template_DG.inputs.field_template = {'outAtlasFullPath': 'Atlas/definitions/AtlasDefinition_%s.xml'}
        template_DG.inputs.template_args['outAtlasFullPath'] = [['subject']]
        template_DG.inputs.sort_filelist = True
        template_DG.inputs.raise_on_empty = True

        sessionWorkflow.connect([(template_DG, inputsSpec, [('outAtlasFullPath', 'atlasDefinition'),
                                                            ('template_t1', 'template_t1')]),
                                 ])

    if 'segmentation' in master_config['components']:
        from workflows.segmentation import segmentation
        from workflows.WorkupT1T2BRAINSCut import GenerateWFName
        try:
            bCutInputName = ".".join([GenerateWFName(project, subject, session, 'Segmentation'), 'inputspec'])
        except:
            print(project, subject, session)
            raise
        sname = 'segmentation'
        onlyT1 = not(len(dataDict['T2s']) > 0)
        segWF = segmentation(project, subject, session, master_config, onlyT1, pipeline_name=sname)
        sessionWorkflow.connect([(atlasNode, segWF,
                                [('hncma-atlas', 'inputspec.hncma-atlas'),
                                 ('template_t1', 'inputspec.template_t1'),
                                 ('template_t1', bCutInputName + '.template_t1'),
                                 ('rho', bCutInputName + '.rho'),
                                 ('phi', bCutInputName + '.phi'),
                                 ('theta', bCutInputName + '.theta'),
                                 ('l_caudate_ProbabilityMap', bCutInputName + '.l_caudate_ProbabilityMap'),
                                 ('r_caudate_ProbabilityMap', bCutInputName + '.r_caudate_ProbabilityMap'),
                                 ('l_hippocampus_ProbabilityMap', bCutInputName + '.l_hippocampus_ProbabilityMap'),
                                 ('r_hippocampus_ProbabilityMap', bCutInputName + '.r_hippocampus_ProbabilityMap'),
                                 ('l_putamen_ProbabilityMap', bCutInputName + '.l_putamen_ProbabilityMap'),
                                 ('r_putamen_ProbabilityMap', bCutInputName + '.r_putamen_ProbabilityMap'),
                                 ('l_thalamus_ProbabilityMap', bCutInputName + '.l_thalamus_ProbabilityMap'),
                                 ('r_thalamus_ProbabilityMap', bCutInputName + '.r_thalamus_ProbabilityMap'),
                                 ('l_accumben_ProbabilityMap', bCutInputName + '.l_accumben_ProbabilityMap'),
                                 ('r_accumben_ProbabilityMap', bCutInputName + '.r_accumben_ProbabilityMap'),
                                 ('l_globus_ProbabilityMap', bCutInputName + '.l_globus_ProbabilityMap'),
                                 ('r_globus_ProbabilityMap', bCutInputName + '.r_globus_ProbabilityMap'),
                                 ('trainModelFile_txtD0060NT0060_gz',
                                  bCutInputName + '.trainModelFile_txtD0060NT0060_gz')])])
        outputSpec = sessionWorkflow.get_node('outputspec')
        sessionWorkflow.connect([(outputSpec, segWF, [('t1_average', 'inputspec.t1_average'),
                                             ('LMIatlasToSubject_tx', 'inputspec.LMIatlasToSubject_tx'),
                                             ('outputLabels', 'inputspec.inputLabels'),
                                             ('posteriorImages', 'inputspec.posteriorImages'),
                                             ('tc_atlas2sessionInverse_tx',
                                              'inputspec.TissueClassifyatlasToSubjectInverseTransform'),
                                             ('UpdatedPosteriorsList', 'inputspec.UpdatedPosteriorsList'),
                                             ('outputHeadLabels', 'inputspec.inputHeadLabels')])
                                ])
        if not onlyT1:
            sessionWorkflow.connect([(outputSpec, segWF, [('t1_average', 'inputspec.t2_average')])])

    return sessionWorkflow
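For orientation, the shape of the arguments create_singleSession() expects, inferred from the keys it reads; the paths and option values below are placeholders, not a working configuration:

# Illustrative only: argument shapes inferred from the function above.
dataDict = {
    'project': 'PROJ01', 'subject': 'subj001', 'session': 'ses001',
    'T1s': ['/data/subj001/ses001/T1.nii.gz'],
    'T2s': [], 'PDs': [], 'FLs': [], 'OTs': [],
}
master_config = {
    'components': ['tissue_classify'],
    'workflow_type': 'baseline',
    'cachedir': '/scratch/cache',
    'atlascache': '/scratch/atlas',
    'execution': {'plugin': 'Linear'},
}
# wf = create_singleSession(dataDict, master_config, 'Linear', 'baseline_subj001_ses001')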
Example #16
def RunSubjectWorkflow(args):
    """
                           .-----------.
                       --- | Session 1 | ---> /project/subjectA/session1/phase/
                     /     *-----------*
    .-----------.   /
    | Subject A | <
    *-----------*   \
                     \     .-----------.
                       --- | Session 2 | ---> /project/subjectA/session2/phase/
                           *-----------*
    **** Replaces WorkflowT1T2.py ****
    """
    start_time, subject, master_config = args
    assert 'tissue_classify' in master_config['components'] or 'auxlmk' in master_config['components'] or 'segmentation' in master_config['components'], "None of 'tissue_classify', 'auxlmk', or 'segmentation' is in WORKFLOW_COMPONENTS!"
    import time

    from nipype import config, logging
    config.update_config(master_config)  # Set universal pipeline options
    assert config.get('execution', 'plugin') == master_config['execution']['plugin']
    # DEBUG
    # config.enable_debug_mode()
    # config.set('execution', 'stop_on_first_rerun', 'true')
    # END DEBUG
    logging.update_logging(config)

    import nipype.pipeline.engine as pe
    import nipype.interfaces.base as nbase
    import nipype.interfaces.io as nio
    from nipype.interfaces.utility import IdentityInterface, Function
    import traits

    from baw_exp import OpenSubjectDatabase
    from SessionDB import SessionDB
    from PipeLineFunctionHelpers import convertToList
    from atlasNode import MakeAtlasNode
    from utilities.misc import GenerateSubjectOutputPattern as outputPattern
    from utilities.misc import GenerateWFName
    from utils import run_workflow, print_workflow

    # while time.time() < start_time:
        # time.sleep(start_time - time.time() + 1)
        # print "Delaying start for {subject}".format(subject=subject)
    # print("===================== SUBJECT: {0} ===========================".format(subject))

    subjectWorkflow = pe.Workflow(name="BAW_StandardWorkup_subject_{0}".format(subject))
    subjectWorkflow.base_dir = config.get('logging', 'log_directory')
    # subjectWorkflow.config['execution']['plugin'] = 'Linear'  # Hardcodeded in WorkupT1T2.py - why?
    # DEBUG
    # subjectWorkflow.config['execution']['stop_on_first_rerun'] = 'true'
    # END DEBUG


    sessionWorkflow = dict()
    inputsSpec = dict()
    # To avoid a "sqlite3.ProgrammingError: Base Cursor.__init__ not called" error
    #    using multiprocessing.map_async(), instantiate database here
    database = OpenSubjectDatabase(master_config['cachedir'], [subject], master_config['prefix'], master_config['dbfile'])
    # print database.getAllSessions()
    database.open_connection()

    sessions = database.getSessionsFromSubject(subject)
    print "These are the sessions: ", sessions
    # TODO: atlas input csv read
    atlasNode = MakeAtlasNode(master_config['atlascache'], 'BAtlas')
    # atlasNode = GetAtlasNode(master_config['previouscache'], 'BAtlas')
    from singleSession import create_singleSession as create_wkfl

    for session in sessions:  # TODO (future): Replace with iterable inputSpec node and add Function node for getAllFiles()
        project = database.getProjFromSession(session)
        pname = "{0}_singleSession".format(session)  # Long node names make graphs a pain to read/print
        # pname = GenerateWFName(project, subject, session, 'singleSession')
        print "Building session pipeline for {0}".format(session)
        inputsSpec[session] = pe.Node(name='inputspec_{0}'.format(session),
                                      interface=IdentityInterface(fields=['T1s', 'T2s', 'PDs', 'FLs', 'OTs']))
        inputsSpec[session].inputs.T1s = database.getFilenamesByScantype(session, ['T1-15', 'T1-30'])
        inputsSpec[session].inputs.T2s = database.getFilenamesByScantype(session, ['T2-15', 'T2-30'])
        inputsSpec[session].inputs.PDs = database.getFilenamesByScantype(session, ['PD-15', 'PD-30'])
        inputsSpec[session].inputs.FLs = database.getFilenamesByScantype(session, ['FL-15', 'FL-30'])
        inputsSpec[session].inputs.OTs = database.getFilenamesByScantype(session, ['OTHER-15', 'OTHER-30'])

        sessionWorkflow[session] = create_wkfl(project, subject, session, master_config,
                                               interpMode='Linear', pipeline_name=pname)

        subjectWorkflow.connect([(inputsSpec[session], sessionWorkflow[session], [('T1s', 'inputspec.T1s'),
                                                                                  ('T2s', 'inputspec.T2s'),
                                                                                  ('PDs', 'inputspec.PDs'),
                                                                                  ('FLs', 'inputspec.FLs'),
                                                                                  ('OTs', 'inputspec.OTHERs'),
                                                                                  ]),
                                 (atlasNode, sessionWorkflow[session], [('template_landmarks_50Lmks_fcsv',
                                                                         'inputspec.atlasLandmarkFilename'),
                                                                        ('template_weights_50Lmks_wts',
                                                                         'inputspec.atlasWeightFilename'),
                                                                        ('LLSModel_50Lmks_hdf5', 'inputspec.LLSModel'),
                                                                        ('T1_50Lmks_mdl', 'inputspec.inputTemplateModel')]),
                                ])
        if 'segmentation' in master_config['components']:
            from WorkupT1T2BRAINSCut import GenerateWFName
            try:
                bCutInputName = ".".join(['segmentation', GenerateWFName(project, subject, session, 'Segmentation'), 'inputspec'])
            except:
                print(project, subject, session)
                raise
            subjectWorkflow.connect([(atlasNode, sessionWorkflow[session],
                                      [('hncma-atlas', 'segmentation.inputspec.hncma-atlas'),
                                       ('template_t1', 'segmentation.inputspec.template_t1'),
                                       ('template_t1', bCutInputName + '.template_t1'),
                                       ('rho', bCutInputName + '.rho'),
                                       ('phi', bCutInputName + '.phi'),
                                       ('theta', bCutInputName + '.theta'),
                                       ('l_caudate_ProbabilityMap', bCutInputName + '.l_caudate_ProbabilityMap'),
                                       ('r_caudate_ProbabilityMap', bCutInputName + '.r_caudate_ProbabilityMap'),
                                       ('l_hippocampus_ProbabilityMap', bCutInputName + '.l_hippocampus_ProbabilityMap'),
                                       ('r_hippocampus_ProbabilityMap', bCutInputName + '.r_hippocampus_ProbabilityMap'),
                                       ('l_putamen_ProbabilityMap', bCutInputName + '.l_putamen_ProbabilityMap'),
                                       ('r_putamen_ProbabilityMap', bCutInputName + '.r_putamen_ProbabilityMap'),
                                       ('l_thalamus_ProbabilityMap', bCutInputName + '.l_thalamus_ProbabilityMap'),
                                       ('r_thalamus_ProbabilityMap', bCutInputName + '.r_thalamus_ProbabilityMap'),
                                       ('l_accumben_ProbabilityMap', bCutInputName + '.l_accumben_ProbabilityMap'),
                                       ('r_accumben_ProbabilityMap', bCutInputName + '.r_accumben_ProbabilityMap'),
                                       ('l_globus_ProbabilityMap', bCutInputName + '.l_globus_ProbabilityMap'),
                                       ('r_globus_ProbabilityMap', bCutInputName + '.r_globus_ProbabilityMap'),
                                       ('trainModelFile_txtD0060NT0060_gz',
                                        bCutInputName + '.trainModelFile_txtD0060NT0060_gz')])])
        if True:  # FIXME: current_phase == 'baseline':
            subjectWorkflow.connect([(atlasNode, sessionWorkflow[session], [('template_t1', 'inputspec.template_t1'),
                                                                            ('ExtendedAtlasDefinition_xml',
                                                                             'inputspec.atlasDefinition')]),
                                 ])
        else:
            template_DG = pe.Node(interface=nio.DataGrabber(infields=['subject'],
                                  outfields=['template_t1', 'outAtlasFullPath']),
                                  name='Template_DG')
            template_DG.inputs.base_directory = master_config['previousresult']
            template_DG.inputs.subject = subject
            template_DG.inputs.template = 'SUBJECT_TEMPLATES/%s/AVG_%s.nii.gz'
            template_DG.inputs.template_args['template_t1'] = [['subject', 'T1']]
            template_DG.inputs.field_template = {'outAtlasFullPath': 'Atlas/definitions/AtlasDefinition_%s.xml'}
            template_DG.inputs.template_args['outAtlasFullPath'] = [['subject']]
            template_DG.inputs.sort_filelist = True
            template_DG.inputs.raise_on_empty = True

            subjectWorkflow.connect([(template_DG, sessionWorkflow[session],
                                      [('outAtlasFullPath', 'inputspec.atlasDefinition'),
                                       ('template_t1', 'inputspec.template_t1')]),
                                     ])
        # HACK: only run first subject
        break
        # END HACK
        if not True:
            return print_workflow(subjectWorkflow,
                                  plugin=master_config['execution']['plugin'], dotfilename='subjectWorkflow') #, graph2use='flat')
    try:
        return subjectWorkflow.run(plugin='SGEGraph', plugin_args=master_config['plugin_args'])
    except:
        return 1
Example #17
def run_workflow(args, run=True):
    """Connect and execute the QAP Nipype workflow for one bundle of data.

    - This function will update the resource pool with what is found in the
      output directory (if it already exists). If the final expected output
      of the pipeline is already found, the pipeline will not run and it
      will move onto the next bundle. If the final expected output is not
      present, the pipeline begins to build itself backwards.

    :type args: tuple
    :param args: A 7-element tuple of information comprising of the bundle's
                 resource pool, a list of participant info, the configuration
                 options, the pipeline ID run name and miscellaneous run args.
    :rtype: dictionary
    :return: A dictionary with information about the workflow run, its status,
             and results.
    """

    import os
    import os.path as op

    import nipype.interfaces.io as nio
    import nipype.pipeline.engine as pe
    import nipype.interfaces.utility as niu

    import qap
    from qap_utils import read_json

    import glob

    import time
    from time import strftime
    from nipype import config as nyconfig
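    # NOTE: `logging` (nipype's logging manager) and `logger` are used later in
    # this function but are not defined in the snippet; they are assumed to be
    # module-level objects in the original source file.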

    # unpack args
    resource_pool_dict, sub_info_list, config, run_name, runargs, \
        bundle_idx, num_bundles = args

    # Read and apply general settings in config
    keep_outputs = config.get('write_all_outputs', False)

    # take date+time stamp for run identification purposes
    pipeline_start_stamp = strftime("%Y-%m-%d_%H:%M:%S")
    pipeline_start_time = time.time()

    if "workflow_log_dir" not in config.keys():
        config["workflow_log_dir"] = config["output_directory"]

    bundle_log_dir = op.join(config["workflow_log_dir"],
                             '_'.join(["bundle", str(bundle_idx)]))

    try:
        os.makedirs(bundle_log_dir)
    except:
        if not op.isdir(bundle_log_dir):
            err = "[!] Bundle log directory unable to be created.\n" \
                    "Path: %s\n\n" % bundle_log_dir
            raise Exception(err)
        else:
            pass

    # set up logging
    nyconfig.update_config(
        {'logging': {
            'log_directory': bundle_log_dir,
            'log_to_file': True
        }})
    logging.update_logging(nyconfig)

    logger.info("QAP version %s" % qap.__version__)
    logger.info("Pipeline start time: %s" % pipeline_start_stamp)

    workflow = pe.Workflow(name=run_name)
    workflow.base_dir = op.join(config["working_directory"])

    # set up crash directory
    workflow.config['execution'] = \
        {'crashdump_dir': config["output_directory"]}

    # create the one node all participants will start from
    starter_node = pe.Node(niu.Function(input_names=['starter'],
                                        output_names=['starter'],
                                        function=starter_node_func),
                           name='starter_node')

    # set a dummy variable
    starter_node.inputs.starter = ""

    new_outputs = 0

    # iterate over each subject in the bundle
    logger.info("Starting bundle %s out of %s.." %
                (str(bundle_idx), str(num_bundles)))
    # results dict
    rt = {'status': 'Started', 'bundle_log_dir': bundle_log_dir}

    for sub_info in sub_info_list:

        resource_pool = resource_pool_dict[sub_info]

        # in case we're dealing with string entries in the data dict
        try:
            resource_pool.keys()
        except AttributeError:
            continue

        # resource pool check
        invalid_paths = []

        for resource in resource_pool.keys():
            try:
                if not op.isfile(
                        resource_pool[resource]) and resource != "site_name":
                    invalid_paths.append((resource, resource_pool[resource]))
            except:
                err = "\n\n[!]"
                raise Exception(err)

        if len(invalid_paths) > 0:
            err = "\n\n[!] The paths provided in the subject list to the " \
                  "following resources are not valid:\n"

            for path_tuple in invalid_paths:
                err = "%s%s: %s\n" % (err, path_tuple[0], path_tuple[1])

            err = "%s\n\n" % err
            raise Exception(err)

        # process subject info
        sub_id = str(sub_info[0])
        # for nipype
        if "-" in sub_id:
            sub_id = sub_id.replace("-", "_")
        if "." in sub_id:
            sub_id = sub_id.replace(".", "_")

        if sub_info[1]:
            session_id = str(sub_info[1])
            # for nipype
            if "-" in session_id:
                session_id = session_id.replace("-", "_")
            if "." in session_id:
                session_id = session_id.replace(".", "_")
        else:
            session_id = "session_0"

        if sub_info[2]:
            scan_id = str(sub_info[2])
            # for nipype
            if "-" in scan_id:
                scan_id = scan_id.replace("-", "_")
            if "." in scan_id:
                scan_id = scan_id.replace(".", "_")
        else:
            scan_id = "scan_0"

        name = "_".join(["", sub_id, session_id, scan_id])

        rt[name] = {
            'id': sub_id,
            'session': session_id,
            'scan': scan_id,
            'resource_pool': str(resource_pool)
        }

        logger.info("Participant info: %s" % name)

        # set output directory
        output_dir = op.join(config["output_directory"], run_name, sub_id,
                             session_id, scan_id)

        try:
            os.makedirs(output_dir)
        except:
            if not op.isdir(output_dir):
                err = "[!] Output directory unable to be created.\n" \
                      "Path: %s\n\n" % output_dir
                raise Exception(err)
            else:
                pass

        # for QAP spreadsheet generation only
        config.update({
            "subject_id": sub_id,
            "session_id": session_id,
            "scan_id": scan_id,
            "run_name": run_name
        })

        if "site_name" in resource_pool:
            config.update({"site_name": resource_pool["site_name"]})

        logger.info("Configuration settings:\n%s" % str(config))

        qap_types = [
            "anatomical_spatial", "functional_spatial", "functional_temporal"
        ]

        # update that resource pool with what's already in the output
        # directory
        for resource in os.listdir(output_dir):
            if (op.exists(op.join(output_dir, resource))
                    and resource not in resource_pool.keys()):
                try:
                    resource_pool[resource] = \
                        glob.glob(op.join(output_dir, resource, "*"))[0]
                except IndexError:
                    if ".json" in resource:
                        # load relevant json info into resource pool
                        json_file = op.join(output_dir, resource)
                        json_dict = read_json(json_file)
                        sub_json_dict = json_dict["%s %s %s" %
                                                  (sub_id, session_id,
                                                   scan_id)]

                        if "anatomical_header_info" in sub_json_dict.keys():
                            resource_pool["anatomical_header_info"] = \
                                sub_json_dict["anatomical_header_info"]

                        if "functional_header_info" in sub_json_dict.keys():
                            resource_pool["functional_header_info"] = \
                                sub_json_dict["functional_header_info"]

                        for qap_type in qap_types:
                            if qap_type in sub_json_dict.keys():
                                resource_pool["_".join(["qap",qap_type])] = \
                                    sub_json_dict[qap_type]
                except:
                    # a stray file in the sub-sess-scan output directory
                    pass

        # create starter node which links all of the parallel workflows within
        # the bundle together as a Nipype pipeline
        resource_pool["starter"] = (starter_node, 'starter')

        # individual workflow and logger setup
        logger.info("Contents of resource pool for this participant:\n%s" %
                    str(resource_pool))

        # start connecting the pipeline
        qw = None
        for qap_type in qap_types:
            if "_".join(["qap", qap_type]) not in resource_pool.keys():
                if qw is None:
                    from qap import qap_workflows as qw
                wf_builder = \
                    getattr(qw, "_".join(["qap", qap_type, "workflow"]))
                workflow, resource_pool = wf_builder(workflow, resource_pool,
                                                     config, name)

        if ("anatomical_scan" in resource_pool.keys()) and \
            ("anatomical_header_info" not in resource_pool.keys()):
            if qw is None:
                from qap import qap_workflows as qw
            workflow, resource_pool = \
                qw.qap_gather_header_info(workflow, resource_pool, config,
                    name, "anatomical")

        if ("functional_scan" in resource_pool.keys()) and \
            ("functional_header_info" not in resource_pool.keys()):
            if qw is None:
                from qap import qap_workflows as qw
            workflow, resource_pool = \
                qw.qap_gather_header_info(workflow, resource_pool, config,
                    name, "functional")

        # set up the datasinks
        out_list = []
        for output in resource_pool.keys():
            for qap_type in qap_types:
                if qap_type in output:
                    out_list.append("_".join(["qap", qap_type]))

        # write_all_outputs (writes everything to the output directory, not
        # just the final JSON files)
        if keep_outputs:
            out_list = resource_pool.keys()
        logger.info("Outputs we're keeping: %s" % str(out_list))
        logger.info('Resource pool keys after workflow connection: '
                    '{}'.format(str(resource_pool.keys())))

        # Save reports to out_dir if necessary
        if config.get('write_report', False):

            if ("qap_mosaic" in resource_pool.keys()) and  \
                    ("qap_mosaic" not in out_list):
                out_list += ['qap_mosaic']

            # The functional temporal also has an FD plot
            if 'qap_functional_temporal' in resource_pool.keys():
                if ("qap_fd" in resource_pool.keys()) and \
                        ("qap_fd" not in out_list):
                    out_list += ['qap_fd']

        for output in out_list:
            # we use a check for len()==2 here to select those items in the
            # resource pool which are tuples of (node, node_output), instead
            # of the items which are straight paths to files

            # resource pool items which are in the tuple format are the
            # outputs that have been created in this workflow because they
            # were not present in the subject list YML (the starting resource
            # pool) and had to be generated
            if (len(resource_pool[output]) == 2) and (output != "starter"):
                ds = pe.Node(nio.DataSink(),
                             name='datasink_%s%s' % (output, name))
                ds.inputs.base_directory = output_dir
                node, out_file = resource_pool[output]
                workflow.connect(node, out_file, ds, output)
                new_outputs += 1
            elif ".json" in resource_pool[output]:
                new_outputs += 1

    logger.info("New outputs: %s" % str(new_outputs))

    # run the pipeline (if there is anything to do)
    if new_outputs > 0:
        if config.get('write_graph', False):
            workflow.write_graph(dotfilename=op.join(
                config["output_directory"], "".join([run_name, ".dot"])),
                                 simple_form=False)
            workflow.write_graph(graph2use="orig",
                                 dotfilename=op.join(
                                     config["output_directory"],
                                     "".join([run_name, ".dot"])),
                                 simple_form=False)
            workflow.write_graph(graph2use="hierarchical",
                                 dotfilename=op.join(
                                     config["output_directory"],
                                     "".join([run_name, ".dot"])),
                                 simple_form=False)
        if run:
            try:
                logger.info("Running with plugin %s" % runargs["plugin"])
                logger.info("Using plugin args %s" % runargs["plugin_args"])
                workflow.run(plugin=runargs["plugin"],
                             plugin_args=runargs["plugin_args"])
                rt['status'] = 'finished'
                logger.info("Workflow run finished for bundle %s." %
                            str(bundle_idx))
            except Exception as e:  # TODO We should be more specific here ...
                errmsg = e
                rt.update({'status': 'failed'})
                logger.info("Workflow run failed for bundle %s." %
                            str(bundle_idx))
                # ... however this is run inside a pool.map: do not raise
                # Exception
        else:
            return workflow

    else:
        rt['status'] = 'cached'
        logger.info("\nEverything is already done for bundle %s." %
                    str(bundle_idx))

    # Remove working directory when done
    if not keep_outputs:
        try:
            work_dir = op.join(workflow.base_dir, scan_id)

            if op.exists(work_dir):
                import shutil
                shutil.rmtree(work_dir)
        except Exception:
            logger.warning("Couldn't remove the working directory!")

    if rt["status"] == "failed":
        logger.error(errmsg)
    else:
        pipeline_end_stamp = strftime("%Y-%m-%d_%H:%M:%S")
        pipeline_end_time = time.time()
        logger.info("Elapsed time (minutes) since last start: %s" %
                    ((pipeline_end_time - pipeline_start_time) / 60))
        logger.info("Pipeline end time: %s" % pipeline_end_stamp)

    return rt
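The comments above note that this function runs inside a pool.map call, so failures are reported through the returned `rt` dictionary rather than raised. A minimal sketch of such a driver is shown below, assuming each bundle is a resource-pool dictionary keyed by (subject, session, scan) tuples; the names `bundles`, `config_dict` and `runargs` are illustrative and not part of the example above.

def run_bundles(bundles, config_dict, run_name, runargs, processes=2):
    """Dispatch one run_workflow call per bundle via multiprocessing.

    Each args tuple matches the 7 elements unpacked by run_workflow:
    (resource_pool_dict, sub_info_list, config, run_name, runargs,
    bundle_idx, num_bundles).
    """
    from multiprocessing import Pool

    args_list = [
        (bundle, list(bundle.keys()), config_dict, run_name, runargs,
         idx, len(bundles))
        for idx, bundle in enumerate(bundles, start=1)
    ]
    pool = Pool(processes=processes)
    try:
        # one results dictionary per bundle comes back from run_workflow
        results = pool.map(run_workflow, args_list)
    finally:
        pool.close()
        pool.join()
    return results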
Exemple #18
0
def segmentation(projectid,
                 subjectid,
                 sessionid,
                 master_config,
                 BAtlas,
                 onlyT1=True,
                 pipeline_name=''):
    import os.path
    import nipype.pipeline.engine as pe
    import nipype.interfaces.io as nio
    from nipype.interfaces import ants
    from nipype.interfaces.utility import IdentityInterface, Function, Merge
    # Set universal pipeline options
    from nipype import config
    config.update_config(master_config)
    assert config.get('execution',
                      'plugin') == master_config['execution']['plugin']

    from PipeLineFunctionHelpers import ClipT1ImageWithBrainMask
    from WorkupT1T2BRAINSCut import CreateBRAINSCutWorkflow
    from utilities.distributed import modify_qsub_args
    from SEMTools import BRAINSSnapShotWriter

    baw200 = pe.Workflow(name=pipeline_name)

    # HACK: print for debugging
    for key, item in master_config.items():
        print("-" * 30)
        print(key, ":", item)
    print("-" * 30)
    # END HACK

    inputsSpec = pe.Node(interface=IdentityInterface(fields=[
        't1_average', 't2_average', 'LMIatlasToSubject_tx', 'inputLabels',
        'inputHeadLabels', 'posteriorImages',
        'TissueClassifyatlasToSubjectInverseTransform', 'UpdatedPosteriorsList'
    ]),
                         run_without_submitting=True,
                         name='inputspec')

    # outputsSpec = pe.Node(interface=IdentityInterface(fields=[...]),
    #                       run_without_submitting=True, name='outputspec')

    currentClipT1ImageWithBrainMaskName = 'ClipT1ImageWithBrainMask_' + str(
        subjectid) + "_" + str(sessionid)
    ClipT1ImageWithBrainMaskNode = pe.Node(
        interface=Function(
            function=ClipT1ImageWithBrainMask,
            input_names=['t1_image', 'brain_labels', 'clipped_file_name'],
            output_names=['clipped_file']),
        name=currentClipT1ImageWithBrainMaskName)
    ClipT1ImageWithBrainMaskNode.inputs.clipped_file_name = 'clipped_from_BABC_labels_t1.nii.gz'

    baw200.connect([(inputsSpec, ClipT1ImageWithBrainMaskNode,
                     [('t1_average', 't1_image'),
                      ('inputLabels', 'brain_labels')])])

    currentAtlasToSubjectantsRegistration = 'AtlasToSubjectANTsRegistration_' + str(
        subjectid) + "_" + str(sessionid)
    AtlasToSubjectantsRegistration = pe.Node(
        interface=ants.Registration(),
        name=currentAtlasToSubjectantsRegistration)

    AtlasToSubjectantsRegistration.inputs.dimension = 3
    AtlasToSubjectantsRegistration.inputs.transforms = ["Affine", "SyN"]
    AtlasToSubjectantsRegistration.inputs.transform_parameters = [
        [0.1], [0.15, 3.0, 0.0]]
    AtlasToSubjectantsRegistration.inputs.metric = ['Mattes', 'CC']
    AtlasToSubjectantsRegistration.inputs.sampling_strategy = ['Regular', None]
    AtlasToSubjectantsRegistration.inputs.sampling_percentage = [1.0, 1.0]
    AtlasToSubjectantsRegistration.inputs.metric_weight = [1.0, 1.0]
    AtlasToSubjectantsRegistration.inputs.radius_or_number_of_bins = [32, 4]
    AtlasToSubjectantsRegistration.inputs.number_of_iterations = [
        [1000, 1000, 1000], [10000, 500, 500, 200]]
    AtlasToSubjectantsRegistration.inputs.convergence_threshold = [5e-7, 5e-7]
    AtlasToSubjectantsRegistration.inputs.convergence_window_size = [25, 25]
    AtlasToSubjectantsRegistration.inputs.use_histogram_matching = [True, True]
    AtlasToSubjectantsRegistration.inputs.shrink_factors = [[4, 2, 1],
                                                            [5, 4, 2, 1]]
    AtlasToSubjectantsRegistration.inputs.smoothing_sigmas = [[4, 2, 0],
                                                              [5, 4, 2, 0]]
    AtlasToSubjectantsRegistration.inputs.sigma_units = ["vox", "vox"]
    AtlasToSubjectantsRegistration.inputs.use_estimate_learning_rate_once = [
        False, False]
    AtlasToSubjectantsRegistration.inputs.write_composite_transform = True
    AtlasToSubjectantsRegistration.inputs.collapse_output_transforms = True
    AtlasToSubjectantsRegistration.inputs.output_transform_prefix = 'AtlasToSubject_'
    AtlasToSubjectantsRegistration.inputs.winsorize_lower_quantile = 0.025
    AtlasToSubjectantsRegistration.inputs.winsorize_upper_quantile = 0.975
    AtlasToSubjectantsRegistration.inputs.collapse_linear_transforms_to_fixed_image_header = False
    AtlasToSubjectantsRegistration.inputs.output_warped_image = 'atlas2subject.nii.gz'
    AtlasToSubjectantsRegistration.inputs.output_inverse_warped_image = 'subject2atlas.nii.gz'

    baw200.connect([(inputsSpec, AtlasToSubjectantsRegistration,
                     [('LMIatlasToSubject_tx', 'initial_moving_transform'),
                      ('t1_average', 'fixed_image')]),
                    (BAtlas, AtlasToSubjectantsRegistration,
                     [('template_t1', 'moving_image')])])

    myLocalSegWF = CreateBRAINSCutWorkflow(projectid, subjectid, sessionid,
                                           'Segmentation',
                                           master_config['queue'],
                                           master_config['long_q'], BAtlas,
                                           onlyT1)

    MergeStage2AverageImagesName = "99_mergeAvergeStage2Images_" + str(
        sessionid)
    MergeStage2AverageImages = pe.Node(interface=Merge(2),
                                       run_without_submitting=True,
                                       name=MergeStage2AverageImagesName)

    baw200.connect([(inputsSpec, myLocalSegWF, [
        ('t1_average', 'inputspec.T1Volume'),
        ('posteriorImages', "inputspec.posteriorDictionary"),
        ('inputLabels', 'inputspec.RegistrationROI'),
    ]), (inputsSpec, MergeStage2AverageImages, [('t1_average', 'in1')]),
                    (AtlasToSubjectantsRegistration, myLocalSegWF,
                     [('composite_transform',
                       'inputspec.atlasToSubjectTransform')])])

    if not onlyT1:
        baw200.connect([
            (inputsSpec, myLocalSegWF, [('t2_average', 'inputspec.T2Volume')]),
            (inputsSpec, MergeStage2AverageImages, [('t2_average', 'in2')])
        ])
        file_count = 15  # Count of files to merge into MergeSessionSubjectToAtlas
    else:
        file_count = 14  # Count of files to merge into MergeSessionSubjectToAtlas

    ## NOTE: Element 0 of AccumulatePriorsList is the accumulated GM tissue
    # baw200.connect([(AccumulateLikeTissuePosteriorsNode, myLocalSegWF,
    #               [(('AccumulatePriorsList', getListIndex, 0), "inputspec.TotalGM")]),
    #               ])

    ### Now define where the final organized outputs should go.
    DataSink = pe.Node(nio.DataSink(),
                       name="CleanedDenoisedSegmentation_DS_" +
                       str(subjectid) + "_" + str(sessionid))
    DataSink.overwrite = master_config['ds_overwrite']
    DataSink.inputs.base_directory = master_config['resultdir']
    # DataSink.inputs.regexp_substitutions = GenerateOutputPattern(projectid, subjectid, sessionid,'BRAINSCut')
    # DataSink.inputs.regexp_substitutions = GenerateBRAINSCutImagesOutputPattern(projectid, subjectid, sessionid)
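    # Substitutions are applied to each destination path in the order listed;
    # the filename pairs below normalize the BRAINSCut outputs so that each
    # file ends in '_seg.nii.gz' exactly once.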
    DataSink.inputs.substitutions = [
        ('Segmentations',
         os.path.join(projectid, subjectid, sessionid,
                      'CleanedDenoisedRFSegmentations')),
        ('subjectANNLabel_', ''), ('ANNContinuousPrediction', ''),
        ('subject.nii.gz', '.nii.gz'), ('_seg.nii.gz', '_seg.nii.gz'),
        ('.nii.gz', '_seg.nii.gz'), ('_seg_seg', '_seg')
    ]

    baw200.connect([
        (myLocalSegWF, DataSink,
         [('outputspec.outputBinaryLeftCaudate', 'Segmentations.@LeftCaudate'),
          ('outputspec.outputBinaryRightCaudate',
           'Segmentations.@RightCaudate'),
          ('outputspec.outputBinaryLeftHippocampus',
           'Segmentations.@LeftHippocampus'),
          ('outputspec.outputBinaryRightHippocampus',
           'Segmentations.@RightHippocampus'),
          ('outputspec.outputBinaryLeftPutamen', 'Segmentations.@LeftPutamen'),
          ('outputspec.outputBinaryRightPutamen',
           'Segmentations.@RightPutamen'),
          ('outputspec.outputBinaryLeftThalamus',
           'Segmentations.@LeftThalamus'),
          ('outputspec.outputBinaryRightThalamus',
           'Segmentations.@RightThalamus'),
          ('outputspec.outputBinaryLeftAccumben',
           'Segmentations.@LeftAccumben'),
          ('outputspec.outputBinaryRightAccumben',
           'Segmentations.@RightAccumben'),
          ('outputspec.outputBinaryLeftGlobus', 'Segmentations.@LeftGlobus'),
          ('outputspec.outputBinaryRightGlobus', 'Segmentations.@RightGlobus'),
          ('outputspec.outputLabelImageName', 'Segmentations.@LabelImageName'),
          ('outputspec.outputCSVFileName', 'Segmentations.@CSVFileName')]),
        # (myLocalSegWF, DataSink, [('outputspec.cleaned_labels', 'Segmentations.@cleaned_labels')])
    ])

    MergeStage2BinaryVolumesName = "99_MergeStage2BinaryVolumes_" + str(
        sessionid)
    MergeStage2BinaryVolumes = pe.Node(interface=Merge(12),
                                       run_without_submitting=True,
                                       name=MergeStage2BinaryVolumesName)

    baw200.connect([(myLocalSegWF, MergeStage2BinaryVolumes,
                     [('outputspec.outputBinaryLeftAccumben', 'in1'),
                      ('outputspec.outputBinaryLeftCaudate', 'in2'),
                      ('outputspec.outputBinaryLeftPutamen', 'in3'),
                      ('outputspec.outputBinaryLeftGlobus', 'in4'),
                      ('outputspec.outputBinaryLeftThalamus', 'in5'),
                      ('outputspec.outputBinaryLeftHippocampus', 'in6'),
                      ('outputspec.outputBinaryRightAccumben', 'in7'),
                      ('outputspec.outputBinaryRightCaudate', 'in8'),
                      ('outputspec.outputBinaryRightPutamen', 'in9'),
                      ('outputspec.outputBinaryRightGlobus', 'in10'),
                      ('outputspec.outputBinaryRightThalamus', 'in11'),
                      ('outputspec.outputBinaryRightHippocampus', 'in12')])])

    ## SnapShotWriter for Segmented result checking:
    SnapShotWriterNodeName = "SnapShotWriter_" + str(sessionid)
    SnapShotWriter = pe.Node(interface=BRAINSSnapShotWriter(),
                             name=SnapShotWriterNodeName)

    SnapShotWriter.inputs.outputFilename = 'snapShot' + str(
        sessionid) + '.png'  # output specification
    SnapShotWriter.inputs.inputPlaneDirection = [2, 1, 1, 1, 1, 0, 0]
    SnapShotWriter.inputs.inputSliceToExtractInPhysicalPoint = [
        -3, -7, -3, 5, 7, 22, -22
    ]

    baw200.connect([(MergeStage2AverageImages, SnapShotWriter,
                     [('out', 'inputVolumes')]),
                    (MergeStage2BinaryVolumes, SnapShotWriter,
                     [('out', 'inputBinaryVolumes')]),
                    (SnapShotWriter, DataSink,
                     [('outputFilename', 'Segmentations.@outputSnapShot')])])

    currentAntsLabelWarpToSubject = 'AntsLabelWarpToSubject' + str(
        subjectid) + "_" + str(sessionid)
    AntsLabelWarpToSubject = pe.Node(interface=ants.ApplyTransforms(),
                                     name=currentAntsLabelWarpToSubject)

    AntsLabelWarpToSubject.inputs.dimension = 3
    AntsLabelWarpToSubject.inputs.output_image = 'warped_hncma_atlas_seg.nii.gz'
    AntsLabelWarpToSubject.inputs.interpolation = "MultiLabel"

    baw200.connect([(AtlasToSubjectantsRegistration, AntsLabelWarpToSubject,
                     [('composite_transform', 'transforms')]),
                    (inputsSpec, AntsLabelWarpToSubject,
                     [('t1_average', 'reference_image')]),
                    (BAtlas, AntsLabelWarpToSubject, [('hncma-atlas',
                                                       'input_image')])])
    #####
    ### Now define where the final organized outputs should go.
    AntsLabelWarpedToSubject_DSName = "AntsLabelWarpedToSubject_DS_" + str(
        sessionid)
    AntsLabelWarpedToSubject_DS = pe.Node(nio.DataSink(),
                                          name=AntsLabelWarpedToSubject_DSName)
    AntsLabelWarpedToSubject_DS.overwrite = master_config['ds_overwrite']
    AntsLabelWarpedToSubject_DS.inputs.base_directory = master_config[
        'resultdir']
    AntsLabelWarpedToSubject_DS.inputs.substitutions = [
        ('AntsLabelWarpedToSubject',
         os.path.join(projectid, subjectid, sessionid,
                      'AntsLabelWarpedToSubject'))
    ]

    baw200.connect([(AntsLabelWarpToSubject, AntsLabelWarpedToSubject_DS,
                     [('output_image', 'AntsLabelWarpedToSubject')])])

    MergeSessionSubjectToAtlasName = "99_MergeSessionSubjectToAtlas_" + str(
        sessionid)
    MergeSessionSubjectToAtlas = pe.Node(interface=Merge(file_count),
                                         run_without_submitting=True,
                                         name=MergeSessionSubjectToAtlasName)

    baw200.connect([
        (myLocalSegWF, MergeSessionSubjectToAtlas,
         [('outputspec.outputBinaryLeftAccumben', 'in1'),
          ('outputspec.outputBinaryLeftCaudate', 'in2'),
          ('outputspec.outputBinaryLeftPutamen', 'in3'),
          ('outputspec.outputBinaryLeftGlobus', 'in4'),
          ('outputspec.outputBinaryLeftThalamus', 'in5'),
          ('outputspec.outputBinaryLeftHippocampus', 'in6'),
          ('outputspec.outputBinaryRightAccumben', 'in7'),
          ('outputspec.outputBinaryRightCaudate', 'in8'),
          ('outputspec.outputBinaryRightPutamen', 'in9'),
          ('outputspec.outputBinaryRightGlobus', 'in10'),
          ('outputspec.outputBinaryRightThalamus', 'in11'),
          ('outputspec.outputBinaryRightHippocampus', 'in12')]),
        # (FixWMPartitioningNode, MergeSessionSubjectToAtlas, [('UpdatedPosteriorsList', 'in13')]),
        (inputsSpec, MergeSessionSubjectToAtlas, [('UpdatedPosteriorsList',
                                                   'in13')]),
        (inputsSpec, MergeSessionSubjectToAtlas, [('t1_average', 'in14')])
    ])

    if not onlyT1:
        assert file_count == 15
        baw200.connect([(inputsSpec, MergeSessionSubjectToAtlas,
                         [('t2_average', 'in15')])])

    LinearSubjectToAtlasANTsApplyTransformsName = 'LinearSubjectToAtlasANTsApplyTransforms_' + str(
        sessionid)
    LinearSubjectToAtlasANTsApplyTransforms = pe.MapNode(
        interface=ants.ApplyTransforms(),
        iterfield=['input_image'],
        name=LinearSubjectToAtlasANTsApplyTransformsName)
    LinearSubjectToAtlasANTsApplyTransforms.inputs.interpolation = 'Linear'

    baw200.connect([
        (AtlasToSubjectantsRegistration,
         LinearSubjectToAtlasANTsApplyTransforms, [
             ('inverse_composite_transform', 'transforms')
         ]),
        (BAtlas, LinearSubjectToAtlasANTsApplyTransforms,
         [('template_t1', 'reference_image')]),
        (MergeSessionSubjectToAtlas, LinearSubjectToAtlasANTsApplyTransforms,
         [('out', 'input_image')])
    ])

    MergeMultiLabelSessionSubjectToAtlasName = "99_MergeMultiLabelSessionSubjectToAtlas_" + str(
        sessionid)
    MergeMultiLabelSessionSubjectToAtlas = pe.Node(
        interface=Merge(2),
        run_without_submitting=True,
        name=MergeMultiLabelSessionSubjectToAtlasName)

    baw200.connect([(inputsSpec, MergeMultiLabelSessionSubjectToAtlas,
                     [('inputLabels', 'in1'), ('inputHeadLabels', 'in2')])])

    ### This is taking this sessions RF label map back into NAC atlas space.
    #{
    MultiLabelSubjectToAtlasANTsApplyTransformsName = 'MultiLabelSubjectToAtlasANTsApplyTransforms_' + str(
        sessionid)
    MultiLabelSubjectToAtlasANTsApplyTransforms = pe.MapNode(
        interface=ants.ApplyTransforms(),
        iterfield=['input_image'],
        name=MultiLabelSubjectToAtlasANTsApplyTransformsName)
    MultiLabelSubjectToAtlasANTsApplyTransforms.inputs.interpolation = 'MultiLabel'

    baw200.connect([
        (AtlasToSubjectantsRegistration,
         MultiLabelSubjectToAtlasANTsApplyTransforms, [
             ('inverse_composite_transform', 'transforms')
         ]),
        (BAtlas, MultiLabelSubjectToAtlasANTsApplyTransforms,
         [('template_t1', 'reference_image')]),
        (MergeMultiLabelSessionSubjectToAtlas,
         MultiLabelSubjectToAtlasANTsApplyTransforms, [('out', 'input_image')])
    ])
    #}
    ### Now we must take the sessions to THIS SUBJECTS personalized atlas.
    #{
    #}

    ### Now define where the final organized outputs should go.
    Subj2Atlas_DSName = "SubjectToAtlas_DS_" + str(sessionid)
    Subj2Atlas_DS = pe.Node(nio.DataSink(), name=Subj2Atlas_DSName)
    Subj2Atlas_DS.overwrite = master_config['ds_overwrite']
    Subj2Atlas_DS.inputs.base_directory = master_config['resultdir']
    Subj2Atlas_DS.inputs.regexp_substitutions = [
        (r'_LinearSubjectToAtlasANTsApplyTransforms_[^/]*',
         r'' + sessionid + '/')
    ]

    baw200.connect([(LinearSubjectToAtlasANTsApplyTransforms, Subj2Atlas_DS, [
        ('output_image', 'SubjectToAtlasWarped.@linear_output_images')
    ])])

    Subj2AtlasTransforms_DSName = "SubjectToAtlasTransforms_DS_" + str(
        sessionid)
    Subj2AtlasTransforms_DS = pe.Node(nio.DataSink(),
                                      name=Subj2AtlasTransforms_DSName)
    Subj2AtlasTransforms_DS.overwrite = master_config['ds_overwrite']
    Subj2AtlasTransforms_DS.inputs.base_directory = master_config['resultdir']
    Subj2AtlasTransforms_DS.inputs.regexp_substitutions = [
        (r'SubjectToAtlasWarped', r'SubjectToAtlasWarped/' + sessionid + '/')
    ]

    baw200.connect([(AtlasToSubjectantsRegistration, Subj2AtlasTransforms_DS, [
        ('composite_transform', 'SubjectToAtlasWarped.@composite_transform'),
        ('inverse_composite_transform',
         'SubjectToAtlasWarped.@inverse_composite_transform')
    ])])
    # baw200.connect([(MultiLabelSubjectToAtlasANTsApplyTransforms, Subj2Atlas_DS, [('output_image', 'SubjectToAtlasWarped.@multilabel_output_images')])])

    # for some nodes, the qsub call needs to be modified on the cluster
    if master_config['execution']['plugin'] == 'SGE':
        AtlasToSubjectantsRegistration.plugin_args = {
            'template': master_config['plugin_args']['template'],
            'overwrite': True,
            'qsub_args': modify_qsub_args(master_config['queue'], '9000M', 4,
                                          hard=False)
        }
        SnapShotWriter.plugin_args = {
            'template': master_config['plugin_args']['template'],
            'overwrite': True,
            'qsub_args': modify_qsub_args(master_config['queue'], '1000M', 1, 1,
                                          hard=False)
        }
        LinearSubjectToAtlasANTsApplyTransforms.plugin_args = {
            'template': master_config['plugin_args']['template'],
            'overwrite': True,
            'qsub_args': modify_qsub_args(master_config['queue'], '1000M', 1,
                                          hard=True)
        }
        MultiLabelSubjectToAtlasANTsApplyTransforms.plugin_args = {
            'template': master_config['plugin_args']['template'],
            'overwrite': True,
            'qsub_args': modify_qsub_args(master_config['queue'], '1000M', 1,
                                          hard=True)
        }

    return baw200
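For context, here is a minimal sketch of how the segmentation workflow above might be instantiated and run, assuming the BAW modules it imports are on the Python path. The master_config keys mirror the ones read inside the function; the paths, IDs and the 'Linear' plugin are placeholder assumptions, and GetAtlasNode comes from the atlasNode module used in the other examples.

from atlasNode import GetAtlasNode

master_config = {
    'execution': {'plugin': 'Linear'},   # assumed local, serial run
    'plugin_args': {},                   # only consulted when plugin == 'SGE'
    'queue': 'all.q',                    # likewise SGE-only
    'long_q': 'all.q',
    'ds_overwrite': True,
    'resultdir': '/tmp/baw_results',     # hypothetical output directory
}
BAtlas = GetAtlasNode('/opt/atlas/cache', 'BAtlas')  # assumed atlas cache path
seg_wf = segmentation('proj01', 'subj01', 'sess01', master_config, BAtlas,
                      onlyT1=True, pipeline_name='Segmentation_subj01_sess01')
seg_wf.base_dir = '/tmp/baw_work'        # hypothetical working directory
seg_wf.run(plugin=master_config['execution']['plugin'])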
Exemple #19
0
def main(args):
    subjects, master_config = args

    import os
    import sys
    import traceback

    # Set universal pipeline options
    from nipype import config
    config.update_config(master_config)
    assert config.get('execution', 'plugin') == master_config['execution']['plugin']

    import nipype.pipeline.engine as pe
    import nipype.interfaces.io as nio
    from nipype.interfaces.utility import IdentityInterface, Function
    import nipype.interfaces.ants as ants

    from template import MergeByExtendListElements, xml_filename
    from PipeLineFunctionHelpers import mapPosteriorList
    from atlasNode import GetAtlasNode, MakeNewAtlasTemplate
    from utilities.misc import GenerateSubjectOutputPattern as outputPattern
    from utilities.distributed import modify_qsub_args

    template = pe.Workflow(name='SubjectAtlas_Template')
    template.base_dir = master_config['logging']['log_directory']

    if 'previouscache' in master_config:
        # Running off previous baseline experiment
        BAtlas = GetAtlasNode(master_config['previouscache'], 'BAtlas')
    else:
        # Running after previous baseline experiment
        BAtlas = GetAtlasNode(os.path.dirname(master_config['atlascache']), 'BAtlas')
    inputspec = pe.Node(interface=IdentityInterface(fields=['subject']), name='inputspec')
    inputspec.iterables = ('subject', subjects)

    baselineDG = pe.Node(nio.DataGrabber(infields=['subject'], outfields=['t1_average', 't2_average', 'pd_average',
                                                                            'fl_average', 'outputLabels', 'posteriorImages']),
                         name='Baseline_DG')
    if 'previousresult' in master_config:
        baselineDG.inputs.base_directory = master_config['previousresult']
    else:
        baselineDG.inputs.base_directory = master_config['resultdir']
    baselineDG.inputs.sort_filelist = True
    baselineDG.inputs.raise_on_empty = False
    baselineDG.inputs.template = '*/%s/*/Baseline/%s.nii.gz'
    baselineDG.inputs.template_args['t1_average'] = [['subject', 't1_average_BRAINSABC']]
    baselineDG.inputs.template_args['t2_average'] = [['subject', 't2_average_BRAINSABC']]
    baselineDG.inputs.template_args['pd_average'] = [['subject', 'pd_average_BRAINSABC']]
    baselineDG.inputs.template_args['fl_average'] = [['subject', 'fl_average_BRAINSABC']]
    baselineDG.inputs.template_args['outputLabels'] = [['subject', 'brain_label_seg']]
    baselineDG.inputs.field_template = {'posteriorImages':'*/%s/*/TissueClassify/POSTERIOR_%s.nii.gz'}
    posterior_files = ['AIR', 'BASAL', 'CRBLGM', 'CRBLWM', 'CSF', 'GLOBUS', 'HIPPOCAMPUS', 'NOTCSF', 'NOTGM', 'NOTVB', 'NOTWM',
                       'SURFGM', 'THALAMUS', 'VB', 'WM']
    baselineDG.inputs.template_args['posteriorImages'] = [['subject', posterior_files]]

    MergeByExtendListElementsNode = pe.Node(Function(function=MergeByExtendListElements,
                                                     input_names=['t1s', 't2s',
                                                                  'pds', 'fls',
                                                                  'labels', 'posteriors'],
                                                     output_names=['ListOfImagesDictionaries', 'registrationImageTypes',
                                                                   'interpolationMapping']),
                                            run_without_submitting=True, name="99_MergeByExtendListElements")
    from PipeLineFunctionHelpers import WrapPosteriorImagesFromDictionaryFunction as wrapfunc
    template.connect([(inputspec, baselineDG, [('subject', 'subject')]),
                      (baselineDG, MergeByExtendListElementsNode, [('t1_average', 't1s'),
                                                                   ('t2_average', 't2s'),
                                                                   ('pd_average', 'pds'),
                                                                   ('fl_average', 'fls'),
                                                                   ('outputLabels', 'labels'),
                                                                   (('posteriorImages', wrapfunc), 'posteriors')])
                    ])

    myInitAvgWF = pe.Node(interface=ants.AverageImages(), name='Atlas_antsSimpleAverage')  # was 'Phase1_antsSimpleAverage'
    myInitAvgWF.inputs.dimension = 3
    myInitAvgWF.inputs.normalize = True
    template.connect(baselineDG, 't1_average', myInitAvgWF, "images")
    ####################################################################################################
    # TEMPLATE_BUILD_RUN_MODE = 'MULTI_IMAGE'
    # if numSessions == 1:
    #     TEMPLATE_BUILD_RUN_MODE = 'SINGLE_IMAGE'
    ####################################################################################################
    from BAWantsRegistrationBuildTemplate import BAWantsRegistrationTemplateBuildSingleIterationWF as registrationWF
    buildTemplateIteration1 = registrationWF('iteration01')
    # buildTemplateIteration2 = buildTemplateIteration1.clone(name='buildTemplateIteration2')
    buildTemplateIteration2 = registrationWF('Iteration02')

    MakeNewAtlasTemplateNode = pe.Node(interface=Function(function=MakeNewAtlasTemplate,
                                                          input_names=['t1_image', 'deformed_list', 'AtlasTemplate', 'outDefinition'],
                                                          output_names=['outAtlasFullPath', 'clean_deformed_list']),
                                       # This is a lot of work, so submit it run_without_submitting=True,
                                       run_without_submitting=True,  # HACK:  THIS NODE REALLY SHOULD RUN ON THE CLUSTER!
                                       name='99_MakeNewAtlasTemplate')

    if master_config['execution']['plugin'] == 'SGE':  # for some nodes, the qsub call needs to be modified on the cluster

        MakeNewAtlasTemplateNode.plugin_args = {'template': master_config['plugin_args']['template'],
                                                'qsub_args': modify_qsub_args(master_config['queue'], '1000M', 1, 1),
                                                'overwrite': True}
        for bt in [buildTemplateIteration1, buildTemplateIteration2]:
            ##################################################
            # *** Hans, is this TODO already addressed? ***  #
            # ---->  # TODO:  Change these parameters  <---- #
            ##################################################
            BeginANTS = bt.get_node("BeginANTS")
            BeginANTS.plugin_args = {'template': master_config['plugin_args']['template'], 'overwrite': True,
                                     'qsub_args': modify_qsub_args(master_config['queue'], '9000M', 4, hard=False)}
            wimtdeformed = bt.get_node("wimtdeformed")
            wimtdeformed.plugin_args = {'template': master_config['plugin_args']['template'], 'overwrite': True,
                                        'qsub_args': modify_qsub_args(master_config['queue'], '2000M', 1, 2)}
            AvgAffineTransform = bt.get_node("AvgAffineTransform")
            AvgAffineTransform.plugin_args = {'template': master_config['plugin_args']['template'], 'overwrite': True,
                                              'qsub_args': modify_qsub_args(master_config['queue'], '2000M', 1)}
            wimtPassivedeformed = bt.get_node("wimtPassivedeformed")
            wimtPassivedeformed.plugin_args = {'template': master_config['plugin_args']['template'], 'overwrite': True,
                                                'qsub_args': modify_qsub_args(master_config['queue'], '2000M', 1, 2)}

    template.connect([(myInitAvgWF, buildTemplateIteration1, [('output_average_image', 'inputspec.fixed_image')]),
                      (MergeByExtendListElementsNode, buildTemplateIteration1, [('ListOfImagesDictionaries', 'inputspec.ListOfImagesDictionaries'),
                                                                                ('registrationImageTypes', 'inputspec.registrationImageTypes'),
                                                                                ('interpolationMapping','inputspec.interpolationMapping')]),
                      (buildTemplateIteration1, buildTemplateIteration2, [('outputspec.template', 'inputspec.fixed_image')]),
                      (MergeByExtendListElementsNode, buildTemplateIteration2, [('ListOfImagesDictionaries', 'inputspec.ListOfImagesDictionaries'),
                                                                                ('registrationImageTypes','inputspec.registrationImageTypes'),
                                                                                ('interpolationMapping', 'inputspec.interpolationMapping')]),
                      (inputspec, MakeNewAtlasTemplateNode, [(('subject', xml_filename), 'outDefinition')]),
                      (BAtlas, MakeNewAtlasTemplateNode, [('ExtendedAtlasDefinition_xml_in', 'AtlasTemplate')]),
                      (buildTemplateIteration2, MakeNewAtlasTemplateNode, [('outputspec.template', 't1_image'),
                                                                           ('outputspec.passive_deformed_templates', 'deformed_list')]),
                      ])

    # Create DataSinks
    Atlas_DataSink = pe.Node(nio.DataSink(), name="Atlas_DS")
    Atlas_DataSink.overwrite = master_config['ds_overwrite']
    Atlas_DataSink.inputs.base_directory = master_config['resultdir']

    Subject_DataSink = pe.Node(nio.DataSink(), name="Subject_DS")
    Subject_DataSink.overwrite = master_config['ds_overwrite']
    Subject_DataSink.inputs.base_directory = master_config['resultdir']

    template.connect([(inputspec, Atlas_DataSink, [('subject', 'container')]),
                      (buildTemplateIteration1, Atlas_DataSink, [('outputspec.template', 'Atlas.iteration1')]),  # Unnecessary
                      (MakeNewAtlasTemplateNode, Atlas_DataSink, [('outAtlasFullPath', 'Atlas.definitions')]),
                      (BAtlas, Atlas_DataSink, [('template_landmarks_50Lmks_fcsv', 'Atlas.20111119_BCD.@fcsv'),
                                                ('template_weights_50Lmks_wts', 'Atlas.20111119_BCD.@wts'),
                                                ('LLSModel_50Lmks_hdf5', 'Atlas.20111119_BCD.@hdf5'),
                                                ('T1_50Lmks_mdl', 'Atlas.20111119_BCD.@mdl')]),
                      (inputspec, Subject_DataSink, [(('subject', outputPattern), 'regexp_substitutions')]),
                      (buildTemplateIteration2, Subject_DataSink, [('outputspec.template', 'ANTSTemplate.@template')]),
                      (MakeNewAtlasTemplateNode, Subject_DataSink, [('clean_deformed_list', 'ANTSTemplate.@passive_deformed_templates')]),
                     ])

    from utils import run_workflow, print_workflow
    if False:
        print_workflow(template, plugin=master_config['execution']['plugin'], dotfilename='template')
    return run_workflow(template, plugin=master_config['execution']['plugin'], plugin_args=master_config['plugin_args'])
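Both BAW examples above override qsub resources per node through node.plugin_args when the plugin is 'SGE'. The standalone sketch below isolates that mechanism; the queue name and qsub flags are placeholders standing in for what modify_qsub_args would generate, and the run call is left commented out because it requires an actual SGE cluster.

import nipype.pipeline.engine as pe
from nipype.interfaces.utility import Function


def _identity(x):
    return x


node = pe.Node(Function(input_names=['x'], output_names=['x'],
                        function=_identity),
               name='demo_node')
node.inputs.x = 1
# Per-node resource request; with 'overwrite': True these qsub args replace
# the workflow-level ones instead of being appended to them.
node.plugin_args = {'qsub_args': '-q all.q -l h_vmem=9G -pe smp 4',
                    'overwrite': True}

wf = pe.Workflow(name='sge_plugin_args_demo')
wf.base_dir = '/tmp/sge_demo_work'  # hypothetical working directory
wf.add_nodes([node])
# wf.run(plugin='SGE',
#        plugin_args={'qsub_args': '-q all.q',
#                     'template': '/path/to/sge_template.sh'})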
def run_workflow(args, run=True):
    """Connect and execute the QAP Nipype workflow for one bundle of data.

    - This function updates the resource pool with what is already found in
      the output directory (if it exists). If the final expected output of
      the pipeline is already present, the pipeline does not run and moves
      on to the next bundle. If the final expected output is missing, the
      pipeline begins to build itself backwards from that output.

    :type args: tuple
    :param args: A 7-element tuple comprising the bundle's resource pool
                 dictionary, the list of participant info tuples, the
                 configuration options, the run name, miscellaneous run
                 arguments, the bundle index, and the total number of
                 bundles.
    :rtype: dictionary
    :return: A dictionary with information about the workflow run, its
             status, and results.
    """

    import os
    import os.path as op

    import nipype.interfaces.io as nio
    import nipype.pipeline.engine as pe
    import nipype.interfaces.utility as niu

    import qap
    from qap_utils import read_json

    import glob

    import time
    from time import strftime
    from nipype import config as nyconfig
    from nipype import logging

    # unpack args
    resource_pool_dict, sub_info_list, config, run_name, runargs, \
        bundle_idx, num_bundles = args

    # Read and apply general settings in config
    keep_outputs = config.get('write_all_outputs', False)

    # take date+time stamp for run identification purposes
    pipeline_start_stamp = strftime("%Y-%m-%d_%H:%M:%S")
    pipeline_start_time = time.time()

    if "workflow_log_dir" not in config.keys():
        config["workflow_log_dir"] = config["output_directory"]

    bundle_log_dir = op.join(config["workflow_log_dir"],
                             '_'.join(["bundle", str(bundle_idx)]))

    try:
        os.makedirs(bundle_log_dir)
    except:
        if not op.isdir(bundle_log_dir):
            err = "[!] Bundle log directory unable to be created.\n" \
                    "Path: %s\n\n" % bundle_log_dir
            raise Exception(err)
        else:
            pass

    # set up logging
    nyconfig.update_config(
        {'logging': {'log_directory': bundle_log_dir, 'log_to_file': True}})
    logging.update_logging(nyconfig)

    logger.info("QAP version %s" % qap.__version__)
    logger.info("Pipeline start time: %s" % pipeline_start_stamp)

    workflow = pe.Workflow(name=run_name)
    workflow.base_dir = op.join(config["working_directory"])

    # set up crash directory
    workflow.config['execution'] = \
        {'crashdump_dir': config["output_directory"]}

    # create the one node all participants will start from
    starter_node = pe.Node(niu.Function(input_names=['starter'], 
                                        output_names=['starter'], 
                                        function=starter_node_func),
                           name='starter_node')

    # set a dummy variable
    starter_node.inputs.starter = ""

    new_outputs = 0

    # iterate over each subject in the bundle
    logger.info("Starting bundle %s out of %s.." % (str(bundle_idx),
                                                    str(num_bundles)))
    # results dict
    rt = {'status': 'Started', 'bundle_log_dir': bundle_log_dir}

    for sub_info in sub_info_list:

        resource_pool = resource_pool_dict[sub_info]

        # in case we're dealing with string entries in the data dict
        try:
            resource_pool.keys()
        except AttributeError:
            continue

        # resource pool check
        invalid_paths = []

        for resource in resource_pool.keys():
            try:
                if not op.isfile(resource_pool[resource]) and resource != "site_name":
                    invalid_paths.append((resource, resource_pool[resource]))
            except Exception:
                err = "\n\n[!] Could not check the filepath for resource " \
                      "'%s' in the resource pool.\n\n" % resource
                raise Exception(err)

        if len(invalid_paths) > 0:
            err = "\n\n[!] The paths provided in the subject list to the " \
                  "following resources are not valid:\n"

            for path_tuple in invalid_paths:
                err = "%s%s: %s\n" % (err, path_tuple[0], path_tuple[1])

            err = "%s\n\n" % err
            raise Exception(err)

        # process subject info
        sub_id = str(sub_info[0])
        # for nipype
        if "-" in sub_id:
            sub_id = sub_id.replace("-","_")
        if "." in sub_id:
            sub_id = sub_id.replace(".","_")

        if sub_info[1]:
            session_id = str(sub_info[1])
            # for nipype
            if "-" in session_id:
                session_id = session_id.replace("-","_")
            if "." in session_id:
                session_id = session_id.replace(".","_")
        else:
            session_id = "session_0"

        if sub_info[2]:
            scan_id = str(sub_info[2])
            # for nipype
            if "-" in scan_id:
                scan_id = scan_id.replace("-","_")
            if "." in scan_id:
                scan_id = scan_id.replace(".","_")
        else:
            scan_id = "scan_0"

        name = "_".join(["", sub_id, session_id, scan_id])

        rt[name] = {'id': sub_id, 'session': session_id, 'scan': scan_id,
                    'resource_pool': str(resource_pool)}

        logger.info("Participant info: %s" % name)

        # set output directory
        output_dir = op.join(config["output_directory"], run_name,
                             sub_id, session_id, scan_id)

        try:
            os.makedirs(output_dir)
        except:
            if not op.isdir(output_dir):
                err = "[!] Output directory unable to be created.\n" \
                      "Path: %s\n\n" % output_dir
                raise Exception(err)
            else:
                pass

        # for QAP spreadsheet generation only
        config.update({"subject_id": sub_id, "session_id": session_id,
                       "scan_id": scan_id, "run_name": run_name})

        if "site_name" in resource_pool:
            config.update({"site_name": resource_pool["site_name"]})

        logger.info("Configuration settings:\n%s" % str(config))

        qap_types = ["anatomical_spatial", 
                     "functional_spatial", 
                     "functional_temporal"]

        # update that resource pool with what's already in the output
        # directory
        for resource in os.listdir(output_dir):
            if (op.exists(op.join(output_dir, resource)) and
                    resource not in resource_pool.keys()):
                try:
                    resource_pool[resource] = \
                        glob.glob(op.join(output_dir, resource, "*"))[0]
                except IndexError:
                    if ".json" in resource:
                        # load relevant json info into resource pool
                        json_file = op.join(output_dir, resource)
                        json_dict = read_json(json_file)
                        sub_json_dict = json_dict["%s %s %s" % (sub_id,
                                                                session_id,
                                                                scan_id)]

                        if "anatomical_header_info" in sub_json_dict.keys():
                            resource_pool["anatomical_header_info"] = \
                                sub_json_dict["anatomical_header_info"]

                        if "functional_header_info" in sub_json_dict.keys():
                            resource_pool["functional_header_info"] = \
                                sub_json_dict["functional_header_info"]

                        for qap_type in qap_types:
                            if qap_type in sub_json_dict.keys():
                                resource_pool["_".join(["qap",qap_type])] = \
                                    sub_json_dict[qap_type]
                except:
                    # a stray file in the sub-sess-scan output directory
                    pass

        # create starter node which links all of the parallel workflows within
        # the bundle together as a Nipype pipeline
        resource_pool["starter"] = (starter_node, 'starter')

        # individual workflow and logger setup
        logger.info("Contents of resource pool for this participant:\n%s"
                    % str(resource_pool))

        # start connecting the pipeline
        qw = None
        for qap_type in qap_types:
            if "_".join(["qap", qap_type]) not in resource_pool.keys():
                if qw is None:
                    from qap import qap_workflows as qw
                wf_builder = \
                    getattr(qw, "_".join(["qap", qap_type, "workflow"]))
                workflow, resource_pool = wf_builder(workflow, resource_pool,
                                                     config, name)

        if ("anatomical_scan" in resource_pool.keys()) and \
            ("anatomical_header_info" not in resource_pool.keys()):
            if qw is None:
                from qap import qap_workflows as qw
            workflow, resource_pool = \
                qw.qap_gather_header_info(workflow, resource_pool, config,
                    name, "anatomical")

        if ("functional_scan" in resource_pool.keys()) and \
            ("functional_header_info" not in resource_pool.keys()):
            if qw is None:
                from qap import qap_workflows as qw
            workflow, resource_pool = \
                qw.qap_gather_header_info(workflow, resource_pool, config,
                    name, "functional")

        # set up the datasinks
        out_list = []
        for output in resource_pool.keys():
            for qap_type in qap_types:
                if qap_type in output:
                    out_list.append("_".join(["qap", qap_type]))

        # write_all_outputs (writes everything to the output directory, not
        # just the final JSON files)
        if keep_outputs:
            out_list = resource_pool.keys()
        logger.info("Outputs we're keeping: %s" % str(out_list))
        logger.info('Resource pool keys after workflow connection: '
                    '{}'.format(str(resource_pool.keys())))

        # Save reports to out_dir if necessary
        if config.get('write_report', False):

            if ("qap_mosaic" in resource_pool.keys()) and  \
                    ("qap_mosaic" not in out_list):
                out_list += ['qap_mosaic']

            # The functional temporal also has an FD plot
            if 'qap_functional_temporal' in resource_pool.keys():
                if ("qap_fd" in resource_pool.keys()) and \
                        ("qap_fd" not in out_list):
                    out_list += ['qap_fd']

        for output in out_list:
            # we use a check for len()==2 here to select those items in the
            # resource pool which are tuples of (node, node_output), instead
            # of the items which are straight paths to files

            # resource pool items which are in the tuple format are the
            # outputs that have been created in this workflow because they
            # were not present in the subject list YML (the starting resource
            # pool) and had to be generated
            if (len(resource_pool[output]) == 2) and (output != "starter"):
                ds = pe.Node(nio.DataSink(), name='datasink_%s%s'
                                                  % (output,name))
                ds.inputs.base_directory = output_dir
                node, out_file = resource_pool[output]
                workflow.connect(node, out_file, ds, output)
                new_outputs += 1
            elif ".json" in resource_pool[output]:
                new_outputs += 1

    logger.info("New outputs: %s" % str(new_outputs))

    # run the pipeline (if there is anything to do)
    if new_outputs > 0:
        if config.get('write_graph', False):
            workflow.write_graph(
                dotfilename=op.join(config["output_directory"],
                                    "".join([run_name, ".dot"])),
                simple_form=False)
            workflow.write_graph(
                graph2use="orig",
                dotfilename=op.join(config["output_directory"],
                                    "".join([run_name, ".dot"])),
                simple_form=False)
            workflow.write_graph(
                graph2use="hierarchical",
                dotfilename=op.join(config["output_directory"],
                                    "".join([run_name, ".dot"])),
                simple_form=False)
        if run:
            try:
                logger.info("Running with plugin %s" % runargs["plugin"])
                logger.info("Using plugin args %s" % runargs["plugin_args"])
                workflow.run(plugin=runargs["plugin"],
                             plugin_args=runargs["plugin_args"])
                rt['status'] = 'finished'
                logger.info("Workflow run finished for bundle %s."
                            % str(bundle_idx))
            except Exception as e:  # TODO We should be more specific here ...
                errmsg = e
                rt.update({'status': 'failed'})
                logger.info("Workflow run failed for bundle %s."
                            % str(bundle_idx))
                # ... however this is run inside a pool.map: do not raise
                # Exception
        else:
            return workflow

    else:
        rt['status'] = 'cached'
        logger.info("\nEverything is already done for bundle %s."
                    % str(bundle_idx))

    # Remove working directory when done
    if not keep_outputs:
        try:
            work_dir = op.join(workflow.base_dir, scan_id)

            if op.exists(work_dir):
                import shutil
                shutil.rmtree(work_dir)
        except Exception:
            logger.warning("Couldn't remove the working directory!")

    if rt["status"] == "failed":
        logger.error(errmsg)
    else:
        pipeline_end_stamp = strftime("%Y-%m-%d_%H:%M:%S")
        pipeline_end_time = time.time()
        logger.info("Elapsed time (minutes) since last start: %s"
                    % ((pipeline_end_time - pipeline_start_time) / 60))
        logger.info("Pipeline end time: %s" % pipeline_end_stamp)

    return rt
Exemple #21
0
    def _list_outputs(self):
        """Execute this module.
        """

        # Init variables
        outputs = self.output_spec().get()
        out_files = []
        # Use hardlink
        use_hardlink = str2bool(
            config.get('execution', 'try_hard_link_datasink'))

        # Set local output directory if specified
        if isdefined(self.inputs.local_copy):
            outdir = self.inputs.local_copy
        else:
            outdir = self.inputs.base_directory
            # If base directory isn't given, assume current directory
            if not isdefined(outdir):
                outdir = '.'

        # Check if base directory reflects S3 bucket upload
        s3_flag, bucket_name = self._check_s3_base_dir()
        if s3_flag:
            s3dir = self.inputs.base_directory
            # If user overrides bucket object, use that
            if self.inputs.bucket:
                bucket = self.inputs.bucket
            # Otherwise fetch bucket object using name
            else:
                try:
                    bucket = self._fetch_bucket(bucket_name)
                # If encountering an exception during bucket access, set output
                # base directory to a local folder
                except Exception as exc:
                    s3dir = '<N/A>'
                    if not isdefined(self.inputs.local_copy):
                        local_out_exception = os.path.join(
                            os.path.expanduser('~'),
                            's3_datasink_' + bucket_name)
                        outdir = local_out_exception
                    # Log local copying directory
                    iflogger.info(
                        'Access to S3 failed! Storing outputs locally at: '
                        '%s\nError: %s', outdir, exc)
        else:
            s3dir = '<N/A>'

        # If container input is given, append that to outdir
        if isdefined(self.inputs.container):
            outdir = os.path.join(outdir, self.inputs.container)
            s3dir = os.path.join(s3dir, self.inputs.container)

        # If sinking to local folder
        if outdir != s3dir:
            outdir = os.path.abspath(outdir)
            # Create the directory if it doesn't exist
            if not os.path.exists(outdir):
                try:
                    os.makedirs(outdir)
                except OSError as inst:
                    if 'File exists' in inst.strerror:
                        pass
                    else:
                        raise (inst)

        # Iterate through outputs attributes {key : path(s)}
        for key, files in list(self.inputs._outputs.items()):
            if not isdefined(files):
                continue
            iflogger.debug("key: %s files: %s", key, str(files))
            files = ensure_list(files if files else [])
            tempoutdir = outdir
            if s3_flag:
                s3tempoutdir = s3dir
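            # Build the destination sub-directory from the dotted key; any
            # component starting with '@' only names the output and does not
            # add an extra folder on disk.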
            for d in key.split('.'):
                if d[0] == '@':
                    continue
                tempoutdir = os.path.join(tempoutdir, d)
                if s3_flag:
                    s3tempoutdir = os.path.join(s3tempoutdir, d)

            # flattening list
            if files and isinstance(files, list):
                if isinstance(files[0], list):
                    files = [item for sublist in files for item in sublist]

            # Iterate through passed-in source files
            for src in ensure_list(files):
                # Format src and dst files
                src = os.path.abspath(src)
                if not os.path.isfile(src):
                    src = os.path.join(src, '')
                dst = self._get_dst(src)
                if s3_flag:
                    s3dst = os.path.join(s3tempoutdir, dst)
                    s3dst = self._substitute(s3dst)
                dst = os.path.join(tempoutdir, dst)
                dst = self._substitute(dst)
                path, _ = os.path.split(dst)

                # If we're uploading to S3
                if s3_flag:
                    self._upload_to_s3(bucket, src, s3dst)
                    out_files.append(s3dst)
                # Otherwise, copy locally src -> dst
                if not s3_flag or isdefined(self.inputs.local_copy):
                    # Create output directory if it doesn't exist
                    if not os.path.exists(path):
                        try:
                            os.makedirs(path)
                        except OSError as inst:
                            if 'File exists' in inst.strerror:
                                pass
                            else:
                                raise (inst)
                    # If src == dst, it's already home
                    if (not os.path.exists(dst)) or (os.stat(src) !=
                                                     os.stat(dst)):
                        # If src is a file, copy it to dst
                        if os.path.isfile(src):
                            iflogger.debug(f'copyfile: {src} {dst}')
                            copyfile(src,
                                     dst,
                                     copy=True,
                                     hashmethod='content',
                                     use_hardlink=use_hardlink)
                        # If src is a directory, copy
                        # entire contents to dst dir
                        elif os.path.isdir(src):
                            if (os.path.exists(dst)
                                    and self.inputs.remove_dest_dir):
                                iflogger.debug('removing: %s', dst)
                                shutil.rmtree(dst)
                            iflogger.debug('copydir: %s %s', src, dst)
                            copytree(src, dst)
                            out_files.append(dst)

        # Return outputs dictionary
        outputs['out_file'] = out_files

        return outputs
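To exercise the behaviour of _list_outputs in isolation, the short sketch below drives a DataSink directly; the file and directory names are placeholders, and try_hard_link_datasink is the execution option read at the top of the method.

import os
from nipype import config
from nipype.interfaces.io import DataSink

# Force real copies instead of hardlinks (the option read by _list_outputs).
config.set('execution', 'try_hard_link_datasink', 'false')

# A throwaway file to sink (placeholder content).
src_file = os.path.abspath('metrics.json')
with open(src_file, 'w') as f:
    f.write('{}')

ds = DataSink()
ds.inputs.base_directory = os.path.abspath('datasink_out')
ds.inputs.container = 'sub-01_ses-01'
ds.inputs.substitutions = [('metrics', 'qap_metrics')]
# Arbitrary input names become sub-folders; a '.@name' component would keep
# the file in the parent folder instead of adding another directory level.
ds.inputs.qap_anatomical_spatial = src_file
res = ds.run()
print(res.outputs.out_file)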
import os

from IPython.display import Image

from nipype.interfaces.base import (
    traits,
    TraitedSpec,
    CommandLineInputSpec,
    CommandLine,
    File,
    isdefined
)
from nipype.utils.filemanip import fname_presuffix

from nipype import config

config.set('execution', 'display_variable', os.environ['DISPLAY'])
print(config.get('execution', 'display_variable'))

                        
class MP2RageSkullStripInputSpec(CommandLineInputSpec):
    in_filter_image = traits.File(mandatory=False, argstr='-inFilter %s', desc='Filter Image')
    in_inv2 = traits.File(exists=True, argstr='-inInv2 %s', desc='Inv2 Image')
    in_t1 = traits.File(exists=True, argstr='-inT1 %s', desc='T1 Map image')
    in_t1_weighted = traits.File(exists=True, argstr='-inT1weighted %s', desc='T1-Weighted Image')
    out_brain_mask = traits.File('brain_mask.nii.gz', usedefault=True, argstr='-outBrain %s', desc='Path/name of brain mask')
    out_masked_t1 = traits.File(argstr='-outMasked %s', desc='Create masked T1')
    out_masked_t1_weighted = traits.Bool(argstr='-outMasked2 %s', desc='Path/name of masked T1-weighted image')
    out_masked_filter_image = traits.Bool(argstr='-outMasked3 %s', desc='Path/name of masked Filter image')

class MP2RageSkullStripOutputSpec(TraitedSpec):
    brain_mask = traits.File()
    masked_t1 = traits.File()
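The fragment above stops after the output spec. For completeness, here is a minimal, hedged sketch of the CommandLine subclass that could tie the two specs together; the executable name 'mp2rage_skullstrip' and the output handling are assumptions for illustration, not details taken from the original example.

class MP2RageSkullStrip(CommandLine):
    """Hypothetical wrapper; the real tool name and option semantics may differ."""
    input_spec = MP2RageSkullStripInputSpec
    output_spec = MP2RageSkullStripOutputSpec
    _cmd = 'mp2rage_skullstrip'  # assumed executable name

    def _list_outputs(self):
        outputs = self.output_spec().get()
        # out_brain_mask carries usedefault=True, so it always has a value
        outputs['brain_mask'] = os.path.abspath(self.inputs.out_brain_mask)
        if isdefined(self.inputs.out_masked_t1):
            outputs['masked_t1'] = os.path.abspath(self.inputs.out_masked_t1)
        return outputs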
Exemple #23
0
def RunSubjectWorkflow(args):
    """
                           .-----------.
                       --- | Session 1 | ---> /project/subjectA/session1/phase/
                     /     *-----------*
    .-----------.   /
    | Subject A | <
    *-----------*   \
                     \     .-----------.
                       --- | Session 2 | ---> /project/subjectA/session2/phase/
                           *-----------*
    **** Replaces WorkflowT1T2.py ****
    """
    database, start_time, subject, master_config = args
    assert ('baseline' in master_config['components']
            or 'longitudinal' in master_config['components']), \
        "Baseline or Longitudinal is not in WORKFLOW_COMPONENTS!"
    # HACK:
    #    To avoid a "sqlite3.ProgrammingError: Base Cursor.__init__ not called" error
    #    using multiprocessing.map_async(), re-instantiate database
    # database.__init__(defaultDBName=database.dbName, subject_list=database.subjectList)
    #
    # END HACK
    import time

    from nipype import config, logging
    config.update_config(master_config)  # Set universal pipeline options
    assert config.get('execution',
                      'plugin') == master_config['execution']['plugin']
    # DEBUG
    # config.enable_debug_mode()
    # config.set('execution', 'stop_on_first_rerun', 'true')
    # END DEBUG
    logging.update_logging(config)

    import nipype.pipeline.engine as pe
    import nipype.interfaces.base as nbase
    import nipype.interfaces.io as nio
    from nipype.interfaces.utility import IdentityInterface, Function
    import traits

    from baw_exp import OpenSubjectDatabase
    from SessionDB import SessionDB
    from PipeLineFunctionHelpers import convertToList
    from atlasNode import MakeAtlasNode
    from utilities.misc import GenerateSubjectOutputPattern as outputPattern
    from utilities.misc import GenerateWFName

    while time.time() < start_time:
        time.sleep(start_time - time.time() + 1)
        print "Delaying start for {subject}".format(subject=subject)
    print("===================== SUBJECT: {0} ===========================".
          format(subject))

    subjectWorkflow = pe.Workflow(
        name="BAW_StandardWorkup_subject_{0}".format(subject))
    subjectWorkflow.base_dir = config.get('logging', 'log_directory')
    # subjectWorkflow.config['execution']['plugin'] = 'Linear'  # Hardcodeded in WorkupT1T2.py - why?
    # DEBUG
    # subjectWorkflow.config['execution']['stop_on_first_rerun'] = 'true'
    # END DEBUG
    atlasNode = MakeAtlasNode(master_config['atlascache'], 'BAtlas')

    sessionWorkflow = dict()
    inputsSpec = dict()
    sessions = database.getSessionsFromSubject(subject)
    # print "These are the sessions: ", sessions
    if 'baseline' in master_config['components']:
        current_phase = 'baseline'
        from baseline import create_baseline as create_wkfl
    elif 'longitudinal' in master_config['components']:
        current_phase = 'longitudinal'
        from longitudinal import create_longitudial as create_wkfl

    for session in sessions:  # TODO (future): Replace with iterable inputSpec node and add Function node for getAllFiles()
        project = database.getProjFromSession(session)
        pname = "{0}_{1}".format(
            session,
            current_phase)  # Long node names make graphs a pain to read/print
        # pname = GenerateWFName(project, subject, session, current_phase)
        print "Building session pipeline for {0}".format(session)
        inputsSpec[session] = pe.Node(
            name='inputspec_{0}'.format(session),
            interface=IdentityInterface(
                fields=['T1s', 'T2s', 'PDs', 'FLs', 'OTs']))
        inputsSpec[session].inputs.T1s = database.getFilenamesByScantype(
            session, ['T1-15', 'T1-30'])
        inputsSpec[session].inputs.T2s = database.getFilenamesByScantype(
            session, ['T2-15', 'T2-30'])
        inputsSpec[session].inputs.PDs = database.getFilenamesByScantype(
            session, ['PD-15', 'PD-30'])
        inputsSpec[session].inputs.FLs = database.getFilenamesByScantype(
            session, ['FL-15', 'FL-30'])
        inputsSpec[session].inputs.OTs = database.getFilenamesByScantype(
            session, ['OTHER-15', 'OTHER-30'])

        sessionWorkflow[session] = create_wkfl(project,
                                               subject,
                                               session,
                                               master_config,
                                               interpMode='Linear',
                                               pipeline_name=pname)

        subjectWorkflow.connect([
            (inputsSpec[session], sessionWorkflow[session], [
                ('T1s', 'inputspec.T1s'),
                ('T2s', 'inputspec.T2s'),
                ('PDs', 'inputspec.PDs'),
                ('FLs', 'inputspec.FLs'),
                ('OTs', 'inputspec.OTHERs'),
            ]),
            (atlasNode, sessionWorkflow[session],
             [('template_landmarks_50Lmks_fcsv',
               'inputspec.atlasLandmarkFilename'),
              ('template_weights_50Lmks_wts', 'inputspec.atlasWeightFilename'),
              ('LLSModel_50Lmks_hdf5', 'inputspec.LLSModel'),
              ('T1_50Lmks_mdl', 'inputspec.inputTemplateModel')]),
        ])
        if current_phase == 'baseline':
            subjectWorkflow.connect([
                (atlasNode, sessionWorkflow[session],
                 [('template_t1', 'inputspec.template_t1'),
                  ('ExtendedAtlasDefinition_xml', 'inputspec.atlasDefinition')
                  ]),
            ])
        else:
            assert current_phase == 'longitudinal', "Phase value is unknown: {0}".format(
                current_phase)

    from utils import run_workflow, print_workflow
    if False:
        print_workflow(subjectWorkflow,
                       plugin=master_config['execution']['plugin'],
                       dotfilename='template')
    return run_workflow(subjectWorkflow,
                        plugin=master_config['execution']['plugin'],
                        plugin_args=master_config['plugin_args'])
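For orientation, here is a hedged sketch of the shape of master_config that the function above reads; the keys mirror the lookups in the code, while every value shown is an illustrative placeholder.

illustrative_master_config = {
    'components': ['baseline'],                    # or ['longitudinal']
    'execution': {'plugin': 'MultiProc'},          # consumed by config.update_config()
    'logging': {'log_directory': '/tmp/baw_logs'},
    'plugin_args': {'n_procs': 4},
    'atlascache': '/tmp/atlas_cache',
    'resultdir': '/tmp/baw_results',
}

from nipype import config
config.update_config(illustrative_master_config)  # nipype picks up the recognised sections ('execution', 'logging', ...)
assert config.get('execution', 'plugin') == 'MultiProc'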
Exemple #24
0
def test_debug_mode():
    from ... import logging

    sofc_config = config.get('execution', 'stop_on_first_crash')
    ruo_config = config.get('execution', 'remove_unnecessary_outputs')
    ki_config = config.get('execution', 'keep_inputs')
    wf_config = config.get('logging', 'workflow_level')
    if_config = config.get('logging', 'interface_level')
    ut_config = config.get('logging', 'utils_level')

    wf_level = logging.getLogger('nipype.workflow').level
    if_level = logging.getLogger('nipype.interface').level
    ut_level = logging.getLogger('nipype.utils').level

    config.enable_debug_mode()

    # Check config is updated and logging levels, too
    assert config.get('execution', 'stop_on_first_crash') == 'true'
    assert config.get('execution', 'remove_unnecessary_outputs') == 'false'
    assert config.get('execution', 'keep_inputs') == 'true'
    assert config.get('logging', 'workflow_level') == 'DEBUG'
    assert config.get('logging', 'interface_level') == 'DEBUG'
    assert config.get('logging', 'utils_level') == 'DEBUG'

    assert logging.getLogger('nipype.workflow').level == 10
    assert logging.getLogger('nipype.interface').level == 10
    assert logging.getLogger('nipype.utils').level == 10

    # Restore config and levels
    config.set('execution', 'stop_on_first_crash', sofc_config)
    config.set('execution', 'remove_unnecessary_outputs', ruo_config)
    config.set('execution', 'keep_inputs', ki_config)
    config.set('logging', 'workflow_level', wf_config)
    config.set('logging', 'interface_level', if_config)
    config.set('logging', 'utils_level', ut_config)
    logging.update_logging(config)

    assert config.get('execution', 'stop_on_first_crash') == sofc_config
    assert config.get('execution', 'remove_unnecessary_outputs') == ruo_config
    assert config.get('execution', 'keep_inputs') == ki_config
    assert config.get('logging', 'workflow_level') == wf_config
    assert config.get('logging', 'interface_level') == if_config
    assert config.get('logging', 'utils_level') == ut_config

    assert logging.getLogger('nipype.workflow').level == wf_level
    assert logging.getLogger('nipype.interface').level == if_level
    assert logging.getLogger('nipype.utils').level == ut_level
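The snapshot-and-restore pattern in this test grows unwieldy as more options are involved; one hedged way to package it is a small context manager like the one below. This helper is purely illustrative and not part of the nipype API.

from contextlib import contextmanager
from nipype import config, logging

@contextmanager
def preserved_config(*keys):
    """Snapshot the given (section, option) pairs and restore them on exit."""
    saved = {(section, option): config.get(section, option)
             for section, option in keys}
    try:
        yield
    finally:
        for (section, option), value in saved.items():
            config.set(section, option, value)
        logging.update_logging(config)

# Usage, mirroring the test above:
with preserved_config(('execution', 'stop_on_first_crash'),
                      ('logging', 'workflow_level')):
    config.enable_debug_mode()
    # ... exercise code that needs debug settings ...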
Exemple #25
0
def segmentation(projectid, subjectid, sessionid, master_config, onlyT1=True, pipeline_name=''):
    import os.path
    import nipype.pipeline.engine as pe
    import nipype.interfaces.io as nio
    from nipype.interfaces import ants
    from nipype.interfaces.utility import IdentityInterface, Function, Merge
    # Set universal pipeline options
    from nipype import config
    config.update_config(master_config)
    assert config.get('execution', 'plugin') == master_config['execution']['plugin']

    from PipeLineFunctionHelpers import ClipT1ImageWithBrainMask
    from WorkupT1T2BRAINSCut import CreateBRAINSCutWorkflow
    from utilities.distributed import modify_qsub_args
    from SEMTools import BRAINSSnapShotWriter

    baw200 = pe.Workflow(name=pipeline_name)

    # HACK: print for debugging
    for key, item in master_config.items():
        print("-" * 30)
        print(key, ":", item)
    print("-" * 30)
    # END HACK

    inputsSpec = pe.Node(interface=IdentityInterface(fields=['t1_average',
                                                             't2_average',
                                                             'template_t1',
                                                             'hncma-atlas',
                                                             'LMIatlasToSubject_tx',
                                                             'inputLabels',
                                                             'inputHeadLabels',
                                                             'posteriorImages',
                                                             'TissueClassifyatlasToSubjectInverseTransform',
                                                             'UpdatedPosteriorsList']),
                         run_without_submitting=True, name='inputspec')

    # outputsSpec = pe.Node(interface=IdentityInterface(fields=[...]),
    #                       run_without_submitting=True, name='outputspec')

    currentClipT1ImageWithBrainMaskName = 'ClipT1ImageWithBrainMask_' + str(subjectid) + "_" + str(sessionid)
    ClipT1ImageWithBrainMaskNode = pe.Node(interface=Function(function=ClipT1ImageWithBrainMask,
                                                              input_names=['t1_image', 'brain_labels',
                                                                           'clipped_file_name'],
                                                              output_names=['clipped_file']),
                                            name=currentClipT1ImageWithBrainMaskName)
    ClipT1ImageWithBrainMaskNode.inputs.clipped_file_name = 'clipped_from_BABC_labels_t1.nii.gz'

    baw200.connect([(inputsSpec, ClipT1ImageWithBrainMaskNode, [('t1_average', 't1_image'),
                                                                ('inputLabels', 'brain_labels')])])

    currentAtlasToSubjectantsRegistration = 'AtlasToSubjectANTsRegistration_' + str(subjectid) + "_" + str(sessionid)
    AtlasToSubjectantsRegistration = pe.Node(interface=ants.Registration(), name=currentAtlasToSubjectantsRegistration)

    AtlasToSubjectantsRegistration.inputs.dimension = 3
    AtlasToSubjectantsRegistration.inputs.transforms = ["Affine", "SyN"]
    AtlasToSubjectantsRegistration.inputs.transform_parameters = [[0.1], [0.15, 3.0, 0.0]]
    AtlasToSubjectantsRegistration.inputs.metric = ['Mattes', 'CC']
    AtlasToSubjectantsRegistration.inputs.sampling_strategy = ['Regular', None]
    AtlasToSubjectantsRegistration.inputs.sampling_percentage = [1.0, 1.0]
    AtlasToSubjectantsRegistration.inputs.metric_weight = [1.0, 1.0]
    AtlasToSubjectantsRegistration.inputs.radius_or_number_of_bins = [32, 4]
    AtlasToSubjectantsRegistration.inputs.number_of_iterations = [[1000, 1000, 1000], [10000, 500, 500, 200]]
    AtlasToSubjectantsRegistration.inputs.convergence_threshold = [5e-7, 5e-7]
    AtlasToSubjectantsRegistration.inputs.convergence_window_size = [25, 25]
    AtlasToSubjectantsRegistration.inputs.use_histogram_matching = [True, True]
    AtlasToSubjectantsRegistration.inputs.shrink_factors = [[4, 2, 1], [5, 4, 2, 1]]
    AtlasToSubjectantsRegistration.inputs.smoothing_sigmas = [[4, 2, 0], [5, 4, 2, 0]]
    AtlasToSubjectantsRegistration.inputs.sigma_units = ["vox","vox"]
    AtlasToSubjectantsRegistration.inputs.use_estimate_learning_rate_once = [False, False]
    AtlasToSubjectantsRegistration.inputs.write_composite_transform = True
    AtlasToSubjectantsRegistration.inputs.collapse_output_transforms = True
    AtlasToSubjectantsRegistration.inputs.output_transform_prefix = 'AtlasToSubject_'
    AtlasToSubjectantsRegistration.inputs.winsorize_lower_quantile = 0.025
    AtlasToSubjectantsRegistration.inputs.winsorize_upper_quantile = 0.975
    AtlasToSubjectantsRegistration.inputs.collapse_linear_transforms_to_fixed_image_header = False
    AtlasToSubjectantsRegistration.inputs.output_warped_image = 'atlas2subject.nii.gz'
    AtlasToSubjectantsRegistration.inputs.output_inverse_warped_image = 'subject2atlas.nii.gz'

    baw200.connect([(inputsSpec, AtlasToSubjectantsRegistration, [('LMIatlasToSubject_tx', 'initial_moving_transform'),
                                                                  ('t1_average', 'fixed_image'),
                                                                  ('template_t1', 'moving_image')])
                   ])

    myLocalSegWF = CreateBRAINSCutWorkflow(projectid,
                                           subjectid,
                                           sessionid,
                                           master_config['queue'],
                                           master_config['long_q'],
                                           t1Only=onlyT1)
    MergeStage2AverageImagesName = "99_mergeAvergeStage2Images_" + str(sessionid)
    MergeStage2AverageImages = pe.Node(interface=Merge(2), run_without_submitting=True,
                                       name=MergeStage2AverageImagesName)

    baw200.connect([(inputsSpec, myLocalSegWF, [('t1_average', 'inputspec.T1Volume'),
                                                ('posteriorImages', "inputspec.posteriorDictionary"),
                                                ('inputLabels', 'inputspec.RegistrationROI'),]),
                    (inputsSpec, MergeStage2AverageImages, [('t1_average', 'in1')]),
                    (AtlasToSubjectantsRegistration, myLocalSegWF, [('composite_transform',
                                                                     'inputspec.atlasToSubjectTransform')])
                   ])

    if not onlyT1:
        baw200.connect([(inputsSpec, myLocalSegWF, [('t2_average', 'inputspec.T2Volume')]),
                        (inputsSpec, MergeStage2AverageImages, [('t2_average', 'in2')])])
        file_count = 15  # Count of files to merge into MergeSessionSubjectToAtlas
    else:
        file_count = 14  # Count of files to merge into MergeSessionSubjectToAtlas


    ## NOTE: Element 0 of AccumulatePriorsList is the accumulated GM tissue
    # baw200.connect([(AccumulateLikeTissuePosteriorsNode, myLocalSegWF,
    #               [(('AccumulatePriorsList', getListIndex, 0), "inputspec.TotalGM")]),
    #               ])

    ### Now define where the final organized outputs should go.
    DataSink = pe.Node(nio.DataSink(), name="CleanedDenoisedSegmentation_DS_" + str(subjectid) + "_" + str(sessionid))
    DataSink.overwrite = master_config['ds_overwrite']
    DataSink.inputs.base_directory = master_config['resultdir']
    # DataSink.inputs.regexp_substitutions = GenerateOutputPattern(projectid, subjectid, sessionid,'BRAINSCut')
    # DataSink.inputs.regexp_substitutions = GenerateBRAINSCutImagesOutputPattern(projectid, subjectid, sessionid)
    DataSink.inputs.substitutions = [('Segmentations', os.path.join(projectid, subjectid, sessionid, 'CleanedDenoisedRFSegmentations')),
                                     ('subjectANNLabel_', ''),
                                     ('ANNContinuousPrediction', ''),
                                     ('subject.nii.gz', '.nii.gz'),
                                     ('_seg.nii.gz', '_seg.nii.gz'),
                                     ('.nii.gz', '_seg.nii.gz'),
                                     ('_seg_seg', '_seg')]

    baw200.connect([(myLocalSegWF, DataSink, [('outputspec.outputBinaryLeftCaudate', 'Segmentations.@LeftCaudate'),
                                              ('outputspec.outputBinaryRightCaudate', 'Segmentations.@RightCaudate'),
                                              ('outputspec.outputBinaryLeftHippocampus', 'Segmentations.@LeftHippocampus'),
                                              ('outputspec.outputBinaryRightHippocampus', 'Segmentations.@RightHippocampus'),
                                              ('outputspec.outputBinaryLeftPutamen', 'Segmentations.@LeftPutamen'),
                                              ('outputspec.outputBinaryRightPutamen', 'Segmentations.@RightPutamen'),
                                              ('outputspec.outputBinaryLeftThalamus', 'Segmentations.@LeftThalamus'),
                                              ('outputspec.outputBinaryRightThalamus', 'Segmentations.@RightThalamus'),
                                              ('outputspec.outputBinaryLeftAccumben', 'Segmentations.@LeftAccumben'),
                                              ('outputspec.outputBinaryRightAccumben', 'Segmentations.@RightAccumben'),
                                              ('outputspec.outputBinaryLeftGlobus', 'Segmentations.@LeftGlobus'),
                                              ('outputspec.outputBinaryRightGlobus', 'Segmentations.@RightGlobus'),
                                              ('outputspec.outputLabelImageName', 'Segmentations.@LabelImageName'),
                                              ('outputspec.outputCSVFileName', 'Segmentations.@CSVFileName')]),
                    # (myLocalSegWF, DataSink, [('outputspec.cleaned_labels', 'Segmentations.@cleaned_labels')])
                   ])


    MergeStage2BinaryVolumesName = "99_MergeStage2BinaryVolumes_" + str(sessionid)
    MergeStage2BinaryVolumes = pe.Node(interface=Merge(12), run_without_submitting=True,
                                       name=MergeStage2BinaryVolumesName)

    baw200.connect([(myLocalSegWF, MergeStage2BinaryVolumes, [('outputspec.outputBinaryLeftAccumben', 'in1'),
                                                              ('outputspec.outputBinaryLeftCaudate', 'in2'),
                                                              ('outputspec.outputBinaryLeftPutamen', 'in3'),
                                                              ('outputspec.outputBinaryLeftGlobus', 'in4'),
                                                              ('outputspec.outputBinaryLeftThalamus', 'in5'),
                                                              ('outputspec.outputBinaryLeftHippocampus', 'in6'),
                                                              ('outputspec.outputBinaryRightAccumben', 'in7'),
                                                              ('outputspec.outputBinaryRightCaudate', 'in8'),
                                                              ('outputspec.outputBinaryRightPutamen', 'in9'),
                                                              ('outputspec.outputBinaryRightGlobus', 'in10'),
                                                              ('outputspec.outputBinaryRightThalamus', 'in11'),
                                                              ('outputspec.outputBinaryRightHippocampus', 'in12')])
                   ])

    ## SnapShotWriter for Segmented result checking:
    SnapShotWriterNodeName = "SnapShotWriter_" + str(sessionid)
    SnapShotWriter = pe.Node(interface=BRAINSSnapShotWriter(), name=SnapShotWriterNodeName)

    SnapShotWriter.inputs.outputFilename = 'snapShot' + str(sessionid) + '.png'  # output specification
    SnapShotWriter.inputs.inputPlaneDirection = [2, 1, 1, 1, 1, 0, 0]
    SnapShotWriter.inputs.inputSliceToExtractInPhysicalPoint = [-3, -7, -3, 5, 7, 22, -22]

    baw200.connect([(MergeStage2AverageImages, SnapShotWriter, [('out', 'inputVolumes')]),
                    (MergeStage2BinaryVolumes, SnapShotWriter, [('out', 'inputBinaryVolumes')]),
                    (SnapShotWriter, DataSink, [('outputFilename', 'Segmentations.@outputSnapShot')])
                    ])

    currentAntsLabelWarpToSubject = 'AntsLabelWarpToSubject' + str(subjectid) + "_" + str(sessionid)
    AntsLabelWarpToSubject = pe.Node(interface=ants.ApplyTransforms(), name=currentAntsLabelWarpToSubject)

    AntsLabelWarpToSubject.inputs.dimension = 3
    AntsLabelWarpToSubject.inputs.output_image = 'warped_hncma_atlas_seg.nii.gz'
    AntsLabelWarpToSubject.inputs.interpolation = "MultiLabel"

    baw200.connect([(AtlasToSubjectantsRegistration, AntsLabelWarpToSubject, [('composite_transform', 'transforms')]),
                    (inputsSpec, AntsLabelWarpToSubject, [('t1_average', 'reference_image'),
                                                          ('hncma-atlas', 'input_image')])
                    ])
    #####
    ### Now define where the final organized outputs should go.
    AntsLabelWarpedToSubject_DSName = "AntsLabelWarpedToSubject_DS_" + str(sessionid)
    AntsLabelWarpedToSubject_DS = pe.Node(nio.DataSink(), name=AntsLabelWarpedToSubject_DSName)
    AntsLabelWarpedToSubject_DS.overwrite = master_config['ds_overwrite']
    AntsLabelWarpedToSubject_DS.inputs.base_directory = master_config['resultdir']
    AntsLabelWarpedToSubject_DS.inputs.substitutions = [('AntsLabelWarpedToSubject', os.path.join(projectid, subjectid, sessionid, 'AntsLabelWarpedToSubject'))]

    baw200.connect([(AntsLabelWarpToSubject, AntsLabelWarpedToSubject_DS, [('output_image', 'AntsLabelWarpedToSubject')])])

    MergeSessionSubjectToAtlasName = "99_MergeSessionSubjectToAtlas_" + str(sessionid)
    MergeSessionSubjectToAtlas = pe.Node(interface=Merge(file_count), run_without_submitting=True,
                                         name=MergeSessionSubjectToAtlasName)

    baw200.connect([(myLocalSegWF, MergeSessionSubjectToAtlas, [('outputspec.outputBinaryLeftAccumben', 'in1'),
                                                                ('outputspec.outputBinaryLeftCaudate', 'in2'),
                                                                ('outputspec.outputBinaryLeftPutamen', 'in3'),
                                                                ('outputspec.outputBinaryLeftGlobus', 'in4'),
                                                                ('outputspec.outputBinaryLeftThalamus', 'in5'),
                                                                ('outputspec.outputBinaryLeftHippocampus', 'in6'),
                                                                ('outputspec.outputBinaryRightAccumben', 'in7'),
                                                                ('outputspec.outputBinaryRightCaudate', 'in8'),
                                                                ('outputspec.outputBinaryRightPutamen', 'in9'),
                                                                ('outputspec.outputBinaryRightGlobus', 'in10'),
                                                                ('outputspec.outputBinaryRightThalamus', 'in11'),
                                                                ('outputspec.outputBinaryRightHippocampus', 'in12')]),
                    # (FixWMPartitioningNode, MergeSessionSubjectToAtlas, [('UpdatedPosteriorsList', 'in13')]),
                    (inputsSpec, MergeSessionSubjectToAtlas, [('UpdatedPosteriorsList', 'in13')]),
                    (inputsSpec, MergeSessionSubjectToAtlas, [('t1_average', 'in14')])
                    ])

    if not onlyT1:
        assert file_count == 15
        baw200.connect([(inputsSpec, MergeSessionSubjectToAtlas, [('t2_average', 'in15')])])

    LinearSubjectToAtlasANTsApplyTransformsName = 'LinearSubjectToAtlasANTsApplyTransforms_' + str(sessionid)
    LinearSubjectToAtlasANTsApplyTransforms = pe.MapNode(interface=ants.ApplyTransforms(), iterfield=['input_image'],
                                                         name=LinearSubjectToAtlasANTsApplyTransformsName)
    LinearSubjectToAtlasANTsApplyTransforms.inputs.interpolation = 'Linear'

    baw200.connect([(AtlasToSubjectantsRegistration, LinearSubjectToAtlasANTsApplyTransforms, [('inverse_composite_transform',
                                                                                              'transforms')]),
                    (inputsSpec, LinearSubjectToAtlasANTsApplyTransforms, [('template_t1', 'reference_image')]),
                    (MergeSessionSubjectToAtlas, LinearSubjectToAtlasANTsApplyTransforms, [('out', 'input_image')])
                    ])

    MergeMultiLabelSessionSubjectToAtlasName = "99_MergeMultiLabelSessionSubjectToAtlas_" + str(sessionid)
    MergeMultiLabelSessionSubjectToAtlas = pe.Node(interface=Merge(2), run_without_submitting=True,
                                                   name=MergeMultiLabelSessionSubjectToAtlasName)

    baw200.connect([(inputsSpec, MergeMultiLabelSessionSubjectToAtlas, [('inputLabels', 'in1'),
                                                                        ('inputHeadLabels', 'in2')])
                   ])

    ### This is taking this sessions RF label map back into NAC atlas space.
    #{
    MultiLabelSubjectToAtlasANTsApplyTransformsName = 'MultiLabelSubjectToAtlasANTsApplyTransforms_' + str(sessionid) + '_map'
    MultiLabelSubjectToAtlasANTsApplyTransforms = pe.MapNode(interface=ants.ApplyTransforms(), iterfield=['input_image'],
                                                             name=MultiLabelSubjectToAtlasANTsApplyTransformsName)
    MultiLabelSubjectToAtlasANTsApplyTransforms.inputs.interpolation = 'MultiLabel'

    baw200.connect([(AtlasToSubjectantsRegistration, MultiLabelSubjectToAtlasANTsApplyTransforms,
                     [('inverse_composite_transform', 'transforms')]),
                      (inputsSpec, MultiLabelSubjectToAtlasANTsApplyTransforms, [('template_t1', 'reference_image')]),
                      (MergeMultiLabelSessionSubjectToAtlas, MultiLabelSubjectToAtlasANTsApplyTransforms,
                       [('out', 'input_image')])
                   ])
    #}
    ### Now we must take the sessions to THIS SUBJECTS personalized atlas.
    #{
    #}

    ### Now define where the final organized outputs should go.
    Subj2Atlas_DSName = "SubjectToAtlas_DS_" + str(sessionid)
    Subj2Atlas_DS = pe.Node(nio.DataSink(), name=Subj2Atlas_DSName)
    Subj2Atlas_DS.overwrite = master_config['ds_overwrite']
    Subj2Atlas_DS.inputs.base_directory = master_config['resultdir']
    Subj2Atlas_DS.inputs.regexp_substitutions = [(r'_LinearSubjectToAtlasANTsApplyTransforms_[^/]*',
                                                  r'' + sessionid + '/')]

    baw200.connect([(LinearSubjectToAtlasANTsApplyTransforms, Subj2Atlas_DS,
                     [('output_image', 'SubjectToAtlasWarped.@linear_output_images')])])

    Subj2AtlasTransforms_DSName = "SubjectToAtlasTransforms_DS_" + str(sessionid)
    Subj2AtlasTransforms_DS = pe.Node(nio.DataSink(), name=Subj2AtlasTransforms_DSName)
    Subj2AtlasTransforms_DS.overwrite = master_config['ds_overwrite']
    Subj2AtlasTransforms_DS.inputs.base_directory = master_config['resultdir']
    Subj2AtlasTransforms_DS.inputs.regexp_substitutions = [(r'SubjectToAtlasWarped',
                                                            r'SubjectToAtlasWarped/' + sessionid + '/')]

    baw200.connect([(AtlasToSubjectantsRegistration, Subj2AtlasTransforms_DS,
                     [('composite_transform', 'SubjectToAtlasWarped.@composite_transform'),
                      ('inverse_composite_transform', 'SubjectToAtlasWarped.@inverse_composite_transform')])])
    # baw200.connect([(MultiLabelSubjectToAtlasANTsApplyTransforms, Subj2Atlas_DS, [('output_image', 'SubjectToAtlasWarped.@multilabel_output_images')])])

    if master_config['execution']['plugin'] == 'SGE':  # for some nodes, the qsub call needs to be modified on the cluster
        AtlasToSubjectantsRegistration.plugin_args = {'template': master_config['plugin_args']['template'], 'overwrite': True,
                                                      'qsub_args': modify_qsub_args(master_config['queue'], '9000M', 4,
                                                                                    hard=False)}
        SnapShotWriter.plugin_args = {'template': master_config['plugin_args']['template'], 'overwrite': True,
                                      'qsub_args': modify_qsub_args(master_config['queue'], '1000M', 1, 1, hard=False)}
        LinearSubjectToAtlasANTsApplyTransforms.plugin_args = {'template': master_config['plugin_args']['template'],
                                                               'overwrite': True,
                                                               'qsub_args': modify_qsub_args(master_config['queue'], '1000M',
                                                                                             1, hard=True)}
        MultiLabelSubjectToAtlasANTsApplyTransforms.plugin_args = {'template': master_config['plugin_args']['template'],
                                                                   'overwrite': True,
                                                                   'qsub_args': modify_qsub_args(master_config['queue'], '1000M',
                                                                                                 1, hard=True)}

    return baw200
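A hedged sketch of how the workflow returned above might be inspected or launched; every argument is a placeholder, and in practice this sub-workflow is embedded in a larger pipeline that feeds its inputspec fields, so a standalone run() would first require those inputs to be populated.

# master_config here is assumed to carry the keys the function reads:
# 'execution', 'queue', 'long_q', 'ds_overwrite', 'resultdir', 'plugin_args'.
wf = segmentation('proj01', 'subj01', 'ses01', master_config,
                  onlyT1=True, pipeline_name='Segmentation_proj01_subj01_ses01')
wf.base_dir = '/tmp/seg_work'
wf.write_graph(graph2use='hierarchical')   # inspect the node graph without running it
# wf.run(plugin=master_config['execution']['plugin'],
#        plugin_args=master_config.get('plugin_args'))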
            outputs["resized_file"] = self._gen_filename(self.inputs.in_file)
        else:
            outputs["resized_file"] = os.path.abspath(self.inputs.out_file)
        return outputs

    def _gen_filename(self, name):
        pth, fn, ext = filemanip.split_filename(self.inputs.in_file)
        return os.path.join(os.getcwd(), fn + "_resized" + ext)


from nipype.utils.filemanip import fname_presuffix

from nipype import config

config.set("execution", "display_variable", os.environ["DISPLAY"])
print(config.get("execution", "display_variable"))


class MP2RageSkullStripInputSpec(CommandLineInputSpec):
    in_filter_image = traits.File(mandatory=False, argstr="-inFilter %s", desc=" Filter Image")
    in_inv2 = traits.File(exists=True, argstr="-inInv2 %s", desc="Inv2 Image")
    in_t1 = traits.File(exists=True, argstr="-inT1 %s", desc="T1 Map image")
    in_t1_weighted = traits.File(exists=True, argstr="-inT1weighted %s", desc="T1-Weighted Image")
    out_brain_mask = traits.File(
        "brain_mask.nii.gz", usedefault=True, argstr="-outBrain %s", desc="Path/name of brain mask"
    )
    out_masked_t1 = traits.Bool(True, usedefault=True, argstr="-outMasked %s", desc="Create masked T1")
    out_masked_t1_weighted = traits.Bool(
        True, usedefault=True, argstr="-outMasked2 %s", desc="Path/name of masked T1-weighted image"
    )
    out_masked_filter_image = traits.Bool(