Code example #1
def _template_runner(argv, environment, experiment, pipeline_options, cluster):
    print("Getting subjects from database...")
    # subjects = argv["--subjects"].split(',')
    subjects, subjects_sessions_dictionary = get_subjects_sessions_dictionary(argv['SUBJECTS'],
            experiment['cachedir'],
            experiment['resultdir'],
            environment['prefix'],
            experiment['dbfile'],
            argv['--use-sentinal'], argv['--use-shuffle']
            ) # Build database before parallel section
    useSentinal = argv['--use-sentinal']

    # Quick preliminary sanity check
    for thisSubject in subjects:
        if len(subjects_sessions_dictionary[thisSubject]) == 0:
            print("ERROR: subject {0} has no sessions found.  Did you supply a valid subject id on the command line?".format(thisSubject) )
            sys.exit(-1)

    for thisSubject in subjects:
        print("Processing atlas generation for this subject: {0}".format(thisSubject))
        print("="*80)
        print("Copying Atlas directory and determining appropriate Nipype options...")
        subj_pipeline_options = nipype_options(argv, pipeline_options, cluster, experiment, environment)  # Generate Nipype options
        print("Dispatching jobs to the system...")
        ######
        ###### Now start workflow construction
        ######
        # Set universal pipeline options
        nipype_config.update_config(subj_pipeline_options)

        ready_for_template_building = True
        for thisSession in subjects_sessions_dictionary[thisSubject]:
            path_test = os.path.join(experiment['previousresult'],'*/{0}/{1}/TissueClassify/t1_average_BRAINSABC.nii.gz'.format(thisSubject,thisSession))
            t1_file_result = glob.glob(path_test)
            if len(t1_file_result) != 1:
                print("Incorrect number of t1 images found for data grabber {0}".format(t1_file_result))
                print("     at path {0}".format(path_test))
                ready_for_template_building = False
        if not ready_for_template_building:
            print("TEMPORARY SKIPPING:  Not ready to process {0}".format(thisSubject))
            continue

        base_output_directory = os.path.join(subj_pipeline_options['logging']['log_directory'],thisSubject)
        template = pe.Workflow(name='SubjectAtlas_Template_'+thisSubject)
        template.base_dir = base_output_directory

        subjectNode = pe.Node(interface=IdentityInterface(fields=['subject']), run_without_submitting=True, name='99_subjectIterator')
        subjectNode.inputs.subject = thisSubject

        sessionsExtractorNode = pe.Node(Function(function=getSessionsFromSubjectDictionary,
                                                          input_names=['subject_session_dictionary','subject'],
                                                          output_names=['sessions']),
                                       run_without_submitting=True, name="99_sessionsExtractor")
        sessionsExtractorNode.inputs.subject_session_dictionary = subjects_sessions_dictionary



        baselineOptionalDG = pe.MapNode(nio.DataGrabber(infields=['subject','session'],
                                                        outfields=[ 't2_average', 'pd_average',
                                                                   'fl_average'],
                                                       run_without_submitting=True
                                                       ),
                                        run_without_submitting=True,
                                        iterfield=['session'], name='BaselineOptional_DG')

        baselineOptionalDG.inputs.base_directory = experiment['previousresult']
        baselineOptionalDG.inputs.sort_filelist = True
        baselineOptionalDG.inputs.raise_on_empty = False
        baselineOptionalDG.inputs.template = '*'

        baselineOptionalDG.inputs.field_template = {
                                            't2_average':'*/%s/%s/TissueClassify/t2_average_BRAINSABC.nii.gz',
                                            'pd_average':'*/%s/%s/TissueClassify/pd_average_BRAINSABC.nii.gz',
                                            'fl_average':'*/%s/%s/TissueClassify/fl_average_BRAINSABC.nii.gz'
                                       }
        baselineOptionalDG.inputs.template_args  = {
                                            't2_average':[['subject','session']],
                                            'pd_average':[['subject','session']],
                                            'fl_average':[['subject','session']]
                                       }



        baselineRequiredDG = pe.MapNode(nio.DataGrabber(infields=['subject','session'],
                                                outfields=['t1_average', 'brainMaskLabels',
                                                           'posteriorImages','passive_intensities','passive_masks',
                                                           'BCD_ACPC_Landmarks_fcsv'],
                                run_without_submitting=True
                                ),
                                run_without_submitting=True,
                                iterfield=['session'], name='Baseline_DG')

        baselineRequiredDG.inputs.base_directory = experiment['previousresult']
        baselineRequiredDG.inputs.sort_filelist = True
        baselineRequiredDG.inputs.raise_on_empty = True
        baselineRequiredDG.inputs.template = '*'
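        # Per-session file name fragments expected under the previous results;
        # each list expands the final '%s' of the matching field_template entry below.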
        posterior_files = ['AIR', 'BASAL', 'CRBLGM', 'CRBLWM', 'CSF', 'GLOBUS', 'HIPPOCAMPUS',
                           'NOTCSF', 'NOTGM', 'NOTVB', 'NOTWM', 'SURFGM', 'THALAMUS', 'VB', 'WM']
        passive_intensities_files = [
            'rho.nii.gz',
            'phi.nii.gz',
            'theta.nii.gz',
            'l_thalamus_ProbabilityMap.nii.gz',
            'r_accumben_ProbabilityMap.nii.gz',
            'l_globus_ProbabilityMap.nii.gz',
            'l_accumben_ProbabilityMap.nii.gz',
            'l_caudate_ProbabilityMap.nii.gz',
            'l_putamen_ProbabilityMap.nii.gz',
            'r_thalamus_ProbabilityMap.nii.gz',
            'r_putamen_ProbabilityMap.nii.gz',
            'r_caudate_ProbabilityMap.nii.gz',
            'r_hippocampus_ProbabilityMap.nii.gz',
            'r_globus_ProbabilityMap.nii.gz',
            'l_hippocampus_ProbabilityMap.nii.gz'
            ]
        passive_mask_files = [
            'template_WMPM2_labels.nii.gz',
            'hncma_atlas.nii.gz',
            'template_nac_labels.nii.gz',
            'template_leftHemisphere.nii.gz',
            'template_rightHemisphere.nii.gz',
            'template_ventricles.nii.gz',
            'template_headregion.nii.gz'
            ]

        baselineRequiredDG.inputs.field_template = {'t1_average':'*/%s/%s/TissueClassify/t1_average_BRAINSABC.nii.gz',
                                       'brainMaskLabels':'*/%s/%s/TissueClassify/complete_brainlabels_seg.nii.gz',
                               'BCD_ACPC_Landmarks_fcsv':'*/%s/%s/ACPCAlign/BCD_ACPC_Landmarks.fcsv',
                                       'posteriorImages':'*/%s/%s/TissueClassify/POSTERIOR_%s.nii.gz',
                                   'passive_intensities':'*/%s/%s/WarpedAtlas2Subject/%s',
                                         'passive_masks':'*/%s/%s/WarpedAtlas2Subject/%s',
                                       }
        baselineRequiredDG.inputs.template_args  = {'t1_average':[['subject','session']],
                                       'brainMaskLabels':[['subject','session']],
                               'BCD_ACPC_Landmarks_fcsv':[['subject','session']],
                                       'posteriorImages':[['subject','session', posterior_files]],
                                   'passive_intensities':[['subject','session', passive_intensities_files]],
                                         'passive_masks':[['subject','session', passive_mask_files]]
                                       }

        MergeByExtendListElementsNode = pe.Node(Function(function=MergeByExtendListElements,
                                                         input_names=['t1s', 't2s',
                                                                      'pds', 'fls',
                                                                      'labels', 'posteriors',
                                                                      'passive_intensities', 'passive_masks'
                                                                      ],
                                                         output_names=['ListOfImagesDictionaries', 'registrationImageTypes',
                                                                       'interpolationMapping']),
                                                run_without_submitting=True, name="99_MergeByExtendListElements")

        template.connect([(subjectNode, baselineRequiredDG, [('subject', 'subject')]),
                          (subjectNode, baselineOptionalDG, [('subject', 'subject')]),
                          (subjectNode, sessionsExtractorNode, [('subject','subject')]),
                          (sessionsExtractorNode, baselineRequiredDG, [('sessions', 'session')]),
                          (sessionsExtractorNode, baselineOptionalDG, [('sessions', 'session')]),
                          (baselineRequiredDG, MergeByExtendListElementsNode,
                                    [('t1_average', 't1s'),
                                     ('brainMaskLabels', 'labels'),
                                     (('posteriorImages',
                                        ConvertSessionsListOfPosteriorListToDictionaryOfSessionLists), 'posteriors')
                                     ]),
                          (baselineOptionalDG, MergeByExtendListElementsNode,
                                    [
                                     ('t2_average', 't2s'),
                                     ('pd_average', 'pds'),
                                     ('fl_average', 'fls')
                                     ]),
                          (baselineRequiredDG, MergeByExtendListElementsNode,
                                     [
                                      (('passive_intensities',
                                        ConvertSessionsListOfPosteriorListToDictionaryOfSessionLists), 'passive_intensities')
                                     ]),
                          (baselineRequiredDG, MergeByExtendListElementsNode,
                                     [
                                     (('passive_masks',
                                        ConvertSessionsListOfPosteriorListToDictionaryOfSessionLists), 'passive_masks')
                                     ])
                        ])

        myInitAvgWF = pe.Node(interface=ants.AverageImages(), name='Atlas_antsSimpleAverage')  # was 'Phase1_antsSimpleAverage'
        myInitAvgWF.inputs.dimension = 3
        myInitAvgWF.inputs.normalize = True
        myInitAvgWF.inputs.num_threads = -1
        template.connect(baselineRequiredDG, 't1_average', myInitAvgWF, "images")
        ####################################################################################################
        # TEMPLATE_BUILD_RUN_MODE = 'MULTI_IMAGE'
        # if numSessions == 1:
        #     TEMPLATE_BUILD_RUN_MODE = 'SINGLE_IMAGE'
        ####################################################################################################
        CLUSTER_QUEUE=cluster['queue']
        CLUSTER_QUEUE_LONG=cluster['long_q']
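        # Two template-refinement passes: iteration01 registers the sessions to the
        # simple initial average; Iteration02 registers them to iteration01's output template.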
        buildTemplateIteration1 = BAWantsRegistrationTemplateBuildSingleIterationWF('iteration01',CLUSTER_QUEUE,CLUSTER_QUEUE_LONG)
        # buildTemplateIteration2 = buildTemplateIteration1.clone(name='buildTemplateIteration2')
        buildTemplateIteration2 = BAWantsRegistrationTemplateBuildSingleIterationWF('Iteration02',CLUSTER_QUEUE,CLUSTER_QUEUE_LONG)

        CreateAtlasXMLAndCleanedDeformedAveragesNode = pe.Node(interface=Function(function=CreateAtlasXMLAndCleanedDeformedAverages,
                                                              input_names=['t1_image', 'deformed_list', 'AtlasTemplate', 'outDefinition'],
                                                              output_names=['outAtlasFullPath', 'clean_deformed_list']),
                                           # This is a lot of work, so submit it run_without_submitting=True,
                                           run_without_submitting=True,  # HACK:  THIS NODE REALLY SHOULD RUN ON THE CLUSTER!
                                           name='99_CreateAtlasXMLAndCleanedDeformedAverages')

        if subj_pipeline_options['plugin_name'].startswith('SGE'):  # for some nodes, the qsub call needs to be modified on the cluster

            CreateAtlasXMLAndCleanedDeformedAveragesNode.plugin_args = {'template': subj_pipeline_options['plugin_args']['template'],
                                                    'qsub_args': modify_qsub_args(cluster['queue'], 1, 1, 1),
                                                    'overwrite': True}
            for bt in [buildTemplateIteration1, buildTemplateIteration2]:
                BeginANTS = bt.get_node("BeginANTS")
                BeginANTS.plugin_args = {'template': subj_pipeline_options['plugin_args']['template'], 'overwrite': True,
                                         'qsub_args': modify_qsub_args(cluster['queue'], 7, 4, 16)}
                wimtdeformed = bt.get_node("wimtdeformed")
                wimtdeformed.plugin_args = {'template': subj_pipeline_options['plugin_args']['template'], 'overwrite': True,
                                            'qsub_args': modify_qsub_args(cluster['queue'], 2, 2, 2)}

                #AvgAffineTransform = bt.get_node("AvgAffineTransform")
                #AvgAffineTransform.plugin_args = {'template': subj_pipeline_options['plugin_args']['template'], 'overwrite': True,
                #                                  'qsub_args': modify_qsub_args(cluster['queue'], 2, 1, 1)}

                wimtPassivedeformed = bt.get_node("wimtPassivedeformed")
                wimtPassivedeformed.plugin_args = {'template': subj_pipeline_options['plugin_args']['template'], 'overwrite': True,
                                                    'qsub_args': modify_qsub_args(cluster['queue'], 2, 2, 4)}

        # Running off previous baseline experiment
        NACCommonAtlas = MakeAtlasNode(experiment['atlascache'], 'NACCommonAtlas_{0}'.format('subject'),
                ['S_BRAINSABCSupport'] ) ## HACK : replace 'subject' with subject id once this is a loop rather than an iterable.
        template.connect([(myInitAvgWF, buildTemplateIteration1, [('output_average_image', 'inputspec.fixed_image')]),
                          (MergeByExtendListElementsNode, buildTemplateIteration1, [('ListOfImagesDictionaries', 'inputspec.ListOfImagesDictionaries'),
                                                                                    ('registrationImageTypes', 'inputspec.registrationImageTypes'),
                                                                                    ('interpolationMapping','inputspec.interpolationMapping')]),
                          (buildTemplateIteration1, buildTemplateIteration2, [('outputspec.template', 'inputspec.fixed_image')]),
                          (MergeByExtendListElementsNode, buildTemplateIteration2, [('ListOfImagesDictionaries', 'inputspec.ListOfImagesDictionaries'),
                                                                                    ('registrationImageTypes','inputspec.registrationImageTypes'),
                                                                                    ('interpolationMapping', 'inputspec.interpolationMapping')]),
                          (subjectNode, CreateAtlasXMLAndCleanedDeformedAveragesNode, [(('subject', xml_filename), 'outDefinition')]),
                          (NACCommonAtlas, CreateAtlasXMLAndCleanedDeformedAveragesNode, [('ExtendedAtlasDefinition_xml_in', 'AtlasTemplate')]),
                          (buildTemplateIteration2, CreateAtlasXMLAndCleanedDeformedAveragesNode, [('outputspec.template', 't1_image'),
                                                                               ('outputspec.passive_deformed_templates', 'deformed_list')]),
                          ])


        ## Generate an average lmks file.
        myAverageLmk = pe.Node(interface = GenerateAverageLmkFile(), name="myAverageLmk" )
        myAverageLmk.inputs.outputLandmarkFile = "AVG_LMKS.fcsv"
        template.connect(baselineRequiredDG,'BCD_ACPC_Landmarks_fcsv',myAverageLmk,'inputLandmarkFiles')

        # Create DataSinks
        SubjectAtlas_DataSink = pe.Node(nio.DataSink(), name="Subject_DS")
        SubjectAtlas_DataSink.overwrite = subj_pipeline_options['ds_overwrite']
        SubjectAtlas_DataSink.inputs.base_directory = experiment['resultdir']

        template.connect([(subjectNode, SubjectAtlas_DataSink, [('subject', 'container')]),
                          (CreateAtlasXMLAndCleanedDeformedAveragesNode, SubjectAtlas_DataSink, [('outAtlasFullPath', 'Atlas.@definitions')]),
                          (CreateAtlasXMLAndCleanedDeformedAveragesNode, SubjectAtlas_DataSink, [('clean_deformed_list', 'Atlas.@passive_deformed_templates')]),

                          (subjectNode, SubjectAtlas_DataSink, [(('subject', outputPattern), 'regexp_substitutions')]),
                          (buildTemplateIteration2, SubjectAtlas_DataSink, [('outputspec.template', 'Atlas.@template')]),
                          (myAverageLmk,SubjectAtlas_DataSink,[('outputLandmarkFile','Atlas.@outputLandmarkFile')]),
                         ])

        dotfilename = argv['--dotfilename']
        if dotfilename is not None:
            print("WARNING: Printing workflow, but not running pipeline")
            print_workflow(template, plugin=subj_pipeline_options['plugin_name'], dotfilename=dotfilename)
        else:
            run_workflow(template, plugin=subj_pipeline_options['plugin_name'], plugin_args=subj_pipeline_options['plugin_args'])
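
The runner above is driven by a docopt-style argv dictionary plus four plain configuration dictionaries. A minimal, hypothetical invocation might look like the sketch below; every path, queue name, and option value is an illustrative placeholder, not taken from the project.

# Hypothetical driver sketch -- the keys mirror what _template_runner reads above;
# all values are placeholders.
argv = {'SUBJECTS': '0001,0002',   # subject ids, in whatever format get_subjects_sessions_dictionary expects
        '--use-sentinal': True,
        '--use-shuffle': False,
        '--dotfilename': None}     # None => run the workflow instead of only plotting it
environment = {'prefix': '/some/prefix'}
experiment = {'cachedir': '/tmp/cache',
              'resultdir': '/tmp/results',
              'dbfile': '/tmp/subjects.db',
              'previousresult': '/tmp/previous_experiment',
              'atlascache': '/tmp/atlas_cache'}
pipeline_options = {}              # merged into per-subject Nipype options by nipype_options()
cluster = {'queue': 'all.q', 'long_q': 'long.q'}

_template_runner(argv, environment, experiment, pipeline_options, cluster)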
Code example #2
File: singleSession.py  Project: ihnorton/BRAINSTools
def createAndRun(sessions, environment, experiment, pipeline, cluster, useSentinal, dryRun):
    from baw_exp import OpenSubjectDatabase
    from utilities.misc import add_dict

    from workflows.utils import run_workflow

    master_config = {}
    for configDict in [environment, experiment, pipeline, cluster]:
        master_config = add_dict(master_config, configDict)
    database = OpenSubjectDatabase(experiment['cachedir'], ['all'], environment['prefix'], experiment['dbfile'])
    database.open_connection()
    try:
        all_sessions = database.getAllSessions()
        if not set(sessions) <= set(all_sessions) and 'all' not in sessions:
            missing = set(sessions) - set(all_sessions)
            assert len(missing) == 0, "Requested sessions are missing from the database: {0}\n\n{1}".format(missing,all_sessions)
        elif 'all' in sessions:
            sessions = set(all_sessions)
        else:
            sessions = set(sessions)
        print("!=" * 40)
        print("Doing sessions {0}".format(sessions))
        print("!=" * 40)
        for session in sessions:
            _dict = {}
            subject = database.getSubjFromSession(session)
            _dict['session'] = session
            _dict['project'] = database.getProjFromSession(session)
            _dict['subject'] = subject
            _dict['T1s'] = database.getFilenamesByScantype(session, ['T1-15', 'T1-30'])
            _dict['T2s'] = database.getFilenamesByScantype(session, ['T2-15', 'T2-30'])
            _dict['PDs'] = database.getFilenamesByScantype(session, ['PD-15', 'PD-30'])
            _dict['FLs'] = database.getFilenamesByScantype(session, ['FL-15', 'FL-30'])
            _dict['OTs'] = database.getFilenamesByScantype(session, ['OTHER-15', 'OTHER-30'])
            sentinal_file_basedir = os.path.join(
                master_config['resultdir'],
                _dict['project'],
                _dict['subject'],
                _dict['session']
            )

            sentinal_file_list = list()
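            # Sentinel files: the paths appended below are the outputs each enabled
            # component should already have produced; if all of them exist (and
            # useSentinal is set), this session is skipped instead of re-processed.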
            sentinal_file_list.append(os.path.join(sentinal_file_basedir))
            if 'denoise' in master_config['components']:
                # # NO SENTINAL FILE
                pass

            # # Use t1 average sentinal file if  specified.
            if 'landmark' in master_config['components']:
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "ACPCAlign",
                    "landmarkInitializer_atlas_to_subject_transform.h5"
                ))

            if 'tissue_classify' in master_config['components']:
                for tc_file in ["complete_brainlabels_seg.nii.gz", "t1_average_BRAINSABC.nii.gz"]:
                    sentinal_file_list.append(os.path.join(
                        sentinal_file_basedir,
                        "TissueClassify",
                        tc_file
                    ))

            if 'warp_atlas_to_subject' in master_config['components']:
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "WarpedAtlas2Subject",
                    "rho.nii.gz"
                ))
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "WarpedAtlas2Subject",
                    "left_hemisphere_wm.nii.gz"
                ))

            if 'malf_2012_neuro' in master_config['components']:
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "TissueClassify",
                    "MALF_HDAtlas20_2015_fs_standard_label.nii.gz"
                ))
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "TissueClassify",
                    "MALF_HDAtlas20_2015_label.nii.gz"
                ))
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "TissueClassify",
                    "MALF_HDAtlas20_2015_lobar_label.nii.gz"
                ))
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "TissueClassify",
                    "MALF_HDAtlas20_2015_CSFVBInjected_label.nii.gz"
                ))
            if 'malf_2015_wholebrain' in master_config['components']:
                pass

            if master_config['workflow_phase'] == 'atlas-based-reference':
                atlasDirectory = os.path.join(master_config['atlascache'], 'spatialImages', 'rho.nii.gz')
            else:
                atlasDirectory = os.path.join(master_config['previousresult'], subject, 'Atlas', 'AVG_rho.nii.gz')

            if os.path.exists(atlasDirectory):
                print("LOOKING FOR DIRECTORY {0}".format(atlasDirectory))
            else:
                print("MISSING REQUIRED ATLAS INPUT {0}".format(atlasDirectory))
                print("SKIPPING: {0} prerequisites missing".format(session))
                continue

            ## Use different sentinal file if segmentation specified.
            from workflows.baseline import DetermineIfSegmentationShouldBeDone

            do_BRAINSCut_Segmentation = DetermineIfSegmentationShouldBeDone(master_config)
            if do_BRAINSCut_Segmentation:
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "CleanedDenoisedRFSegmentations",
                    "allLabels_seg.nii.gz"
                ))

            def allPathsExists(list_of_paths):
                is_missing = False
                for ff in list_of_paths:
                    if not os.path.exists(ff):
                        is_missing = True
                        print("MISSING: {0}".format(ff))
                return not is_missing

            if useSentinal and allPathsExists(sentinal_file_list):
                print("SKIPPING: {0} exists".format(sentinal_file_list))
            else:
                print("PROCESSING INCOMPLETE: at least 1 required file does not exists")
                if dryRun == False:
                    workflow = _create_singleSession(_dict, master_config, 'Linear',
                                                     'singleSession_{0}_{1}'.format(_dict['subject'], _dict['session']))
                    print("Starting session {0}".format(session))
                    # HACK Hard-coded to SGEGraph, but --wfrun is ignored completely
                    run_workflow(workflow, plugin=master_config['plugin_name'],
                                 plugin_args=master_config['plugin_args'])
                else:
                    print("EXITING WITHOUT WORK DUE TO dryRun flag")
    except:
        raise
    finally:
        try:
            database.close_connection()
        except:
            pass
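
createAndRun merges its four configuration dictionaries into a single master_config before opening the subject database. A minimal, hypothetical call might look like the following sketch; all paths and values are illustrative placeholders rather than settings from the project.

# Hypothetical usage sketch -- keys mirror what createAndRun reads above; values are placeholders.
environment = {'prefix': '/some/prefix'}
experiment = {'cachedir': '/tmp/cache',
              'resultdir': '/tmp/results',
              'dbfile': '/tmp/subjects.db',
              'atlascache': '/tmp/atlas_cache'}
pipeline = {'components': ['landmark', 'tissue_classify'],
            'workflow_phase': 'atlas-based-reference',
            'plugin_name': 'SGEGraph',
            'plugin_args': {}}
cluster = {'queue': 'all.q'}

createAndRun(sessions=['all'], environment=environment, experiment=experiment,
             pipeline=pipeline, cluster=cluster,
             useSentinal=True, dryRun=True)  # dryRun=True reports work but starts nothing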
Code example #3
def createAndRun(sessions, environment, experiment, pipeline, cluster,
                 useSentinal, dryRun):
    from baw_exp import OpenSubjectDatabase
    from utilities.misc import add_dict

    from workflows.utils import run_workflow

    master_config = {}
    for configDict in [environment, experiment, pipeline, cluster]:
        master_config = add_dict(master_config, configDict)
    database = OpenSubjectDatabase(experiment['cachedir'], ['all'],
                                   environment['prefix'], experiment['dbfile'])
    database.open_connection()
    try:
        all_sessions = database.getAllSessions()
        if not set(sessions) <= set(all_sessions) and 'all' not in sessions:
            missing = set(sessions) - set(all_sessions)
            assert len(
                missing
            ) == 0, "Requested sessions are missing from the database: {0}\n\n{1}".format(
                missing, all_sessions)
        elif 'all' in sessions:
            sessions = set(all_sessions)
        else:
            sessions = set(sessions)
        print("!=" * 40)
        print("Doing sessions {0}".format(sessions))
        print("!=" * 40)
        for session in sessions:
            _dict = {}
            subject = database.getSubjFromSession(session)
            _dict['session'] = session
            _dict['project'] = database.getProjFromSession(session)
            _dict['subject'] = subject
            _dict['T1s'] = database.getFilenamesByScantype(
                session, ['T1-15', 'T1-30'])
            _dict['T2s'] = database.getFilenamesByScantype(
                session, ['T2-15', 'T2-30'])
            _dict['PDs'] = database.getFilenamesByScantype(
                session, ['PD-15', 'PD-30'])
            _dict['FLs'] = database.getFilenamesByScantype(
                session, ['FL-15', 'FL-30'])
            _dict['OTs'] = database.getFilenamesByScantype(
                session, ['OTHER-15', 'OTHER-30'])
            sentinal_file_basedir = os.path.join(master_config['resultdir'],
                                                 _dict['project'],
                                                 _dict['subject'],
                                                 _dict['session'])

            sentinal_file_list = list()
            sentinal_file_list.append(os.path.join(sentinal_file_basedir))
            if 'denoise' in master_config['components']:
                # # NO SENTINAL FILE
                pass

            # # Use t1 average sentinal file if  specified.
            if 'landmark' in master_config['components']:
                sentinal_file_list.append(
                    os.path.join(
                        sentinal_file_basedir, "ACPCAlign",
                        "landmarkInitializer_atlas_to_subject_transform.h5"))

            if 'tissue_classify' in master_config['components']:
                for tc_file in [
                        "complete_brainlabels_seg.nii.gz",
                        "t1_average_BRAINSABC.nii.gz"
                ]:
                    sentinal_file_list.append(
                        os.path.join(sentinal_file_basedir, "TissueClassify",
                                     tc_file))

            if 'warp_atlas_to_subject' in master_config['components']:
                sentinal_file_list.append(
                    os.path.join(sentinal_file_basedir, "WarpedAtlas2Subject",
                                 "rho.nii.gz"))
                sentinal_file_list.append(
                    os.path.join(sentinal_file_basedir, "WarpedAtlas2Subject",
                                 "left_hemisphere_wm.nii.gz"))

            if 'malf_2012_neuro' in master_config['components']:
                sentinal_file_list.append(
                    os.path.join(
                        sentinal_file_basedir, "TissueClassify",
                        "MALF_HDAtlas20_2015_fs_standard_label.nii.gz"))
                sentinal_file_list.append(
                    os.path.join(sentinal_file_basedir, "TissueClassify",
                                 "MALF_HDAtlas20_2015_label.nii.gz"))
                sentinal_file_list.append(
                    os.path.join(sentinal_file_basedir, "TissueClassify",
                                 "MALF_HDAtlas20_2015_lobar_label.nii.gz"))
                sentinal_file_list.append(
                    os.path.join(
                        sentinal_file_basedir, "TissueClassify",
                        "MALF_HDAtlas20_2015_CSFVBInjected_label.nii.gz"))
            if 'malf_2015_wholebrain' in master_config['components']:
                pass

            if master_config['workflow_phase'] == 'atlas-based-reference':
                atlasDirectory = os.path.join(master_config['atlascache'],
                                              'spatialImages', 'rho.nii.gz')
            else:
                atlasDirectory = os.path.join(master_config['previousresult'],
                                              subject, 'Atlas',
                                              'AVG_rho.nii.gz')

            if os.path.exists(atlasDirectory):
                print("LOOKING FOR DIRECTORY {0}".format(atlasDirectory))
            else:
                print(
                    "MISSING REQUIRED ATLAS INPUT {0}".format(atlasDirectory))
                print("SKIPPING: {0} prerequisites missing".format(session))
                continue

            ## Use different sentinal file if segmentation specified.
            from workflows.baseline import DetermineIfSegmentationShouldBeDone

            do_BRAINSCut_Segmentation = DetermineIfSegmentationShouldBeDone(
                master_config)
            if do_BRAINSCut_Segmentation:
                sentinal_file_list.append(
                    os.path.join(sentinal_file_basedir,
                                 "CleanedDenoisedRFSegmentations",
                                 "allLabels_seg.nii.gz"))

            def allPathsExists(list_of_paths):
                is_missing = False
                for ff in list_of_paths:
                    if not os.path.exists(ff):
                        is_missing = True
                        print("MISSING: {0}".format(ff))
                return not is_missing

            if useSentinal and allPathsExists(sentinal_file_list):
                print("SKIPPING: {0} exists".format(sentinal_file_list))
            else:
                print(
                    "PROCESSING INCOMPLETE: at least 1 required file does not exists"
                )
                if dryRun == False:
                    workflow = _create_singleSession(
                        _dict, master_config, 'Linear',
                        'singleSession_{0}_{1}'.format(_dict['subject'],
                                                       _dict['session']))
                    print("Starting session {0}".format(session))
                    # HACK Hard-coded to SGEGraph, but --wfrun is ignored completely
                    run_workflow(workflow,
                                 plugin=master_config['plugin_name'],
                                 plugin_args=master_config['plugin_args'])
                else:
                    print("EXITING WITHOUT WORK DUE TO dryRun flag")
    except:
        raise
    finally:
        try:
            database.close_connection()
        except:
            pass
Code example #4
def createAndRun(sessions, environment, experiment, pipeline, cluster,
                 useSentinal, dryRun):
    from baw_exp import OpenSubjectDatabase
    from utilities.misc import add_dict
    from collections import OrderedDict
    import sys

    from workflows.utils import run_workflow

    master_config = {}
    for configDict in [environment, experiment, pipeline, cluster]:
        master_config = add_dict(master_config, configDict)
    database = OpenSubjectDatabase(experiment['cachedir'], ['all'],
                                   environment['prefix'], experiment['dbfile'])
    database.open_connection()
    try:
        all_sessions = database.getAllSessions()
        if not set(sessions) <= set(all_sessions) and 'all' not in sessions:
            missing = set(sessions) - set(all_sessions)
            assert len(
                missing
            ) == 0, "Requested sessions are missing from the database: {0}\n\n{1}".format(
                missing, all_sessions)
        elif 'all' in sessions:
            sessions = set(all_sessions)
        else:
            sessions = set(sessions)
        print("!=" * 40)
        print("Doing sessions {0}".format(sessions))
        print("!=" * 40)
        for session in sessions:
            _dict = OrderedDict()
            # Look up the subject before the T1 check so the error message
            # below can reference it.
            subject = database.getSubjFromSession(session)
            t1_list = database.getFilenamesByScantype(session,
                                                      ['T1-15', 'T1-30'])
            if len(t1_list) == 0:
                print(
                    "ERROR: Skipping session {0} for subject {1} due to missing T1's"
                    .format(session, subject))
                print("REMOVE OR FIX BEFORE CONTINUING")
                continue
            _dict['session'] = session
            _dict['project'] = database.getProjFromSession(session)
            _dict['subject'] = subject
            _dict['T1s'] = t1_list
            _dict['T2s'] = database.getFilenamesByScantype(
                session, ['T2-15', 'T2-30'])
            _dict['BadT2'] = False
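            # If every available T2 is of the 'T2-15' scan type, flag the session;
            # such T2s are not used for JointFusion downstream.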
            if _dict['T2s'] == database.getFilenamesByScantype(
                    session, ['T2-15']):
                print("This T2 is not going to be used for JointFusion")
                print("This T2 is not going to be used for JointFusion")
                print("This T2 is not going to be used for JointFusion")
                print("This T2 is not going to be used for JointFusion")
                print(_dict['T2s'])
                _dict['BadT2'] = True
            _dict['PDs'] = database.getFilenamesByScantype(
                session, ['PD-15', 'PD-30'])
            _dict['FLs'] = database.getFilenamesByScantype(
                session, ['FL-15', 'FL-30'])
            _dict['EMSP'] = database.getFilenamesByScantype(session, ['EMSP'])
            _dict['OTHERs'] = database.getFilenamesByScantype(
                session, ['OTHER-15', 'OTHER-30'])
            sentinal_file_basedir = os.path.join(master_config['resultdir'],
                                                 _dict['project'],
                                                 _dict['subject'],
                                                 _dict['session'])

            sentinal_file_list = list()
            sentinal_file_list.append(os.path.join(sentinal_file_basedir))
            if 'denoise' in master_config['components']:
                # # NO SENTINAL FILE
                pass

            # # Use t1 average sentinal file if  specified.
            if 'landmark' in master_config['components']:
                sentinal_file_list.append(
                    os.path.join(
                        sentinal_file_basedir, "ACPCAlign",
                        "landmarkInitializer_atlas_to_subject_transform.h5"))

            if 'tissue_classify' in master_config['components']:
                for tc_file in [
                        "complete_brainlabels_seg.nii.gz",
                        "t1_average_BRAINSABC.nii.gz"
                ]:
                    sentinal_file_list.append(
                        os.path.join(sentinal_file_basedir, "TissueClassify",
                                     tc_file))

            if 'warp_atlas_to_subject' in master_config['components']:
                warp_atlas_file_list = [
                    "hncma_atlas.nii.gz", "l_accumben_ProbabilityMap.nii.gz",
                    "l_caudate_ProbabilityMap.nii.gz",
                    "l_globus_ProbabilityMap.nii.gz",
                    "l_hippocampus_ProbabilityMap.nii.gz",
                    "l_putamen_ProbabilityMap.nii.gz",
                    "l_thalamus_ProbabilityMap.nii.gz",
                    "left_hemisphere_wm.nii.gz", "phi.nii.gz",
                    "r_accumben_ProbabilityMap.nii.gz",
                    "r_caudate_ProbabilityMap.nii.gz",
                    "r_globus_ProbabilityMap.nii.gz",
                    "r_hippocampus_ProbabilityMap.nii.gz",
                    "r_putamen_ProbabilityMap.nii.gz",
                    "r_thalamus_ProbabilityMap.nii.gz", "rho.nii.gz",
                    "right_hemisphere_wm.nii.gz",
                    "template_WMPM2_labels.nii.gz",
                    "template_headregion.nii.gz",
                    "template_leftHemisphere.nii.gz",
                    "template_nac_labels.nii.gz",
                    "template_rightHemisphere.nii.gz",
                    "template_ventricles.nii.gz", "theta.nii.gz"
                ]
                for ff in warp_atlas_file_list:
                    sentinal_file_list.append(
                        os.path.join(sentinal_file_basedir,
                                     "WarpedAtlas2Subject", ff))

            if 'jointfusion_2015_wholebrain' in master_config['components']:
                sentinal_file_list.append(
                    os.path.join(
                        sentinal_file_basedir, "TissueClassify",
                        "JointFusion_HDAtlas20_2015_lobar_label.nii.gz"))
                sentinal_file_list.append(
                    os.path.join(sentinal_file_basedir, "TissueClassify",
                                 "lobeVolumes_JSON.json"))

            if master_config['workflow_phase'] == 'atlas-based-reference':
                atlasDirectory = os.path.join(master_config['atlascache'],
                                              'spatialImages', 'rho.nii.gz')
                sentinal_file_list.append(atlasDirectory)
            else:
                atlasDirectory = os.path.join(master_config['previousresult'],
                                              subject, 'Atlas',
                                              'AVG_rho.nii.gz')
                sentinal_file_list.append(atlasDirectory)
                sentinal_file_list.append(
                    os.path.join(master_config['previousresult'], subject,
                                 'Atlas', 'AVG_template_headregion.nii.gz'))

            if os.path.exists(atlasDirectory):
                print("LOOKING FOR DIRECTORY {0}".format(atlasDirectory))
            else:
                print(
                    "MISSING REQUIRED ATLAS INPUT {0}".format(atlasDirectory))
                print("SKIPPING: {0} prerequisites missing".format(session))
                continue

            ## Use different sentinal file if segmentation specified.
            from workflows.baseline import DetermineIfSegmentationShouldBeDone

            do_BRAINSCut_Segmentation = DetermineIfSegmentationShouldBeDone(
                master_config)
            if do_BRAINSCut_Segmentation:
                sentinal_file_list.append(
                    os.path.join(sentinal_file_basedir,
                                 "CleanedDenoisedRFSegmentations",
                                 "allLabels_seg.nii.gz"))

            def allPathsExists(list_of_paths):
                is_missing = False
                for ff in list_of_paths:
                    if not os.path.exists(ff):
                        is_missing = True
                        print("MISSING: {0}".format(ff))
                return not is_missing

            if useSentinal and allPathsExists(sentinal_file_list):
                print("SKIPPING: {0} exists".format(sentinal_file_list))
            else:
                print(
                    "PROCESSING INCOMPLETE: at least 1 required file does not exists"
                )
                if dryRun == False:
                    workflow = _create_singleSession(
                        _dict, master_config, 'Linear',
                        'singleSession_{0}_{1}'.format(_dict['subject'],
                                                       _dict['session']))
                    print("Starting session {0}".format(session))
                    # HACK Hard-coded to SGEGraph, but --wfrun is ignored completely
                    run_workflow(workflow,
                                 plugin=master_config['plugin_name'],
                                 plugin_args=master_config['plugin_args'])
                else:
                    print("EXITING WITHOUT WORK DUE TO dryRun flag")
    except:
        raise
    finally:
        try:
            database.close_connection()
        except:
            pass
Code example #5
def _template_runner(argv, environment, experiment, pipeline_options, cluster):
    print("Getting subjects from database...")
    # subjects = argv["--subjects"].split(',')
    subjects, subjects_sessions_dictionary = get_subjects_sessions_dictionary(
        argv['SUBJECTS'], experiment['cachedir'], experiment['resultdir'],
        environment['prefix'], experiment['dbfile'], argv['--use-sentinal'],
        argv['--use-shuffle'])  # Build database before parallel section
    useSentinal = argv['--use-sentinal']

    # Quick preliminary sanity check
    for thisSubject in subjects:
        if len(subjects_sessions_dictionary[thisSubject]) == 0:
            print(
                "ERROR: subject {0} has no sessions found.  Did you supply a valid subject id on the command line?"
                .format(thisSubject))
            sys.exit(-1)

    for thisSubject in subjects:
        print("Processing atlas generation for this subject: {0}".format(
            thisSubject))
        print("=" * 80)
        print(
            "Copying Atlas directory and determining appropriate Nipype options..."
        )
        subj_pipeline_options = nipype_options(
            argv, pipeline_options, cluster, experiment,
            environment)  # Generate Nipype options
        print("Dispatching jobs to the system...")
        ######
        ###### Now start workflow construction
        ######
        # Set universal pipeline options
        nipype_config.update_config(subj_pipeline_options)

        ready_for_template_building = True
        for thisSession in subjects_sessions_dictionary[thisSubject]:
            path_test = os.path.join(
                experiment['previousresult'],
                '*/{0}/{1}/TissueClassify/t1_average_BRAINSABC.nii.gz'.format(
                    thisSubject, thisSession))
            t1_file_result = glob.glob(path_test)
            if len(t1_file_result) != 1:
                print(
                    "Incorrect number of t1 images found for data grabber {0}".
                    format(t1_file_result))
                print("     at path {0}".format(path_test))
                ready_for_template_building = False
        if not ready_for_template_building:
            print("TEMPORARY SKIPPING:  Not ready to process {0}".format(
                thisSubject))
            continue

        base_output_directory = os.path.join(
            subj_pipeline_options['logging']['log_directory'], thisSubject)
        template = pe.Workflow(name='SubjectAtlas_Template_' + thisSubject)
        template.base_dir = base_output_directory

        subjectNode = pe.Node(interface=IdentityInterface(fields=['subject']),
                              run_without_submitting=True,
                              name='99_subjectIterator')
        subjectNode.inputs.subject = thisSubject

        sessionsExtractorNode = pe.Node(Function(
            function=getSessionsFromSubjectDictionary,
            input_names=['subject_session_dictionary', 'subject'],
            output_names=['sessions']),
                                        run_without_submitting=True,
                                        name="99_sessionsExtractor")
        sessionsExtractorNode.inputs.subject_session_dictionary = subjects_sessions_dictionary

        baselineOptionalDG = pe.MapNode(nio.DataGrabber(
            infields=['subject', 'session'],
            outfields=['t2_average', 'pd_average', 'fl_average'],
            run_without_submitting=True),
                                        run_without_submitting=True,
                                        iterfield=['session'],
                                        name='BaselineOptional_DG')

        baselineOptionalDG.inputs.base_directory = experiment['previousresult']
        baselineOptionalDG.inputs.sort_filelist = True
        baselineOptionalDG.inputs.raise_on_empty = False
        baselineOptionalDG.inputs.template = '*'

        baselineOptionalDG.inputs.field_template = {
            't2_average': '*/%s/%s/TissueClassify/t2_average_BRAINSABC.nii.gz',
            'pd_average': '*/%s/%s/TissueClassify/pd_average_BRAINSABC.nii.gz',
            'fl_average': '*/%s/%s/TissueClassify/fl_average_BRAINSABC.nii.gz'
        }
        baselineOptionalDG.inputs.template_args = {
            't2_average': [['subject', 'session']],
            'pd_average': [['subject', 'session']],
            'fl_average': [['subject', 'session']]
        }

        baselineRequiredDG = pe.MapNode(nio.DataGrabber(
            infields=['subject', 'session'],
            outfields=[
                't1_average', 'brainMaskLabels', 'posteriorImages',
                'passive_intensities', 'passive_masks',
                'BCD_ACPC_Landmarks_fcsv'
            ],
            run_without_submitting=True),
                                        run_without_submitting=True,
                                        iterfield=['session'],
                                        name='Baseline_DG')

        baselineRequiredDG.inputs.base_directory = experiment['previousresult']
        baselineRequiredDG.inputs.sort_filelist = True
        baselineRequiredDG.inputs.raise_on_empty = True
        baselineRequiredDG.inputs.template = '*'
        posterior_files = [
            'AIR', 'BASAL', 'CRBLGM', 'CRBLWM', 'CSF', 'GLOBUS', 'HIPPOCAMPUS',
            'NOTCSF', 'NOTGM', 'NOTVB', 'NOTWM', 'SURFGM', 'THALAMUS', 'VB',
            'WM'
        ]
        passive_intensities_files = [
            'rho.nii.gz', 'phi.nii.gz', 'theta.nii.gz',
            'l_thalamus_ProbabilityMap.nii.gz',
            'r_accumben_ProbabilityMap.nii.gz',
            'l_globus_ProbabilityMap.nii.gz',
            'l_accumben_ProbabilityMap.nii.gz',
            'l_caudate_ProbabilityMap.nii.gz',
            'l_putamen_ProbabilityMap.nii.gz',
            'r_thalamus_ProbabilityMap.nii.gz',
            'r_putamen_ProbabilityMap.nii.gz',
            'r_caudate_ProbabilityMap.nii.gz',
            'r_hippocampus_ProbabilityMap.nii.gz',
            'r_globus_ProbabilityMap.nii.gz',
            'l_hippocampus_ProbabilityMap.nii.gz'
        ]
        passive_mask_files = [
            'template_WMPM2_labels.nii.gz', 'hncma_atlas.nii.gz',
            'template_nac_labels.nii.gz', 'template_leftHemisphere.nii.gz',
            'template_rightHemisphere.nii.gz', 'template_ventricles.nii.gz',
            'template_headregion.nii.gz'
        ]

        baselineRequiredDG.inputs.field_template = {
            't1_average': '*/%s/%s/TissueClassify/t1_average_BRAINSABC.nii.gz',
            'brainMaskLabels':
            '*/%s/%s/TissueClassify/complete_brainlabels_seg.nii.gz',
            'BCD_ACPC_Landmarks_fcsv':
            '*/%s/%s/ACPCAlign/BCD_ACPC_Landmarks.fcsv',
            'posteriorImages': '*/%s/%s/TissueClassify/POSTERIOR_%s.nii.gz',
            'passive_intensities': '*/%s/%s/WarpedAtlas2Subject/%s',
            'passive_masks': '*/%s/%s/WarpedAtlas2Subject/%s',
        }
        baselineRequiredDG.inputs.template_args = {
            't1_average': [['subject', 'session']],
            'brainMaskLabels': [['subject', 'session']],
            'BCD_ACPC_Landmarks_fcsv': [['subject', 'session']],
            'posteriorImages': [['subject', 'session', posterior_files]],
            'passive_intensities':
            [['subject', 'session', passive_intensities_files]],
            'passive_masks': [['subject', 'session', passive_mask_files]]
        }

        MergeByExtendListElementsNode = pe.Node(
            Function(function=MergeByExtendListElements,
                     input_names=[
                         't1s', 't2s', 'pds', 'fls', 'labels', 'posteriors',
                         'passive_intensities', 'passive_masks'
                     ],
                     output_names=[
                         'ListOfImagesDictionaries', 'registrationImageTypes',
                         'interpolationMapping'
                     ]),
            run_without_submitting=True,
            name="99_MergeByExtendListElements")

        template.connect([
            (subjectNode, baselineRequiredDG, [('subject', 'subject')]),
            (subjectNode, baselineOptionalDG, [('subject', 'subject')]),
            (subjectNode, sessionsExtractorNode, [('subject', 'subject')]),
            (sessionsExtractorNode, baselineRequiredDG, [('sessions',
                                                          'session')]),
            (sessionsExtractorNode, baselineOptionalDG, [('sessions',
                                                          'session')]),
            (baselineRequiredDG, MergeByExtendListElementsNode,
             [('t1_average', 't1s'), ('brainMaskLabels', 'labels'),
              (('posteriorImages',
                ConvertSessionsListOfPosteriorListToDictionaryOfSessionLists),
               'posteriors')]),
            (baselineOptionalDG, MergeByExtendListElementsNode,
             [('t2_average', 't2s'), ('pd_average', 'pds'),
              ('fl_average', 'fls')]),
            (baselineRequiredDG, MergeByExtendListElementsNode,
             [(('passive_intensities',
                ConvertSessionsListOfPosteriorListToDictionaryOfSessionLists),
               'passive_intensities')]),
            (baselineRequiredDG, MergeByExtendListElementsNode,
             [(('passive_masks',
                ConvertSessionsListOfPosteriorListToDictionaryOfSessionLists),
               'passive_masks')])
        ])

        myInitAvgWF = pe.Node(
            interface=ants.AverageImages(),
            name='Atlas_antsSimpleAverage')  # was 'Phase1_antsSimpleAverage'
        myInitAvgWF.inputs.dimension = 3
        myInitAvgWF.inputs.normalize = True
        myInitAvgWF.inputs.num_threads = -1
        template.connect(baselineRequiredDG, 't1_average', myInitAvgWF,
                         "images")
        ####################################################################################################
        # TEMPLATE_BUILD_RUN_MODE = 'MULTI_IMAGE'
        # if numSessions == 1:
        #     TEMPLATE_BUILD_RUN_MODE = 'SINGLE_IMAGE'
        ####################################################################################################
        CLUSTER_QUEUE = cluster['queue']
        CLUSTER_QUEUE_LONG = cluster['long_q']
        buildTemplateIteration1 = BAWantsRegistrationTemplateBuildSingleIterationWF(
            'iteration01', CLUSTER_QUEUE, CLUSTER_QUEUE_LONG)
        # buildTemplateIteration2 = buildTemplateIteration1.clone(name='buildTemplateIteration2')
        buildTemplateIteration2 = BAWantsRegistrationTemplateBuildSingleIterationWF(
            'Iteration02', CLUSTER_QUEUE, CLUSTER_QUEUE_LONG)

        CreateAtlasXMLAndCleanedDeformedAveragesNode = pe.Node(
            interface=Function(
                function=CreateAtlasXMLAndCleanedDeformedAverages,
                input_names=[
                    't1_image', 'deformed_list', 'AtlasTemplate',
                    'outDefinition'
                ],
                output_names=['outAtlasFullPath', 'clean_deformed_list']),
            # This is a lot of work, so submit it run_without_submitting=True,
            run_without_submitting=
            True,  # HACK:  THIS NODE REALLY SHOULD RUN ON THE CLUSTER!
            name='99_CreateAtlasXMLAndCleanedDeformedAverages')

        if subj_pipeline_options['plugin_name'].startswith(
                'SGE'
        ):  # for some nodes, the qsub call needs to be modified on the cluster

            CreateAtlasXMLAndCleanedDeformedAveragesNode.plugin_args = {
                'template': subj_pipeline_options['plugin_args']['template'],
                'qsub_args': modify_qsub_args(cluster['queue'], 1, 1, 1),
                'overwrite': True
            }
            for bt in [buildTemplateIteration1, buildTemplateIteration2]:
                BeginANTS = bt.get_node("BeginANTS")
                BeginANTS.plugin_args = {
                    'template':
                    subj_pipeline_options['plugin_args']['template'],
                    'overwrite': True,
                    'qsub_args': modify_qsub_args(cluster['queue'], 7, 4, 16)
                }
                wimtdeformed = bt.get_node("wimtdeformed")
                wimtdeformed.plugin_args = {
                    'template':
                    subj_pipeline_options['plugin_args']['template'],
                    'overwrite': True,
                    'qsub_args': modify_qsub_args(cluster['queue'], 2, 2, 2)
                }

                #AvgAffineTransform = bt.get_node("AvgAffineTransform")
                #AvgAffineTransform.plugin_args = {'template': subj_pipeline_options['plugin_args']['template'], 'overwrite': True,
                #                                  'qsub_args': modify_qsub_args(cluster['queue'], 2, 1, 1)}

                wimtPassivedeformed = bt.get_node("wimtPassivedeformed")
                wimtPassivedeformed.plugin_args = {
                    'template':
                    subj_pipeline_options['plugin_args']['template'],
                    'overwrite': True,
                    'qsub_args': modify_qsub_args(cluster['queue'], 2, 2, 4)
                }

        # Running off previous baseline experiment
        NACCommonAtlas = MakeAtlasNode(
            experiment['atlascache'], 'NACCommonAtlas_{0}'.format('subject'),
            ['S_BRAINSABCSupport']
        )  ## HACK : replace 'subject' with subject id once this is a loop rather than an iterable.
        template.connect([
            (myInitAvgWF, buildTemplateIteration1,
             [('output_average_image', 'inputspec.fixed_image')]),
            (MergeByExtendListElementsNode, buildTemplateIteration1,
             [('ListOfImagesDictionaries',
               'inputspec.ListOfImagesDictionaries'),
              ('registrationImageTypes', 'inputspec.registrationImageTypes'),
              ('interpolationMapping', 'inputspec.interpolationMapping')]),
            (buildTemplateIteration1, buildTemplateIteration2,
             [('outputspec.template', 'inputspec.fixed_image')]),
            (MergeByExtendListElementsNode, buildTemplateIteration2,
             [('ListOfImagesDictionaries',
               'inputspec.ListOfImagesDictionaries'),
              ('registrationImageTypes', 'inputspec.registrationImageTypes'),
              ('interpolationMapping', 'inputspec.interpolationMapping')]),
            (subjectNode, CreateAtlasXMLAndCleanedDeformedAveragesNode,
             [(('subject', xml_filename), 'outDefinition')]),
            (NACCommonAtlas, CreateAtlasXMLAndCleanedDeformedAveragesNode,
             [('ExtendedAtlasDefinition_xml_in', 'AtlasTemplate')]),
            (buildTemplateIteration2,
             CreateAtlasXMLAndCleanedDeformedAveragesNode, [
                 ('outputspec.template', 't1_image'),
                 ('outputspec.passive_deformed_templates', 'deformed_list')
             ]),
        ])

        ## Generate an average lmks file.
        myAverageLmk = pe.Node(interface=GenerateAverageLmkFile(),
                               name="myAverageLmk")
        myAverageLmk.inputs.outputLandmarkFile = "AVG_LMKS.fcsv"
        template.connect(baselineRequiredDG, 'BCD_ACPC_Landmarks_fcsv',
                         myAverageLmk, 'inputLandmarkFiles')

        # Create DataSinks
        SubjectAtlas_DataSink = pe.Node(nio.DataSink(), name="Subject_DS")
        SubjectAtlas_DataSink.overwrite = subj_pipeline_options['ds_overwrite']
        SubjectAtlas_DataSink.inputs.base_directory = experiment['resultdir']

        template.connect([
            (subjectNode, SubjectAtlas_DataSink, [('subject', 'container')]),
            (CreateAtlasXMLAndCleanedDeformedAveragesNode,
             SubjectAtlas_DataSink, [('outAtlasFullPath', 'Atlas.@definitions')
                                     ]),
            (CreateAtlasXMLAndCleanedDeformedAveragesNode,
             SubjectAtlas_DataSink, [('clean_deformed_list',
                                      'Atlas.@passive_deformed_templates')]),
            (subjectNode, SubjectAtlas_DataSink, [(('subject', outputPattern),
                                                   'regexp_substitutions')]),
            (buildTemplateIteration2, SubjectAtlas_DataSink,
             [('outputspec.template', 'Atlas.@template')]),
            (myAverageLmk, SubjectAtlas_DataSink,
             [('outputLandmarkFile', 'Atlas.@outputLandmarkFile')]),
        ])

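        # If a dot file was requested, only render the workflow graph; otherwise
        # submit the workflow with the configured execution plugin.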
        dotfilename = argv['--dotfilename']
        if dotfilename is not None:
            print("WARNING: Printing workflow, but not running pipeline")
            print_workflow(template,
                           plugin=subj_pipeline_options['plugin_name'],
                           dotfilename=dotfilename)
        else:
            run_workflow(template,
                         plugin=subj_pipeline_options['plugin_name'],
                         plugin_args=subj_pipeline_options['plugin_args'])
Code example #6
File: singleSession.py    Project: Slicer/BRAINSTools
def createAndRun(sessions, environment, experiment, pipeline, cluster, useSentinal, dryRun):
    from baw_exp import OpenSubjectDatabase
    from utilities.misc import add_dict
    from collections import OrderedDict
    import sys

    from workflows.utils import run_workflow

    master_config = {}
    for configDict in [environment, experiment, pipeline, cluster]:
        master_config = add_dict(master_config, configDict)
    database = OpenSubjectDatabase(experiment['cachedir'], ['all'], environment['prefix'], experiment['dbfile'])
    database.open_connection()
    try:
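        # Expand and validate the requested sessions: 'all' selects every session in
        # the database, and any unknown session id aborts with an assertion error.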
        all_sessions = database.getAllSessions()
        if not set(sessions) <= set(all_sessions) and 'all' not in sessions:
            missing = set(sessions) - set(all_sessions)
            assert len(missing) == 0, "Requested sessions are missing from the database: {0}\n\n{1}".format(missing,
                                                                                                            all_sessions)
        elif 'all' in sessions:
            sessions = set(all_sessions)
        else:
            sessions = set(sessions)
        print("!=" * 40)
        print("Doing sessions {0}".format(sessions))
        print("!=" * 40)
        for session in sessions:
            _dict = OrderedDict()
            subject = database.getSubjFromSession(session)
            t1_list = database.getFilenamesByScantype(session, ['T1-15', 'T1-30'])
            if len(t1_list) == 0:
                print("ERROR: Skipping session {0} for subject {1} due to missing T1s".format(session, subject))
                print("REMOVE OR FIX BEFORE CONTINUING")
                continue
            _dict['session'] = session
            _dict['project'] = database.getProjFromSession(session)
            _dict['subject'] = subject
            _dict['T1s'] = t1_list
            _dict['T2s'] = database.getFilenamesByScantype(session, ['T2-15', 'T2-30'])
            _dict['BadT2'] = False
            if _dict['T2s'] == database.getFilenamesByScantype(session, ['T2-15']):
                print("WARNING: This T2 is not going to be used for JointFusion")
                print(_dict['T2s'])
                _dict['BadT2'] = True
            _dict['PDs'] = database.getFilenamesByScantype(session, ['PD-15', 'PD-30'])
            _dict['FLs'] = database.getFilenamesByScantype(session, ['FL-15', 'FL-30'])
            _dict['EMSP'] = database.getFilenamesByScantype(session, ['EMSP'])
            _dict['OTHERs'] = database.getFilenamesByScantype(session, ['OTHER-15', 'OTHER-30'])
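            # Build the list of "sentinel" output files for this session; when
            # useSentinal is set and they all already exist, the session is skipped.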
            sentinal_file_basedir = os.path.join(
                master_config['resultdir'],
                _dict['project'],
                _dict['subject'],
                _dict['session']
            )

            sentinal_file_list = list()
            sentinal_file_list.append(sentinal_file_basedir)
            if 'denoise' in master_config['components']:
                ## NO SENTINEL FILE for the denoise component
                pass

            ## Use the t1 average sentinel file if specified.
            if 'landmark' in master_config['components']:
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "ACPCAlign",
                    "landmarkInitializer_atlas_to_subject_transform.h5"
                ))

            if 'tissue_classify' in master_config['components']:
                for tc_file in ["complete_brainlabels_seg.nii.gz", "t1_average_BRAINSABC.nii.gz"]:
                    sentinal_file_list.append(os.path.join(
                        sentinal_file_basedir,
                        "TissueClassify",
                        tc_file
                    ))

            if 'warp_atlas_to_subject' in master_config['components']:
                warp_atlas_file_list = [
                    "hncma_atlas.nii.gz",
                    "l_accumben_ProbabilityMap.nii.gz",
                    "l_caudate_ProbabilityMap.nii.gz",
                    "l_globus_ProbabilityMap.nii.gz",
                    "l_hippocampus_ProbabilityMap.nii.gz",
                    "l_putamen_ProbabilityMap.nii.gz",
                    "l_thalamus_ProbabilityMap.nii.gz",
                    "left_hemisphere_wm.nii.gz",
                    "phi.nii.gz",
                    "r_accumben_ProbabilityMap.nii.gz",
                    "r_caudate_ProbabilityMap.nii.gz",
                    "r_globus_ProbabilityMap.nii.gz",
                    "r_hippocampus_ProbabilityMap.nii.gz",
                    "r_putamen_ProbabilityMap.nii.gz",
                    "r_thalamus_ProbabilityMap.nii.gz",
                    "rho.nii.gz",
                    "right_hemisphere_wm.nii.gz",
                    "template_WMPM2_labels.nii.gz",
                    "template_headregion.nii.gz",
                    "template_leftHemisphere.nii.gz",
                    "template_nac_labels.nii.gz",
                    "template_rightHemisphere.nii.gz",
                    "template_ventricles.nii.gz",
                    "theta.nii.gz"
                ]
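                # Every warped-atlas output listed above must exist under
                # WarpedAtlas2Subject for this component to count as complete.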
                for ff in warp_atlas_file_list:
                    sentinal_file_list.append(os.path.join(
                        sentinal_file_basedir,
                        "WarpedAtlas2Subject",
                        ff
                    ))

            if 'jointfusion_2015_wholebrain' in master_config['components']:
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "TissueClassify",
                    "JointFusion_HDAtlas20_2015_lobar_label.nii.gz"
                ))
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "TissueClassify",
                    "lobeVolumes_JSON.json"
                ))

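            # Pick the required atlas input: the shipped atlas cache for the
            # 'atlas-based-reference' phase, otherwise the subject-specific average
            # atlas produced by the previous (baseline) experiment.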
            if master_config['workflow_phase'] == 'atlas-based-reference':
                atlasDirectory = os.path.join(master_config['atlascache'], 'spatialImages', 'rho.nii.gz')
                sentinal_file_list.append(atlasDirectory)
            else:
                atlasDirectory = os.path.join(master_config['previousresult'], subject, 'Atlas', 'AVG_rho.nii.gz')
                sentinal_file_list.append(atlasDirectory)
                sentinal_file_list.append(
                    os.path.join(master_config['previousresult'], subject, 'Atlas', 'AVG_template_headregion.nii.gz'))

            if os.path.exists(atlasDirectory):
                print("FOUND REQUIRED ATLAS INPUT {0}".format(atlasDirectory))
            else:
                print("MISSING REQUIRED ATLAS INPUT {0}".format(atlasDirectory))
                print("SKIPPING: {0} prerequisites missing".format(session))
                continue

            ## Use a different sentinel file if BRAINSCut segmentation is specified.
            from workflows.baseline import DetermineIfSegmentationShouldBeDone

            do_BRAINSCut_Segmentation = DetermineIfSegmentationShouldBeDone(master_config)
            if do_BRAINSCut_Segmentation:
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "CleanedDenoisedRFSegmentations",
                    "allLabels_seg.nii.gz"
                ))

            def allPathsExists(list_of_paths):
                is_missing = False
                for ff in list_of_paths:
                    if not os.path.exists(ff):
                        is_missing = True
                        print("MISSING: {0}".format(ff))
                return not is_missing

            if useSentinal and allPathsExists(sentinal_file_list):
                print("SKIPPING: all sentinel files already exist: {0}".format(sentinal_file_list))
            else:
                print("PROCESSING INCOMPLETE: at least 1 required file does not exist")
                if not dryRun:
                    workflow = _create_singleSession(_dict, master_config, 'Linear',
                                                     'singleSession_{0}_{1}'.format(_dict['subject'], _dict['session']))
                    print("Starting session {0}".format(session))
                    # HACK Hard-coded to SGEGraph, but --wfrun is ignored completely
                    run_workflow(workflow, plugin=master_config['plugin_name'],
                                 plugin_args=master_config['plugin_args'])
                else:
                    print("EXITING WITHOUT WORK DUE TO dryRun flag")
    finally:
        # Always close the database connection, even when workflow construction fails.
        try:
            database.close_connection()
        except Exception:
            pass