Example 1
def get_subjects_sessions_dictionary(input_subjects,
                                     cache,
                                     resultdir,
                                     prefix,
                                     dbfile,
                                     useSentinal,
                                     shuffle=False):
    import random
    _temp = OpenSubjectDatabase(cache, ['all'], prefix, dbfile)
    if "all" in input_subjects:
        input_subjects = _temp.getAllSubjects()
    if useSentinal:
        print("=" * 80)
        print("Using Sentinal Files to Limit Jobs Run")
        _all_subjects = set(input_subjects)
        _processed_subjects = set(
            get_processed_subjects(resultdir, input_subjects))
        # NOTE: set difference removes subjects that already have results
        subjects = list(_all_subjects - _processed_subjects)
    else:
        subjects = input_subjects

    if shuffle:
        # randomly shuffle to get max cluster efficiency
        random.shuffle(subjects)
    subject_sessions_dictionary = dict()
    for subject in subjects:
        subject_sessions_dictionary[subject] = _temp.getSessionsFromSubject(
            subject)
    return subjects, subject_sessions_dictionary
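
A minimal usage sketch for the function above; the paths, prefix, and database file are hypothetical placeholders, and OpenSubjectDatabase / get_processed_subjects are assumed to be importable from the surrounding BAW modules as in the other examples:

# Hypothetical call: every value below is a placeholder, not from the original script.
subjects, sessions_by_subject = get_subjects_sessions_dictionary(
    input_subjects=['all'],               # 'all' expands to every subject in the database
    cache='/tmp/baw_cache',               # hypothetical cache directory
    resultdir='/tmp/baw_results',         # hypothetical results directory
    prefix='scan',                        # hypothetical filename prefix
    dbfile='/tmp/baw_cache/subjects.db',  # hypothetical SQLite database file
    useSentinal=True,                     # skip subjects whose results already exist
    shuffle=True)                         # randomize order for cluster load balancing
for subject, sessions in sessions_by_subject.items():
    print("{0}: {1}".format(subject, sessions))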
Example 2
def get_subjects(subjects, cache, prefix, dbfile, shuffle=True):
    import random
    _temp = OpenSubjectDatabase(cache, ['all'], prefix, dbfile)
    if "all" in subjects:
        subjects = _temp.getAllSubjects()
    if shuffle:
        random.shuffle(subjects)  # randomly shuffle to get max cluster efficiency
    return subjects
Example 3
def get_subjects(argv, cache, prefix, dbfile, shuffle=True):
    import random
    _temp = OpenSubjectDatabase(cache, ['all'], prefix, dbfile)
    subjects = argv["--subject"]  #.split(',')
    if "all" in subjects:
        subjects = _temp.getAllSubjects()
    if shuffle:
        random.shuffle(subjects)  # randomly shuffle to get max cluster efficiency
    return subjects
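
This variant reads the subject list from argv["--subject"], which looks like a docopt-style argument dictionary. A usage sketch under that assumption (the dictionary contents and paths are placeholders):

# Hypothetical docopt-style arguments; all values are placeholders.
argv = {"--subject": ["all"]}  # or an explicit list such as ["subj0001", "subj0002"]
subjects = get_subjects(argv,
                        cache='/tmp/baw_cache',               # hypothetical cache directory
                        prefix='scan',                        # hypothetical prefix
                        dbfile='/tmp/baw_cache/subjects.db')  # hypothetical database file
print(subjects)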
Example 4
def dispatcher(master_config, subjects):
    from multiprocessing import Pool
    import time

    from workflows import singleSubject as ss
    from workflows import template

    sp_args_list = []
    current_time = time.time()
    index = 0
    delay = 2.5
    for subject in subjects:
        index += 1
        print("START DELAY: {0}".format(delay))
        database = OpenSubjectDatabase(
            master_config['cachedir'], [subject], master_config['prefix'],
            master_config['dbfile'])  # Get DB object before parallel section
        # print database.getAllSessions()
        start_time = current_time + (index * delay)
        sp_args = (database, start_time, subject, master_config)
        sp_args_list.append(sp_args)

    print "Running workflow(s) now..."
    if master_config['execution']['plugin'] in ['Linear', 'MultiProc']:  # TODO: DS_runner?
        if 'baseline' in master_config['components']:
            for args in sp_args_list:
                ss.RunSubjectWorkflow(args)
            master_config['components'].remove('baseline')
        if 'template' in master_config['components']:
            template.main((subjects, master_config))
        if 'longitudinal' in master_config['components']:
            for args in sp_args_list:
                ss.RunSubjectWorkflow(args)
    else:
        # HACK
        print(master_config['execution']['plugin'])
        raise Exception
        # END HACK
        myPool = Pool(processes=64, maxtasksperchild=1)
        try:
            if 'baseline' in master_config['components']:
                all_results = myPool.map_async(ss.RunSubjectWorkflow,
                                               sp_args_list).get(1e100)
                master_config['components'].remove('baseline')
            if 'template' in master_config['components']:
                all_results = myPool.map_async(
                    template.main, ((subjects, master_config), )).get(1e100)
            if 'longitudinal' in master_config['components']:
                all_results = myPool.map_async(ss.RunSubjectWorkflow,
                                               sp_args_list).get(1e100)
        except ValueError as err:
            # Re-raise with the arguments that were passed to map_async, for easier debugging
            raise ValueError("{0}\nArgs to map_async: {1}".format(err, sp_args_list))
        for index in range(len(sp_args_list)):
            if all_results[index] == False:
                print "FAILED for {0}".format(sp_args_list[index][-1])
                return False
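
The start_time = current_time + (index * delay) offsets pair with the wait loop in RunSubjectWorkflow (Examples 9 and 12): each worker sleeps until its assigned start time so parallel subjects do not hit the database and scheduler at once. A standalone sketch of that staggering, with placeholder subject names and the same 2.5-second delay:

import time

def staggered_start(args):
    # Mirrors the wait loop in RunSubjectWorkflow: block until this worker's
    # assigned start time, then do the (placeholder) work.
    start_time, subject = args
    while time.time() < start_time:
        time.sleep(start_time - time.time() + 1)
        print("Delaying start for {0}".format(subject))
    print("Starting {0}".format(subject))

delay = 2.5
now = time.time()
jobs = [(now + (i + 1) * delay, s) for i, s in enumerate(['subjA', 'subjB', 'subjC'])]
for job in jobs:
    staggered_start(job)  # the real dispatcher hands comparable tuples to multiprocessing.Pool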
Example 5
def get_subjects_sessions_dictionary(input_subjects, cache, resultdir, prefix, dbfile, useSentinal, shuffle=False):
    import random
    _temp = OpenSubjectDatabase(cache, ['all'], prefix, dbfile)
    if "all" in input_subjects:
        input_subjects = _temp.getAllSubjects()
    if useSentinal:
        print("="*80)
        print("Using Sentinal Files to Limit Jobs Run")
        _all_subjects = set(input_subjects)
        _processed_subjects = set(get_processed_subjects(resultdir, input_subjects))
        # NOTE: set difference removes subjects that already have results
        subjects = list(_all_subjects - _processed_subjects)
    else:
        subjects = input_subjects

    if shuffle:
        random.shuffle(subjects)  # randomly shuffle to get max cluster efficiency
    subject_sessions_dictionary = dict()
    for subject in subjects:
        subject_sessions_dictionary[subject] = _temp.getSessionsFromSubject(subject)
    return subjects, subject_sessions_dictionary
Example 6
def createAndRun(sessions, environment, experiment, pipeline, cluster, useSentinal, dryRun):
    from baw_exp import OpenSubjectDatabase
    from utilities.misc import add_dict

    from workflows.utils import run_workflow

    master_config = {}
    for configDict in [environment, experiment, pipeline, cluster]:
        master_config = add_dict(master_config, configDict)
    database = OpenSubjectDatabase(experiment['cachedir'], ['all'], environment['prefix'], experiment['dbfile'])
    database.open_connection()
    try:
        all_sessions = database.getAllSessions()
        if not set(sessions) <= set(all_sessions) and 'all' not in sessions:
            missing = set(sessions) - set(all_sessions)
            assert len(missing) == 0, "Requested sessions are missing from the database: {0}\n\n{1}".format(missing,all_sessions)
        elif 'all' in sessions:
            sessions = set(all_sessions)
        else:
            sessions = set(sessions)
        print("!=" * 40)
        print("Doing sessions {0}".format(sessions))
        print("!=" * 40)
        for session in sessions:
            _dict = {}
            subject = database.getSubjFromSession(session)
            _dict['session'] = session
            _dict['project'] = database.getProjFromSession(session)
            _dict['subject'] = subject
            _dict['T1s'] = database.getFilenamesByScantype(session, ['T1-15', 'T1-30'])
            _dict['T2s'] = database.getFilenamesByScantype(session, ['T2-15', 'T2-30'])
            _dict['PDs'] = database.getFilenamesByScantype(session, ['PD-15', 'PD-30'])
            _dict['FLs'] = database.getFilenamesByScantype(session, ['FL-15', 'FL-30'])
            _dict['OTs'] = database.getFilenamesByScantype(session, ['OTHER-15', 'OTHER-30'])
            sentinal_file_basedir = os.path.join(
                master_config['resultdir'],
                _dict['project'],
                _dict['subject'],
                _dict['session']
            )

            sentinal_file_list = list()
            sentinal_file_list.append(os.path.join(sentinal_file_basedir))
            if 'denoise' in master_config['components']:
                # # NO SENTINAL FILE
                pass

            # # Use t1 average sentinal file if specified.
            if 'landmark' in master_config['components']:
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "ACPCAlign",
                    "landmarkInitializer_atlas_to_subject_transform.h5"
                ))

            if 'tissue_classify' in master_config['components']:
                for tc_file in ["complete_brainlabels_seg.nii.gz", "t1_average_BRAINSABC.nii.gz"]:
                    sentinal_file_list.append(os.path.join(
                        sentinal_file_basedir,
                        "TissueClassify",
                        tc_file
                    ))

            if 'warp_atlas_to_subject' in master_config['components']:
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "WarpedAtlas2Subject",
                    "rho.nii.gz"
                ))
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "WarpedAtlas2Subject",
                    "left_hemisphere_wm.nii.gz"
                ))

            if 'malf_2012_neuro' in master_config['components']:
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "TissueClassify",
                    "MALF_HDAtlas20_2015_fs_standard_label.nii.gz"
                ))
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "TissueClassify",
                    "MALF_HDAtlas20_2015_label.nii.gz"
                ))
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "TissueClassify",
                    "MALF_HDAtlas20_2015_lobar_label.nii.gz"
                ))
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "TissueClassify",
                    "MALF_HDAtlas20_2015_CSFVBInjected_label.nii.gz"
                ))
            if 'malf_2015_wholebrain' in master_config['components']:
                pass

            if master_config['workflow_phase'] == 'atlas-based-reference':
                atlasDirectory = os.path.join(master_config['atlascache'], 'spatialImages', 'rho.nii.gz')
            else:
                atlasDirectory = os.path.join(master_config['previousresult'], subject, 'Atlas', 'AVG_rho.nii.gz')

            if os.path.exists(atlasDirectory):
                print("LOOKING FOR DIRECTORY {0}".format(atlasDirectory))
            else:
                print("MISSING REQUIRED ATLAS INPUT {0}".format(atlasDirectory))
                print("SKIPPING: {0} prerequisites missing".format(session))
                continue

            ## Use different sentinal file if segmentation specified.
            from workflows.baseline import DetermineIfSegmentationShouldBeDone

            do_BRAINSCut_Segmentation = DetermineIfSegmentationShouldBeDone(master_config)
            if do_BRAINSCut_Segmentation:
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "CleanedDenoisedRFSegmentations",
                    "allLabels_seg.nii.gz"
                ))

            def allPathsExists(list_of_paths):
                is_missing = False
                for ff in list_of_paths:
                    if not os.path.exists(ff):
                        is_missing = True
                        print("MISSING: {0}".format(ff))
                return not is_missing

            if useSentinal and allPathsExists(sentinal_file_list):
                print("SKIPPING: {0} exists".format(sentinal_file_list))
            else:
                print("PROCESSING INCOMPLETE: at least 1 required file does not exists")
                if dryRun == False:
                    workflow = _create_singleSession(_dict, master_config, 'Linear',
                                                     'singleSession_{0}_{1}'.format(_dict['subject'], _dict['session']))
                    print("Starting session {0}".format(session))
                    # HACK Hard-coded to SGEGraph, but --wfrun is ignored completely
                    run_workflow(workflow, plugin=master_config['plugin_name'],
                                 plugin_args=master_config['plugin_args'])
                else:
                    print("EXITING WITHOUT WORK DUE TO dryRun flag")
    except:
        raise
    finally:
        try:
            database.close_connection()
        except:
            pass
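
The sentinel logic above reduces to: build the list of output files a finished session should have, and skip the session when every one exists. A self-contained sketch of just that check (the file paths are placeholders):

import os

def all_paths_exist(list_of_paths):
    # Same idea as allPathsExists above: report every missing file and
    # return True only when nothing is missing.
    missing = [p for p in list_of_paths if not os.path.exists(p)]
    for p in missing:
        print("MISSING: {0}".format(p))
    return not missing

# Hypothetical expected outputs for one session.
expected = [
    "/tmp/results/proj/subj/sess/TissueClassify/t1_average_BRAINSABC.nii.gz",
    "/tmp/results/proj/subj/sess/TissueClassify/complete_brainlabels_seg.nii.gz",
]
if all_paths_exist(expected):
    print("SKIPPING: outputs already exist")
else:
    print("PROCESSING: at least one required file does not exist")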
Example 7
def createAndRun(sessions, environment, experiment, pipeline, cluster,
                 useSentinal, dryRun):
    from baw_exp import OpenSubjectDatabase
    from utilities.misc import add_dict

    from workflows.utils import run_workflow

    master_config = {}
    for configDict in [environment, experiment, pipeline, cluster]:
        master_config = add_dict(master_config, configDict)
    database = OpenSubjectDatabase(experiment['cachedir'], ['all'],
                                   environment['prefix'], experiment['dbfile'])
    database.open_connection()
    try:
        all_sessions = database.getAllSessions()
        if not set(sessions) <= set(all_sessions) and 'all' not in sessions:
            missing = set(sessions) - set(all_sessions)
            assert len(
                missing
            ) == 0, "Requested sessions are missing from the database: {0}\n\n{1}".format(
                missing, all_sessions)
        elif 'all' in sessions:
            sessions = set(all_sessions)
        else:
            sessions = set(sessions)
        print("!=" * 40)
        print("Doing sessions {0}".format(sessions))
        print("!=" * 40)
        for session in sessions:
            _dict = {}
            subject = database.getSubjFromSession(session)
            _dict['session'] = session
            _dict['project'] = database.getProjFromSession(session)
            _dict['subject'] = subject
            _dict['T1s'] = database.getFilenamesByScantype(
                session, ['T1-15', 'T1-30'])
            _dict['T2s'] = database.getFilenamesByScantype(
                session, ['T2-15', 'T2-30'])
            _dict['PDs'] = database.getFilenamesByScantype(
                session, ['PD-15', 'PD-30'])
            _dict['FLs'] = database.getFilenamesByScantype(
                session, ['FL-15', 'FL-30'])
            _dict['OTs'] = database.getFilenamesByScantype(
                session, ['OTHER-15', 'OTHER-30'])
            sentinal_file_basedir = os.path.join(master_config['resultdir'],
                                                 _dict['project'],
                                                 _dict['subject'],
                                                 _dict['session'])

            sentinal_file_list = list()
            sentinal_file_list.append(os.path.join(sentinal_file_basedir))
            if 'denoise' in master_config['components']:
                # # NO SENTINAL FILE
                pass

            # # Use t1 average sentinal file if specified.
            if 'landmark' in master_config['components']:
                sentinal_file_list.append(
                    os.path.join(
                        sentinal_file_basedir, "ACPCAlign",
                        "landmarkInitializer_atlas_to_subject_transform.h5"))

            if 'tissue_classify' in master_config['components']:
                for tc_file in [
                        "complete_brainlabels_seg.nii.gz",
                        "t1_average_BRAINSABC.nii.gz"
                ]:
                    sentinal_file_list.append(
                        os.path.join(sentinal_file_basedir, "TissueClassify",
                                     tc_file))

            if 'warp_atlas_to_subject' in master_config['components']:
                sentinal_file_list.append(
                    os.path.join(sentinal_file_basedir, "WarpedAtlas2Subject",
                                 "rho.nii.gz"))
                sentinal_file_list.append(
                    os.path.join(sentinal_file_basedir, "WarpedAtlas2Subject",
                                 "left_hemisphere_wm.nii.gz"))

            if 'malf_2012_neuro' in master_config['components']:
                sentinal_file_list.append(
                    os.path.join(
                        sentinal_file_basedir, "TissueClassify",
                        "MALF_HDAtlas20_2015_fs_standard_label.nii.gz"))
                sentinal_file_list.append(
                    os.path.join(sentinal_file_basedir, "TissueClassify",
                                 "MALF_HDAtlas20_2015_label.nii.gz"))
                sentinal_file_list.append(
                    os.path.join(sentinal_file_basedir, "TissueClassify",
                                 "MALF_HDAtlas20_2015_lobar_label.nii.gz"))
                sentinal_file_list.append(
                    os.path.join(
                        sentinal_file_basedir, "TissueClassify",
                        "MALF_HDAtlas20_2015_CSFVBInjected_label.nii.gz"))
            if 'malf_2015_wholebrain' in master_config['components']:
                pass

            if master_config['workflow_phase'] == 'atlas-based-reference':
                atlasDirectory = os.path.join(master_config['atlascache'],
                                              'spatialImages', 'rho.nii.gz')
            else:
                atlasDirectory = os.path.join(master_config['previousresult'],
                                              subject, 'Atlas',
                                              'AVG_rho.nii.gz')

            if os.path.exists(atlasDirectory):
                print("LOOKING FOR DIRECTORY {0}".format(atlasDirectory))
            else:
                print(
                    "MISSING REQUIRED ATLAS INPUT {0}".format(atlasDirectory))
                print("SKIPPING: {0} prerequisites missing".format(session))
                continue

            ## Use different sentinal file if segmentation specified.
            from workflows.baseline import DetermineIfSegmentationShouldBeDone

            do_BRAINSCut_Segmentation = DetermineIfSegmentationShouldBeDone(
                master_config)
            if do_BRAINSCut_Segmentation:
                sentinal_file_list.append(
                    os.path.join(sentinal_file_basedir,
                                 "CleanedDenoisedRFSegmentations",
                                 "allLabels_seg.nii.gz"))

            def allPathsExists(list_of_paths):
                is_missing = False
                for ff in list_of_paths:
                    if not os.path.exists(ff):
                        is_missing = True
                        print("MISSING: {0}".format(ff))
                return not is_missing

            if useSentinal and allPathsExists(sentinal_file_list):
                print("SKIPPING: {0} exists".format(sentinal_file_list))
            else:
                print(
                    "PROCESSING INCOMPLETE: at least 1 required file does not exist"
                )
                if dryRun == False:
                    workflow = _create_singleSession(
                        _dict, master_config, 'Linear',
                        'singleSession_{0}_{1}'.format(_dict['subject'],
                                                       _dict['session']))
                    print("Starting session {0}".format(session))
                    # HACK Hard-coded to SGEGraph, but --wfrun is ignored completely
                    run_workflow(workflow,
                                 plugin=master_config['plugin_name'],
                                 plugin_args=master_config['plugin_args'])
                else:
                    print("EXITING WITHOUT WORK DUE TO dryRun flag")
    except:
        raise
    finally:
        try:
            database.close_connection()
        except:
            pass
Example 8
def createAndRun(sessions, environment, experiment, pipeline, cluster,
                 useSentinal, dryRun):
    from baw_exp import OpenSubjectDatabase
    from utilities.misc import add_dict
    from collections import OrderedDict
    import sys

    from workflows.utils import run_workflow

    master_config = {}
    for configDict in [environment, experiment, pipeline, cluster]:
        master_config = add_dict(master_config, configDict)
    database = OpenSubjectDatabase(experiment['cachedir'], ['all'],
                                   environment['prefix'], experiment['dbfile'])
    database.open_connection()
    try:
        all_sessions = database.getAllSessions()
        if not set(sessions) <= set(all_sessions) and 'all' not in sessions:
            missing = set(sessions) - set(all_sessions)
            assert len(
                missing
            ) == 0, "Requested sessions are missing from the database: {0}\n\n{1}".format(
                missing, all_sessions)
        elif 'all' in sessions:
            sessions = set(all_sessions)
        else:
            sessions = set(sessions)
        print("!=" * 40)
        print("Doing sessions {0}".format(sessions))
        print("!=" * 40)
        for session in sessions:
            _dict = OrderedDict()
            # Look up the subject first so the error message below can reference it
            subject = database.getSubjFromSession(session)
            t1_list = database.getFilenamesByScantype(session,
                                                      ['T1-15', 'T1-30'])
            if len(t1_list) == 0:
                print(
                    "ERROR: Skipping session {0} for subject {1} due to missing T1's"
                    .format(session, subject))
                print("REMOVE OR FIX BEFORE CONTINUING")
                continue
            _dict['session'] = session
            _dict['project'] = database.getProjFromSession(session)
            _dict['subject'] = subject
            _dict['T1s'] = t1_list
            _dict['T2s'] = database.getFilenamesByScantype(
                session, ['T2-15', 'T2-30'])
            _dict['BadT2'] = False
            if _dict['T2s'] == database.getFilenamesByScantype(
                    session, ['T2-15']):
                print("This T2 is not going to be used for JointFusion")
                print("This T2 is not going to be used for JointFusion")
                print("This T2 is not going to be used for JointFusion")
                print("This T2 is not going to be used for JointFusion")
                print(_dict['T2s'])
                _dict['BadT2'] = True
            _dict['PDs'] = database.getFilenamesByScantype(
                session, ['PD-15', 'PD-30'])
            _dict['FLs'] = database.getFilenamesByScantype(
                session, ['FL-15', 'FL-30'])
            _dict['EMSP'] = database.getFilenamesByScantype(session, ['EMSP'])
            _dict['OTHERs'] = database.getFilenamesByScantype(
                session, ['OTHER-15', 'OTHER-30'])
            sentinal_file_basedir = os.path.join(master_config['resultdir'],
                                                 _dict['project'],
                                                 _dict['subject'],
                                                 _dict['session'])

            sentinal_file_list = list()
            sentinal_file_list.append(os.path.join(sentinal_file_basedir))
            if 'denoise' in master_config['components']:
                # # NO SENTINAL FILE
                pass

            # # Use t1 average sentinal file if specified.
            if 'landmark' in master_config['components']:
                sentinal_file_list.append(
                    os.path.join(
                        sentinal_file_basedir, "ACPCAlign",
                        "landmarkInitializer_atlas_to_subject_transform.h5"))

            if 'tissue_classify' in master_config['components']:
                for tc_file in [
                        "complete_brainlabels_seg.nii.gz",
                        "t1_average_BRAINSABC.nii.gz"
                ]:
                    sentinal_file_list.append(
                        os.path.join(sentinal_file_basedir, "TissueClassify",
                                     tc_file))

            if 'warp_atlas_to_subject' in master_config['components']:
                warp_atlas_file_list = [
                    "hncma_atlas.nii.gz", "l_accumben_ProbabilityMap.nii.gz",
                    "l_caudate_ProbabilityMap.nii.gz",
                    "l_globus_ProbabilityMap.nii.gz",
                    "l_hippocampus_ProbabilityMap.nii.gz",
                    "l_putamen_ProbabilityMap.nii.gz",
                    "l_thalamus_ProbabilityMap.nii.gz",
                    "left_hemisphere_wm.nii.gz", "phi.nii.gz",
                    "r_accumben_ProbabilityMap.nii.gz",
                    "r_caudate_ProbabilityMap.nii.gz",
                    "r_globus_ProbabilityMap.nii.gz",
                    "r_hippocampus_ProbabilityMap.nii.gz",
                    "r_putamen_ProbabilityMap.nii.gz",
                    "r_thalamus_ProbabilityMap.nii.gz", "rho.nii.gz",
                    "right_hemisphere_wm.nii.gz",
                    "template_WMPM2_labels.nii.gz",
                    "template_headregion.nii.gz",
                    "template_leftHemisphere.nii.gz",
                    "template_nac_labels.nii.gz",
                    "template_rightHemisphere.nii.gz",
                    "template_ventricles.nii.gz", "theta.nii.gz"
                ]
                for ff in warp_atlas_file_list:
                    sentinal_file_list.append(
                        os.path.join(sentinal_file_basedir,
                                     "WarpedAtlas2Subject", ff))

            if 'jointfusion_2015_wholebrain' in master_config['components']:
                sentinal_file_list.append(
                    os.path.join(
                        sentinal_file_basedir, "TissueClassify",
                        "JointFusion_HDAtlas20_2015_lobar_label.nii.gz"))
                sentinal_file_list.append(
                    os.path.join(sentinal_file_basedir, "TissueClassify",
                                 "lobeVolumes_JSON.json"))

            if master_config['workflow_phase'] == 'atlas-based-reference':
                atlasDirectory = os.path.join(master_config['atlascache'],
                                              'spatialImages', 'rho.nii.gz')
                sentinal_file_list.append(atlasDirectory)
            else:
                atlasDirectory = os.path.join(master_config['previousresult'],
                                              subject, 'Atlas',
                                              'AVG_rho.nii.gz')
                sentinal_file_list.append(atlasDirectory)
                sentinal_file_list.append(
                    os.path.join(master_config['previousresult'], subject,
                                 'Atlas', 'AVG_template_headregion.nii.gz'))

            if os.path.exists(atlasDirectory):
                print("LOOKING FOR DIRECTORY {0}".format(atlasDirectory))
            else:
                print(
                    "MISSING REQUIRED ATLAS INPUT {0}".format(atlasDirectory))
                print("SKIPPING: {0} prerequisites missing".format(session))
                continue

            ## Use different sentinal file if segmentation specified.
            from workflows.baseline import DetermineIfSegmentationShouldBeDone

            do_BRAINSCut_Segmentation = DetermineIfSegmentationShouldBeDone(
                master_config)
            if do_BRAINSCut_Segmentation:
                sentinal_file_list.append(
                    os.path.join(sentinal_file_basedir,
                                 "CleanedDenoisedRFSegmentations",
                                 "allLabels_seg.nii.gz"))

            def allPathsExists(list_of_paths):
                is_missing = False
                for ff in list_of_paths:
                    if not os.path.exists(ff):
                        is_missing = True
                        print("MISSING: {0}".format(ff))
                return not is_missing

            if useSentinal and allPathsExists(sentinal_file_list):
                print("SKIPPING: {0} exists".format(sentinal_file_list))
            else:
                print(
                    "PROCESSING INCOMPLETE: at least 1 required file does not exist"
                )
                if dryRun == False:
                    workflow = _create_singleSession(
                        _dict, master_config, 'Linear',
                        'singleSession_{0}_{1}'.format(_dict['subject'],
                                                       _dict['session']))
                    print("Starting session {0}".format(session))
                    # HACK Hard-coded to SGEGraph, but --wfrun is ignored completely
                    run_workflow(workflow,
                                 plugin=master_config['plugin_name'],
                                 plugin_args=master_config['plugin_args'])
                else:
                    print("EXITING WITHOUT WORK DUE TO dryRun flag")
    except:
        raise
    finally:
        try:
            database.close_connection()
        except:
            pass
Example 9
def RunSubjectWorkflow(args):
    """
                           .-----------.
                       --- | Session 1 | ---> /project/subjectA/session1/phase/
                     /     *-----------*
    .-----------.   /
    | Subject A | <
    *-----------*   \
                     \     .-----------.
                       --- | Session 2 | ---> /project/subjectA/session2/phase/
                           *-----------*
    **** Replaces WorkflowT1T2.py ****
    """
    start_time, subject, master_config = args
    assert 'baseline' in master_config[
        'components'] or 'longitudinal' in master_config[
            'components'], "Baseline or Longitudinal is not in WORKFLOW_COMPONENTS!"
    import time

    from nipype import config, logging
    config.update_config(master_config)  # Set universal pipeline options
    assert config.get('execution',
                      'plugin') == master_config['execution']['plugin']
    # DEBUG
    # config.enable_debug_mode()
    # config.set('execution', 'stop_on_first_rerun', 'true')
    # END DEBUG
    logging.update_logging(config)

    import nipype.pipeline.engine as pe
    import nipype.interfaces.base as nbase
    import nipype.interfaces.io as nio
    from nipype.interfaces.utility import IdentityInterface, Function
    import traits

    from baw_exp import OpenSubjectDatabase
    from SessionDB import SessionDB
    from PipeLineFunctionHelpers import convertToList
    from atlasNode import MakeAtlasNode
    from utilities.misc import GenerateSubjectOutputPattern as outputPattern
    from utilities.misc import GenerateWFName

    while time.time() < start_time:
        time.sleep(start_time - time.time() + 1)
        print "Delaying start for {subject}".format(subject=subject)
    print("===================== SUBJECT: {0} ===========================".
          format(subject))

    subjectWorkflow = pe.Workflow(
        name="BAW_StandardWorkup_subject_{0}".format(subject))
    subjectWorkflow.base_dir = config.get('logging', 'log_directory')
    # subjectWorkflow.config['execution']['plugin'] = 'Linear'  # Hardcodeded in WorkupT1T2.py - why?
    # DEBUG
    # subjectWorkflow.config['execution']['stop_on_first_rerun'] = 'true'
    # END DEBUG
    atlasNode = MakeAtlasNode(master_config['atlascache'], 'BAtlas')

    sessionWorkflow = dict()
    inputsSpec = dict()
    # To avoid a "sqlite3.ProgrammingError: Base Cursor.__init__ not called" error
    #    using multiprocessing.map_async(), instantiate database here
    database = OpenSubjectDatabase(master_config['cachedir'], [subject],
                                   master_config['prefix'],
                                   master_config['dbfile'])
    # print database.getAllSessions()
    database.open_connection()

    sessions = database.getSessionsFromSubject(subject)
    # print "These are the sessions: ", sessions
    if 'baseline' in master_config['components']:
        current_phase = 'baseline'
        from baseline import baseline_workflow as create_wkfl
    elif 'longitudinal' in master_config['components']:
        current_phase = 'longitudinal'
        from longitudinal import create_longitudinal as create_wkfl

    for session in sessions:  # TODO (future): Replace with iterable inputSpec node and add Function node for getAllFiles()
        project = database.getProjFromSession(session)
        pname = "{0}_{1}".format(
            session,
            current_phase)  # Long node names make graphs a pain to read/print
        # pname = GenerateWFName(project, subject, session, current_phase)
        print "Building session pipeline for {0}".format(session)
        inputsSpec[session] = pe.Node(
            name='inputspec_{0}'.format(session),
            interface=IdentityInterface(
                fields=['T1s', 'T2s', 'PDs', 'FLs', 'OTs']))
        inputsSpec[session].inputs.T1s = database.getFilenamesByScantype(
            session, ['T1-15', 'T1-30'])
        inputsSpec[session].inputs.T2s = database.getFilenamesByScantype(
            session, ['T2-15', 'T2-30'])
        inputsSpec[session].inputs.PDs = database.getFilenamesByScantype(
            session, ['PD-15', 'PD-30'])
        inputsSpec[session].inputs.FLs = database.getFilenamesByScantype(
            session, ['FL-15', 'FL-30'])
        inputsSpec[session].inputs.OTs = database.getFilenamesByScantype(
            session, ['OTHER-15', 'OTHER-30'])

        sessionWorkflow[session] = create_wkfl(project,
                                               subject,
                                               session,
                                               master_config,
                                               interpMode='Linear',
                                               pipeline_name=pname)

        subjectWorkflow.connect([
            (inputsSpec[session], sessionWorkflow[session], [
                ('T1s', 'inputspec.T1s'),
                ('T2s', 'inputspec.T2s'),
                ('PDs', 'inputspec.PDs'),
                ('FLs', 'inputspec.FLs'),
                ('OTs', 'inputspec.OTHERs'),
            ]),
            (atlasNode, sessionWorkflow[session],
             [('template_landmarks_50Lmks_fcsv',
               'inputspec.atlasLandmarkFilename'),
              ('template_weights_50Lmks_wts', 'inputspec.atlasWeightFilename'),
              ('LLSModel_50Lmks_hdf5', 'inputspec.LLSModel'),
              ('T1_50Lmks_mdl', 'inputspec.inputTemplateModel')]),
        ])
        if current_phase == 'baseline':
            subjectWorkflow.connect([
                (atlasNode, sessionWorkflow[session],
                 [('template_t1', 'inputspec.template_t1'),
                  ('ExtendedAtlasDefinition_xml', 'inputspec.atlasDefinition')
                  ]),
            ])
        else:
            assert current_phase == 'longitudinal', "Phase value is unknown: {0}".format(
                current_phase)

    from utils import run_workflow, print_workflow
    if False:
        print_workflow(subjectWorkflow,
                       plugin=master_config['execution']['plugin'],
                       dotfilename='subjectWorkflow')
    return run_workflow(subjectWorkflow,
                        plugin=master_config['execution']['plugin'],
                        plugin_args=master_config['plugin_args'])
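
RunSubjectWorkflow consumes the same (start_time, subject, master_config) tuples that dispatcher builds in Example 4. A minimal call sketch showing the shape of those arguments; the subject ID and every master_config entry below are placeholders, and a real configuration would carry the full nipype execution and logging settings expected by config.update_config:

import time

# Hypothetical argument tuple matching what dispatcher() appends to sp_args_list.
master_config = {
    'components': ['baseline'],             # must contain 'baseline' or 'longitudinal'
    'cachedir': '/tmp/baw_cache',           # hypothetical paths and settings below
    'prefix': 'scan',
    'dbfile': '/tmp/baw_cache/subjects.db',
    'atlascache': '/tmp/atlas',
    'execution': {'plugin': 'Linear'},
    'logging': {'log_directory': '/tmp/baw_logs'},
    'plugin_args': None,
}
args = (time.time() + 2.5, 'subj0001', master_config)  # start roughly 2.5 s from now
result = RunSubjectWorkflow(args)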
Example 10
def createAndRun(sessions, environment, experiment, pipeline, cluster):
    from baw_exp import OpenSubjectDatabase
    from utilities.misc import add_dict
    master_config = {}
    for configDict in [environment, experiment, pipeline, cluster]:
        master_config = add_dict(master_config, configDict)
    database = OpenSubjectDatabase(experiment['cachedir'], ['all'], environment['prefix'], experiment['dbfile'])
    database.open_connection()
    try:
        all_sessions = database.getAllSessions()
        if not set(sessions) <= set(all_sessions) and 'all' not in sessions:
            missing = set(sessions) - set(all_sessions)
            assert len(missing) == 0, "Requested sessions are missing from the database: {0}".format(missing)
        elif 'all' in sessions:
            sessions = set(all_sessions)
        else:
            sessions = set(sessions)
        for session in sessions:
            _dict = {}
            _dict['session'] = session
            _dict['project'] = database.getProjFromSession(session)
            _dict['subject'] = database.getSubjFromSession(session)
            _dict['T1s'] = database.getFilenamesByScantype(session, ['T1-15', 'T1-30'])
            _dict['T2s'] = database.getFilenamesByScantype(session, ['T2-15', 'T2-30'])
            _dict['PDs'] = database.getFilenamesByScantype(session, ['PD-15', 'PD-30'])
            _dict['FLs'] = database.getFilenamesByScantype(session, ['FL-15', 'FL-30'])
            _dict['OTs'] = database.getFilenamesByScantype(session, ['OTHER-15', 'OTHER-30'])
            workflow = create_singleSession(_dict, master_config, 'Linear', 'singleSession_{0}_{1}'.format(_dict['subject'], _dict['session']))
            workflow.run(plugin='SGEGraph', plugin_args=master_config['plugin_args'])
    except:
        raise
    finally:
        try:
            database.close_connection()
        except:
            pass
Example 11
def RunSubjectWorkflow(args):
    """
                           .-----------.
                       --- | Session 1 | ---> /project/subjectA/session1/phase/
                     /     *-----------*
    .-----------.   /
    | Subject A | <
    *-----------*   \
                     \     .-----------.
                       --- | Session 2 | ---> /project/subjectA/session2/phase/
                           *-----------*
    **** Replaces WorkflowT1T2.py ****
    """
    start_time, subject, master_config = args
    assert 'tissue_classify' in master_config['components'] or 'auxlmk' in master_config['components'] or 'segmentation' in master_config['components'], "tissue_classify, auxlmk, or segmentation is not in WORKFLOW_COMPONENTS!"
    import time

    from nipype import config, logging
    config.update_config(master_config)  # Set universal pipeline options
    assert config.get('execution', 'plugin') == master_config['execution']['plugin']
    # DEBUG
    # config.enable_debug_mode()
    # config.set('execution', 'stop_on_first_rerun', 'true')
    # END DEBUG
    logging.update_logging(config)

    import nipype.pipeline.engine as pe
    import nipype.interfaces.base as nbase
    import nipype.interfaces.io as nio
    from nipype.interfaces.utility import IdentityInterface, Function
    import traits

    from baw_exp import OpenSubjectDatabase
    from SessionDB import SessionDB
    from PipeLineFunctionHelpers import convertToList
    from atlasNode import MakeAtlasNode
    from utilities.misc import GenerateSubjectOutputPattern as outputPattern
    from utilities.misc import GenerateWFName
    from utils import run_workflow, print_workflow

    # while time.time() < start_time:
        # time.sleep(start_time - time.time() + 1)
        # print "Delaying start for {subject}".format(subject=subject)
    # print("===================== SUBJECT: {0} ===========================".format(subject))

    subjectWorkflow = pe.Workflow(name="BAW_StandardWorkup_subject_{0}".format(subject))
    subjectWorkflow.base_dir = config.get('logging', 'log_directory')
    # subjectWorkflow.config['execution']['plugin'] = 'Linear'  # Hardcodeded in WorkupT1T2.py - why?
    # DEBUG
    # subjectWorkflow.config['execution']['stop_on_first_rerun'] = 'true'
    # END DEBUG


    sessionWorkflow = dict()
    inputsSpec = dict()
    # To avoid a "sqlite3.ProgrammingError: Base Cursor.__init__ not called" error
    #    using multiprocessing.map_async(), instantiate database here
    database = OpenSubjectDatabase(master_config['cachedir'], [subject], master_config['prefix'], master_config['dbfile'])
    # print database.getAllSessions()
    database.open_connection()

    sessions = database.getSessionsFromSubject(subject)
    print "These are the sessions: ", sessions
    # TODO: atlas input csv read
    atlasNode = MakeAtlasNode(master_config['atlascache'], 'BAtlas')
    # atlasNode = GetAtlasNode(master_config['previouscache'], 'BAtlas')
    from singleSession import create_singleSession as create_wkfl

    for session in sessions:  # TODO (future): Replace with iterable inputSpec node and add Function node for getAllFiles()
        project = database.getProjFromSession(session)
        pname = "{0}_singleSession".format(session)  # Long node names make graphs a pain to read/print
        # pname = GenerateWFName(project, subject, session, 'singleSession')
        print "Building session pipeline for {0}".format(session)
        inputsSpec[session] = pe.Node(name='inputspec_{0}'.format(session),
                                      interface=IdentityInterface(fields=['T1s', 'T2s', 'PDs', 'FLs', 'OTs']))
        inputsSpec[session].inputs.T1s = database.getFilenamesByScantype(session, ['T1-15', 'T1-30'])
        inputsSpec[session].inputs.T2s = database.getFilenamesByScantype(session, ['T2-15', 'T2-30'])
        inputsSpec[session].inputs.PDs = database.getFilenamesByScantype(session, ['PD-15', 'PD-30'])
        inputsSpec[session].inputs.FLs = database.getFilenamesByScantype(session, ['FL-15', 'FL-30'])
        inputsSpec[session].inputs.OTs = database.getFilenamesByScantype(session, ['OTHER-15', 'OTHER-30'])

        sessionWorkflow[session] = create_wkfl(project, subject, session, master_config,
                                               interpMode='Linear', pipeline_name=pname)

        subjectWorkflow.connect([(inputsSpec[session], sessionWorkflow[session], [('T1s', 'inputspec.T1s'),
                                                                                  ('T2s', 'inputspec.T2s'),
                                                                                  ('PDs', 'inputspec.PDs'),
                                                                                  ('FLs', 'inputspec.FLs'),
                                                                                  ('OTs', 'inputspec.OTHERs'),
                                                                                  ]),
                                 (atlasNode, sessionWorkflow[session], [('template_landmarks_50Lmks_fcsv',
                                                                         'inputspec.atlasLandmarkFilename'),
                                                                        ('template_weights_50Lmks_wts',
                                                                         'inputspec.atlasWeightFilename'),
                                                                        ('LLSModel_50Lmks_hdf5', 'inputspec.LLSModel'),
                                                                        ('T1_50Lmks_mdl', 'inputspec.inputTemplateModel')]),
                                ])
        if 'segmentation' in master_config['components']:
            from WorkupT1T2BRAINSCut import GenerateWFName
            try:
                bCutInputName = ".".join(['segmentation', GenerateWFName(project, subject, session, 'Segmentation'), 'inputspec'])
            except:
                print(project, subject, session)
                raise
            subjectWorkflow.connect([(atlasNode, sessionWorkflow[session],
                                      [('hncma-atlas', 'segmentation.inputspec.hncma-atlas'),
                                       ('template_t1', 'segmentation.inputspec.template_t1'),
                                       ('template_t1', bCutInputName + '.template_t1'),
                                       ('rho', bCutInputName + '.rho'),
                                       ('phi', bCutInputName + '.phi'),
                                       ('theta', bCutInputName + '.theta'),
                                       ('l_caudate_ProbabilityMap', bCutInputName + '.l_caudate_ProbabilityMap'),
                                       ('r_caudate_ProbabilityMap', bCutInputName + '.r_caudate_ProbabilityMap'),
                                       ('l_hippocampus_ProbabilityMap', bCutInputName + '.l_hippocampus_ProbabilityMap'),
                                       ('r_hippocampus_ProbabilityMap', bCutInputName + '.r_hippocampus_ProbabilityMap'),
                                       ('l_putamen_ProbabilityMap', bCutInputName + '.l_putamen_ProbabilityMap'),
                                       ('r_putamen_ProbabilityMap', bCutInputName + '.r_putamen_ProbabilityMap'),
                                       ('l_thalamus_ProbabilityMap', bCutInputName + '.l_thalamus_ProbabilityMap'),
                                       ('r_thalamus_ProbabilityMap', bCutInputName + '.r_thalamus_ProbabilityMap'),
                                       ('l_accumben_ProbabilityMap', bCutInputName + '.l_accumben_ProbabilityMap'),
                                       ('r_accumben_ProbabilityMap', bCutInputName + '.r_accumben_ProbabilityMap'),
                                       ('l_globus_ProbabilityMap', bCutInputName + '.l_globus_ProbabilityMap'),
                                       ('r_globus_ProbabilityMap', bCutInputName + '.r_globus_ProbabilityMap'),
                                       ('trainModelFile_txtD0060NT0060_gz',
                                        bCutInputName + '.trainModelFile_txtD0060NT0060_gz')])])
        if True:  # FIXME: current_phase == 'baseline':
            subjectWorkflow.connect([(atlasNode, sessionWorkflow[session], [('template_t1', 'inputspec.template_t1'),
                                                                            ('ExtendedAtlasDefinition_xml',
                                                                             'inputspec.atlasDefinition')]),
                                 ])
        else:
            template_DG = pe.Node(interface=nio.DataGrabber(infields=['subject'],
                                  outfields=['template_t1', 'outAtlasFullPath']),
                                  name='Template_DG')
            template_DG.inputs.base_directory = master_config['previousresult']
            template_DG.inputs.subject = subject
            template_DG.inputs.template = 'SUBJECT_TEMPLATES/%s/AVG_%s.nii.gz'
            template_DG.inputs.template_args['template_t1'] = [['subject', 'T1']]
            template_DG.inputs.field_template = {'outAtlasFullPath': 'Atlas/definitions/AtlasDefinition_%s.xml'}
            template_DG.inputs.template_args['outAtlasFullPath'] = [['subject']]
            template_DG.inputs.sort_filelist = True
            template_DG.inputs.raise_on_empty = True

            baw201.connect([(template_DG, sessionWorkflow[session], [('outAtlasFullPath', 'inputspec.atlasDefinition'),
                                                                     ('template_t1', 'inputspec.template_t1')]),
                           ])
        # HACK: only run first subject
        break
        # END HACK
        if not True:
            return print_workflow(subjectWorkflow,
                                  plugin=master_config['execution']['plugin'], dotfilename='subjectWorkflow') #, graph2use='flat')
    try:
        return subjectWorkflow.run(plugin='SGEGraph', plugin_args=master_config['plugin_args'])
    except:
        return 1
Example 12
def RunSubjectWorkflow(args):
    """
                           .-----------.
                       --- | Session 1 | ---> /project/subjectA/session1/phase/
                     /     *-----------*
    .-----------.   /
    | Subject A | <
    *-----------*   \
                     \     .-----------.
                       --- | Session 2 | ---> /project/subjectA/session2/phase/
                           *-----------*
    **** Replaces WorkflowT1T2.py ****
    """
    start_time, subject, master_config = args
    assert 'baseline' in master_config['components'] or 'longitudinal' in master_config['components'], "Baseline or Longitudinal is not in WORKFLOW_COMPONENTS!"
    import time

    from nipype import config, logging
    config.update_config(master_config)  # Set universal pipeline options
    assert config.get('execution', 'plugin') == master_config['execution']['plugin']
    # DEBUG
    # config.enable_debug_mode()
    # config.set('execution', 'stop_on_first_rerun', 'true')
    # END DEBUG
    logging.update_logging(config)

    import nipype.pipeline.engine as pe
    import nipype.interfaces.base as nbase
    import nipype.interfaces.io as nio
    from nipype.interfaces.utility import IdentityInterface, Function
    import traits

    from baw_exp import OpenSubjectDatabase
    from SessionDB import SessionDB
    from PipeLineFunctionHelpers import convertToList
    from atlasNode import MakeAtlasNode
    from utilities.misc import GenerateSubjectOutputPattern as outputPattern
    from utilities.misc import GenerateWFName

    while time.time() < start_time:
        time.sleep(start_time - time.time() + 1)
        print "Delaying start for {subject}".format(subject=subject)
    print("===================== SUBJECT: {0} ===========================".format(subject))

    subjectWorkflow = pe.Workflow(name="BAW_StandardWorkup_subject_{0}".format(subject))
    subjectWorkflow.base_dir = config.get('logging', 'log_directory')
    # subjectWorkflow.config['execution']['plugin'] = 'Linear'  # Hardcodeded in WorkupT1T2.py - why?
    # DEBUG
    # subjectWorkflow.config['execution']['stop_on_first_rerun'] = 'true'
    # END DEBUG
    atlasNode = MakeAtlasNode(master_config['atlascache'], 'BAtlas')

    sessionWorkflow = dict()
    inputsSpec = dict()
    # To avoid a "sqlite3.ProgrammingError: Base Cursor.__init__ not called" error
    #    using multiprocessing.map_async(), instantiate database here
    database = OpenSubjectDatabase(master_config['cachedir'], [subject], master_config['prefix'], master_config['dbfile'])
    # print database.getAllSessions()
    database.open_connection()

    sessions = database.getSessionsFromSubject(subject)
    # print "These are the sessions: ", sessions
    if 'baseline' in master_config['components']:
        current_phase = 'baseline'
        from baseline import baseline_workflow as create_wkfl
    elif 'longitudinal' in master_config['components']:
        current_phase = 'longitudinal'
        from longitudinal import create_longitudinal as create_wkfl

    for session in sessions:  # TODO (future): Replace with iterable inputSpec node and add Function node for getAllFiles()
        project = database.getProjFromSession(session)
        pname = "{0}_{1}".format(session, current_phase)  # Long node names make graphs a pain to read/print
        # pname = GenerateWFName(project, subject, session, current_phase)
        print "Building session pipeline for {0}".format(session)
        inputsSpec[session] = pe.Node(name='inputspec_{0}'.format(session),
                                      interface=IdentityInterface(fields=['T1s', 'T2s', 'PDs', 'FLs', 'OTs']))
        inputsSpec[session].inputs.T1s = database.getFilenamesByScantype(session, ['T1-15', 'T1-30'])
        inputsSpec[session].inputs.T2s = database.getFilenamesByScantype(session, ['T2-15', 'T2-30'])
        inputsSpec[session].inputs.PDs = database.getFilenamesByScantype(session, ['PD-15', 'PD-30'])
        inputsSpec[session].inputs.FLs = database.getFilenamesByScantype(session, ['FL-15', 'FL-30'])
        inputsSpec[session].inputs.OTs = database.getFilenamesByScantype(session, ['OTHER-15', 'OTHER-30'])

        sessionWorkflow[session] = create_wkfl(project, subject, session, master_config,
                                               interpMode='Linear', pipeline_name=pname)

        subjectWorkflow.connect([(inputsSpec[session], sessionWorkflow[session], [('T1s', 'inputspec.T1s'),
                                                                                  ('T2s', 'inputspec.T2s'),
                                                                                  ('PDs', 'inputspec.PDs'),
                                                                                  ('FLs', 'inputspec.FLs'),
                                                                                  ('OTs', 'inputspec.OTHERs'),
                                                                                  ]),
                                 (atlasNode, sessionWorkflow[session], [('template_landmarks_50Lmks_fcsv',
                                                                         'inputspec.atlasLandmarkFilename'),
                                                                        ('template_weights_50Lmks_wts',
                                                                         'inputspec.atlasWeightFilename'),
                                                                        ('LLSModel_50Lmks_hdf5', 'inputspec.LLSModel'),
                                                                        ('T1_50Lmks_mdl', 'inputspec.inputTemplateModel')]),
                                ])
        if current_phase == 'baseline':
            subjectWorkflow.connect([(atlasNode, sessionWorkflow[session], [('template_t1', 'inputspec.template_t1'),
                                                                            ('ExtendedAtlasDefinition_xml',
                                                                             'inputspec.atlasDefinition')]),
                                 ])
        else:
            assert current_phase == 'longitudinal', "Phase value is unknown: {0}".format(current_phase)

    from utils import run_workflow, print_workflow
    if False:
        print_workflow(subjectWorkflow, plugin=master_config['execution']['plugin'], dotfilename='subjectWorkflow')
    return run_workflow(subjectWorkflow, plugin=master_config['execution']['plugin'], plugin_args=master_config['plugin_args'])
Example 13
def createAndRun(sessions, environment, experiment, pipeline, cluster, useSentinal, dryRun):
    from baw_exp import OpenSubjectDatabase
    from utilities.misc import add_dict
    from collections import OrderedDict
    import sys

    from workflows.utils import run_workflow

    master_config = {}
    for configDict in [environment, experiment, pipeline, cluster]:
        master_config = add_dict(master_config, configDict)
    database = OpenSubjectDatabase(experiment['cachedir'], ['all'], environment['prefix'], experiment['dbfile'])
    database.open_connection()
    try:
        all_sessions = database.getAllSessions()
        if not set(sessions) <= set(all_sessions) and 'all' not in sessions:
            missing = set(sessions) - set(all_sessions)
            assert len(missing) == 0, "Requested sessions are missing from the database: {0}\n\n{1}".format(missing,
                                                                                                            all_sessions)
        elif 'all' in sessions:
            sessions = set(all_sessions)
        else:
            sessions = set(sessions)
        print("!=" * 40)
        print("Doing sessions {0}".format(sessions))
        print("!=" * 40)
        for session in sessions:
            _dict = OrderedDict()
            # Look up the subject first so the error message below can reference it
            subject = database.getSubjFromSession(session)
            t1_list = database.getFilenamesByScantype(session, ['T1-15', 'T1-30'])
            if len(t1_list) == 0:
                print("ERROR: Skipping session {0} for subject {1} due to missing T1's".format(session, subject))
                print("REMOVE OR FIX BEFORE CONTINUING")
                continue
            _dict['session'] = session
            _dict['project'] = database.getProjFromSession(session)
            _dict['subject'] = subject
            _dict['T1s'] = t1_list
            _dict['T2s'] = database.getFilenamesByScantype(session, ['T2-15', 'T2-30'])
            _dict['BadT2'] = False
            if _dict['T2s'] == database.getFilenamesByScantype(session, ['T2-15']):
                print("This T2 is not going to be used for JointFusion")
                print("This T2 is not going to be used for JointFusion")
                print("This T2 is not going to be used for JointFusion")
                print("This T2 is not going to be used for JointFusion")
                print(_dict['T2s'])
                _dict['BadT2'] = True
            _dict['PDs'] = database.getFilenamesByScantype(session, ['PD-15', 'PD-30'])
            _dict['FLs'] = database.getFilenamesByScantype(session, ['FL-15', 'FL-30'])
            _dict['EMSP'] = database.getFilenamesByScantype(session, ['EMSP'])
            _dict['OTHERs'] = database.getFilenamesByScantype(session, ['OTHER-15', 'OTHER-30'])
            sentinal_file_basedir = os.path.join(
                master_config['resultdir'],
                _dict['project'],
                _dict['subject'],
                _dict['session']
            )

            sentinal_file_list = list()
            sentinal_file_list.append(os.path.join(sentinal_file_basedir))
            if 'denoise' in master_config['components']:
                # # NO SENTINAL FILE
                pass

            # # Use t1 average sentinal file if specified.
            if 'landmark' in master_config['components']:
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "ACPCAlign",
                    "landmarkInitializer_atlas_to_subject_transform.h5"
                ))

            if 'tissue_classify' in master_config['components']:
                for tc_file in ["complete_brainlabels_seg.nii.gz", "t1_average_BRAINSABC.nii.gz"]:
                    sentinal_file_list.append(os.path.join(
                        sentinal_file_basedir,
                        "TissueClassify",
                        tc_file
                    ))

            if 'warp_atlas_to_subject' in master_config['components']:
                warp_atlas_file_list = [
                    "hncma_atlas.nii.gz",
                    "l_accumben_ProbabilityMap.nii.gz",
                    "l_caudate_ProbabilityMap.nii.gz",
                    "l_globus_ProbabilityMap.nii.gz",
                    "l_hippocampus_ProbabilityMap.nii.gz",
                    "l_putamen_ProbabilityMap.nii.gz",
                    "l_thalamus_ProbabilityMap.nii.gz",
                    "left_hemisphere_wm.nii.gz",
                    "phi.nii.gz",
                    "r_accumben_ProbabilityMap.nii.gz",
                    "r_caudate_ProbabilityMap.nii.gz",
                    "r_globus_ProbabilityMap.nii.gz",
                    "r_hippocampus_ProbabilityMap.nii.gz",
                    "r_putamen_ProbabilityMap.nii.gz",
                    "r_thalamus_ProbabilityMap.nii.gz",
                    "rho.nii.gz",
                    "right_hemisphere_wm.nii.gz",
                    "template_WMPM2_labels.nii.gz",
                    "template_headregion.nii.gz",
                    "template_leftHemisphere.nii.gz",
                    "template_nac_labels.nii.gz",
                    "template_rightHemisphere.nii.gz",
                    "template_ventricles.nii.gz",
                    "theta.nii.gz"
                ]
                for ff in warp_atlas_file_list:
                    sentinal_file_list.append(os.path.join(
                        sentinal_file_basedir,
                        "WarpedAtlas2Subject",
                        ff
                    ))

            if 'jointfusion_2015_wholebrain' in master_config['components']:
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "TissueClassify",
                    "JointFusion_HDAtlas20_2015_lobar_label.nii.gz"
                ))
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "TissueClassify",
                    "lobeVolumes_JSON.json"
                ))

            if master_config['workflow_phase'] == 'atlas-based-reference':
                atlasDirectory = os.path.join(master_config['atlascache'], 'spatialImages', 'rho.nii.gz')
                sentinal_file_list.append(atlasDirectory)
            else:
                atlasDirectory = os.path.join(master_config['previousresult'], subject, 'Atlas', 'AVG_rho.nii.gz')
                sentinal_file_list.append(atlasDirectory)
                sentinal_file_list.append(
                    os.path.join(master_config['previousresult'], subject, 'Atlas', 'AVG_template_headregion.nii.gz'))

            if os.path.exists(atlasDirectory):
                print("LOOKING FOR DIRECTORY {0}".format(atlasDirectory))
            else:
                print("MISSING REQUIRED ATLAS INPUT {0}".format(atlasDirectory))
                print("SKIPPING: {0} prerequisites missing".format(session))
                continue

            ## Use different sentinal file if segmentation specified.
            from workflows.baseline import DetermineIfSegmentationShouldBeDone

            do_BRAINSCut_Segmentation = DetermineIfSegmentationShouldBeDone(master_config)
            if do_BRAINSCut_Segmentation:
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "CleanedDenoisedRFSegmentations",
                    "allLabels_seg.nii.gz"
                ))

            def allPathsExists(list_of_paths):
                is_missing = False
                for ff in list_of_paths:
                    if not os.path.exists(ff):
                        is_missing = True
                        print("MISSING: {0}".format(ff))
                return not is_missing

            if useSentinal and allPathsExists(sentinal_file_list):
                print("SKIPPING: {0} exists".format(sentinal_file_list))
            else:
                print("PROCESSING INCOMPLETE: at least 1 required file does not exists")
                if dryRun == False:
                    workflow = _create_singleSession(_dict, master_config, 'Linear',
                                                     'singleSession_{0}_{1}'.format(_dict['subject'], _dict['session']))
                    print("Starting session {0}".format(session))
                    # HACK Hard-coded to SGEGraph, but --wfrun is ignored completely
                    run_workflow(workflow, plugin=master_config['plugin_name'],
                                 plugin_args=master_config['plugin_args'])
                else:
                    print("EXITING WITHOUT WORK DUE TO dryRun flag")
    except:
        raise
    finally:
        try:
            database.close_connection()
        except:
            pass