Example #1
0
def writeCVSubsetFile( environment, experiment, pipeline, cluster, csv_file, test_size, hasHeader):

    from utilities.misc import add_dict
    master_config = {}
    for configDict in [environment, experiment, pipeline, cluster]:
        master_config = add_dict(master_config, configDict)

    """
    read in csv file
    """
    import csv
    csv_data = []
    with open(csv_file, mode='r') as infile:
        reader = csv.DictReader(infile, skipinitialspace=True)
        for row in reader:
            csv_data.append(row)
    print(csv_data)

    totalSampleSize = len(csv_data)
    print(totalSampleSize)
    cv_subsets = subsample_crossValidationSet(totalSampleSize, test_size)

    """
    global variable
    """
    BASE_DATA_GRABBER_DIR='/Shared/johnsonhj/HDNI/Neuromorphometrics/20141116_Neuromorphometrics_base_Results/Neuromorphometrics/2012Subscription'
    #master_config = {'queue':'HJ',
    #    'long_q':'HJ'}

    """
    workflow
    """
    import nipype.pipeline.engine as pe
    import nipype.interfaces.io as nio
    from WorkupT1T2MALF import CreateMALFWorkflow
    CV_MALF_WF = pe.Workflow(name="CV_MALF")
    CV_MALF_WF.base_dir = master_config['cachedir']


    subset_no = 1
    for subset in cv_subsets:
        print "-"*80
        print " Creat a subset workflow Set " + str(subset_no)
        print "-"*80
        trainData = [ csv_data[i] for i in subset['train'] ]
        testData = [ csv_data[i] for i in subset['test'] ]

        print [ (trainData[i])['id'] for i in range( len(trainData))]
Example #2
0
def run(argv, environment, experiment, pipeline, cluster):
    from utilities.configFileParser import nipype_options
    from utilities.misc import add_dict
    from utilities.distributed import create_global_sge_script
    print "Getting subjects from database..."
    subjects = get_subjects(argv, experiment['cachedir'], environment['prefix'], experiment['dbfile']) # Build database before parallel section
    if environment['cluster']:
        print "Creating SGE template string..."
        node_template = create_global_sge_script(cluster, environment)
    else:
        node_template = None
    print "Copying Atlas directory and determining appropriate Nipype options..."
    pipeline = nipype_options(argv, pipeline, cluster, node_template, experiment)  # Generate Nipype options
    master_config = {}
    for configDict in [environment, experiment, pipeline, cluster]:
        master_config = add_dict(master_config, configDict)
    print "Dispatching jobs to the system..."
    return dispatcher(master_config, subjects)
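Note: add_dict (imported from utilities.misc) is not shown in these examples. A minimal sketch of the
master_config merge used throughout, assuming add_dict simply folds the second dict into a copy of the
first so that later config dicts override earlier keys:

def add_dict(base, extra):
    # Hypothetical stand-in for utilities.misc.add_dict (assumption, not the real implementation):
    # merge `extra` into a copy of `base`, letting keys in `extra` win on conflict.
    merged = dict(base)
    merged.update(extra)
    return merged

master_config = {}
for configDict in [{'cachedir': '/tmp/cache'}, {'queue': 'HJ'}]:  # toy config dicts for illustration
    master_config = add_dict(master_config, configDict)
print(master_config)  # {'cachedir': '/tmp/cache', 'queue': 'HJ'}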
Example #3
0
def run(argv, environment, experiment, pipeline, cluster):
    from utilities.configFileParser import nipype_options
    from utilities.misc import add_dict
    from utilities.distributed import create_global_sge_script
    print "Getting subjects from database..."
    subjects = get_subjects(
        argv, experiment['cachedir'], environment['prefix'],
        experiment['dbfile'])  # Build database before parallel section
    if environment['cluster']:
        print "Creating SGE template string..."
        node_template = create_global_sge_script(cluster, environment)
    else:
        node_template = None
    print "Copying Atlas directory and determining appropriate Nipype options..."
    pipeline = nipype_options(argv, pipeline, cluster, node_template,
                              experiment)  # Generate Nipype options
    master_config = {}
    for configDict in [environment, experiment, pipeline, cluster]:
        master_config = add_dict(master_config, configDict)
    print "Dispatching jobs to the system..."
    return dispatcher(master_config, subjects)
Example #4
0
def createAndRun(sessions, environment, experiment, pipeline, cluster):
    from baw_exp import OpenSubjectDatabase
    from utilities.misc import add_dict
    master_config = {}
    for configDict in [environment, experiment, pipeline, cluster]:
        master_config = add_dict(master_config, configDict)
    database = OpenSubjectDatabase(experiment['cachedir'], ['all'], environment['prefix'], experiment['dbfile'])
    database.open_connection()
    try:
        all_sessions = database.getAllSessions()
        if not set(sessions) <= set(all_sessions) and 'all' not in sessions:
            missing = set(sessions) - set(all_sessions)
            assert len(missing) == 0, "Requested sessions are missing from the database: {0}".format(missing)
        elif 'all' in sessions:
            sessions = set(all_sessions)
        else:
            sessions = set(sessions)
        for session in sessions:
            _dict = {}
            _dict['session'] = session
            _dict['project'] = database.getProjFromSession(session)
            _dict['subject'] = database.getSubjFromSession(session)
            _dict['T1s'] = database.getFilenamesByScantype(session, ['T1-15', 'T1-30'])
            _dict['T2s'] = database.getFilenamesByScantype(session, ['T2-15', 'T2-30'])
            _dict['PDs'] = database.getFilenamesByScantype(session, ['PD-15', 'PD-30'])
            _dict['FLs'] = database.getFilenamesByScantype(session, ['FL-15', 'FL-30'])
            _dict['OTs'] = database.getFilenamesByScantype(session, ['OTHER-15', 'OTHER-30'])
            workflow = create_singleSession(_dict, master_config, 'Linear', 'singleSession_{0}_{1}'.format(_dict['subject'], _dict['session']))
            workflow.run(plugin='SGEGraph', plugin_args=master_config['plugin_args'])
    except:
        raise
    finally:
        try:
            database.close_connection()
        except:
            pass
Example #5
0
def createAndRun(sessions, environment, experiment, pipeline, cluster, useSentinal, dryRun):
    import os

    from baw_exp import OpenSubjectDatabase
    from utilities.misc import add_dict

    from workflows.utils import run_workflow

    master_config = {}
    for configDict in [environment, experiment, pipeline, cluster]:
        master_config = add_dict(master_config, configDict)
    database = OpenSubjectDatabase(experiment['cachedir'], ['all'], environment['prefix'], experiment['dbfile'])
    database.open_connection()
    try:
        all_sessions = database.getAllSessions()
        if not set(sessions) <= set(all_sessions) and 'all' not in sessions:
            missing = set(sessions) - set(all_sessions)
            assert len(missing) == 0, "Requested sessions are missing from the database: {0}\n\n{1}".format(missing,all_sessions)
        elif 'all' in sessions:
            sessions = set(all_sessions)
        else:
            sessions = set(sessions)
        print("!=" * 40)
        print("Doing sessions {0}".format(sessions))
        print("!=" * 40)
        for session in sessions:
            _dict = {}
            subject = database.getSubjFromSession(session)
            _dict['session'] = session
            _dict['project'] = database.getProjFromSession(session)
            _dict['subject'] = subject
            _dict['T1s'] = database.getFilenamesByScantype(session, ['T1-15', 'T1-30'])
            _dict['T2s'] = database.getFilenamesByScantype(session, ['T2-15', 'T2-30'])
            _dict['PDs'] = database.getFilenamesByScantype(session, ['PD-15', 'PD-30'])
            _dict['FLs'] = database.getFilenamesByScantype(session, ['FL-15', 'FL-30'])
            _dict['OTs'] = database.getFilenamesByScantype(session, ['OTHER-15', 'OTHER-30'])
            sentinal_file_basedir = os.path.join(
                master_config['resultdir'],
                _dict['project'],
                _dict['subject'],
                _dict['session']
            )

            sentinal_file_list = list()
            sentinal_file_list.append(os.path.join(sentinal_file_basedir))
            if 'denoise' in master_config['components']:
                # # NO SENTINAL FILE
                pass

            # # Use t1 average sentinal file if  specified.
            if 'landmark' in master_config['components']:
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "ACPCAlign",
                    "landmarkInitializer_atlas_to_subject_transform.h5"
                ))

            if 'tissue_classify' in master_config['components']:
                for tc_file in ["complete_brainlabels_seg.nii.gz", "t1_average_BRAINSABC.nii.gz"]:
                    sentinal_file_list.append(os.path.join(
                        sentinal_file_basedir,
                        "TissueClassify",
                        tc_file
                    ))

            if 'warp_atlas_to_subject' in master_config['components']:
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "WarpedAtlas2Subject",
                    "rho.nii.gz"
                ))
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "WarpedAtlas2Subject",
                    "left_hemisphere_wm.nii.gz"
                ))

            if 'malf_2012_neuro' in master_config['components']:
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "TissueClassify",
                    "MALF_HDAtlas20_2015_fs_standard_label.nii.gz"
                ))
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "TissueClassify",
                    "MALF_HDAtlas20_2015_label.nii.gz"
                ))
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "TissueClassify",
                    "MALF_HDAtlas20_2015_lobar_label.nii.gz"
                ))
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "TissueClassify",
                    "MALF_HDAtlas20_2015_CSFVBInjected_label.nii.gz"
                ))
            if 'malf_2015_wholebrain' in master_config['components']:
                pass

            if master_config['workflow_phase'] == 'atlas-based-reference':
                atlasDirectory = os.path.join(master_config['atlascache'], 'spatialImages', 'rho.nii.gz')
            else:
                atlasDirectory = os.path.join(master_config['previousresult'], subject, 'Atlas', 'AVG_rho.nii.gz')

            if os.path.exists(atlasDirectory):
                print("LOOKING FOR DIRECTORY {0}".format(atlasDirectory))
            else:
                print("MISSING REQUIRED ATLAS INPUT {0}".format(atlasDirectory))
                print("SKIPPING: {0} prerequisites missing".format(session))
                continue

            ## Use different sentinal file if segmentation specified.
            from workflows.baseline import DetermineIfSegmentationShouldBeDone

            do_BRAINSCut_Segmentation = DetermineIfSegmentationShouldBeDone(master_config)
            if do_BRAINSCut_Segmentation:
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "CleanedDenoisedRFSegmentations",
                    "allLabels_seg.nii.gz"
                ))

            def allPathsExists(list_of_paths):
                is_missing = False
                for ff in list_of_paths:
                    if not os.path.exists(ff):
                        is_missing = True
                        print("MISSING: {0}".format(ff))
                return not is_missing

            if useSentinal and allPathsExists(sentinal_file_list):
                print("SKIPPING: {0} exists".format(sentinal_file_list))
            else:
                print("PROCESSING INCOMPLETE: at least 1 required file does not exists")
                if dryRun == False:
                    workflow = _create_singleSession(_dict, master_config, 'Linear',
                                                     'singleSession_{0}_{1}'.format(_dict['subject'], _dict['session']))
                    print("Starting session {0}".format(session))
                    # HACK Hard-coded to SGEGraph, but --wfrun is ignored completely
                    run_workflow(workflow, plugin=master_config['plugin_name'],
                                 plugin_args=master_config['plugin_args'])
                else:
                    print("EXITING WITHOUT WORK DUE TO dryRun flag")
    except:
        raise
    finally:
        try:
            database.close_connection()
        except:
            pass
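The sentinel-file logic above skips a session only when every expected output already exists. A
self-contained sketch of that pattern (function names here are chosen for illustration):

import os

def all_paths_exist(list_of_paths):
    # Same idea as allPathsExists above: report each missing file and
    # return True only when nothing is missing.
    is_missing = False
    for ff in list_of_paths:
        if not os.path.exists(ff):
            is_missing = True
            print("MISSING: {0}".format(ff))
    return not is_missing

def should_skip_session(sentinal_file_list, useSentinal):
    # Skip only when sentinel checking is requested and all expected outputs are present.
    return useSentinal and all_paths_exist(sentinal_file_list)

print(should_skip_session(["/tmp/does_not_exist.nii.gz"], useSentinal=True))  # False -> reprocess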
Example #6
0
def write_cvsubset_file(
    environment, experiment, pipeline, cluster, csv_file, test_size, hasHeader
):
    """
    Build and run cross-validated JointFusion workflows for the sessions listed in a CSV file.

    :param environment: environment configuration dict
    :param experiment: experiment configuration dict
    :param pipeline: pipeline configuration dict
    :param cluster: cluster configuration dict
    :param csv_file: path to the CSV file describing the sessions
    :param test_size: size of the held-out test subset for each cross-validation split
    :param hasHeader: whether the CSV file has a header row
    :return: None
    """
    from utilities.misc import add_dict
    from collections import (
        OrderedDict,
    )  # Need OrderedDict internally to ensure consistent ordering

    master_config = OrderedDict()
    for configDict in [environment, experiment, pipeline, cluster]:
        master_config = add_dict(master_config, configDict)

    """
    read in csv file
    """
    import csv

    csv_data = []
    with open(csv_file, mode="r") as infile:
        reader = csv.DictReader(infile, skipinitialspace=True)
        for row in reader:
            csv_data.append(row)
    print(csv_data)

    totalSampleSize = len(csv_data)
    print(totalSampleSize)
    cv_subsets = sample_crossvalidation_set(totalSampleSize, test_size)

    """
    global variable
    """
    ## HACK FOR NOW SHOULD BE MORE ELEGANT FROM THE .config file
    BASE_DATA_GRABBER_DIR = (
        "/Shared/johnsonhj/HDNI/ReferenceData/Neuromorphometrics/2012Subscription"
    )
    # master_config = {'queue':'HJ',
    #    'long_q':'HJ'}

    """
    workflow
    """
    import nipype.pipeline.engine as pe
    import nipype.interfaces.io as nio
    from nipype.interfaces.utility import IdentityInterface
    from .WorkupJointFusion import create_joint_fusion_workflow

    CV_JointFusion_WF = pe.Workflow(name="CV_JointFusion")
    CV_JointFusion_WF.base_dir = master_config["cachedir"]

    subset_no = 1
    for subset in cv_subsets:
        print(("-" * 80))
        print((" Creat a subset workflow Set " + str(subset_no)))
        print(("-" * 80))
        trainData = [csv_data[i] for i in subset["train"]]
        testData = [csv_data[i] for i in subset["test"]]

        print([(trainData[i])["id"] for i in range(len(trainData))])

        for testSession in testData:
            JointFusionWFName = "JointFusion_Set{0}_{1}".format(
                subset_no, testSession["id"]
            )
            myJointFusion = create_joint_fusion_workflow(
                JointFusionWFName,
                master_config,
                [(trainData[i])["id"] for i in range(len(trainData))],
                BASE_DATA_GRABBER_DIR,
                runFixFusionLabelMap=False,
            )

            testSessionName = "testSessionSpec_Set{0}_{1}".format(
                subset_no, testSession["id"]
            )
            testSessionSpec = pe.Node(
                interface=IdentityInterface(
                    fields=[
                        "t1_average",
                        "tissueLabel",
                        "template_leftHemisphere",
                        "landmarkInACPCAlignedSpace",
                        "template_weights_50Lmks_wts",
                        "labelFilename",
                    ]
                ),
                run_without_submitting=True,
                name=testSessionName,
            )

            CV_JointFusion_WF.connect(
                testSessionSpec, "t1_average", myJointFusion, "inputspec.subj_t1_image"
            )
            CV_JointFusion_WF.connect(
                testSessionSpec,
                "tissueLabel",
                myJointFusion,
                "inputspec.subj_fixed_head_labels",
            )

            CV_JointFusion_WF.connect(
                testSessionSpec,
                "template_leftHemisphere",
                myJointFusion,
                "inputspec.subj_left_hemisphere",
            )
            CV_JointFusion_WF.connect(
                testSessionSpec,
                "landmarkInACPCAlignedSpace",
                myJointFusion,
                "inputspec.subj_lmks",
            )
            CV_JointFusion_WF.connect(
                testSessionSpec,
                "template_weights_50Lmks_wts",
                myJointFusion,
                "inputspec.atlasWeightFilename",
            )
            CV_JointFusion_WF.connect(
                testSessionSpec,
                "labelFilename",
                myJointFusion,
                "inputspec.labelBaseFilename",
            )

            """ set test image information
            """
            print(testSession)
            testSessionSpec.inputs.t1_average = testSession["t1"]
            testSessionSpec.inputs.tissueLabel = testSession["fixed_head_label"]
            testSessionSpec.inputs.template_leftHemisphere = testSession[
                "warpedAtlasLeftHemisphere"
            ]
            testSessionSpec.inputs.landmarkInACPCAlignedSpace = testSession["lmk"]
            testSessionSpec.inputs.template_weights_50Lmks_wts = "/Shared/sinapse/scratch/eunyokim/src/NamicExternal/build_Mac_201501/bin/Atlas/Atlas_20131115/20141004_BCD/template_landmarks_50Lmks.fcsv"
            testSessionSpec.inputs.labelFilename = "FS_wmparc.nii.gz"

            """
            DataSink
            """
            dsName = "DataSink_DS_Set{0}_{1}".format(subset_no, testSession["id"])
            DataSink = pe.Node(name=dsName, interface=nio.DataSink())
            DataSink.overwrite = master_config["ds_overwrite"]
            DataSink.inputs.container = "CV_Set{0}/{1}".format(
                subset_no, testSession["id"]
            )
            DataSink.inputs.base_directory = master_config["resultdir"]

            CV_JointFusion_WF.connect(
                myJointFusion,
                "outputspec.JointFusion_neuro2012_labelmap",
                DataSink,
                "Segmentation.@JointFusion_neuro2012_labelmap",
            )

            subset_no = subset_no + 1

    # CV_JointFusion_WF.write_graph()
    CV_JointFusion_WF.run(
        plugin=master_config["plugin_name"], plugin_args=master_config["plugin_args"]
    )
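sample_crossvalidation_set (and subsample_crossValidationSet in the older variants) is not shown in
these examples; from its usage it is assumed to return a list of subsets, each a dict holding 'train'
and 'test' index lists over the CSV rows. A minimal sketch under that assumption:

import random

def sample_crossvalidation_set(total_sample_size, test_size, seed=0):
    # Hypothetical sketch (assumption, not the real helper): shuffle the row indices and
    # carve them into consecutive test folds of `test_size`, training on the remainder.
    indices = list(range(total_sample_size))
    random.Random(seed).shuffle(indices)
    subsets = []
    for start in range(0, total_sample_size, test_size):
        test = indices[start:start + test_size]
        train = [i for i in indices if i not in test]
        subsets.append({'train': train, 'test': test})
    return subsets

print(sample_crossvalidation_set(6, 2))  # three subsets, each with 2 test and 4 train indices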
Example #7
0
def createAndRun(sessions, environment, experiment, pipeline, cluster,
                 useSentinal, dryRun):
    import os

    from baw_exp import OpenSubjectDatabase
    from utilities.misc import add_dict

    from workflows.utils import run_workflow

    master_config = {}
    for configDict in [environment, experiment, pipeline, cluster]:
        master_config = add_dict(master_config, configDict)
    database = OpenSubjectDatabase(experiment['cachedir'], ['all'],
                                   environment['prefix'], experiment['dbfile'])
    database.open_connection()
    try:
        all_sessions = database.getAllSessions()
        if not set(sessions) <= set(all_sessions) and 'all' not in sessions:
            missing = set(sessions) - set(all_sessions)
            assert len(
                missing
            ) == 0, "Requested sessions are missing from the database: {0}\n\n{1}".format(
                missing, all_sessions)
        elif 'all' in sessions:
            sessions = set(all_sessions)
        else:
            sessions = set(sessions)
        print("!=" * 40)
        print("Doing sessions {0}".format(sessions))
        print("!=" * 40)
        for session in sessions:
            _dict = {}
            subject = database.getSubjFromSession(session)
            _dict['session'] = session
            _dict['project'] = database.getProjFromSession(session)
            _dict['subject'] = subject
            _dict['T1s'] = database.getFilenamesByScantype(
                session, ['T1-15', 'T1-30'])
            _dict['T2s'] = database.getFilenamesByScantype(
                session, ['T2-15', 'T2-30'])
            _dict['PDs'] = database.getFilenamesByScantype(
                session, ['PD-15', 'PD-30'])
            _dict['FLs'] = database.getFilenamesByScantype(
                session, ['FL-15', 'FL-30'])
            _dict['OTs'] = database.getFilenamesByScantype(
                session, ['OTHER-15', 'OTHER-30'])
            sentinal_file_basedir = os.path.join(master_config['resultdir'],
                                                 _dict['project'],
                                                 _dict['subject'],
                                                 _dict['session'])

            sentinal_file_list = list()
            sentinal_file_list.append(os.path.join(sentinal_file_basedir))
            if 'denoise' in master_config['components']:
                # # NO SENTINAL FILE
                pass

            # # Use t1 average sentinal file if  specified.
            if 'landmark' in master_config['components']:
                sentinal_file_list.append(
                    os.path.join(
                        sentinal_file_basedir, "ACPCAlign",
                        "landmarkInitializer_atlas_to_subject_transform.h5"))

            if 'tissue_classify' in master_config['components']:
                for tc_file in [
                        "complete_brainlabels_seg.nii.gz",
                        "t1_average_BRAINSABC.nii.gz"
                ]:
                    sentinal_file_list.append(
                        os.path.join(sentinal_file_basedir, "TissueClassify",
                                     tc_file))

            if 'warp_atlas_to_subject' in master_config['components']:
                sentinal_file_list.append(
                    os.path.join(sentinal_file_basedir, "WarpedAtlas2Subject",
                                 "rho.nii.gz"))
                sentinal_file_list.append(
                    os.path.join(sentinal_file_basedir, "WarpedAtlas2Subject",
                                 "left_hemisphere_wm.nii.gz"))

            if 'malf_2012_neuro' in master_config['components']:
                sentinal_file_list.append(
                    os.path.join(
                        sentinal_file_basedir, "TissueClassify",
                        "MALF_HDAtlas20_2015_fs_standard_label.nii.gz"))
                sentinal_file_list.append(
                    os.path.join(sentinal_file_basedir, "TissueClassify",
                                 "MALF_HDAtlas20_2015_label.nii.gz"))
                sentinal_file_list.append(
                    os.path.join(sentinal_file_basedir, "TissueClassify",
                                 "MALF_HDAtlas20_2015_lobar_label.nii.gz"))
                sentinal_file_list.append(
                    os.path.join(
                        sentinal_file_basedir, "TissueClassify",
                        "MALF_HDAtlas20_2015_CSFVBInjected_label.nii.gz"))
            if 'malf_2015_wholebrain' in master_config['components']:
                pass

            if master_config['workflow_phase'] == 'atlas-based-reference':
                atlasDirectory = os.path.join(master_config['atlascache'],
                                              'spatialImages', 'rho.nii.gz')
            else:
                atlasDirectory = os.path.join(master_config['previousresult'],
                                              subject, 'Atlas',
                                              'AVG_rho.nii.gz')

            if os.path.exists(atlasDirectory):
                print("LOOKING FOR DIRECTORY {0}".format(atlasDirectory))
            else:
                print(
                    "MISSING REQUIRED ATLAS INPUT {0}".format(atlasDirectory))
                print("SKIPPING: {0} prerequisites missing".format(session))
                continue

            ## Use different sentinal file if segmentation specified.
            from workflows.baseline import DetermineIfSegmentationShouldBeDone

            do_BRAINSCut_Segmentation = DetermineIfSegmentationShouldBeDone(
                master_config)
            if do_BRAINSCut_Segmentation:
                sentinal_file_list.append(
                    os.path.join(sentinal_file_basedir,
                                 "CleanedDenoisedRFSegmentations",
                                 "allLabels_seg.nii.gz"))

            def allPathsExists(list_of_paths):
                is_missing = False
                for ff in list_of_paths:
                    if not os.path.exists(ff):
                        is_missing = True
                        print("MISSING: {0}".format(ff))
                return not is_missing

            if useSentinal and allPathsExists(sentinal_file_list):
                print("SKIPPING: {0} exists".format(sentinal_file_list))
            else:
                print(
                    "PROCESSING INCOMPLETE: at least 1 required file does not exists"
                )
                if dryRun == False:
                    workflow = _create_singleSession(
                        _dict, master_config, 'Linear',
                        'singleSession_{0}_{1}'.format(_dict['subject'],
                                                       _dict['session']))
                    print("Starting session {0}".format(session))
                    # HACK Hard-coded to SGEGraph, but --wfrun is ignored completely
                    run_workflow(workflow,
                                 plugin=master_config['plugin_name'],
                                 plugin_args=master_config['plugin_args'])
                else:
                    print("EXITING WITHOUT WORK DUE TO dryRun flag")
    except:
        raise
    finally:
        try:
            database.close_connection()
        except:
            pass
Example #8
0
def createAndRun(sessions, environment, experiment, pipeline, cluster,
                 useSentinal, dryRun):
    from baw_exp import OpenSubjectDatabase
    from utilities.misc import add_dict
    from collections import OrderedDict
    import os
    import sys

    from workflows.utils import run_workflow

    master_config = {}
    for configDict in [environment, experiment, pipeline, cluster]:
        master_config = add_dict(master_config, configDict)
    database = OpenSubjectDatabase(experiment['cachedir'], ['all'],
                                   environment['prefix'], experiment['dbfile'])
    database.open_connection()
    try:
        all_sessions = database.getAllSessions()
        if not set(sessions) <= set(all_sessions) and 'all' not in sessions:
            missing = set(sessions) - set(all_sessions)
            assert len(
                missing
            ) == 0, "Requested sessions are missing from the database: {0}\n\n{1}".format(
                missing, all_sessions)
        elif 'all' in sessions:
            sessions = set(all_sessions)
        else:
            sessions = set(sessions)
        print("!=" * 40)
        print("Doing sessions {0}".format(sessions))
        print("!=" * 40)
        for session in sessions:
            _dict = OrderedDict()
            subject = database.getSubjFromSession(session)
            t1_list = database.getFilenamesByScantype(session,
                                                      ['T1-15', 'T1-30'])
            if len(t1_list) == 0:
                print(
                    "ERROR: Skipping session {0} for subject {1} due to missing T1's"
                    .format(session, subject))
                print("REMOVE OR FIX BEFORE CONTINUING")
                continue
            _dict['session'] = session
            _dict['project'] = database.getProjFromSession(session)
            _dict['subject'] = subject
            _dict['T1s'] = t1_list
            _dict['T2s'] = database.getFilenamesByScantype(
                session, ['T2-15', 'T2-30'])
            _dict['BadT2'] = False
            if _dict['T2s'] == database.getFilenamesByScantype(
                    session, ['T2-15']):
                print("This T2 is not going to be used for JointFusion")
                print("This T2 is not going to be used for JointFusion")
                print("This T2 is not going to be used for JointFusion")
                print("This T2 is not going to be used for JointFusion")
                print(_dict['T2s'])
                _dict['BadT2'] = True
            _dict['PDs'] = database.getFilenamesByScantype(
                session, ['PD-15', 'PD-30'])
            _dict['FLs'] = database.getFilenamesByScantype(
                session, ['FL-15', 'FL-30'])
            _dict['EMSP'] = database.getFilenamesByScantype(session, ['EMSP'])
            _dict['OTHERs'] = database.getFilenamesByScantype(
                session, ['OTHER-15', 'OTHER-30'])
            sentinal_file_basedir = os.path.join(master_config['resultdir'],
                                                 _dict['project'],
                                                 _dict['subject'],
                                                 _dict['session'])

            sentinal_file_list = list()
            sentinal_file_list.append(os.path.join(sentinal_file_basedir))
            if 'denoise' in master_config['components']:
                # # NO SENTINAL FILE
                pass

            # # Use t1 average sentinal file if  specified.
            if 'landmark' in master_config['components']:
                sentinal_file_list.append(
                    os.path.join(
                        sentinal_file_basedir, "ACPCAlign",
                        "landmarkInitializer_atlas_to_subject_transform.h5"))

            if 'tissue_classify' in master_config['components']:
                for tc_file in [
                        "complete_brainlabels_seg.nii.gz",
                        "t1_average_BRAINSABC.nii.gz"
                ]:
                    sentinal_file_list.append(
                        os.path.join(sentinal_file_basedir, "TissueClassify",
                                     tc_file))

            if 'warp_atlas_to_subject' in master_config['components']:
                warp_atlas_file_list = [
                    "hncma_atlas.nii.gz", "l_accumben_ProbabilityMap.nii.gz",
                    "l_caudate_ProbabilityMap.nii.gz",
                    "l_globus_ProbabilityMap.nii.gz",
                    "l_hippocampus_ProbabilityMap.nii.gz",
                    "l_putamen_ProbabilityMap.nii.gz",
                    "l_thalamus_ProbabilityMap.nii.gz",
                    "left_hemisphere_wm.nii.gz", "phi.nii.gz",
                    "r_accumben_ProbabilityMap.nii.gz",
                    "r_caudate_ProbabilityMap.nii.gz",
                    "r_globus_ProbabilityMap.nii.gz",
                    "r_hippocampus_ProbabilityMap.nii.gz",
                    "r_putamen_ProbabilityMap.nii.gz",
                    "r_thalamus_ProbabilityMap.nii.gz", "rho.nii.gz",
                    "right_hemisphere_wm.nii.gz",
                    "template_WMPM2_labels.nii.gz",
                    "template_headregion.nii.gz",
                    "template_leftHemisphere.nii.gz",
                    "template_nac_labels.nii.gz",
                    "template_rightHemisphere.nii.gz",
                    "template_ventricles.nii.gz", "theta.nii.gz"
                ]
                for ff in warp_atlas_file_list:
                    sentinal_file_list.append(
                        os.path.join(sentinal_file_basedir,
                                     "WarpedAtlas2Subject", ff))

            if 'jointfusion_2015_wholebrain' in master_config['components']:
                sentinal_file_list.append(
                    os.path.join(
                        sentinal_file_basedir, "TissueClassify",
                        "JointFusion_HDAtlas20_2015_lobar_label.nii.gz"))
                sentinal_file_list.append(
                    os.path.join(sentinal_file_basedir, "TissueClassify",
                                 "lobeVolumes_JSON.json"))

            if master_config['workflow_phase'] == 'atlas-based-reference':
                atlasDirectory = os.path.join(master_config['atlascache'],
                                              'spatialImages', 'rho.nii.gz')
                sentinal_file_list.append(atlasDirectory)
            else:
                atlasDirectory = os.path.join(master_config['previousresult'],
                                              subject, 'Atlas',
                                              'AVG_rho.nii.gz')
                sentinal_file_list.append(atlasDirectory)
                sentinal_file_list.append(
                    os.path.join(master_config['previousresult'], subject,
                                 'Atlas', 'AVG_template_headregion.nii.gz'))

            if os.path.exists(atlasDirectory):
                print("LOOKING FOR DIRECTORY {0}".format(atlasDirectory))
            else:
                print(
                    "MISSING REQUIRED ATLAS INPUT {0}".format(atlasDirectory))
                print("SKIPPING: {0} prerequisites missing".format(session))
                continue

            ## Use different sentinal file if segmentation specified.
            from workflows.baseline import DetermineIfSegmentationShouldBeDone

            do_BRAINSCut_Segmentation = DetermineIfSegmentationShouldBeDone(
                master_config)
            if do_BRAINSCut_Segmentation:
                sentinal_file_list.append(
                    os.path.join(sentinal_file_basedir,
                                 "CleanedDenoisedRFSegmentations",
                                 "allLabels_seg.nii.gz"))

            def allPathsExists(list_of_paths):
                is_missing = False
                for ff in list_of_paths:
                    if not os.path.exists(ff):
                        is_missing = True
                        print("MISSING: {0}".format(ff))
                return not is_missing

            if useSentinal and allPathsExists(sentinal_file_list):
                print("SKIPPING: {0} exists".format(sentinal_file_list))
            else:
                print(
                    "PROCESSING INCOMPLETE: at least 1 required file does not exists"
                )
                if dryRun == False:
                    workflow = _create_singleSession(
                        _dict, master_config, 'Linear',
                        'singleSession_{0}_{1}'.format(_dict['subject'],
                                                       _dict['session']))
                    print("Starting session {0}".format(session))
                    # HACK Hard-coded to SGEGraph, but --wfrun is ignored completely
                    run_workflow(workflow,
                                 plugin=master_config['plugin_name'],
                                 plugin_args=master_config['plugin_args'])
                else:
                    print("EXITING WITHOUT WORK DUE TO dryRun flag")
    except:
        raise
    finally:
        try:
            database.close_connection()
        except:
            pass
Example #9
0
def writeCVSubsetFile(environment, experiment, pipeline, cluster, csv_file,
                      test_size, hasHeader):

    from utilities.misc import add_dict
    master_config = {}
    for configDict in [environment, experiment, pipeline, cluster]:
        master_config = add_dict(master_config, configDict)
    """
    read in csv file
    """
    import csv
    csv_data = []
    with open(csv_file, mode='r') as infile:
        reader = csv.DictReader(infile, skipinitialspace=True)
        for row in reader:
            csv_data.append(row)
    print(csv_data)

    totalSampleSize = len(csv_data)
    print(totalSampleSize)
    cv_subsets = subsample_crossValidationSet(totalSampleSize, test_size)
    """
    global variable
    """
    ## HACK FOR NOW SHOULD BE MORE ELEGANT FROM THE .config file
    BASE_DATA_GRABBER_DIR = '/Shared/johnsonhj/HDNI/ReferenceData/Neuromorphometrics/2012Subscription'
    #master_config = {'queue':'HJ',
    #    'long_q':'HJ'}
    """
    workflow
    """
    import nipype.pipeline.engine as pe
    import nipype.interfaces.io as nio
    from nipype.interfaces.utility import IdentityInterface
    from .WorkupJointFusion import CreateJointFusionWorkflow
    CV_JointFusion_WF = pe.Workflow(name="CV_JointFusion")
    CV_JointFusion_WF.base_dir = master_config['cachedir']

    subset_no = 1
    for subset in cv_subsets:
        print("-" * 80)
        print(" Creat a subset workflow Set " + str(subset_no))
        print("-" * 80)
        trainData = [csv_data[i] for i in subset['train']]
        testData = [csv_data[i] for i in subset['test']]

        print([(trainData[i])['id'] for i in range(len(trainData))])

        for testSession in testData:
            JointFusionWFName = "JointFusion_Set{0}_{1}".format(
                subset_no, testSession['id'])
            myJointFusion = CreateJointFusionWorkflow(
                JointFusionWFName,
                master_config,
                [(trainData[i])['id'] for i in range(len(trainData))],
                BASE_DATA_GRABBER_DIR,
                runFixFusionLabelMap=False)

            testSessionName = "testSessionSpec_Set{0}_{1}".format(
                subset_no, testSession['id'])
            testSessionSpec = pe.Node(interface=IdentityInterface(fields=[
                't1_average', 'tissueLabel', 'template_leftHemisphere',
                'landmarkInACPCAlignedSpace', 'template_weights_50Lmks_wts',
                'labelFilename'
            ]),
                                      run_without_submitting=True,
                                      name=testSessionName)

            CV_JointFusion_WF.connect(testSessionSpec, 't1_average',
                                      myJointFusion, 'inputspec.subj_t1_image')
            CV_JointFusion_WF.connect(testSessionSpec, 'tissueLabel',
                                      myJointFusion,
                                      'inputspec.subj_fixed_head_labels')

            CV_JointFusion_WF.connect(testSessionSpec,
                                      'template_leftHemisphere', myJointFusion,
                                      'inputspec.subj_left_hemisphere')
            CV_JointFusion_WF.connect(testSessionSpec,
                                      'landmarkInACPCAlignedSpace',
                                      myJointFusion, 'inputspec.subj_lmks')
            CV_JointFusion_WF.connect(testSessionSpec,
                                      'template_weights_50Lmks_wts',
                                      myJointFusion,
                                      'inputspec.atlasWeightFilename')
            CV_JointFusion_WF.connect(testSessionSpec, 'labelFilename',
                                      myJointFusion,
                                      'inputspec.labelBaseFilename')
            """ set test image information
            """
            print(testSession)
            testSessionSpec.inputs.t1_average = testSession['t1']
            testSessionSpec.inputs.tissueLabel = testSession[
                'fixed_head_label']
            testSessionSpec.inputs.template_leftHemisphere = testSession[
                'warpedAtlasLeftHemisphere']
            testSessionSpec.inputs.landmarkInACPCAlignedSpace = testSession[
                'lmk']
            testSessionSpec.inputs.template_weights_50Lmks_wts = "/Shared/sinapse/scratch/eunyokim/src/NamicExternal/build_Mac_201501/bin/Atlas/Atlas_20131115/20141004_BCD/template_landmarks_50Lmks.fcsv"
            testSessionSpec.inputs.labelFilename = 'FS_wmparc.nii.gz'
            """
            DataSink
            """
            dsName = "DataSink_DS_Set{0}_{1}".format(subset_no,
                                                     testSession['id'])
            DataSink = pe.Node(name=dsName, interface=nio.DataSink())
            DataSink.overwrite = master_config['ds_overwrite']
            DataSink.inputs.container = 'CV_Set{0}/{1}'.format(
                subset_no, testSession['id'])
            DataSink.inputs.base_directory = master_config['resultdir']

            CV_JointFusion_WF.connect(
                myJointFusion, 'outputspec.JointFusion_neuro2012_labelmap',
                DataSink, 'Segmentation.@JointFusion_neuro2012_labelmap')

            subset_no = subset_no + 1

    #CV_JointFusion_WF.write_graph()
    CV_JointFusion_WF.run(plugin=master_config['plugin_name'],
                          plugin_args=master_config['plugin_args'])
Example #10
0
def writeCVSubsetFile( environment, experiment, pipeline, cluster, csv_file, test_size, hasHeader):

    from utilities.misc import add_dict
    master_config = {}
    for configDict in [environment, experiment, pipeline, cluster]:
        master_config = add_dict(master_config, configDict)

    """
    read in csv file
    """
    import csv
    csv_data = []
    with open(csv_file, mode='r') as infile:
        reader = csv.DictReader(infile, skipinitialspace=True)
        for row in reader:
            csv_data.append(row)
    print(csv_data)

    totalSampleSize = len(csv_data)
    print(totalSampleSize)
    cv_subsets = subsample_crossValidationSet( totalSampleSize, test_size)

    """
    global variable
    """
    ## HACK FOR NOW SHOULD BE MORE ELEGANT FROM THE .config file
    BASE_DATA_GRABBER_DIR='/Shared/johnsonhj/HDNI/ReferenceData/Neuromorphometrics/2012Subscription'
    #master_config = {'queue':'HJ',
    #    'long_q':'HJ'}

    """
    workflow
    """
    import nipype.pipeline.engine as pe
    import nipype.interfaces.io as nio
    from nipype.interfaces.utility import IdentityInterface
    from .WorkupJointFusion import CreateJointFusionWorkflow
    CV_JointFusion_WF = pe.Workflow(name="CV_JointFusion")
    CV_JointFusion_WF.base_dir = master_config['cachedir']


    subset_no = 1
    for subset in cv_subsets:
        print("-"*80)
        print(" Creat a subset workflow Set " + str(subset_no))
        print("-"*80)
        trainData = [ csv_data[i] for i in subset['train'] ]
        testData = [ csv_data[i] for i in subset['test'] ]

        print([ (trainData[i])['id'] for i in range( len(trainData))])

        for testSession in testData:
            JointFusionWFName = "JointFusion_Set{0}_{1}".format(subset_no, testSession['id'])
            myJointFusion = CreateJointFusionWorkflow( JointFusionWFName,
                                         master_config,
                                         [ (trainData[i])['id'] for i in range( len(trainData))],
                                         BASE_DATA_GRABBER_DIR,
                                         runFixFusionLabelMap=False)

            testSessionName= "testSessionSpec_Set{0}_{1}".format(subset_no, testSession['id'])
            testSessionSpec = pe.Node( interface=IdentityInterface( fields=['t1_average',
                                                                         'tissueLabel',
                                                                         'template_leftHemisphere',
                                                                         'landmarkInACPCAlignedSpace',
                                                                         'template_weights_50Lmks_wts',
                                                                         'labelFilename']),
                                    run_without_submitting = True,
                                    name=testSessionName)

            CV_JointFusion_WF.connect(testSessionSpec,'t1_average', myJointFusion,'inputspec.subj_t1_image')
            CV_JointFusion_WF.connect(testSessionSpec,'tissueLabel',myJointFusion,'inputspec.subj_fixed_head_labels')

            CV_JointFusion_WF.connect(testSessionSpec,'template_leftHemisphere', myJointFusion,'inputspec.subj_left_hemisphere')
            CV_JointFusion_WF.connect(testSessionSpec,'landmarkInACPCAlignedSpace', myJointFusion,'inputspec.subj_lmks')
            CV_JointFusion_WF.connect(testSessionSpec,'template_weights_50Lmks_wts', myJointFusion,'inputspec.atlasWeightFilename')
            CV_JointFusion_WF.connect(testSessionSpec, 'labelFilename', myJointFusion, 'inputspec.labelBaseFilename')

            """ set test image information
            """
            print(testSession)
            testSessionSpec.inputs.t1_average = testSession['t1']
            testSessionSpec.inputs.tissueLabel = testSession['fixed_head_label']
            testSessionSpec.inputs.template_leftHemisphere = testSession['warpedAtlasLeftHemisphere']
            testSessionSpec.inputs.landmarkInACPCAlignedSpace = testSession['lmk']
            testSessionSpec.inputs.template_weights_50Lmks_wts = "/Shared/sinapse/scratch/eunyokim/src/NamicExternal/build_Mac_201501/bin/Atlas/Atlas_20131115/20141004_BCD/template_landmarks_50Lmks.fcsv"
            testSessionSpec.inputs.labelFilename='FS_wmparc.nii.gz'

            """
            DataSink
            """
            dsName = "DataSink_DS_Set{0}_{1}".format(subset_no,testSession['id'])
            DataSink = pe.Node(name=dsName, interface=nio.DataSink())
            DataSink.overwrite = master_config['ds_overwrite']
            DataSink.inputs.container = 'CV_Set{0}/{1}'.format(subset_no, testSession['id'])
            DataSink.inputs.base_directory = master_config['resultdir']

            CV_JointFusion_WF.connect(myJointFusion, 'outputspec.JointFusion_neuro2012_labelmap',
                               DataSink, 'Segmentation.@JointFusion_neuro2012_labelmap')

            subset_no=subset_no+1

    #CV_JointFusion_WF.write_graph()
    CV_JointFusion_WF.run( plugin=master_config['plugin_name'],
                    plugin_args=master_config['plugin_args'])
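The writeCVSubsetFile variants read the session list with csv.DictReader and later index the columns
'id', 't1', 'fixed_head_label', 'warpedAtlasLeftHemisphere', and 'lmk'. A hypothetical CSV with those
headers (the file paths below are invented for illustration):

import csv
import io

sample_csv = """id, t1, fixed_head_label, warpedAtlasLeftHemisphere, lmk
sub01, /data/sub01/t1_average.nii.gz, /data/sub01/fixed_head_labels.nii.gz, /data/sub01/left_hemisphere.nii.gz, /data/sub01/landmarks.fcsv
sub02, /data/sub02/t1_average.nii.gz, /data/sub02/fixed_head_labels.nii.gz, /data/sub02/left_hemisphere.nii.gz, /data/sub02/landmarks.fcsv
"""

reader = csv.DictReader(io.StringIO(sample_csv), skipinitialspace=True)
csv_data = [row for row in reader]
print([row['id'] for row in csv_data])  # ['sub01', 'sub02']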
Example #11
0
def createAndRun(sessions, environment, experiment, pipeline, cluster, useSentinal, dryRun):
    from baw_exp import OpenSubjectDatabase
    from utilities.misc import add_dict
    from collections import OrderedDict
    import os
    import sys

    from workflows.utils import run_workflow

    master_config = {}
    for configDict in [environment, experiment, pipeline, cluster]:
        master_config = add_dict(master_config, configDict)
    database = OpenSubjectDatabase(experiment['cachedir'], ['all'], environment['prefix'], experiment['dbfile'])
    database.open_connection()
    try:
        all_sessions = database.getAllSessions()
        if not set(sessions) <= set(all_sessions) and 'all' not in sessions:
            missing = set(sessions) - set(all_sessions)
            assert len(missing) == 0, "Requested sessions are missing from the database: {0}\n\n{1}".format(missing,
                                                                                                            all_sessions)
        elif 'all' in sessions:
            sessions = set(all_sessions)
        else:
            sessions = set(sessions)
        print("!=" * 40)
        print("Doing sessions {0}".format(sessions))
        print("!=" * 40)
        for session in sessions:
            _dict = OrderedDict()
            subject = database.getSubjFromSession(session)
            t1_list = database.getFilenamesByScantype(session, ['T1-15', 'T1-30'])
            if len(t1_list) == 0:
                print("ERROR: Skipping session {0} for subject {1} due to missing T1's".format(session, subject))
                print("REMOVE OR FIX BEFORE CONTINUING")
                continue
            _dict['session'] = session
            _dict['project'] = database.getProjFromSession(session)
            _dict['subject'] = subject
            _dict['T1s'] = t1_list
            _dict['T2s'] = database.getFilenamesByScantype(session, ['T2-15', 'T2-30'])
            _dict['BadT2'] = False
            if _dict['T2s'] == database.getFilenamesByScantype(session, ['T2-15']):
                print("This T2 is not going to be used for JointFusion")
                print("This T2 is not going to be used for JointFusion")
                print("This T2 is not going to be used for JointFusion")
                print("This T2 is not going to be used for JointFusion")
                print(_dict['T2s'])
                _dict['BadT2'] = True
            _dict['PDs'] = database.getFilenamesByScantype(session, ['PD-15', 'PD-30'])
            _dict['FLs'] = database.getFilenamesByScantype(session, ['FL-15', 'FL-30'])
            _dict['EMSP'] = database.getFilenamesByScantype(session, ['EMSP'])
            _dict['OTHERs'] = database.getFilenamesByScantype(session, ['OTHER-15', 'OTHER-30'])
            sentinal_file_basedir = os.path.join(
                master_config['resultdir'],
                _dict['project'],
                _dict['subject'],
                _dict['session']
            )

            sentinal_file_list = list()
            sentinal_file_list.append(os.path.join(sentinal_file_basedir))
            if 'denoise' in master_config['components']:
                # # NO SENTINAL FILE
                pass

            # # Use t1 average sentinal file if  specified.
            if 'landmark' in master_config['components']:
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "ACPCAlign",
                    "landmarkInitializer_atlas_to_subject_transform.h5"
                ))

            if 'tissue_classify' in master_config['components']:
                for tc_file in ["complete_brainlabels_seg.nii.gz", "t1_average_BRAINSABC.nii.gz"]:
                    sentinal_file_list.append(os.path.join(
                        sentinal_file_basedir,
                        "TissueClassify",
                        tc_file
                    ))

            if 'warp_atlas_to_subject' in master_config['components']:
                warp_atlas_file_list = [
                    "hncma_atlas.nii.gz",
                    "l_accumben_ProbabilityMap.nii.gz",
                    "l_caudate_ProbabilityMap.nii.gz",
                    "l_globus_ProbabilityMap.nii.gz",
                    "l_hippocampus_ProbabilityMap.nii.gz",
                    "l_putamen_ProbabilityMap.nii.gz",
                    "l_thalamus_ProbabilityMap.nii.gz",
                    "left_hemisphere_wm.nii.gz",
                    "phi.nii.gz",
                    "r_accumben_ProbabilityMap.nii.gz",
                    "r_caudate_ProbabilityMap.nii.gz",
                    "r_globus_ProbabilityMap.nii.gz",
                    "r_hippocampus_ProbabilityMap.nii.gz",
                    "r_putamen_ProbabilityMap.nii.gz",
                    "r_thalamus_ProbabilityMap.nii.gz",
                    "rho.nii.gz",
                    "right_hemisphere_wm.nii.gz",
                    "template_WMPM2_labels.nii.gz",
                    "template_headregion.nii.gz",
                    "template_leftHemisphere.nii.gz",
                    "template_nac_labels.nii.gz",
                    "template_rightHemisphere.nii.gz",
                    "template_ventricles.nii.gz",
                    "theta.nii.gz"
                ]
                for ff in warp_atlas_file_list:
                    sentinal_file_list.append(os.path.join(
                        sentinal_file_basedir,
                        "WarpedAtlas2Subject",
                        ff
                    ))

            if 'jointfusion_2015_wholebrain' in master_config['components']:
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "TissueClassify",
                    "JointFusion_HDAtlas20_2015_lobar_label.nii.gz"
                ))
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "TissueClassify",
                    "lobeVolumes_JSON.json"
                ))

            if master_config['workflow_phase'] == 'atlas-based-reference':
                atlasDirectory = os.path.join(master_config['atlascache'], 'spatialImages', 'rho.nii.gz')
                sentinal_file_list.append(atlasDirectory)
            else:
                atlasDirectory = os.path.join(master_config['previousresult'], subject, 'Atlas', 'AVG_rho.nii.gz')
                sentinal_file_list.append(atlasDirectory)
                sentinal_file_list.append(
                    os.path.join(master_config['previousresult'], subject, 'Atlas', 'AVG_template_headregion.nii.gz'))

            if os.path.exists(atlasDirectory):
                print("LOOKING FOR DIRECTORY {0}".format(atlasDirectory))
            else:
                print("MISSING REQUIRED ATLAS INPUT {0}".format(atlasDirectory))
                print("SKIPPING: {0} prerequisites missing".format(session))
                continue

            ## Use different sentinal file if segmentation specified.
            from workflows.baseline import DetermineIfSegmentationShouldBeDone

            do_BRAINSCut_Segmentation = DetermineIfSegmentationShouldBeDone(master_config)
            if do_BRAINSCut_Segmentation:
                sentinal_file_list.append(os.path.join(
                    sentinal_file_basedir,
                    "CleanedDenoisedRFSegmentations",
                    "allLabels_seg.nii.gz"
                ))

            def allPathsExists(list_of_paths):
                is_missing = False
                for ff in list_of_paths:
                    if not os.path.exists(ff):
                        is_missing = True
                        print("MISSING: {0}".format(ff))
                return not is_missing

            if useSentinal and allPathsExists(sentinal_file_list):
                print("SKIPPING: {0} exists".format(sentinal_file_list))
            else:
                print("PROCESSING INCOMPLETE: at least 1 required file does not exists")
                if dryRun == False:
                    workflow = _create_singleSession(_dict, master_config, 'Linear',
                                                     'singleSession_{0}_{1}'.format(_dict['subject'], _dict['session']))
                    print("Starting session {0}".format(session))
                    # HACK Hard-coded to SGEGraph, but --wfrun is ignored completely
                    run_workflow(workflow, plugin=master_config['plugin_name'],
                                 plugin_args=master_config['plugin_args'])
                else:
                    print("EXITING WITHOUT WORK DUE TO dryRun flag")
    except:
        raise
    finally:
        try:
            database.close_connection()
        except:
            pass
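Taken together, the variants above expect the merged master_config to expose at least the keys below.
The values shown are placeholders for illustration only; the real values come from the parsed
environment, experiment, pipeline, and cluster configuration files:

master_config_example = {
    'cachedir': '/tmp/baw_cache',           # Nipype workflow base_dir
    'resultdir': '/tmp/baw_results',        # DataSink base_directory and sentinel-file root
    'atlascache': '/tmp/baw_atlas',         # atlas inputs for the atlas-based-reference phase
    'previousresult': '/tmp/baw_previous',  # prior results used outside the atlas-based-reference phase
    'workflow_phase': 'atlas-based-reference',
    'components': ['landmark', 'tissue_classify'],
    'plugin_name': 'SGEGraph',
    'plugin_args': {},
    'ds_overwrite': False,
}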