Example #1
def gen_parc_masks(rois, roiNums, parcVol, outDir,
                   doVolStats=True, redo=False, logFN=None):
    import os
    import numpy as np
    from scipy.io import savemat as savemat
    from scai_utils import check_bin_path, check_file, \
                           cmd_stdout, saydo, info_log, error_log

    check_bin_path("fslstats", logFN=logFN)

    volStats = {"roiName": [], "nVoxels": [], "mm3": []}
    for (i0, roi) in enumerate(rois):
        roiNum = roiNums[i0]
            
        maskFN = os.path.join(outDir, "%s.diff.nii.gz" % roi)
        binCmd = "mri_binarize --i %s --match %d --o %s" % \
                 (parcVol, roiNum, maskFN)
            
        if not os.path.isfile(maskFN) or redo:
            saydo(binCmd, logFN=logFN)
        check_file(maskFN, logFN=logFN)

        #= Volume stats =#
        (so, se) = cmd_stdout("fslstats %s -V" % maskFN)
        assert(len(se) == 0)
            
        so = so.split(" ")
        assert(len(so) >= 2)
        
        volStats["nVoxels"].append(int(so[0]))
        volStats["mm3"].append(float(so[1]))
        volStats["roiName"].append(roi)
            
    if doVolStats:
        volStats["roiName"] = np.array(volStats["roiName"])
        volStats["nVoxels"] = np.array(volStats["nVoxels"])
        volStats["mm3"] = np.array(volStats["mm3"])
    
        volStatsFN = os.path.join(outDir, "vol_stats.mat")
        savemat(volStatsFN, volStats)
        check_file(volStatsFN, logFN=logFN)

        info_log("INFO: %s: Saved volume stats of the mask files at\n\t%s" \
                 % (gen_parc_masks.__name__, volStatsFN), logFN=logFN)
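
#=== Example usage (hypothetical paths and ROI numbers) ===#
# A minimal sketch of calling gen_parc_masks: the paths and the ROI numbers
# (FreeSurfer color-table values) below are assumptions for illustration.
gen_parc_masks(["lh_precentral", "lh_superiortemporal"],
               [1024, 1030],
               "/data/subj01/dmri/aparc.diff.nii.gz",
               "/data/subj01/dmri/masks",
               doVolStats=True, redo=False)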
Example #3
def check_bedp_complete(bedpDir):
    import os
    from scai_utils import info_log

    #print("Checking bedp completion in directory: %s" % bedpDir)

    # Expected bedpostx output files. NOTE: this list is an assumption
    # based on typical bedpostx outputs. Removed: mean_f1samples,
    # mean_S0samples.
    expectFiles = ["merged_th1samples.nii.gz", "merged_ph1samples.nii.gz",
                   "merged_f1samples.nii.gz", "mean_th1samples.nii.gz",
                   "mean_ph1samples.nii.gz", "dyads1.nii.gz"]

    r = True
    r = r and os.path.isdir(bedpDir)

    for (i0, efn) in enumerate(expectFiles):
        r = r and os.path.isfile(os.path.join(bedpDir, efn))

        if not r:
            info_log("INFO: Cannot find expected file: %s" % efn)
            return r

    return r
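
#=== Example usage (hypothetical bedpostX directory) ===#
# A minimal sketch: check a bedpostX output directory before launching
# tractography. The path is an assumption for illustration.
if not check_bedp_complete("/data/subj01/dmri.bedpostX"):
    print("bedpostx outputs are incomplete; rerun bedpostx first")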
Example #5
def format_bvals_bvecs(bvalsFN, bvecsFN, logFN=None):
    from scai_utils import cmd_stdout, info_log, error_log
    import numpy as np

    #=== Check the format of bvals and bvecs ===#

    (so, se) = cmd_stdout("wc -l %s" % bvecsFN)
    if len(se) > 0 or len(so) == 0:
        error_log("Cannot perform wc on bvecs file: %s" % bvecsFN, logFN=logFN)

    ln = int(so.split()[0])
    info_log("ln = %d" % ln, logFN=logFN)

    if ln < 3:
        error_log("Unrecognized format in bvecs file: %s" % bvecsFN, \
                  logFN=logFN)
    elif ln == 3:
        #== Convert bvecs file ==#
        bvecs = np.genfromtxt(bvecsFN)

        assert (len(bvecs) == ln)
        bvecs = bvecs.T

        np.savetxt(bvecsFN, bvecs, fmt="%.15f")

        bvecs = np.genfromtxt(bvecsFN)
        lbv = len(bvecs)
        assert (lbv > 3)

        info_log("INFO: Swapped rows and columns in bvecs file: %s\n" \
                 % bvecsFN,
                 logFN=logFN)

        #== Convert bvals file ==#
        bvals = np.genfromtxt(bvalsFN).T
        np.savetxt(bvalsFN, bvals, fmt="%.15f")
        info_log("INFO: Swapped rows and columns in bvecs file: %s\nc" \
                 % bvecsFN,
                 logFN=logFN)
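
#=== Example usage (hypothetical paths) ===#
# A minimal sketch: both files are rewritten in place if they are in
# three-row format (one row per gradient axis), yielding one row per
# diffusion direction instead.
format_bvals_bvecs("/data/subj01/dmri/bvals",
                   "/data/subj01/dmri/bvecs")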
                    help="Verbose mode")

    if len(sys.argv) == 1:
        ap.print_help()
        sys.exit(0)

    args = ap.parse_args()

    studyIDs = args.studyIDs.split(",")
    subjIDs = args.subjIDs.split(",")

    if len(studyIDs) != len(subjIDs):
        raise Exception, "Unequal number of entries in studyIDs and subjIDs"

    if args.bVerbose:
        info_log("# of subjects queried = %d" % len(subjIDs))

    #=== Establish SQL server connection ===#
    sqlSettingsFN = os.path.join(os.getenv("HOME"), SQL_SETTINGS_FN)
    check_file(sqlSettingsFN)
    sf = open(sqlSettingsFN, "rt")
    settings = sf.read().replace("\r", "\n").split("\n")
    sf.close()

    
    sqlSettings = {"SQL_SERVER": settings[0],
                   "DATABASE_NAME": settings[1], 
                   "SQL_USER": settings[2], 
                   "pw": settings[3]}
        
    masterCodes = get_subject_master_code(sqlSettings, studyIDs, subjIDs,
                                          args.bVerbose)
WM_DEPTHS = [1, 2, 3]

# List of tensor measures to be extracted in steps including roi_tensor
# Need to match dtifit_*.nii.gz file names in dmri directory (generated by dtifit)
TENSOR_MEASURES = ["FA", "MD"]

SUBJECT_MASTER_CODE_FILE = "/speechlab/2/jtour/SID/Master_Code.xls"

FNIRT_TEMPLATE = "/speechlab/software/fsl64/data/standard/MNI152_T1_1mm_brain.nii.gz"
FNIRT_CNF = "/speechlab/5/scai/RHY/rhythm-fmri/fmri_code/MNI152_T1_1mm_brain.cnf"

SUBCORT_LABEL_LIST = "ASAP_subcortical_labels.txt"

SUBCORT_TRACT_SEEDS = ["Left-Thalamus-Proper", "Left-Caudate",
                       "Left-Putamen", "Left-Pallidum",
                       "Right-Thalamus-Proper", "Right-Caudate",
                       "Right-Putamen", "Right-Pallidum"]

if __name__ == "__main__":
    from scipy.io import savemat
    from scai_utils import check_file, info_log

    matFN = __file__.replace(".py", ".mat")

    analysisSettings = {"DWI_ANALYSIS_DIR": DWI_ANALYSIS_DIR,
                        "BASE_TRACULA_CFG": BASE_TRACULA_CFG,
                        "SUBJECT_MASTER_CODE_FILE": SUBJECT_MASTER_CODE_FILE}
    savemat(matFN, analysisSettings)
    check_file(matFN)
    info_log("Saved analysis settings to mat file: %s" % matFN)
Example #8
def get_parc_stats(fsDir, subjID, parcName, bVerbose=False):
    import sys, os
    import tempfile
    from scai_utils import (
        check_dir,
        check_file,
        check_bin_path,
        info_log,
        cmd_stdout,
        saydo,
        read_text_file,
        remove_empty_strings,
    )

    # === Constants ===#
    hemis = ["lh", "rh"]
    segStatsBin = "mri_segstats"

    # === Implementation ===#
    check_bin_path(segStatsBin)

    sDir = os.path.join(fsDir, subjID)
    check_dir(sDir)

    lblDir = os.path.join(sDir, "label")
    check_dir(sDir)

    if bVerbose:
        info_log("Label directory = %s" % lblDir)

    morphInfo = {}

    rois = []
    area_mm2 = []

    for (i0, hemi) in enumerate(hemis):
        if bVerbose:
            info_log("Working on hemisphere: %s" % hemi)

        annot = os.path.join(lblDir, "%s.%s.annot" % (hemi, parcName))
        check_file(annot)

        # Create a unique temporary file to hold the mri_segstats summary
        (tmpFd, tmpSum) = tempfile.mkstemp()
        os.close(tmpFd)
        cmd = "%s --annot %s %s %s --sum %s" % (segStatsBin, subjID, hemi, parcName, tmpSum)
        saydo(cmd)

        check_file(tmpSum)
        if bVerbose:
            print("Intermediate results saved at: %s" % tmpSum)

        t = read_text_file(tmpSum)

        for (i0, tline) in enumerate(t):
            tline = tline.strip()
            if len(tline) == 0:
                continue
            elif tline.startswith("#"):
                continue

            t_items = remove_empty_strings(tline.split())
            if len(t_items) != 5:
                continue

            t_roi = t_items[4]
            if t_roi.lower() == "none":
                continue

            rois.append("%s_%s" % (hemi, t_roi))
            area_mm2.append(float(t_items[3]))

        saydo("rm -rf %s" % tmpSum)
    morphInfo["rois"] = rois
    morphInfo["area_mm2"] = area_mm2

    return morphInfo
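
#=== Example usage (hypothetical SUBJECTS_DIR and subject ID) ===#
# A minimal sketch: collect per-ROI surface areas from the aparc
# parcellation of one FreeSurfer subject.
morph = get_parc_stats("/data/freesurfer_subjects", "subj01", "aparc")
for (roi, area) in zip(morph["rois"], morph["area_mm2"]):
    print("%s: %.1f mm^2" % (roi, area))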
Example #9
def get_dpath_track_data(trackDir):
    # List of data fields to extract
    dataFlds = [
        "Count", "Volume", "Len_Min", "Len_Max", "Len_Avg", "Len_Center",
        "AD_Avg", "AD_Avg_Weight", "AD_Avg_Center", "RD_Avg", "RD_Avg_Weight",
        "RD_Avg_Center", "MD_Avg", "MD_Avg_Weight", "MD_Avg_Center", "FA_Avg",
        "FA_Avg_Weight", "FA_Avg_Center"
    ]

    dat = {}

    import os
    from scai_utils import check_dir, check_file, read_text_file, info_log

    check_dir(trackDir)

    # Read from pathstats.overall.txt
    pso = os.path.join(trackDir, "pathstats.overall.txt")
    check_file(pso)

    txt = read_text_file(pso)

    for (i0, t_line) in enumerate(txt):
        t_line = t_line.strip()

        if t_line.startswith("#"):
            continue

        t_items = t_line.split(" ")
        if len(t_items) == 2:
            if dataFlds.count(t_items[0]) == 1:
                dat[t_items[0]] = float(t_items[1])

    # Check for completeness
    for (i0, t_fld) in enumerate(dataFlds):
        if not (t_fld in dat):
            info_log("WARNING: could not find data field %s in file %s" % \
                     (t_fld, pso), bWarn=True)

    # Read from pathstats.byvoxel.txt
    vso = os.path.join(trackDir, "pathstats.byvoxel.txt")
    check_file(vso)

    txt = read_text_file(vso)

    import numpy as np
    bvDat = {}
    cols = {}

    # Determine the number of data points
    npts = 0
    lidx0 = -1
    bFoundHeader = False
    for (i0, t_line) in enumerate(txt):
        t_line = t_line.strip()
        if len(t_line) == 0:
            continue

        if not (t_line.startswith("#") or t_line.startswith("x")):
            npts += 1
            if lidx0 == -1:
                lidx0 = i0

        if t_line.startswith("x"):
            bFoundHeader = True
            t_items = t_line.split(" ")
            cols["x"] = t_items.index("x")
            cols["y"] = t_items.index("y")
            cols["z"] = t_items.index("z")
            cols["AD"] = t_items.index("AD")
            cols["RD"] = t_items.index("RD")
            cols["MD"] = t_items.index("MD")
            cols["FA"] = t_items.index("FA")
            cols["AD_Avg"] = t_items.index("AD_Avg")
            cols["RD_Avg"] = t_items.index("RD_Avg")
            cols["MD_Avg"] = t_items.index("MD_Avg")
            cols["FA_Avg"] = t_items.index("FA_Avg")

    if not bFoundHeader:
        raise Exception, "Cannot find header column in file %s" % vso

    txt = txt[lidx0:lidx0 + npts]
    #print(txt)

    # Allocate space
    bvDat["x"] = np.zeros(npts)
    bvDat["y"] = np.zeros(npts)
    bvDat["z"] = np.zeros(npts)
    bvDat["AD"] = np.zeros(npts)
    bvDat["RD"] = np.zeros(npts)
    bvDat["MD"] = np.zeros(npts)
    bvDat["FA"] = np.zeros(npts)
    bvDat["AD_Avg"] = np.zeros(npts)
    bvDat["RD_Avg"] = np.zeros(npts)
    bvDat["MD_Avg"] = np.zeros(npts)
    bvDat["FA_Avg"] = np.zeros(npts)

    keys = bvDat.keys()

    for (i0, t_line) in enumerate(txt):
        t_items = t_line.split(" ")

        for t_fld in keys:
            bvDat[t_fld][i0] = float(t_items[cols[t_fld]])

    dat["byVoxel"] = bvDat

    return dat
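
#=== Example usage (hypothetical TRACULA dpath directory) ===#
# A minimal sketch: read the overall and by-voxel statistics of one
# reconstructed pathway. The directory path is an assumption; the scalar
# fields are present only if found in pathstats.overall.txt.
dat = get_dpath_track_data("/data/subj01/dpath/lh.slf_parietal")
print("Weighted mean FA: %f" % dat["FA_Avg_Weight"])
print("Number of voxels on the path: %d" % len(dat["byVoxel"]["FA"]))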
Example #10
    ds.sort()
    for (i0, dn) in enumerate(ds):
        subjIDs.append(os.path.split(dn)[1])
        
        #== Check the existence of the FreeSurfer directory ==#
        check_dir(os.path.join(args.fsDir, subjIDs[-1]))

        if bAll:
            if subjIDs[-1].startswith("AWS_"):
                subjIsAWS.append(1)
            elif subjIDs[-1].startswith("ANS_"):
                subjIsAWS.append(0)
            else:
                error_log("Unrecognized subject ID prefix in %s" % subjIDs[-1])

    info_log("Found %d subjects" % len(subjIDs))
    if bAll:
        info_log("Number of AWS = %d" % subjIsAWS.count(1))
        info_log("Number of ANS = %d" % subjIsAWS.count(0))

        # Determine if between-group comparison is to be performed
        bGrpComp = not (subjIsAWS.count(1) == 0 or subjIsAWS.count(0) == 0)

    if args.bVol:
        #=== Prepare volumetric registered 4D ===#
        cvConFNs = [] # Common-volume contrast files

        check_file(VOL_TEMPLATE)
        templateSuffix = os.path.split(VOL_TEMPLATE)[1]\
            .replace(".nii.gz", "").replace(".mgz", "").replace(".nii", "")
    else:
Example #11
def generate_conn_mat(roiList, sc_roiList, 
                      parcTypeDir, parcTracksDir, hemi, 
                      arg_bSpeech, maskType, connFN, logFN=None):    
    import os
    import sys
    import numpy as np
    import nibabel as nb
    from scai_utils import check_file, check_dir, info_log, error_log

    bSC = sc_roiList is not None # Subcortical mode flag

    # Process cortical ROIs (this is needed for both SC and C matrix types)
    mask_shapes = []
    roiNames = []
    nzIdx = []
    bSpeech = []
    for (i0, troi) in enumerate(roiList):
        targROI = troi[0]
        maskFN = os.path.join(parcTypeDir, \
                              "%s_%s.diff.nii.gz" % (hemi, targROI))
        check_file(maskFN, logFN=logFN)
        
        t_img = nb.load(maskFN)
        t_img_dat = t_img.get_data()

        mask_shapes.append(np.shape(t_img_dat))
            
        t_img_dat = np.ndarray.flatten(t_img_dat)
            
        nzIdx.append(np.nonzero(t_img_dat)[0])
        roiNames.append(troi[0])
        if troi[2] == 'N':
            bSpeech.append(0)
        else:
            bSpeech.append(1)

    roiNames = np.array(roiNames)
    bSpeech = np.array(bSpeech)
    nzIdx = np.array(nzIdx)
    if arg_bSpeech:
        roiNames = roiNames[np.nonzero(bSpeech)[0]]
        nzIdx = nzIdx[np.nonzero(bSpeech)[0]]

    #print(roiNames) # DEBUG
    #print(bSpeech) # DEBUG

    # Process subcortical ROIs
    if bSC:
        parcSCDir = os.path.join(os.path.split(parcTypeDir)[0], "subcort")
        check_dir(parcSCDir)
        
        sc_roiNames = []
        sc_nzIdx = []
        for (i0, troi) in enumerate(sc_roiList):
            if (hemi == "lh" and troi.startswith("Left-")) or \
               (hemi == "rh" and troi.startswith("Right")):
                sc_roiNames.append(troi)

                maskFN = os.path.join(parcSCDir, \
                                      "%s.diff.nii.gz" % (troi))
                check_file(maskFN, logFN=logFN)
                
                t_img = nb.load(maskFN)
                t_img_dat = t_img.get_data()

                mask_shapes.append(np.shape(t_img_dat))

                t_img_dat = np.ndarray.flatten(t_img_dat)

                sc_nzIdx.append(np.nonzero(t_img_dat)[0])
                #print(sc_nzIdx[-1]) # DEBUG
                #print(maskFN) # DEBUG

        sc_roiNames = np.array(sc_roiNames)
        sc_nzIdx = np.array(sc_nzIdx)
        
        #print(sc_roiNames) # DEBUG
        

    nROIs = len(roiNames)
    assert(len(nzIdx) == nROIs)
    # Compare shapes as tuples (np.unique would flatten the shape values)
    if len(set(mask_shapes)) != 1:
        error_log("Non-unique matrix size among the mask files", logFN=logFN)
    imgShape = mask_shapes[0]

    if bSC:
        nROIs_sc = len(sc_roiNames)

    #=== Check the completion of seed-only probtrackx ===#
    #===     and calculate the conn matrix ===#
    if not bSC:
        d1_roiNames = roiNames
        d2_roiNames = roiNames
    else:
        d1_roiNames = sc_roiNames
        d2_roiNames = np.array(list(sc_roiNames) + list(roiNames))

    connMat = np.zeros([len(d1_roiNames), len(d2_roiNames)])

    #print(d2_roiNames) # DEBUG
    #print(len(connMat)) # DEBUG
    #print(len(connMat[0])) # DEBUG

    #print(parcTracksDir) # DEBUG

    
    if bSC:
        tmp_dir = os.path.split(parcTracksDir)[1]
        parcTracksSCDir = os.path.split(os.path.split(parcTracksDir)[0])[0]
        parcTracksSCDir = os.path.join(parcTracksSCDir, "tracks_sc", tmp_dir)
        #print(parcTracksSCDir) # DEBUG
        check_dir(parcTracksSCDir)
        
    for (i0, troi) in enumerate(d1_roiNames):
        seedROI = troi
        if not bSC:
            trackResDir = os.path.join(parcTracksDir, 
                                       "%s_%s_%s" % \
                                       (hemi, seedROI, maskType))
        else:
            trackResDir = os.path.join(parcTracksSCDir, seedROI)
                                   
        check_probtrackx_complete(trackResDir, "seedOnly", 
                                  doSeedNorm=True, doSize=True,
                                  logFN=logFN)
        
        fdt_norm = os.path.join(trackResDir, "fdt_paths_norm.nii.gz")
        t_img = nb.load(fdt_norm)
        t_img_dat = t_img.get_data()
            
        assert(list(np.shape(t_img_dat)) == list(imgShape))
        t_img_dat = np.ndarray.flatten(t_img_dat)

        for (i1, troi1) in enumerate(d2_roiNames):
            if not bSC:
                connMat[i0, i1] = np.mean(t_img_dat[nzIdx[i1]])
            else:
                if i1 < nROIs_sc:
                    connMat[i0, i1] = np.mean(t_img_dat[sc_nzIdx[i1]])
                else:
                    connMat[i0, i1] = np.mean(t_img_dat[nzIdx[i1 - nROIs_sc]])

    #=== Make symmetric ===#
    if not bSC:
        connMat = 0.5 * (connMat + connMat.T)

    #print(connMat) ## DEBUG

    #=== Write result .mat file ===#
    from scipy.io import savemat
    if not bSC:
        res = {"roiNames": roiNames,
               "connMat": connMat}
    else:
        res = {"d1_roiNames": d1_roiNames,
               "d2_roiNames": d2_roiNames,
               "connMat": connMat}
        
    savemat(connFN, res)
    check_file(connFN, logFN=logFN)
        
    info_log("Connectivity matrix and associated data were saved at: %s" \
             % (connFN),
             logFN=logFN)
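
#=== Example usage (hypothetical ROI list and directory layout) ===#
# A minimal sketch: each cortical ROI entry is a sequence whose element 0
# is the ROI name and whose element 2 is the speech-network flag
# ('N' = non-speech). All paths are assumptions for illustration.
roiList = [("precentral", None, "Y"), ("superiortemporal", None, "N")]
generate_conn_mat(roiList, None,
                  "/data/subj01/annot/aparc/gm",
                  "/data/subj01/annot/aparc/tracks",
                  "lh", False, "gm",
                  "/data/subj01/conn_lh_aparc.mat")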
def get_subject_master_code(sqlSettings, studyIDs, subjIDs, bVerbose):
    import MySQLdb
    from scai_utils import info_log

    SQL_SERVER = sqlSettings["SQL_SERVER"]
    DATABASE_NAME = sqlSettings["DATABASE_NAME"]
    SQL_USER = sqlSettings["SQL_USER"]
    pw = sqlSettings["pw"]

    if bVerbose:
        info_log("SQL settings:")
        info_log("\tSQL_SERVER = %s" % SQL_SERVER)
        info_log("\tDATABASE_NAME = %s" % DATABASE_NAME)
        info_log("\tSQL_USER = %s" % SQL_USER)

    db = MySQLdb.connect(host=SQL_SERVER, db=DATABASE_NAME,
                         user=SQL_USER, passwd=pw)
    if bVerbose:
        info_log("Connection to database %s at %s established successfully "
                 "under user name %s." % (DATABASE_NAME, SQL_SERVER, SQL_USER))

    #=== SQL Query ===#
    cursor = db.cursor()
    cursor.execute("""SELECT `Study Code`, `Lab Code` FROM `Master Code`""")
    qRes = cursor.fetchall()

    #=== (Conditional) load the "Total Scans" table for SDAP ===#
    if studyIDs.count("SDAP") > 0:
        cursor.execute("""SELECT `Subject Data Directory Name`, `Lab Code` FROM `Total Scans`""")
        tsRes = cursor.fetchall()
        #print(tsRes)
        
    #=== SQL clean up ===#
    db.close()
    if bVerbose:
        info_log("Database connection closed.")
    
    #=== Get master codes ===#
    from get_alt_ids import get_alt_ids

    masterCodes = []
    for (i0, t_studyID) in enumerate(studyIDs):
        t_subjID = subjIDs[i0]
        
        altIDs = get_alt_ids(t_studyID, t_subjID)

        bFound = False
        for (i1, t_row) in enumerate(qRes):
            if t_row[0] is None or t_row[1] is None:
                continue

            for (i2, t_altID) in enumerate(altIDs):
                if t_row[0] == t_altID:
                    bFound = True
                    foundRow = t_row
                    break

            if bFound:
                break

        if (not bFound) and t_studyID == "SDAP": # Try "Total Scans" entries
            for (i1, t_row) in enumerate(tsRes):
                if t_row[0] is None or t_row[1] is None:
                    continue
                
                for (i2, t_altID) in enumerate(altIDs):
                    if t_row[0] == t_altID:
                        bFound = True
                        foundRow = t_row
                        break

                if bFound:
                    break
        
        if not bFound:
            masterCodes.append(-1)
            continue

        

        masterCodes.append(int(foundRow[1].replace("SL", "")))

    return masterCodes
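
#=== Example usage (hypothetical SQL settings) ===#
# A minimal sketch; in practice the settings are read from the file under
# $HOME (see the calling script above). All values are placeholders.
sqlSettings = {"SQL_SERVER": "db.example.org",
               "DATABASE_NAME": "subjects_db",
               "SQL_USER": "reader",
               "pw": "secret"}
masterCodes = get_subject_master_code(sqlSettings, ["SDAP"], ["S01"], True)
print(masterCodes)  # -1 marks subjects that could not be matched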
Example #13
def calculate_roi_tensor_measures(parcName, roiList, wmDepths, TENSOR_MEASURES,
                                  HEMIS, dmriDir, annotDir, tensMeasMatFN,
                                  logFileName):
    #=== Load masks (gm and wm of different depths) ===#
    import os
    import nibabel as nb
    import numpy as np
    from scai_utils import check_file, check_dir, info_log, error_log

    mask_shapes = []

    nDepths = len(wmDepths)
    roiNames = []
    nzIdx = []
    for i0 in range(nDepths):
        nzIdx.append([])

    bSpeech = []

    parcDir = os.path.join(annotDir, parcName)
    for (i0, wmDepth) in enumerate(wmDepths):
        if wmDepth == -1:
            parcTypeDir = os.path.join(parcDir, "gm")
            info_log("Loading gray-matter masks")
        else:
            parcTypeDir = os.path.join(parcDir, "wm%dmm" % wmDepth)
            info_log("Loading white-matter masks of %d-mm depth" \
                     % wmDepth)

        for (i1, troi) in enumerate(roiList):
            for (i2, hemi) in enumerate(HEMIS):
                maskFN = os.path.join(parcTypeDir, \
                                      "%s_%s.diff.nii.gz" % (hemi, troi[0]))
                check_file(maskFN, logFN=logFileName)

                t_img = nb.load(maskFN)
                t_img_dat = t_img.get_data()

                mask_shapes.append(np.shape(t_img_dat))

                t_img_dat = np.ndarray.flatten(t_img_dat)
                nzIdx[i0].append(np.nonzero(t_img_dat)[0])

                if wmDepth == -1:
                    if troi[2] == 'N':
                        bSpeech.append(0)
                    else:
                        bSpeech.append(1)

                    roiNames.append("%s_%s" % (hemi, troi[0]))

    #=== Check that the dimensions of all mask images match ===#
    # Compare shapes as tuples (np.unique would flatten the shape values)
    if len(set(mask_shapes)) != 1:
        error_log("Non-unique matrix size among the mask files",
                  logFN=logFileName)

    #=== Load the dtifit_* files and extract the measures ===#
    nROIs = len(roiNames)
    assert (len(bSpeech) == nROIs)

    tensMeas = {}

    mask_shape = mask_shapes[0]

    check_dir(dmriDir, logFN=logFileName)
    for (i0, measName) in enumerate(TENSOR_MEASURES):
        tensMeas[measName] = np.zeros([nROIs, nDepths])

        for (i1, t_depth) in enumerate(wmDepths):
            if t_depth == -1:
                info_log("Extracting tensor measure %s from gray matter" \
                         % measName)
            else:
                info_log(
                    "Extracting tensor measure %s from %d-mm deep white matter"
                    % (measName, t_depth))

            assert (len(nzIdx[i1]) == nROIs)

            for (i2, troi) in enumerate(roiNames):
                measImg = os.path.join(dmriDir, \
                                       "dtifit_%s.nii.gz" % measName)
                check_file(measImg, logFN=logFileName)

                t_img = nb.load(measImg)
                if list(mask_shape) != list(np.shape(t_img)):
                    error_log("The dimensions of the diffusion tensor measure volume %s (%s) do not match those of the mask files" \
                              % (measImg, measName), logFN=logFileName)

                t_img_dat = np.ndarray.flatten(t_img.get_data())

                tensMeas[measName][i2, i1] = \
                                       np.mean(t_img_dat[nzIdx[i1][i2]])

    #=== Write data to file ===#
    from scipy.io import savemat
    res = {
        "roiNames": roiNames,
        "bSpeech": bSpeech,
        "parcName": parcName,
        "maskShape": mask_shape,
        "wmDepths": wmDepths,
        "tensMeas": tensMeas
    }

    savemat(tensMeasMatFN, res)
    check_file(tensMeasMatFN, logFN=logFileName)

    info_log(
        "Tensor measures (%d types) and associated data were saved at: %s" %
        (len(TENSOR_MEASURES), tensMeasMatFN),
        logFN=logFileName)
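
#=== Example usage (hypothetical inputs) ===#
# A minimal sketch: -1 in wmDepths denotes gray matter; positive values
# are white-matter depths in mm (cf. WM_DEPTHS). Paths are assumptions.
roiList = [("precentral", None, "Y"), ("superiortemporal", None, "N")]
calculate_roi_tensor_measures("aparc", roiList, [-1, 1, 2, 3],
                              ["FA", "MD"], ["lh", "rh"],
                              "/data/subj01/dmri",
                              "/data/subj01/annot",
                              "/data/subj01/tens_meas.mat",
                              "/data/subj01/log.txt")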
if __name__ == "__main__":
    ap = argparse.ArgumentParser("Prepare for batch analysis of RHY fMRI data")
    ap.add_argument("subjID", help="Subject ID, with the prefix MRI_ included (e.g., MRI_AWS_M01)")
    ap.add_argument("--run-batch", dest="runBatch", type=str, 
                    help="Run the batch commands automatically (any of the steps or all)")
    
    if len(sys.argv) == 1:
        ap.print_help()
        sys.exit(0)

    args = ap.parse_args()

    machInfo = cmd_stdout("uname -a")
    hostName = machInfo[0].split(" ")[1]

    info_log("hostName = %s" % hostName)
    
    #=== Check the validity of subjID ===#
    if not args.subjID.startswith("MRI_"):
        info_log("subjID should usually start with the prefix MRI_. This is not the case in the entered subject ID (%s)" % (args.subjID), 
                 bWarn=True)
    
    sID = args.subjID.replace("MRI_", "")

    if hostName not in machineSettings:
        info_log("Cannot find host name %s in machineSettings. _default_ settings will be used" % hostName)
        hostName = "_default_"
        
    #=== Set up some paths ===#
    dataDir = machineSettings[hostName]["dataDir"]
    batchDataDir = machineSettings[hostName]["batchDataDir"]
Example #17
def run_probtrackx(seedMask,
                   targMask,
                   bedpBase,
                   brainMask,
                   outDir,
                   doSeedNorm=True,
                   doSize=True,
                   doTargMaskedFDT=True,
                   ccStop=False,
                   bRedo=False,
                   logFN=None):
    #=========================================================#
    # Mode 1: from seed to targ
    #         Specify both seedMask and targMask
    #
    # Mode 2: from seed to all
    #         Specify only seedMask; set targMask=None
    #
    # Options:
    #         ccStop: Use corpus callosum stop mask
    #
    #=========================================================#
    import os
    from scai_utils import check_file, check_dir, check_bin_path, \
                           saydo, cmd_stdout, info_log, error_log
    from mri_utils import nz_voxels

    #== Get seed and targ nvox ==#
    check_file(seedMask, logFN=logFN)

    (seed_nVoxels, seed_mm3) = nz_voxels(seedMask)
    seed_nVoxels = float(seed_nVoxels)
    #assert(seed_nVoxels > 0)

    if targMask is not None:
        check_file(targMask, logFN=logFN)

        (targ_nVoxels, targ_mm3) = nz_voxels(targMask)
        targ_nVoxels = float(targ_nVoxels)
        assert (targ_nVoxels > 0)

    check_bin_path("probtrackx", logFN=logFN)

    check_dir(outDir, logFN=logFN)

    if targMask is not None:
        #= Prepare waypoint file =#
        wpfn = os.path.join(outDir, "waypoints.txt")
        wptext = os.path.abspath(targMask) + "\n"

        wpf = open(wpfn, "w")
        wpf.write(wptext)
        wpf.close()
        check_file(wpfn, logFN=logFN)

    cmd = 'probtrackx --mode=seedmask -x %s ' % seedMask + \
          '-s %s ' % bedpBase + \
          '-m %s ' % brainMask + \
          '-l -c 0.2 -S 2000 --steplength=0.5 ' + \
          '-P 5000 ' + \
          '--forcedir --opd --pd --dir=%s ' % outDir

    if targMask is not None:
        cmd += "--stop=%s --waypoints=%s " % (targMask, wpfn)

    fdt_paths_fn = os.path.join(outDir, "fdt_paths.nii.gz")

    #== Get the size of fdt_paths.nii.gz. If the size is zero, start over. ==#

    if not os.path.isfile(fdt_paths_fn) \
            or os.path.getsize(fdt_paths_fn) <= 1 \
            or bRedo:
        saydo(cmd, logFN=logFN)

    #== Check for probtrackx completion ==#
    check_file(fdt_paths_fn, logFN=logFN)

    #== Save probtrackx command ==#
    cmd_fn = os.path.join(outDir, "command.txt")
    cmd_f = open(cmd_fn, "wt")
    cmd_f.write("%s\n" % cmd)
    cmd_f.close()
    check_file(cmd_fn, logFN=logFN)

    #== Generate seed size-normalized fdt_paths ==#
    fdt_paths_norm_fn = os.path.join(outDir, "fdt_paths_norm.nii.gz")
    check_bin_path("fslmaths", logFN=logFN)

    norm_cmd = "fslmaths -dt float %s -div %d %s -odt float" % \
               (fdt_paths_fn, seed_nVoxels, fdt_paths_norm_fn)
    if not os.path.isfile(fdt_paths_norm_fn) or bRedo:
        saydo(norm_cmd, logFN=logFN)

    check_file(fdt_paths_norm_fn, logFN=logFN)

    if doSize:
        #== Write to seed size file ==#
        seed_size_fn = os.path.join(outDir, 'seed_size.txt')
        seed_size_f = open(seed_size_fn, 'w')
        seed_size_f.write("%d %f" % (int(seed_nVoxels), seed_mm3))
        seed_size_f.close()
        check_file(seed_size_fn, logFN=logFN)

        info_log("INFO: Saved seed size data to file: %s" % seed_size_fn,
                 logFN=logFN)

        if targMask is not None:
            #== Write to targ size file ==#
            targ_size_fn = os.path.join(outDir, 'targ_size.txt')
            targ_size_f = open(targ_size_fn, 'w')
            targ_size_f.write("%d %f" % (int(targ_nVoxels), targ_mm3))
            targ_size_f.close()
            check_file(targ_size_fn, logFN=logFN)

            info_log("INFO: Saved targ size data to file: %s" % targ_size_fn,
                     logFN=logFN)

    if (targMask is not None) and doTargMaskedFDT:
        #== Get target masked tract density ==#
        check_bin_path("fslstats", logFN=logFN)
        (so, se) = cmd_stdout("fslstats %s -k %s -m" \
                              % (fdt_paths_norm_fn, targMask))
        assert (len(se) == 0)
        so = so.split()
        assert (len(so) >= 1)
        targ_masked_norm_fdt = float(so[0])

        targ_masked_norm_fdt_fn = \
            os.path.join(outDir, "targ_masked_norm_fdt.txt")
        tmnff = open(targ_masked_norm_fdt_fn, "wt")
        tmnff.write("%f" % targ_masked_norm_fdt)
        tmnff.close()

        check_file(targ_masked_norm_fdt_fn, logFN=logFN)

        info_log("INFO: Saved target-masked normalized FDT value tofile: %s" \
                 % targ_masked_norm_fdt_fn,
                 logFN=logFN)
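
#=== Example usage (hypothetical paths; Mode 2: seed to all) ===#
# A minimal sketch of a seed-to-all run (targMask=None). The bedpostX
# merged basename, mask, and output paths are assumptions for illustration.
run_probtrackx("/data/subj01/masks/lh_precentral.diff.nii.gz",
               None,
               "/data/subj01/dmri.bedpostX/merged",
               "/data/subj01/dmri.bedpostX/nodif_brain_mask.nii.gz",
               "/data/subj01/tracks/lh_precentral")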
Example #19
    #=== Discover subjects ===#
    check_dir(args.fsDir)
    ds = glob.glob(os.path.join(args.fsDir, args.sNameWC))
    ds.sort()

    skipIDs = []
    if args.skipSubj is not None:
        skipIDs = args.skipSubj.split(",")

    sIDs = []
    grps = []
    for (i0, d) in enumerate(ds):
        t_sID = os.path.split(d)[1]

        if skipIDs.count(t_sID) > 0:
            info_log("Skipping subject: %s" % t_sID)
            continue

        sIDs.append(t_sID)
        grps.append(get_qdec_info(t_sID, "diagnosis"))

    ugrps = list(np.unique(np.array(grps)))
    ugrps.sort()

    info_log("Discovered %s subjects" % len(sIDs))
    info_log("The subjects belong to %d groups:" % (len(ugrps)))
    for (i0, grp) in enumerate(ugrps):
        info_log("\t%s" % grp)
                 
    matFile = __file__.replace(".py", ".mat")
    from scipy.io import savemat, loadmat