Example #1
def check_probtrackx_complete(trackResDir,
                              mode,
                              doSeedNorm=True,
                              doSize=True,
                              logFN=None):
    # Mode: seedOnly or seedTarg
    import os
    from scai_utils import check_file

    ALL_MODES = ["seedOnly", "seedTarg"]
    assert (ALL_MODES.count(mode) == 1)

    fdt = os.path.join(trackResDir, "fdt_paths.nii.gz")
    check_file(fdt, logFN=logFN)

    if doSeedNorm:
        fdt_norm = os.path.join(trackResDir, "fdt_paths_norm.nii.gz")
        check_file(fdt_norm, logFN=logFN)

    if doSize:
        seed_size_fn = os.path.join(trackResDir, "seed_size.txt")
        check_file(seed_size_fn, logFN=logFN)

        if mode == "seedTarg":
            targ_size_fn = os.path.join(trackResDir, "targ_size.txt")
            check_file(targ_size_fn, logFN=logFN)
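A minimal usage sketch (the directory and log paths below are hypothetical):

# Hypothetical call: verify that a seed-to-target probtrackx run left
# behind fdt_paths, its seed-normalized version, and both size files.
check_probtrackx_complete("/data/dwi/S01/tracks/lh_vPMC_gm", "seedTarg",
                          doSeedNorm=True, doSize=True,
                          logFN="/data/dwi/S01/logs/check.log")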
Example #2
def get_tracula_settings(cfgFN):
    tracCfg = {}

    from scai_utils import check_file, read_text_file

    check_file(cfgFN)
    txt = read_text_file(cfgFN)

    bFoundPathList = False

    for (i0, t_line) in enumerate(txt):
        if t_line.strip().startswith("set pathlist ="):
            bFoundPathList = True
            break

    paths = []
    if bFoundPathList:
        i1 = i0
        ended = False
        while not ended:
            items = txt[i1].split(" ")

            if len(items) == 0:
                i1 += 1
                continue

            if items[-1] == "\\":
                items = items[:-1]

            ended = txt[i1].strip().endswith(")")
            i1 += 1

            for t_item in items:
                t_item = t_item.replace("(", "").replace(")", "")

                if t_item.startswith("lh.") or t_item.startswith("rh") or \
                   t_item.startswith("fmajor") or t_item.startswith("fminor") \
                   and len(t_item) > 6:
                    paths.append(t_item)

    tracCfg["paths"] = paths
    #print(txt)

    return tracCfg
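The parser targets a tcsh-style "set pathlist = ( ... )" block that may continue across lines with trailing backslashes. A usage sketch with a hypothetical config path:

# Hypothetical usage: pull the pathlist entries (e.g. "lh.cst_AS",
# "fmajor_PP") out of a TRACULA configuration file.
cfg = get_tracula_settings("/data/dwi/S01/scripts/dmrirc.S01")
print(cfg["paths"])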
Example #4
def gen_parc_masks(rois, roiNums, parcVol, outDir,
                   doVolStats=True, redo=False, logFN=None):
    import os
    import numpy as np
    from scipy.io import savemat as savemat
    from scai_utils import check_bin_path, check_file, \
                           cmd_stdout, saydo, info_log, error_log

    check_bin_path("fslstats", logFN=logFN)

    volStats = {"roiName": [], "nVoxels": [], "mm3": []}
    for (i0, roi) in enumerate(rois):
        roiNum = roiNums[i0]
            
        maskFN = os.path.join(outDir, "%s.diff.nii.gz" % roi)
        binCmd = "mri_binarize --i %s --match %d --o %s" % \
                 (parcVol, roiNum, maskFN)
            
        if not os.path.isfile(maskFN) or redo:
            saydo(binCmd, logFN=logFN)
        check_file(maskFN, logFN=logFN)

        #= Volume stats =#
        (so, se) = cmd_stdout("fslstats %s -V" % maskFN)
        assert(len(se) == 0)
            
        so = so.split(" ")
        assert(len(so) >= 2)
        
        volStats["nVoxels"].append(int(so[0]))
        volStats["mm3"].append(float(so[1]))
        volStats["roiName"].append(roi)
            
    if doVolStats:
        volStats["roiName"] = np.array(volStats["roiName"])
        volStats["nVoxels"] = np.array(volStats["nVoxels"])
        volStats["mm3"] = np.array(volStats["mm3"])
    
        volStatsFN = os.path.join(outDir, "vol_stats.mat")
        savemat(volStatsFN, volStats)
        check_file(volStatsFN, logFN=logFN)

        info_log("INFO: %s: Saved volume stats of the mask files at\n\t%s" \
                 % (gen_parc_masks.__name__, volStatsFN), logFN=logFN)
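A usage sketch (ROI names, label numbers, and paths are hypothetical; mri_binarize and fslstats must be on the PATH):

# Hypothetical call: binarize two ROIs out of a parcellation volume and
# record their voxel counts and volumes in masks/vol_stats.mat.
gen_parc_masks(["lh_vPMC", "rh_vPMC"], [1024, 2024],
               "/data/dwi/S01/aparc12.diff.nii.gz",
               "/data/dwi/S01/masks", doVolStats=True, redo=False)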
Example #5
def generate_design_matrix(isAWS, matFN, bReverse=False):
    # Input arguments:
    #     isAWS: vector of 0's and 1's
    #     matFN: output design matrix .mat file name
    #     bReverse: if AWS should be set to -1 and ANS to +1 (default: no)
    #
    # MATLAB_BIN is assumed to be a module-level constant holding the path
    # to the MATLAB executable.
    import os
    from scai_utils import check_file, saydo, error_log

    if len(isAWS) < 2:
        error_log("The design matrix cannot be generated because there are fewer than two subjects")

    if not matFN.endswith(".mat"):
        error_log("The input .mat file name %s has a wrong extension name" % matFN)

    X_line = 'X = ['
    for (i0, t_isAWS) in enumerate(isAWS):
        t_x = (float(t_isAWS) * 2.0 - 1.0)
        if bReverse:
            X_line += "%.1f; " % -t_x
        else:
            X_line += "%.1f; " % t_x

    X_line = X_line[:-2] + "];\n"
    X_line += "X = [ones(%d , 1), X];\n" % len(isAWS)
    X_line += "save('%s', 'X', '-v4');\n" % os.path.abspath(matFN)
    print(X_line) # DEBUG
    
    (t_path, t_fn) = os.path.split(os.path.abspath(matFN))
    mScriptGenX = os.path.join(t_path, 'gen_%s' % t_fn.replace(".mat", ".m"))

    mScriptGenX_f = open(mScriptGenX, "wt")
    mScriptGenX_f.write(X_line)
    mScriptGenX_f.close()
    check_file(mScriptGenX)


    matlabCmd = "%s -nosplash -nodesktop -r 'run %s; exit; '" % \
                (MATLAB_BIN, mScriptGenX)
    saydo(matlabCmd)
    check_file(matFN)
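A usage sketch (subject vector and output name invented for illustration; requires a working MATLAB pointed to by MATLAB_BIN):

# Hypothetical call: three AWS (coded 1) and two ANS (coded 0) subjects.
# Writes design.mat through a generated gen_design.m MATLAB script.
generate_design_matrix([1, 1, 1, 0, 0], "design.mat", bReverse=False)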
Example #8
def get_dpath_track_data(trackDir):
    # List of data fields to extract
    dataFlds = [
        "Count", "Volume", "Len_Min", "Len_Max", "Len_Avg", "Len_Center",
        "AD_Avg", "AD_Avg_Weight", "AD_Avg_Center", "RD_Avg", "RD_Avg_Weight",
        "RD_Avg_Center", "MD_Avg", "MD_Avg_Weight", "MD_Avg_Center", "FA_Avg",
        "FA_Avg_Weight", "FA_Avg_Center"
    ]

    dat = {}

    import os
    from scai_utils import check_dir, check_file, read_text_file, info_log

    check_dir(trackDir)

    # Read from pathstats.overall.txt
    pso = os.path.join(trackDir, "pathstats.overall.txt")
    check_file(pso)

    txt = read_text_file(pso)

    for (i0, t_line) in enumerate(txt):
        t_line = t_line.strip()

        if t_line.startswith("#"):
            continue

        t_items = t_line.split(" ")
        if len(t_items) == 2:
            if dataFlds.count(t_items[0]) == 1:
                dat[t_items[0]] = float(t_items[1])

    # Check for completeness
    for (i0, t_fld) in enumerate(dataFlds):
        if not (t_fld in dat):
            info_log("WARNING: could not find data field %s in file %s" % \
                     (t_fld, pso), bWarn=True)

    # Read from pathstats.byvoxel.txt
    vso = os.path.join(trackDir, "pathstats.byvoxel.txt")
    check_file(vso)

    txt = read_text_file(vso)

    import numpy as np
    bvDat = {}
    cols = {}

    # Determine the number of data points
    npts = 0
    lidx0 = -1
    bFoundHeader = False
    for (i0, t_line) in enumerate(txt):
        t_line = t_line.strip()
        if len(t_line) == 0:
            continue

        if not (t_line.startswith("#") or t_line.startswith("x")):
            npts += 1
            if lidx0 == -1:
                lidx0 = i0

        if t_line.startswith("x"):
            bFoundHeader = True
            t_items = t_line.split(" ")
            cols["x"] = t_items.index("x")
            cols["y"] = t_items.index("y")
            cols["z"] = t_items.index("z")
            cols["AD"] = t_items.index("AD")
            cols["RD"] = t_items.index("RD")
            cols["MD"] = t_items.index("MD")
            cols["FA"] = t_items.index("FA")
            cols["AD_Avg"] = t_items.index("AD_Avg")
            cols["RD_Avg"] = t_items.index("RD_Avg")
            cols["MD_Avg"] = t_items.index("MD_Avg")
            cols["FA_Avg"] = t_items.index("FA_Avg")

    if not bFoundHeader:
        raise Exception, "Cannot find header column in file %s" % vso

    txt = txt[lidx0:lidx0 + npts]
    #print(txt)

    # Allocate space
    bvDat["x"] = np.zeros(npts)
    bvDat["y"] = np.zeros(npts)
    bvDat["z"] = np.zeros(npts)
    bvDat["AD"] = np.zeros(npts)
    bvDat["RD"] = np.zeros(npts)
    bvDat["MD"] = np.zeros(npts)
    bvDat["FA"] = np.zeros(npts)
    bvDat["AD_Avg"] = np.zeros(npts)
    bvDat["RD_Avg"] = np.zeros(npts)
    bvDat["MD_Avg"] = np.zeros(npts)
    bvDat["FA_Avg"] = np.zeros(npts)

    keys = bvDat.keys()

    for (i0, t_line) in enumerate(txt):
        t_items = t_line.split(" ")

        for t_fld in keys:
            bvDat[t_fld][i0] = float(t_items[cols[t_fld]])

    dat["byVoxel"] = bvDat

    return dat
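A usage sketch (the dpath directory name is hypothetical but follows the TRACULA naming convention):

# Hypothetical call: dat holds scalar overall stats plus per-voxel arrays.
dat = get_dpath_track_data("/data/dwi/S01/dpath/lh.cst_AS_avg33_mni_bbr")
print(dat["FA_Avg"], dat["byVoxel"]["FA"].mean())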
Example #10
            else:
                error_log("Unrecognized subject ID prefix in %s" % subjIDs[-1])

    info_log("Found %d subjects" % len(subjIDs))
    if bAll:
        info_log("Number of AWS = %d" % subjIsAWS.count(1))
        info_log("Number of ANS = %d" % subjIsAWS.count(0))

        # Determine if between-group comparison is to be performed
        bGrpComp = not (subjIsAWS.count(1) == 0 or subjIsAWS.count(0) == 0)

    if args.bVol:
        #=== Prepare volumetric registered 4D ===#
        cvConFNs = [] # Common-volume contrast files

        check_file(VOL_TEMPLATE)
        templateSuffix = os.path.split(VOL_TEMPLATE)[1]\
            .replace(".nii.gz", "").replace(".mgz", "").replace(".nii", "")
    else:
        #=== Project the con values to the surface ===#
        commSurfConFNs = {}
        for hemi in HEMIS:
            commSurfConFNs[hemi] = []

    bNew = False
    for sID in subjIDs:
        #== Locate the con file ==#
        L1Dir = os.path.join(args.batchBase, sID, "firstlevel_%s" % modelName)
        check_dir(L1Dir)
        
        conFN = os.path.join(L1Dir, "con_%.4d.img" % args.contrastNum)
SQL_SETTINGS_FN = "mega_sql_settings"
TOTAL_SCANS_TABLE_NAME = "Total Scans"

if __name__ == "__main__":
    import os
    import sys
    import argparse
    from scai_utils import info_log, check_file

    ap = argparse.ArgumentParser(
        description="Find subjects who participated in multiple studies")

    #--- Read sql settings ---#
    sqlSettingsFN = os.path.join(os.getenv("HOME"), SQL_SETTINGS_FN)
    check_file(sqlSettingsFN)
    sf = open(sqlSettingsFN, "rt")
    settings = sf.read().replace("\r", "\n").split("\n")
    sf.close()

    sqlSettings = {
        "SQL_SERVER": settings[0],
        "DATABASE_NAME": settings[1],
        "SQL_USER": settings[2],
        "pw": settings[3]
    }

    #--- Get data from server ---#
    import MySQLdb

    db = MySQLdb.connect(host=sqlSettings["SQL_SERVER"],
                         user=sqlSettings["SQL_USER"],
                         passwd=sqlSettings["pw"],
                         db=sqlSettings["DATABASE_NAME"])

    args = ap.parse_args()

    studyIDs = args.studyIDs.split(",")
    subjIDs = args.subjIDs.split(",")

    if len(studyIDs) != len(subjIDs):
        raise Exception, "Unequal number of entries in studyIDs and subjIDs"

    if args.bVerbose:
        info_log("# of subjects queried = %d" % len(subjIDs))

    #=== Establish SQL server connection ===#
    sqlSettingsFN = os.path.join(os.getenv("HOME"), SQL_SETTINGS_FN)
    check_file(sqlSettingsFN)
    sf = open(sqlSettingsFN, "rt")
    settings = sf.read().replace("\r", "\n").split("\n")
    sf.close()

    
    sqlSettings = {"SQL_SERVER": settings[0],
                   "DATABASE_NAME": settings[1], 
                   "SQL_USER": settings[2], 
                   "pw": settings[3]}
        
    masterCodes = get_subject_master_code(sqlSettings, studyIDs, subjIDs, 
                                          args.bVerbose)


    for (i0, t_mc) in enumerate(masterCodes):
Example #13
    fsDataDir = machineSettings[hostName]["fsDataDir"]

    sDataDir = os.path.join(dataDir, sID)
    sBatchDataDir = os.path.join(batchDataDir, sID)
    
    check_dir(dataDir)
    check_dir(sDataDir)
    check_dir(sBatchDataDir, bCreate=True)
    check_dir(fsDataDir)

    boldDir = os.path.join(sDataDir, "bold")
    check_dir(boldDir)

    #=== Look for the fmri_model.mat and fmri_contrasts.mat files ===#
    modelMat = os.path.join(sDataDir, "fmri_model.mat")
    check_file(modelMat)
    info_log("modelMat = %s" % modelMat)

    contrMat = os.path.join(sDataDir, "fmri_contrasts.mat")
    check_file(contrMat)
    info_log("contrMat = %s" % contrMat)

    #=== Copy over the .mat files ===#
    saydo("cp %s %s/" % (modelMat, sBatchDataDir))
    saydo("cp %s %s/" % (contrMat, sBatchDataDir))

    #=== Load model mat and determine how many runs were collected ===#
    from scipy.io import loadmat
    
    mdl = loadmat(modelMat)
    nRuns = len(mdl["sess"][0])
Example #14
def get_parc_stats(fsDir, subjID, parcName, bVerbose=False):
    import sys, os
    import tempfile
    from scai_utils import (
        check_dir,
        check_file,
        check_bin_path,
        info_log,
        cmd_stdout,
        saydo,
        read_text_file,
        remove_empty_strings,
    )

    # === Constants ===#
    hemis = ["lh", "rh"]
    segStatsBin = "mri_segstats"

    # === Implementation ===#
    check_bin_path(segStatsBin)

    sDir = os.path.join(fsDir, subjID)
    check_dir(sDir)

    lblDir = os.path.join(sDir, "label")
    check_dir(lblDir)

    if bVerbose:
        info_log("Label directory = %s" % lblDir)

    morphInfo = {}

    rois = []
    area_mm2 = []

    for (i0, hemi) in enumerate(hemis):
        if bVerbose:
            info_log("Working on hemisphere: %s" % hemi)

        annot = os.path.join(lblDir, "%s.%s.annot" % (hemi, parcName))
        check_file(annot)

        tmpSum = tempfile.mktemp()
        cmd = "%s --annot %s %s %s --sum %s" % (segStatsBin, subjID, hemi, parcName, tmpSum)
        saydo(cmd)

        check_file(tmpSum)
        if bVerbose:
            print("Intermediate results saved at: %s" % tmpSum)

        t = read_text_file(tmpSum)

        for (i0, tline) in enumerate(t):
            tline = tline.strip()
            if len(tline) == 0:
                continue
            elif tline.startswith("#"):
                continue

            t_items = remove_empty_strings(tline.split())
            if len(t_items) != 5:
                continue

            t_roi = t_items[4]
            if t_roi.lower() == "none":
                continue

            rois.append("%s_%s" % (hemi, t_roi))
            area_mm2.append(float(t_items[3]))

        saydo("rm -rf %s" % tmpSum)
    morphInfo["rois"] = rois
    morphInfo["area_mm2"] = area_mm2

    return morphInfo
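A usage sketch (FreeSurfer subjects directory and subject ID are hypothetical):

# Hypothetical call: per-hemisphere ROI surface areas from an aparc12
# annotation of subject "S01".
mi = get_parc_stats("/data/freesurfer", "S01", "aparc12", bVerbose=True)
for (roi, a) in zip(mi["rois"], mi["area_mm2"]):
    print("%s: %.1f mm^2" % (roi, a))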
Example #15
def generate_conn_mat(roiList,
                      sc_roiList,
                      parcTypeDir,
                      parcTracksDir,
                      hemi,
                      arg_bSpeech,
                      maskType,
                      connFN,
                      logFN=None):
    import os
    import sys
    import numpy as np
    import nibabel as nb
    from scai_utils import check_file, check_dir, info_log, error_log

    bSC = sc_roiList != None  # Subcortical mode flag

    # Process cortical ROIs (this is needed for both SC and C matrix types)
    mask_shapes = []
    roiNames = []
    nzIdx = []
    bSpeech = []
    for (i0, troi) in enumerate(roiList):
        targROI = troi[0]
        maskFN = os.path.join(parcTypeDir, \
                              "%s_%s.diff.nii.gz" % (hemi, targROI))
        check_file(maskFN, logFN=logFN)

        t_img = nb.load(maskFN)
        t_img_dat = t_img.get_data()

        mask_shapes.append(np.shape(t_img_dat))

        t_img_dat = np.ndarray.flatten(t_img_dat)

        nzIdx.append(np.nonzero(t_img_dat)[0])
        roiNames.append(troi[0])
        if troi[2] == 'N':
            bSpeech.append(0)
        else:
            bSpeech.append(1)

    roiNames = np.array(roiNames)
    bSpeech = np.array(bSpeech)
    nzIdx = np.array(nzIdx)
    if arg_bSpeech:
        roiNames = roiNames[np.nonzero(bSpeech)[0]]
        nzIdx = nzIdx[np.nonzero(bSpeech)[0]]

    #print(roiNames) # DEBUG
    #print(bSpeech) # DEBUG

    # Process subcortical ROIs
    if bSC:
        parcSCDir = os.path.join(os.path.split(parcTypeDir)[0], "subcort")
        check_dir(parcSCDir)

        sc_roiNames = []
        sc_nzIdx = []
        for (i0, troi) in enumerate(sc_roiList):
            if (hemi == "lh" and troi.startswith("Left-")) or \
               (hemi == "rh" and troi.startswith("Right")):
                sc_roiNames.append(troi)

                maskFN = os.path.join(parcSCDir, \
                                      "%s.diff.nii.gz" % (troi))
                check_file(maskFN, logFN=logFN)

                t_img = nb.load(maskFN)
                t_img_dat = t_img.get_data()

                mask_shapes.append(np.shape(t_img_dat))

                t_img_dat = np.ndarray.flatten(t_img_dat)

                sc_nzIdx.append(np.nonzero(t_img_dat)[0])
                #print(sc_nzIdx[-1]) # DEBUG
                #print(maskFN) # DEBUG

        sc_roiNames = np.array(sc_roiNames)
        sc_nzIdx = np.array(sc_nzIdx)

        #print(sc_roiNames) # DEBUG

    nROIs = len(roiNames)
    assert (len(nzIdx) == nROIs)
    if len(np.unique(mask_shapes)) != 1:
        error_log("Non-unique matrix size among the mask files", logFN=logFN)
    imgShape = np.unique(mask_shapes)[0]

    if bSC:
        nROIs_sc = len(sc_roiNames)

    #=== Check the completion of seed-only probtrackx ===#
    #===     and calculate the conn matrix ===#
    if not bSC:
        d1_roiNames = roiNames
        d2_roiNames = roiNames
    else:
        d1_roiNames = sc_roiNames
        d2_roiNames = np.array(list(sc_roiNames) + list(roiNames))

    connMat = np.zeros([len(d1_roiNames), len(d2_roiNames)])

    #print(d2_roiNames) # DEBUG
    #print(len(connMat)) # DEBUG
    #print(len(connMat[0])) # DEBUG

    #print(parcTracksDir) # DEBUG

    if bSC:
        tmp_dir = os.path.split(parcTracksDir)[1]
        parcTracksSCDir = os.path.split(os.path.split(parcTracksDir)[0])[0]
        parcTracksSCDir = os.path.join(parcTracksSCDir, "tracks_sc", tmp_dir)
        #print(parcTracksSCDir) # DEBUG
        check_dir(parcTracksSCDir)

    for (i0, troi) in enumerate(d1_roiNames):
        seedROI = troi
        if not bSC:
            trackResDir = os.path.join(parcTracksDir,
                                       "%s_%s_%s" % \
                                       (hemi, seedROI, maskType))
        else:
            trackResDir = os.path.join(parcTracksSCDir, seedROI)

        check_probtrackx_complete(trackResDir,
                                  "seedOnly",
                                  doSeedNorm=True,
                                  doSize=True,
                                  logFN=logFN)

        fdt_norm = os.path.join(trackResDir, "fdt_paths_norm.nii.gz")
        t_img = nb.load(fdt_norm)
        t_img_dat = t_img.get_data()

        assert (list(np.shape(t_img_dat)) == list(imgShape))
        t_img_dat = np.ndarray.flatten(t_img_dat)

        for (i1, troi1) in enumerate(d2_roiNames):
            if not bSC:
                connMat[i0, i1] = np.mean(t_img_dat[nzIdx[i1]])
            else:
                if i1 < nROIs_sc:
                    connMat[i0, i1] = np.mean(t_img_dat[sc_nzIdx[i1]])
                else:
                    connMat[i0, i1] = np.mean(t_img_dat[nzIdx[i1 - nROIs_sc]])

    #=== Make symmetric ===#
    if not bSC:
        connMat = 0.5 * (connMat + connMat.T)

    #print(connMat) ## DEBUG

    #=== Write result .mat file ===#
    from scipy.io import savemat
    if not bSC:
        res = {"roiNames": roiNames, "connMat": connMat}
    else:
        res = {
            "d1_roiNames": d1_roiNames,
            "d2_roiNames": d2_roiNames,
            "connMat": connMat
        }

    savemat(connFN, res)
    print("connFN = " + connFN)
    check_file(connFN, logFN=logFN)

    info_log("Connectivity matrix and associated data were saved at: %s" \
             % (connFN),
             logFN=logFN)
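A usage sketch for the cortical-only mode (sc_roiList=None). The paths are hypothetical, and aROIs stands for an ROI table of [name, lobe, speech-flag, aliases] rows like the one shown in Example #22:

# Hypothetical call: build the left-hemisphere speech-ROI connectivity
# matrix from seed-only probtrackx runs on gray-matter masks.
generate_conn_mat(aROIs, None,
                  "/data/dwi/S01/annot/aparc12/gm",
                  "/data/dwi/S01/tracks/aparc12",
                  "lh", True, "gm", "/data/dwi/S01/conn_lh.mat")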
Example #17
def run_probtrackx(seedMask,
                   targMask,
                   bedpBase,
                   brainMask,
                   outDir,
                   doSeedNorm=True,
                   doSize=True,
                   doTargMaskedFDT=True,
                   ccStop=False,
                   bRedo=False,
                   logFN=None):
    #=========================================================#
    # Mode 1: from seed to targ
    #         Specify both seedMask and targMask
    #
    # Mode 2: from seed to all
    #         Specify only seedMask; set targMask=None
    #
    # Options:
    #         ccStop: Use corpus callosum stop mask
    #
    #=========================================================#
    import os
    from scai_utils import check_file, check_dir, check_bin_path, \
                           saydo, cmd_stdout, info_log, error_log
    from mri_utils import nz_voxels

    #== Get seed and targ nvox ==#
    check_file(seedMask, logFN=logFN)

    (seed_nVoxels, seed_mm3) = nz_voxels(seedMask)
    seed_nVoxels = float(seed_nVoxels)
    #assert(seed_nVoxels > 0)

    if targMask != None:
        check_file(targMask, logFN=logFN)

        (targ_nVoxels, targ_mm3) = nz_voxels(targMask)
        targ_nVoxels = float(targ_nVoxels)
        assert (targ_nVoxels > 0)

    check_bin_path("probtrackx", logFN=logFN)

    check_dir(outDir, logFN=logFN)

    if targMask != None:
        #= Prepare waypoint file =#
        wpfn = os.path.join(outDir, "waypoints.txt")
        wptext = os.path.abspath(targMask) + "\n"

        wpf = open(wpfn, "w")
        wpf.write(wptext)
        wpf.close()
        check_file(wpfn, logFN=logFN)

    cmd = 'probtrackx --mode=seedmask -x %s ' % seedMask + \
          '-s %s ' % bedpBase + \
          '-m %s ' % brainMask + \
          '-l -c 0.2 -S 2000 --steplength=0.5 ' + \
          '-P 5000 ' + \
          '--forcedir --opd --pd --dir=%s ' % outDir

    if targMask != None:
        cmd += "--stop=%s --waypoints=%s " % (targMask, wpfn)

    fdt_paths_fn = os.path.join(outDir, "fdt_paths.nii.gz")

    #== Get the size of fdt_paths.nii.gz. If the size is zero, start over. ==#

    if not os.path.isfile(fdt_paths_fn) \
            or os.path.getsize(fdt_paths_fn) <= 1 \
            or bRedo:
        saydo(cmd, logFN=logFN)

    #== Check for probtrackx completion ==#
    check_file(fdt_paths_fn, logFN=logFN)

    #== Save probtrackx command ==#
    cmd_fn = os.path.join(outDir, "command.txt")
    cmd_f = open(cmd_fn, "wt")
    cmd_f.write("%s\n" % cmd)
    cmd_f.close()
    check_file(cmd_fn, logFN=logFN)

    #== Generate seed size-normalized fdt_paths ==#
    fdt_paths_norm_fn = os.path.join(outDir, "fdt_paths_norm.nii.gz")
    check_bin_path("fslmaths", logFN=logFN)

    norm_cmd = "fslmaths -dt float %s -div %d %s -odt float" % \
               (fdt_paths_fn, seed_nVoxels, fdt_paths_norm_fn)
    if not os.path.isfile(fdt_paths_norm_fn) or bRedo:
        saydo(norm_cmd, logFN=logFN)

    check_file(fdt_paths_norm_fn, logFN=logFN)

    if doSize:
        #== Write to seed size file ==#
        seed_size_fn = os.path.join(outDir, 'seed_size.txt')
        seed_size_f = open(seed_size_fn, 'w')
        seed_size_f.write("%d %f" % (int(seed_nVoxels), seed_mm3))
        seed_size_f.close()
        check_file(seed_size_fn, logFN=logFN)

        info_log("INFO: Saved seed size data to file: %s" % seed_size_fn,
                 logFN=logFN)

        if targMask != None:
            #== Write to targ size file ==#
            targ_size_fn = os.path.join(outDir, 'targ_size.txt')
            targ_size_f = open(targ_size_fn, 'w')
            targ_size_f.write("%d %f" % (int(targ_nVoxels), targ_mm3))
            targ_size_f.close()
            check_file(targ_size_fn, logFN=logFN)

            info_log("INFO: Saved targ size data to file: %s" % targ_size_fn,
                     logFN=logFN)

    if (targMask != None) and doTargMaskedFDT:
        #== Get target masked tract density ==#
        check_bin_path("fslstats", logFN=logFN)
        (so, se) = cmd_stdout("fslstats %s -k %s -m" \
                              % (fdt_paths_norm_fn, targMask))
        assert (len(se) == 0)
        so = so.split()
        assert (len(so) >= 1)
        targ_masked_norm_fdt = float(so[0])

        targ_masked_norm_fdt_fn = \
            os.path.join(outDir, "targ_masked_norm_fdt.txt")
        tmnff = open(targ_masked_norm_fdt_fn, "wt")
        tmnff.write("%f" % targ_masked_norm_fdt)
        tmnff.close()

        check_file(targ_masked_norm_fdt_fn, logFN=logFN)

        info_log("INFO: Saved target-masked normalized FDT value tofile: %s" \
                 % targ_masked_norm_fdt_fn,
                 logFN=logFN)
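A usage sketch for Mode 1 (seed-to-target). All paths are hypothetical; bedpBase should point at the merged bedpostx output base name:

# Hypothetical call: track from lh_vPMC to lh_vSC; writes fdt_paths.nii.gz,
# the seed-normalized map, size files, and the target-masked FDT value.
run_probtrackx("/data/dwi/S01/masks/lh_vPMC.diff.nii.gz",
               "/data/dwi/S01/masks/lh_vSC.diff.nii.gz",
               "/data/dwi/S01/bedp/merged",
               "/data/dwi/S01/dmri/nodif_brain_mask.nii.gz",
               "/data/dwi/S01/tracks/lh_vPMC_to_lh_vSC")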
Example #18
                    imgFNs["lat"].append(os.path.join(CON_IMG_DIR,  
                                               "%s_%s_%s_%s.%s.lat.tiff" % \
                                               (args.grp, args.contr, t_type, 
                                                grpCon, hemi)))
                    imgFNs["med"].append(os.path.join(CON_IMG_DIR, 
                                               "%s_%s_%s_%s.%s.med.tiff" % \
                                               (args.grp, args.contr, t_type, 
                                                grpCon, hemi)))

                    if grpCon == "osgm":
                        sigImg = os.path.join(conDir, "%s_%s" % (grpCon, hemi), 
                                              grpCon, "sig.mgh")
                    elif grpCon == "bgc":
                        sigImg = os.path.join(conDir, "%s_%s" % (grpCon, hemi), 
                                              "con_01", "sig.mgh")
                    check_file(sigImg)

                    tksurferCommand = "tksurfer %s %s inflated -gray " % \
                                      (commonSurfID, hemi, ) + \
                                      "-overlay %s -fthresh %f -fmid %f " % \
                                      (sigImg, args.fThresh, args.fMid) + \
                                      "-colscalebarflag 1 "

                    # Prepare the tcl script
                    tclScript = tempfile.mktemp() + ".tcl"
                    t_imgFNs = []
                    t_imgFNs.append(imgFNs["lat"][-1])
                    t_imgFNs.append(imgFNs["med"][-1])
                    prep_tcl_script_surf(tclScript, t_imgFNs)

                    # Put in tcl script information 
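The helper prep_tcl_script_surf is not shown in this excerpt. Below is a minimal sketch of what such a helper might write, assuming the classic tksurfer tcl commands redraw, rotate_brain_y, and save_tiff; the body is a hypothetical reconstruction, not the original:

def prep_tcl_script_surf(tclFN, imgFNs):
    # Hypothetical sketch: write a tksurfer tcl script that captures the
    # lateral view, rotates the brain 180 degrees about the y-axis,
    # captures the medial view, and exits.
    f = open(tclFN, "wt")
    f.write("redraw\n")
    f.write("save_tiff %s\n" % imgFNs[0])
    f.write("rotate_brain_y 180\n")
    f.write("redraw\n")
    f.write("save_tiff %s\n" % imgFNs[1])
    f.write("exit 0\n")
    f.close()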
Example #19
def calculate_roi_tensor_measures(parcName, roiList, wmDepths,
                                  TENSOR_MEASURES, HEMIS,
                                  dmriDir, annotDir, 
                                  tensMeasMatFN, logFileName):
    #=== Load masks (gm and wm of different depths) ===#
    import os
    import nibabel as nb
    import numpy as np
    from scai_utils import check_file, check_dir, info_log, error_log
    
    mask_shapes = []
        
    nDepths = len(wmDepths)
    roiNames = []
    nzIdx = []
    for i0 in range(nDepths):
        nzIdx.append([])
            
    bSpeech = []

    parcDir = os.path.join(annotDir, parcName)
    for (i0, wmDepth) in enumerate(wmDepths):
        if wmDepth == -1:
            parcTypeDir = os.path.join(parcDir, "gm")
            info_log("Loading gray-matter masks")
        else:
            parcTypeDir = os.path.join(parcDir, "wm%dmm" % wmDepth)
            info_log("Loading white-matter masks of %d-mm depth" \
                     % wmDepth)
            
        for (i1, troi) in enumerate(roiList):
            for (i2, hemi) in enumerate(HEMIS):
                maskFN = os.path.join(parcTypeDir, \
                                      "%s_%s.diff.nii.gz" % (hemi, troi[0]))
                check_file(maskFN, logFN=logFileName)
        
                t_img = nb.load(maskFN)
                t_img_dat = t_img.get_data()

                mask_shapes.append(np.shape(t_img_dat))

                t_img_dat = np.ndarray.flatten(t_img_dat)
                nzIdx[i0].append(np.nonzero(t_img_dat)[0])

                if wmDepth == -1:
                    if troi[2] == 'N':
                        bSpeech.append(0)
                    else:
                        bSpeech.append(1)
                        
                    roiNames.append("%s_%s" % (hemi, troi[0]))

    #=== Check that the dimensions of all mask images match ===#
    if len(np.unique(mask_shapes)) != 1:
        error_log("Non-unique matrix size among the mask files",
                  logFN=logFN)

    #=== Load the dtifit_* files and extract the measures ===#
    nROIs = len(roiNames)
    assert(len(bSpeech) == nROIs)
        
    tensMeas = {}

    mask_shape = np.unique(mask_shapes)

    check_dir(dmriDir, logFN=logFileName)
    for (i0, measName) in enumerate(TENSOR_MEASURES):
        tensMeas[measName] = np.zeros([nROIs, nDepths])

        for (i1, t_depth) in enumerate(wmDepths):
            if t_depth == -1:
                info_log("Extracting tensor measure %s from gray matter" \
                         % measName)
            else:
                info_log("Extracting tensor measure %s from %d-mm deep white matter" % (measName, t_depth))
                
            assert(len(nzIdx[i1]) == nROIs)

            for (i2, troi) in enumerate(roiNames):
                measImg = os.path.join(dmriDir, \
                                       "dtifit_%s.nii.gz" % measName)
                check_file(measImg, logFN=logFileName)

                t_img = nb.load(measImg)
                if not list(mask_shape[0]) == list(np.shape(t_img)):
                    error_log("The diffusion tensor measure volume %s (%s) does not have a dimension that matches those of the mask files" \
                              % (measImg, measName))
                    
                t_img_dat = np.ndarray.flatten(t_img.get_data())

                tensMeas[measName][i2, i1] = \
                                       np.mean(t_img_dat[nzIdx[i1][i2]])

    #=== Write data to file ===#
    from scipy.io import savemat
    res = {"roiNames": roiNames,
           "bSpeech": bSpeech,
           "parcName": parcName,
           "maskShape": mask_shape,
           "wmDepths": wmDepths, 
           "tensMeas": tensMeas}

    savemat(tensMeasMatFN, res)
    check_file(tensMeasMatFN, logFN=logFileName)

    info_log("Tensor measures (%d types) and associated data were saved at: %s"
             % (len(TENSOR_MEASURES), tensMeasMatFN),
             logFN=logFileName)
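A usage sketch (subject layout hypothetical; wmDepths uses -1 for gray matter, matching the convention in the function, and aROIs is an ROI table like the one in Example #22):

# Hypothetical call: extract FA and MD from gray matter and from white
# matter at 1-3 mm depths, for both hemispheres.
calculate_roi_tensor_measures("aparc12", aROIs, [-1, 1, 2, 3],
                              ["FA", "MD"], ["lh", "rh"],
                              "/data/dwi/S01/dmri",
                              "/data/dwi/S01/annot",
                              "/data/dwi/S01/roi_tensor.mat",
                              "/data/dwi/S01/logs/roi_tensor.log")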
WM_DEPTHS = [1, 2, 3]

# List of tensor measures to be extracted in steps including roi_tensor
# Need to match dtifit_*.nii.gz file names in dmri directory (generated by dtifit)
TENSOR_MEASURES = ["FA", "MD"]

SUBJECT_MASTER_CODE_FILE = "/speechlab/2/jtour/SID/Master_Code.xls"

FNIRT_TEMPLATE = "/speechlab/software/fsl64/data/standard/MNI152_T1_1mm_brain.nii.gz"
FNIRT_CNF = "/speechlab/5/scai/RHY/rhythm-fmri/fmri_code/MNI152_T1_1mm_brain.cnf"

SUBCORT_LABEL_LIST = "ASAP_subcortical_labels.txt"

SUBCORT_TRACT_SEEDS = ["Left-Thalamus-Proper", "Left-Caudate",
                       "Left-Putamen", "Left-Pallidum",
                       "Right-Thalamus-Proper", "Right-Caudate",
                       "Right-Putamen", "Right-Pallidum"]

if __name__ == "__main__":
    from scipy.io import savemat
    from scai_utils import check_file, info_log

    matFN = __file__.replace(".py", ".mat")

    analysisSettings = {"DWI_ANALYSIS_DIR": DWI_ANALYSIS_DIR,
                        "BASE_TRACULA_CFG": BASE_TRACULA_CFG,
                        "SUBJECT_MASTER_CODE_FILE": SUBJECT_MASTER_CODE_FILE}
    savemat(matFN, analysisSettings)
    check_file(matFN)
    info_log("Saved analysis settings to mat file: %s" % matFN)
Example #22
            ['pdSTs',  'Temporal',     'S', []], \
            ['adSTs',  'Temporal',     'N', []], \
            ['pvSTs',  'Temporal',     'N', []], \
            ['avSTs',  'Temporal',     'N', []], \
            ['aMTg',   'Temporal',     'N', []], \
            ['pMTg',   'Temporal',     'N', []], \
            ['pITg',   'Temporal',     'N', []], \
            ['aCGg',    'Cingulate',    'S', ["aCG"]], \
            ['midCGg',  'Cingulate',    'N', ["midCG"]], \
            ['pCGg',    'Cingulate',    'N', ["pCG"]], \
            ['OC',     'Occipital',     'N', []], \
            ['MTOg',    'Occipital',    'N', ["MTO"]], \
            ['ITOg',    'Occipital',    'N', ["ITO"]], \
            ['Lg',     'Occipital',     'N', ["LG"]], \
            ['pPHg',    'Parahippocampal',  'N', ["pPH"]], \
            ['aPHg',    'Parahippocampal',  'N', ["aPH"]], \
            ['SCC',    'Parahippocampal',   'N', []]]

if __name__ == "__main__":
    from scipy.io import savemat
    from scai_utils import check_file

    dat = {"aROIs": [], "speechROIs": []}

    for (i0, t_line) in enumerate(aROIs):
        dat["aROIs"].append(t_line[0])
        if t_line[2] == "S":
            dat["speechROIs"].append(t_line[0])
    savemat("aparc12.mat", dat)
    check_file("aparc12.mat")