Example No. 1
def get_parc_stats(fsDir, subjID, parcName, bVerbose=False):
    import sys, os
    import tempfile
    from scai_utils import (
        check_dir,
        check_file,
        check_bin_path,
        info_log,
        cmd_stdout,
        saydo,
        read_text_file,
        remove_empty_strings,
    )

    # === Constants ===#
    hemis = ["lh", "rh"]
    segStatsBin = "mri_segstats"

    # === Implementation ===#
    check_bin_path(segStatsBin)

    sDir = os.path.join(fsDir, subjID)
    check_dir(sDir)

    lblDir = os.path.join(sDir, "label")
    check_dir(lblDir)

    if bVerbose:
        info_log("Label directory = %s" % lblDir)

    morphInfo = {}

    rois = []
    area_mm2 = []

    for (i0, hemi) in enumerate(hemis):
        if bVerbose:
            info_log("Working on hemisphere: %s" % hemi)

        annot = os.path.join(lblDir, "%s.%s.annot" % (hemi, parcName))
        check_file(annot)

        # Write per-ROI stats for this hemisphere's annotation to a
        # temporary summary file (mri_segstats --annot subject hemi parc).
        tmpSum = tempfile.mktemp()
        cmd = "%s --annot %s %s %s --sum %s" % \
              (segStatsBin, subjID, hemi, parcName, tmpSum)
        saydo(cmd)

        check_file(tmpSum)
        if bVerbose:
            print("Intermediate results saved at: %s" % tmpSum)

        t = read_text_file(tmpSum)

        for (i0, tline) in enumerate(t):
            tline = tline.strip()
            if len(tline) == 0:
                continue
            elif tline.startswith("#"):
                continue

            t_items = remove_empty_strings(tline.split())
            if len(t_items) != 5:
                continue

            t_roi = t_items[4]
            if t_roi.lower() == "none":
                continue

            rois.append("%s_%s" % (hemi, t_roi))
            area_mm2.append(float(t_items[3]))

        saydo("rm -rf %s" % tmpSum)
    morphInfo["rois"] = rois
    morphInfo["area_mm2"] = area_mm2

    return morphInfo
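
A minimal usage sketch for get_parc_stats; the SUBJECTS_DIR path, subject ID, and parcellation name below are assumptions, not values taken from the examples:

# Hypothetical call: collect per-ROI cortical areas for one subject.
stats = get_parc_stats("/usr/local/freesurfer/subjects", "S01", "aparc",
                       bVerbose=True)
for roi, area in zip(stats["rois"], stats["area_mm2"]):
    print("%s: %.1f mm^2" % (roi, area))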
Example No. 2
    ap.add_argument("--skipSubj", \
                    help="List of subjects to skip, separated by commas (e.g., S24,S40")
    #ap.add_argument("groupMethod", \
    #                help="Method for getting the group identity of the subjects (e.g., get_qdec_info,diagnosis). Must be a function name followed by a field name. The field name will be the second input argument to the function. The first input argument will be the subject ID")
    
    if len(sys.argv) == 1:
        ap.print_help()
        sys.exit(0)

    args = ap.parse_args()
    
    #=== Check the matching of environmental SUBJECTS_DIR ===#
    check_env_var("SUBJECTS_DIR", args.fsDir)

    #=== Discover subjects ===#
    check_dir(args.fsDir)
    ds = glob.glob(os.path.join(args.fsDir, args.sNameWC))
    ds.sort()

    skipIDs = []
    if args.skipSubj != None:
        skipIDs = args.skipSubj.split(",")

    sIDs = []
    grps = []
    for (i0, d) in enumerate(ds):
        t_sID = os.path.split(d)[1]

        if skipIDs.count(t_sID) > 0:
            info_log("Skipping subject: %s" % t_sID)
            continue
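
The commented-out groupMethod argument above describes a "function name followed by a field name" spec; below is a minimal sketch of how such a spec might be resolved. The resolver is hypothetical and assumes the named function is defined at module level:

# Hypothetical sketch: resolve a "function,field" spec such as
# "get_qdec_info,diagnosis" and call it as func(subjID, fieldName).
def get_group_identity(groupMethod, subjID):
    funcName, fieldName = groupMethod.split(",")
    func = globals()[funcName]  # assumes func is defined in this module
    return func(subjID, fieldName)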
Example No. 3
                    help="Subject group (e.g., AWS). If not specified, will include all subjects and perform between-group analysis")
    ap.add_argument("--fwhm-surf", dest="fwhmSurf", type=float, default=10, 
                    help="FWHM for surface-based smoothing (default=0)")
    ap.add_argument("--vol", action="store_true", dest="bVol",
                    help="Perform volumetric analysis, instead of surface-based analysis (default: false)")
    ap.add_argument("--redo", dest="bRedo", action="store_true", 
                    help="Force redo all steps (default: false)")

    if len(sys.argv) == 1:
        ap.print_help()
        sys.exit(0)
        
    args = ap.parse_args()

    #=== Input sanity check ===#
    check_dir(args.batchBase)
    check_dir(args.fsDir)

    #=== Check that SUBJECTS_DIR matches input argument ===#
    envFSDir = os.path.abspath(os.getenv("SUBJECTS_DIR"))
    if envFSDir != os.path.abspath(args.fsDir):
        error_log("Input FreeSurfer SUBJECTS_DIR does not match environmental SUBECTS_DIR")

    #=== Get the list of subjects ===#
    if args.group == None or len(args.group) == 0:
        wc = "*_*"
        bAll = True
    else:
        wc = args.group + "*"
        bAll = False
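
A plausible continuation of the wildcard logic above, mirroring the subject-discovery code in Example No. 2 (the glob-based listing is an assumption):

# Hypothetical continuation: list subject directories matching wc.
import glob, os
ds = sorted(glob.glob(os.path.join(args.fsDir, wc)))
sIDs = [os.path.split(d)[1] for d in ds]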
Example No. 4
def get_dpath_track_data(trackDir):
    # List of data fields to extract
    dataFlds = [
        "Count", "Volume", "Len_Min", "Len_Max", "Len_Avg", "Len_Center",
        "AD_Avg", "AD_Avg_Weight", "AD_Avg_Center", "RD_Avg", "RD_Avg_Weight",
        "RD_Avg_Center", "MD_Avg", "MD_Avg_Weight", "MD_Avg_Center", "FA_Avg",
        "FA_Avg_Weight", "FA_Avg_Center"
    ]

    dat = {}

    import os
    from scai_utils import check_dir, check_file, read_text_file, info_log

    check_dir(trackDir)

    # Read from pathstats.overall.txt
    pso = os.path.join(trackDir, "pathstats.overall.txt")
    check_file(pso)

    txt = read_text_file(pso)

    for (i0, t_line) in enumerate(txt):
        t_line = t_line.strip()

        if t_line.startswith("#"):
            continue

        t_items = t_line.split(" ")
        if len(t_items) == 2:
            if dataFlds.count(t_items[0]) == 1:
                dat[t_items[0]] = float(t_items[1])

    # Check for completeness
    for (i0, t_fld) in enumerate(dataFlds):
        if not (t_fld in dat):
            info_log("WARNING: could not find data field %s in file %s" % \
                     (t_fld, pso), bWarn=True)

    # Read from pathstats.byvoxel.txt
    vso = os.path.join(trackDir, "pathstats.byvoxel.txt")
    check_file(vso)

    txt = read_text_file(vso)

    import numpy as np
    bvDat = {}
    cols = {}

    # Determine the number of data points
    npts = 0
    lidx0 = -1
    bFoundHeader = False
    for (i0, t_line) in enumerate(txt):
        t_line = t_line.strip()
        if len(t_line) == 0:
            continue

        if not (t_line.startswith("#") or t_line.startswith("x")):
            npts += 1
            if lidx0 == -1:
                lidx0 = i0

        if t_line.startswith("x"):
            bFoundHeader = True
            t_items = t_line.split(" ")
            cols["x"] = t_items.index("x")
            cols["y"] = t_items.index("y")
            cols["z"] = t_items.index("z")
            cols["AD"] = t_items.index("AD")
            cols["RD"] = t_items.index("RD")
            cols["MD"] = t_items.index("MD")
            cols["FA"] = t_items.index("FA")
            cols["AD_Avg"] = t_items.index("AD_Avg")
            cols["RD_Avg"] = t_items.index("RD_Avg")
            cols["MD_Avg"] = t_items.index("MD_Avg")
            cols["FA_Avg"] = t_items.index("FA_Avg")

    if not bFoundHeader:
        raise Exception("Cannot find header column in file %s" % vso)

    txt = txt[lidx0:lidx0 + npts]
    #print(txt)

    # Allocate space
    bvDat["x"] = np.zeros(npts)
    bvDat["y"] = np.zeros(npts)
    bvDat["z"] = np.zeros(npts)
    bvDat["AD"] = np.zeros(npts)
    bvDat["RD"] = np.zeros(npts)
    bvDat["MD"] = np.zeros(npts)
    bvDat["FA"] = np.zeros(npts)
    bvDat["AD_Avg"] = np.zeros(npts)
    bvDat["RD_Avg"] = np.zeros(npts)
    bvDat["MD_Avg"] = np.zeros(npts)
    bvDat["FA_Avg"] = np.zeros(npts)

    keys = bvDat.keys()

    for (i0, t_line) in enumerate(txt):
        t_items = t_line.split(" ")

        for t_fld in keys:
            bvDat[t_fld][i0] = float(t_items[cols[t_fld]])

    dat["byVoxel"] = bvDat

    return dat
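
A minimal usage sketch for get_dpath_track_data; the tracula-style output directory below is an assumption:

# Hypothetical call: read overall and by-voxel path statistics.
dat = get_dpath_track_data("/data/S01/dpath/lh.slf.bbr")
print("Mean FA along the path: %f" % dat["FA_Avg"])  # present if listed in the file
print("By-voxel samples: %d" % len(dat["byVoxel"]["FA"]))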
Example No. 5
def calculate_roi_tensor_measures(parcName, roiList, wmDepths, TENSOR_MEASURES,
                                  HEMIS, dmriDir, annotDir, tensMeasMatFN,
                                  logFileName):
    #=== Load masks (gm and wm of different depths) ===#
    import os
    import nibabel as nb
    import numpy as np
    from scai_utils import check_file, check_dir, info_log, error_log

    mask_shapes = []

    nDepths = len(wmDepths)
    roiNames = []
    nzIdx = []
    for i0 in range(nDepths):
        nzIdx.append([])

    bSpeech = []

    parcDir = os.path.join(annotDir, parcName)
    for (i0, wmDepth) in enumerate(wmDepths):
        if wmDepth == -1:
            parcTypeDir = os.path.join(parcDir, "gm")
            info_log("Loading gray-matter masks")
        else:
            parcTypeDir = os.path.join(parcDir, "wm%dmm" % wmDepth)
            info_log("Loading white-matter masks of %d-mm depth" \
                     % wmDepth)

        for (i1, troi) in enumerate(roiList):
            for (i2, hemi) in enumerate(HEMIS):
                maskFN = os.path.join(parcTypeDir, \
                                      "%s_%s.diff.nii.gz" % (hemi, troi[0]))
                check_file(maskFN, logFN=logFileName)

                t_img = nb.load(maskFN)
                t_img_dat = t_img.get_data()

                mask_shapes.append(np.shape(t_img_dat))

                t_img_dat = np.ndarray.flatten(t_img_dat)
                nzIdx[i0].append(np.nonzero(t_img_dat)[0])

                if wmDepth == -1:
                    if troi[2] == 'N':
                        bSpeech.append(0)
                    else:
                        bSpeech.append(1)

                    roiNames.append("%s_%s" % (hemi, troi[0]))

    #=== Check that the dimensions of all mask images match ===#
    if len(set(mask_shapes)) != 1:
        error_log("Non-unique matrix size among the mask files",
                  logFN=logFileName)

    #=== Load the dtifit_* files and extract the measures ===#
    nROIs = len(roiNames)
    assert (len(bSpeech) == nROIs)

    tensMeas = {}

    # All mask shapes are identical at this point; keep the common one.
    mask_shape = mask_shapes[0]

    check_dir(dmriDir, logFN=logFileName)
    for (i0, measName) in enumerate(TENSOR_MEASURES):
        tensMeas[measName] = np.zeros([nROIs, nDepths])

        for (i1, t_depth) in enumerate(wmDepths):
            if t_depth == -1:
                info_log("Extracting tensor measure %s from gray matter" \
                         % measName)
            else:
                info_log(
                    "Extracting tensor measure %s from %d-mm deep white matter"
                    % (measName, t_depth))

            assert (len(nzIdx[i1]) == nROIs)

            for (i2, troi) in enumerate(roiNames):
                measImg = os.path.join(dmriDir, \
                                       "dtifit_%s.nii.gz" % measName)
                check_file(measImg, logFN=logFileName)

                t_img = nb.load(measImg)
                if list(mask_shape) != list(t_img.shape):
                    error_log("The diffusion tensor measure volume %s (%s) does not have a dimension that matches those of the mask files" \
                              % (measImg, measName))

                t_img_dat = np.ndarray.flatten(t_img.get_data())

                tensMeas[measName][i2, i1] = \
                                       np.mean(t_img_dat[nzIdx[i1][i2]])

    #=== Write data to file ===#
    from scipy.io import savemat
    res = {
        "roiNames": roiNames,
        "bSpeech": bSpeech,
        "parcName": parcName,
        "maskShape": mask_shape,
        "wmDepths": wmDepths,
        "tensMeas": tensMeas
    }

    savemat(tensMeasMatFN, res)
    check_file(tensMeasMatFN, logFN=logFileName)

    info_log(
        "Tensor measures (%d types) and associated data were saved at: %s" %
        (len(TENSOR_MEASURES), tensMeasMatFN),
        logFN=logFileName)
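
A minimal invocation sketch for calculate_roi_tensor_measures; the paths, ROI tuples, and depths are assumptions (each ROI entry is read as troi[0] for the name and troi[2] for the speech flag, per the code above):

# Hypothetical call: extract FA and MD from gray matter and from
# 1-mm- and 2-mm-deep white matter for two ROIs per hemisphere.
calculate_roi_tensor_measures(
    parcName="aparc12",
    roiList=[("vPMC", None, "Y"), ("aINS", None, "N")],
    wmDepths=[-1, 1, 2],             # -1 denotes gray matter
    TENSOR_MEASURES=["FA", "MD"],    # matches dtifit_FA.nii.gz, etc.
    HEMIS=["lh", "rh"],
    dmriDir="/data/S01/dmri",
    annotDir="/data/S01/annot",
    tensMeasMatFN="/data/S01/roi_tensor_measures.mat",
    logFileName=None)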
Example No. 7
def generate_conn_mat(roiList,
                      sc_roiList,
                      parcTypeDir,
                      parcTracksDir,
                      hemi,
                      arg_bSpeech,
                      maskType,
                      connFN,
                      logFN=None):
    import os
    import sys
    import numpy as np
    import nibabel as nb
    from scai_utils import check_file, check_dir, info_log, error_log

    bSC = sc_roiList != None  # Subcortical mode flag

    # Process cortical ROIs (this is needed for both SC and C matrix types)
    mask_shapes = []
    roiNames = []
    nzIdx = []
    bSpeech = []
    for (i0, troi) in enumerate(roiList):
        targROI = troi[0]
        maskFN = os.path.join(parcTypeDir, \
                              "%s_%s.diff.nii.gz" % (hemi, targROI))
        check_file(maskFN, logFN=logFN)

        t_img = nb.load(maskFN)
        t_img_dat = t_img.get_data()

        mask_shapes.append(np.shape(t_img_dat))

        t_img_dat = np.ndarray.flatten(t_img_dat)

        nzIdx.append(np.nonzero(t_img_dat)[0])
        roiNames.append(troi[0])
        if troi[2] == 'N':
            bSpeech.append(0)
        else:
            bSpeech.append(1)

    roiNames = np.array(roiNames)
    bSpeech = np.array(bSpeech)
    nzIdx = np.array(nzIdx)
    if arg_bSpeech:
        roiNames = roiNames[np.nonzero(bSpeech)[0]]
        nzIdx = nzIdx[np.nonzero(bSpeech)[0]]

    #print(roiNames) # DEBUG
    #print(bSpeech) # DEBUG

    # Process subcortical ROIs
    if bSC:
        parcSCDir = os.path.join(os.path.split(parcTypeDir)[0], "subcort")
        check_dir(parcSCDir)

        sc_roiNames = []
        sc_nzIdx = []
        for (i0, troi) in enumerate(sc_roiList):
            if (hemi == "lh" and troi.startswith("Left-")) or \
               (hemi == "rh" and troi.startswith("Right")):
                sc_roiNames.append(troi)

                maskFN = os.path.join(parcSCDir, \
                                      "%s.diff.nii.gz" % (troi))
                check_file(maskFN, logFN=logFN)

                t_img = nb.load(maskFN)
                t_img_dat = t_img.get_data()

                mask_shapes.append(np.shape(t_img_dat))

                t_img_dat = np.ndarray.flatten(t_img_dat)

                sc_nzIdx.append(np.nonzero(t_img_dat)[0])
                #print(sc_nzIdx[-1]) # DEBUG
                #print(maskFN) # DEBUG

        sc_roiNames = np.array(sc_roiNames)
        sc_nzIdx = np.array(sc_nzIdx)

        #print(sc_roiNames) # DEBUG

    nROIs = len(roiNames)
    assert (len(nzIdx) == nROIs)
    # All masks must share one image size; compare the shape tuples directly.
    if len(set(mask_shapes)) != 1:
        error_log("Non-unique matrix size among the mask files", logFN=logFN)
    imgShape = mask_shapes[0]

    if bSC:
        nROIs_sc = len(sc_roiNames)

    #=== Check the completion of seed-only probtrackx ===#
    #===     and calculate the conn matrix ===#
    if not bSC:
        d1_roiNames = roiNames
        d2_roiNames = roiNames
    else:
        d1_roiNames = sc_roiNames
        d2_roiNames = np.array(list(sc_roiNames) + list(roiNames))

    connMat = np.zeros([len(d1_roiNames), len(d2_roiNames)])

    #print(d2_roiNames) # DEBUG
    #print(len(connMat)) # DEBUG
    #print(len(connMat[0])) # DEBUG

    #print(parcTracksDir) # DEBUG

    if bSC:
        tmp_dir = os.path.split(parcTracksDir)[1]
        parcTracksSCDir = os.path.split(os.path.split(parcTracksDir)[0])[0]
        parcTracksSCDir = os.path.join(parcTracksSCDir, "tracks_sc", tmp_dir)
        #print(parcTracksSCDir) # DEBUG
        check_dir(parcTracksSCDir)

    for (i0, troi) in enumerate(d1_roiNames):
        seedROI = troi
        if not bSC:
            trackResDir = os.path.join(parcTracksDir,
                                       "%s_%s_%s" % \
                                       (hemi, seedROI, maskType))
        else:
            trackResDir = os.path.join(parcTracksSCDir, seedROI)

        check_probtrackx_complete(trackResDir,
                                  "seedOnly",
                                  doSeedNorm=True,
                                  doSize=True,
                                  logFN=logFN)

        fdt_norm = os.path.join(trackResDir, "fdt_paths_norm.nii.gz")
        t_img = nb.load(fdt_norm)
        t_img_dat = t_img.get_data()

        assert (list(np.shape(t_img_dat)) == list(imgShape))
        t_img_dat = np.ndarray.flatten(t_img_dat)

        for (i1, troi1) in enumerate(d2_roiNames):
            if not bSC:
                connMat[i0, i1] = np.mean(t_img_dat[nzIdx[i1]])
            else:
                if i1 < nROIs_sc:
                    connMat[i0, i1] = np.mean(t_img_dat[sc_nzIdx[i1]])
                else:
                    connMat[i0, i1] = np.mean(t_img_dat[nzIdx[i1 - nROIs_sc]])

    #=== Make symmetric ===#
    if not bSC:
        connMat = 0.5 * (connMat + connMat.T)

    #print(connMat) ## DEBUG

    #=== Write result .mat file ===#
    from scipy.io import savemat
    if not bSC:
        res = {"roiNames": roiNames, "connMat": connMat}
    else:
        res = {
            "d1_roiNames": d1_roiNames,
            "d2_roiNames": d2_roiNames,
            "connMat": connMat
        }

    savemat(connFN, res)
    print("connFN = " + connFN)
    check_file(connFN, logFN=logFN)

    info_log("Connectivity matrix and associated data were saved at: %s" \
             % (connFN),
             logFN=logFN)
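
A minimal invocation sketch for generate_conn_mat in its cortical-only mode; the directory layout and ROI tuples are assumptions:

# Hypothetical call: build a symmetric cortical connectivity matrix.
generate_conn_mat(
    roiList=[("vPMC", None, "Y"), ("aINS", None, "N")],
    sc_roiList=None,                 # None selects the cortical-only mode
    parcTypeDir="/data/S01/annot/aparc12/gm",
    parcTracksDir="/data/S01/annot/aparc12/tracks",
    hemi="lh",
    arg_bSpeech=False,
    maskType="gm",
    connFN="/data/S01/connmat_lh.mat")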
Example No. 8

    sID = args.subjID.replace("MRI_", "")

    if hostName not in machineSettings:
        info_log("Cannot find host name %s in machineSettings. _default_ settings will be used" % hostName)
        hostName = "_default_"
        
    #=== Set up some paths ===#
    dataDir = machineSettings[hostName]["dataDir"]
    batchDataDir = machineSettings[hostName]["batchDataDir"]

    fsDataDir = machineSettings[hostName]["fsDataDir"]

    sDataDir = os.path.join(dataDir, sID)
    sBatchDataDir = os.path.join(batchDataDir, sID)
    
    check_dir(dataDir)
    check_dir(sDataDir)
    check_dir(sBatchDataDir, bCreate=True)
    check_dir(fsDataDir)

    boldDir = os.path.join(sDataDir, "bold")
    check_dir(boldDir)

    #=== Look for the fmri_model.mat and fmri_contrasts.mat files ===#
    modelMat = os.path.join(sDataDir, "fmri_model.mat")
    check_file(modelMat)
    info_log("modelMat = %s" % modelMat)

    contrMat = os.path.join(sDataDir, "fmri_contrasts.mat")
    check_file(contrMat)
    info_log("contrMat = %s" % contrMat)
Example No. 9
def run_probtrackx(seedMask,
                   targMask,
                   bedpBase,
                   brainMask,
                   outDir,
                   doSeedNorm=True,
                   doSize=True,
                   doTargMaskedFDT=True,
                   ccStop=False,  # documented above but not applied in this excerpt
                   bRedo=False,
                   logFN=None):
    #=========================================================#
    # Mode 1: from seed to targ
    #         Specify both seedMask and targMask
    #
    # Mode 2: from seed to all
    #         Specify only seedMask; set targMask=None
    #
    # Options:
    #         ccStop: Use corpus callosum stop mask
    #
    #=========================================================#
    import os
    from scai_utils import check_file, check_dir, check_bin_path, \
                           saydo, cmd_stdout, info_log, error_log
    from mri_utils import nz_voxels

    #== Get seed and targ nvox ==#
    check_file(seedMask, logFN=logFN)

    (seed_nVoxels, seed_mm3) = nz_voxels(seedMask)
    seed_nVoxels = float(seed_nVoxels)
    #assert(seed_nVoxels > 0)

    if targMask != None:
        check_file(targMask, logFN=logFN)

        (targ_nVoxels, targ_mm3) = nz_voxels(targMask)
        targ_nVoxels = float(targ_nVoxels)
        assert (targ_nVoxels > 0)

    check_bin_path("probtrackx", logFN=logFN)

    check_dir(outDir, logFN=logFN)

    if targMask != None:
        #= Prepare waypoint file =#
        wpfn = os.path.join(outDir, "waypoints.txt")
        wptext = os.path.abspath(targMask) + "\n"

        wpf = open(wpfn, "w")
        wpf.write(wptext)
        wpf.close()
        check_file(wpfn, logFN=logFN)

    cmd = 'probtrackx --mode=seedmask -x %s ' % seedMask + \
          '-s %s ' % bedpBase + \
          '-m %s ' % brainMask + \
          '-l -c 0.2 -S 2000 --steplength=0.5 ' + \
          '-P 5000 ' + \
          '--forcedir --opd --pd --dir=%s ' % outDir

    if targMask != None:
        cmd += "--stop=%s --waypoints=%s " % (targMask, wpfn)

    fdt_paths_fn = os.path.join(outDir, "fdt_paths.nii.gz")

    #== Get the size of fdt_paths.nii.gz. If the size is zero, start over. ==#

    if not os.path.isfile(fdt_paths_fn) \
            or os.path.getsize(fdt_paths_fn) <= 1 \
            or bRedo:
        saydo(cmd, logFN=logFN)

    #== Check for probtrackx completion ==#
    check_file(fdt_paths_fn, logFN=logFN)

    #== Save probtrackx command ==#
    cmd_fn = os.path.join(outDir, "command.txt")
    cmd_f = open(cmd_fn, "wt")
    cmd_f.write("%s\n" % cmd)
    cmd_f.close()
    check_file(cmd_fn, logFN=logFN)

    #== Generate seed size-normalized fdt_paths ==#
    fdt_paths_norm_fn = os.path.join(outDir, "fdt_paths_norm.nii.gz")
    check_bin_path("fslmaths", logFN=logFN)

    norm_cmd = "fslmaths -dt float %s -div %d %s -odt float" % \
               (fdt_paths_fn, seed_nVoxels, fdt_paths_norm_fn)
    if not os.path.isfile(fdt_paths_norm_fn) or bRedo:
        saydo(norm_cmd, logFN=logFN)

    check_file(fdt_paths_norm_fn, logFN=logFN)

    if doSize:
        #== Write to seed size file ==#
        seed_size_fn = os.path.join(outDir, 'seed_size.txt')
        seed_size_f = open(seed_size_fn, 'w')
        seed_size_f.write("%d %f" % (int(seed_nVoxels), seed_mm3))
        seed_size_f.close()
        check_file(seed_size_fn, logFN=logFN)

        info_log("INFO: Saved seed size data to file: %s" % seed_size_fn,
                 logFN=logFN)

        if targMask != None:
            #== Write to targ size file ==#
            targ_size_fn = os.path.join(outDir, 'targ_size.txt')
            targ_size_f = open(targ_size_fn, 'w')
            targ_size_f.write("%d %f" % (int(targ_nVoxels), targ_mm3))
            targ_size_f.close()
            check_file(targ_size_fn, logFN=logFN)

            info_log("INFO: Saved targ size data to file: %s" % targ_size_fn,
                     logFN=logFN)

    if (targMask != None) and doTargMaskedFDT:
        #== Get target masked tract density ==#
        check_bin_path("fslstats", logFN=logFN)
        (so, se) = cmd_stdout("fslstats %s -k %s -m" \
                              % (fdt_paths_norm_fn, targMask))
        assert (len(se) == 0)
        so = so.split()
        assert (len(so) >= 1)
        targ_masked_norm_fdt = float(so[0])

        targ_masked_norm_fdt_fn = \
            os.path.join(outDir, "targ_masked_norm_fdt.txt")
        tmnff = open(targ_masked_norm_fdt_fn, "wt")
        tmnff.write("%f" % targ_masked_norm_fdt)
        tmnff.close()

        check_file(targ_masked_norm_fdt_fn, logFN=logFN)

        info_log("INFO: Saved target-masked normalized FDT value tofile: %s" \
                 % targ_masked_norm_fdt_fn,
                 logFN=logFN)
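
A minimal invocation sketch for run_probtrackx in its seed-to-all mode (Mode 2); all paths are assumptions:

# Hypothetical call: track from a single seed mask to the whole brain.
run_probtrackx(
    seedMask="/data/S01/rois/lh_vPMC.diff.nii.gz",
    targMask=None,                   # None selects Mode 2 (seed to all)
    bedpBase="/data/S01/dmri.bedpostX/merged",
    brainMask="/data/S01/dmri/nodif_brain_mask.nii.gz",
    outDir="/data/S01/tracks/lh_vPMC_all")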
Example No. 10
                    help="Functional activation (sig) mid value (e.g., 5)")

    if len(sys.argv) == 1:
        ap.print_help()
        sys.exit(1)

    args = ap.parse_args()

    if args.type == "both":
        types = ["sf10", "vol"]
    else:
        types = [args.type]

    check_bin_path(CONVERT_BIN)

    check_dir(CON_IMG_DIR)

    l2Base = os.path.abspath(L2_BATCH_BASE)
    check_dir(l2Base)
    
    stitchRes = {}
    for t_type in types:
        conDir = os.path.join(l2Base, 
                              "%s_%s_%s" % (args.grp, args.contr, t_type))
        check_dir(conDir)

        stitchRes[t_type] = {}

        if t_type.startswith("sf"):
            #=== OSGM and BGC ===#
            if args.grp == "ALL":