def format_bvals_bvecs(bvalsFN, bvecsFN, logFN=None):
    #=== Check the format of bvals and bvecs ===#
    from scai_utils import cmd_stdout, info_log, error_log
    import numpy as np

    (so, se) = cmd_stdout("wc -l %s" % bvecsFN)
    if len(se) > 0 or len(so) == 0:
        error_log("Cannot perform wc on bvecs file: %s" % bvecsFN, logFN=logFN)

    ln = int(so.split(" ")[0])
    info_log("ln = %d" % ln, logFN=logFN)

    if ln < 3:
        error_log("Unrecognized format in bvecs file: %s" % bvecsFN,
                  logFN=logFN)
    elif ln == 3:
        #== Convert bvecs file ==#
        bvecs = np.genfromtxt(bvecsFN)
        assert(len(bvecs) == ln)
        bvecs = bvecs.T
        np.savetxt(bvecsFN, bvecs, fmt="%.15f")

        bvecs = np.genfromtxt(bvecsFN)
        lbv = len(bvecs)
        assert(lbv > 3)
        info_log("INFO: Swapped rows and columns in bvecs file: %s\n"
                 % bvecsFN, logFN=logFN)

        #== Convert bvals file ==#
        bvals = np.genfromtxt(bvalsFN).T
        np.savetxt(bvalsFN, bvals, fmt="%.15f")
        info_log("INFO: Swapped rows and columns in bvals file: %s\n"
                 % bvalsFN, logFN=logFN)
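# Example usage of format_bvals_bvecs (a minimal sketch; the paths below are
# hypothetical and assume FSL-style bvals/bvecs text files):
#
#   format_bvals_bvecs("/data/S01/dwi/bvals", "/data/S01/dwi/bvecs",
#                      logFN="/data/S01/logs/format_bvals_bvecs.log")
#
# If the bvecs file has exactly 3 rows (one gradient direction per column),
# both the bvecs and bvals files are transposed in place so that each row
# corresponds to one diffusion volume.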
def generate_design_matrix(isAWS, matFN, bReverse=False):
    # Input arguments:
    #   isAWS:    vector of 0's and 1's
    #   matFN:    output design matrix .mat file name
    #   bReverse: if AWS should be set to -1 and ANS to +1 (default: no)
    # Note: saydo and MATLAB_BIN (path to the MATLAB executable) are expected
    #       to be available from the enclosing module.
    import os
    from scai_utils import check_file, error_log

    if len(isAWS) < 2:
        error_log("The design matrix cannot be generated because there are fewer than two subjects")

    if not matFN.endswith(".mat"):
        error_log("The input .mat file name %s has a wrong extension name" % matFN)

    X_line = 'X = ['
    for (i0, t_isAWS) in enumerate(isAWS):
        t_x = (float(t_isAWS) * 2.0 - 1.0)
        if bReverse:
            X_line += "%.1f; " % -t_x
        else:
            X_line += "%.1f; " % t_x
    X_line = X_line[:-2] + "];\n"

    X_line += "X = [ones(%d , 1), X];\n" % len(isAWS)
    X_line += "save('%s', 'X', '-v4');\n" % os.path.abspath(matFN)

    print(X_line)  # DEBUG

    (t_path, t_fn) = os.path.split(os.path.abspath(matFN))
    mScriptGenX = os.path.join(t_path, 'gen_%s' % t_fn.replace(".mat", ".m"))

    mScriptGenX_f = open(mScriptGenX, "wt")
    mScriptGenX_f.write(X_line)
    mScriptGenX_f.close()
    check_file(mScriptGenX)

    matlabCmd = "%s -nosplash -nodesktop -r 'run %s; exit; '" % \
                (MATLAB_BIN, mScriptGenX)
    saydo(matlabCmd)
    check_file(matFN)
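# Example usage of generate_design_matrix (a minimal sketch; the group labels
# and output path are hypothetical):
#
#   isAWS = [1, 0, 1, 1, 0]   # 1 = AWS subject, 0 = control (ANS)
#   generate_design_matrix(isAWS, "/data/analysis/design.mat")
#
# This writes a MATLAB script gen_design.m next to design.mat, runs it in
# batch mode, and produces a two-column design matrix [intercept, group]
# saved in the legacy -v4 .mat format for downstream group-level analysis.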
    savemat(matFile, intRes)
    info_log("Saved intermediate results to file: %s" % matFile)

info_log("Loaded intermediate results from file: %s" % matFile)
intRes = loadmat(matFile)

#== Verify the intermediate results ==#
bVerified = True
bVerified = bVerified and (list(intRes["sIDs"]) == sIDs)
bVerified = bVerified and (list(intRes["grps"]) == grps)

if bVerified:
    info_log("Intermediate results verified.")
else:
    error_log("Intermediate results verification failed. Use the --redo option to generate valid intermediate results.")

morphInfo = intRes["morphInfo"]
uniqueROIs = list(intRes["uniqueROIs"])
uniqueROIs = [x.strip() for x in uniqueROIs]
area_mm2 = intRes["area_mm2"][0]

#=== Test analysis ===#
import matplotlib.pyplot as plt

roi_a = "rh_vPMC"
roi_b = "rh_PT"

ridx_a = uniqueROIs.index(roi_a)
help="Force redo all steps (default: false)") if len(sys.argv) == 1: ap.print_help() sys.exit(0) args = ap.parse_args() #=== Input sanity check ===# check_dir(args.batchBase) check_dir(args.fsDir) #=== Check that SUBJECTS_DIR matches input argument ===# envFSDir = os.path.abspath(os.getenv("SUBJECTS_DIR")) if envFSDir != os.path.abspath(args.fsDir): error_log("Input FreeSurfer SUBJECTS_DIR does not match environmental SUBECTS_DIR") #=== Get the list of subjects ===# if args.group == None or len(args.group) == 0: wc = "*_*" bAll = True else: wc = args.group + "*" bAll = False; ds = glob.glob(os.path.join(args.batchBase, wc)) if len(ds) == 0: raise Exception, "Cannot find subjects that match the selection criterion in directory: %s" % args.batchBase subjIDs = [] subjIsAWS = []
def calculate_roi_tensor_measures(parcName, roiList, wmDepths,
                                  TENSOR_MEASURES, HEMIS,
                                  dmriDir, annotDir, tensMeasMatFN,
                                  logFileName):
    #=== Load masks (gm and wm of different depths) ===#
    import os
    import nibabel as nb
    import numpy as np
    from scai_utils import check_file, check_dir, info_log, error_log

    mask_shapes = []

    nDepths = len(wmDepths)
    roiNames = []
    nzIdx = []
    for i0 in range(nDepths):
        nzIdx.append([])
    bSpeech = []

    parcDir = os.path.join(annotDir, parcName)
    for (i0, wmDepth) in enumerate(wmDepths):
        if wmDepth == -1:
            parcTypeDir = os.path.join(parcDir, "gm")
            info_log("Loading gray-matter masks")
        else:
            parcTypeDir = os.path.join(parcDir, "wm%dmm" % wmDepth)
            info_log("Loading white-matter masks of %d-mm depth" % wmDepth)

        for (i1, troi) in enumerate(roiList):
            for (i2, hemi) in enumerate(HEMIS):
                maskFN = os.path.join(parcTypeDir,
                                      "%s_%s.diff.nii.gz" % (hemi, troi[0]))
                check_file(maskFN, logFN=logFileName)

                t_img = nb.load(maskFN)
                t_img_dat = t_img.get_data()

                mask_shapes.append(np.shape(t_img_dat))

                t_img_dat = np.ndarray.flatten(t_img_dat)
                nzIdx[i0].append(np.nonzero(t_img_dat)[0])

                if wmDepth == -1:
                    if troi[2] == 'N':
                        bSpeech.append(0)
                    else:
                        bSpeech.append(1)

                    roiNames.append("%s_%s" % (hemi, troi[0]))

    #=== Check that the dimensions of all mask images match ===#
    if len(np.unique(mask_shapes)) != 1:
        error_log("Non-unique matrix size among the mask files",
                  logFN=logFileName)

    #=== Load the dtifit_* files and extract the measures ===#
    nROIs = len(roiNames)
    assert(len(bSpeech) == nROIs)

    tensMeas = {}

    mask_shape = np.unique(mask_shapes)

    check_dir(dmriDir, logFN=logFileName)
    for (i0, measName) in enumerate(TENSOR_MEASURES):
        tensMeas[measName] = np.zeros([nROIs, nDepths])

        for (i1, t_depth) in enumerate(wmDepths):
            if t_depth == -1:
                info_log("Extracting tensor measure %s from gray matter"
                         % measName)
            else:
                info_log("Extracting tensor measure %s from %d-mm deep white matter"
                         % (measName, t_depth))

            assert(len(nzIdx[i1]) == nROIs)

            for (i2, troi) in enumerate(roiNames):
                measImg = os.path.join(dmriDir,
                                       "dtifit_%s.nii.gz" % measName)
                check_file(measImg, logFN=logFileName)

                t_img = nb.load(measImg)
                if not list(mask_shape[0]) == list(np.shape(t_img)):
                    error_log("The diffusion tensor measure volume %s (%s) does not have a dimension that matches those of the mask files"
                              % (measImg, measName),
                              logFN=logFileName)

                t_img_dat = np.ndarray.flatten(t_img.get_data())
                tensMeas[measName][i2, i1] = \
                    np.mean(t_img_dat[nzIdx[i1][i2]])

    #=== Write data to file ===#
    from scipy.io import savemat

    res = {"roiNames": roiNames,
           "bSpeech": bSpeech,
           "parcName": parcName,
           "maskShape": mask_shape,
           "wmDepths": wmDepths,
           "tensMeas": tensMeas}
    savemat(tensMeasMatFN, res)
    check_file(tensMeasMatFN, logFN=logFileName)

    info_log("Tensor measures (%d types) and associated data were saved at: %s"
             % (len(TENSOR_MEASURES), tensMeasMatFN),
             logFN=logFileName)
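# Example usage of calculate_roi_tensor_measures (a minimal sketch; the
# parcellation name, ROI tuples, depths, and paths below are hypothetical and
# must match the directory layout produced by the enclosing pipeline):
#
#   TENSOR_MEASURES = ["FA", "MD"]
#   HEMIS = ["lh", "rh"]
#   wmDepths = [-1, 1, 2]   # -1 = gray matter; 1-mm and 2-mm white-matter depths
#   roiList = [("vPMC", "", "Y"), ("PT", "", "Y")]   # troi[0] = name, troi[2] = speech flag
#   calculate_roi_tensor_measures("aparc12", roiList, wmDepths,
#                                 TENSOR_MEASURES, HEMIS,
#                                 "/data/S01/dmri", "/data/S01/annot",
#                                 "/data/S01/dmri/roi_tensor_measures.mat",
#                                 "/data/S01/logs/tensor_measures.log")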
    r_L1Dir = os.path.join(sID, "firstlevel_%s"
                           % machineSettings[hostName]["modelName"])
    r_func2Struct_dat = os.path.join(sID, "nii", "func2struct.bbr.dat")

    spmTViewCmd_vol = "CONTR=%s; tkmedit ${S} T1.mgz -surfs -overlay ${RHYBASE}/%s/spmT_${CONTR}.img -overlay-reg ${RHYBASE}/%s -fthresh 6 -fmid 9" \
                      % (strContr, r_L1Dir, r_func2Struct_dat)
    info_log("# Commands for viewing spmT for contrast #%d in the volume: "
             % (i0 + 1))
    info_log("\t%s" % spmTViewCmd_vol)

    info_log("# Command for viewing spmT for contrast #%d on the surface: "
             % (i0 + 1))
    for hemi in HEMIS:
        spmTViewCmd_surf = "CONTR=%s; HEMI=%s; tksurfer ${S} ${HEMI} inflated -gray -overlay ${RHYBASE}/%s/spmT_${CONTR}.img -overlay-reg ${RHYBASE}/%s -fthresh 6 -fmid 9" \
                           % (strContr, hemi, r_L1Dir, r_func2Struct_dat)
        info_log("\t%s" % spmTViewCmd_surf)

    info_log(" ")

#=== (Optional): Automatically run the batch commands ===#
if args.runBatch is not None and args.runBatch != "":
    if args.runBatch.lower() == "all":
        for (i0, cmd) in enumerate(batchCmds):
            saydo("%s; %s" % (cdCmd, cmd))
    else:
        if batchSteps.count(args.runBatch) == 0:
            error_log("Unrecognized step: %s" % args.runBatch)
        else:
            saydo("%s; %s" % (cdCmd, batchCmds[batchSteps.index(args.runBatch)]))
def generate_conn_mat(roiList, sc_roiList, parcTypeDir, parcTracksDir, hemi,
                      arg_bSpeech, maskType, connFN, logFN=None):
    import os
    import sys
    import numpy as np
    import nibabel as nb
    from scai_utils import check_file, check_dir, info_log, error_log

    bSC = sc_roiList is not None  # Subcortical mode flag

    # Process cortical ROIs (this is needed for both SC and C matrix types)
    mask_shapes = []
    roiNames = []
    nzIdx = []
    bSpeech = []
    for (i0, troi) in enumerate(roiList):
        targROI = troi[0]
        maskFN = os.path.join(parcTypeDir,
                              "%s_%s.diff.nii.gz" % (hemi, targROI))
        check_file(maskFN, logFN=logFN)

        t_img = nb.load(maskFN)
        t_img_dat = t_img.get_data()

        mask_shapes.append(np.shape(t_img_dat))

        t_img_dat = np.ndarray.flatten(t_img_dat)
        nzIdx.append(np.nonzero(t_img_dat)[0])
        roiNames.append(troi[0])

        if troi[2] == 'N':
            bSpeech.append(0)
        else:
            bSpeech.append(1)

    roiNames = np.array(roiNames)
    bSpeech = np.array(bSpeech)
    nzIdx = np.array(nzIdx)
    if arg_bSpeech:
        roiNames = roiNames[np.nonzero(bSpeech)[0]]
        nzIdx = nzIdx[np.nonzero(bSpeech)[0]]

    #print(roiNames) # DEBUG
    #print(bSpeech) # DEBUG

    # Process subcortical ROIs
    if bSC:
        parcSCDir = os.path.join(os.path.split(parcTypeDir)[0], "subcort")
        check_dir(parcSCDir)

        sc_roiNames = []
        sc_nzIdx = []
        for (i0, troi) in enumerate(sc_roiList):
            if (hemi == "lh" and troi.startswith("Left-")) or \
               (hemi == "rh" and troi.startswith("Right")):
                sc_roiNames.append(troi)

                maskFN = os.path.join(parcSCDir,
                                      "%s.diff.nii.gz" % (troi))
                check_file(maskFN, logFN=logFN)

                t_img = nb.load(maskFN)
                t_img_dat = t_img.get_data()

                mask_shapes.append(np.shape(t_img_dat))

                t_img_dat = np.ndarray.flatten(t_img_dat)
                sc_nzIdx.append(np.nonzero(t_img_dat)[0])
                #print(sc_nzIdx[-1]) # DEBUG
                #print(maskFN) # DEBUG

        sc_roiNames = np.array(sc_roiNames)
        sc_nzIdx = np.array(sc_nzIdx)
        #print(sc_roiNames) # DEBUG

    nROIs = len(roiNames)
    assert(len(nzIdx) == nROIs)

    if len(np.unique(mask_shapes)) != 1:
        error_log("Non-unique matrix size among the mask files",
                  logFN=logFN)
    imgShape = np.unique(mask_shapes)[0]

    if bSC:
        nROIs_sc = len(sc_roiNames)

    #=== Check the completion of seed-only probtrackx ===#
    #===     and calculate the conn matrix            ===#
    if not bSC:
        d1_roiNames = roiNames
        d2_roiNames = roiNames
    else:
        d1_roiNames = sc_roiNames
        d2_roiNames = np.array(list(sc_roiNames) + list(roiNames))

    connMat = np.zeros([len(d1_roiNames), len(d2_roiNames)])

    #print(d2_roiNames) # DEBUG
    #print(len(connMat)) # DEBUG
    #print(len(connMat[0])) # DEBUG
    #print(parcTracksDir) # DEBUG

    if bSC:
        tmp_dir = os.path.split(parcTracksDir)[1]
        parcTracksSCDir = os.path.split(os.path.split(parcTracksDir)[0])[0]
        parcTracksSCDir = os.path.join(parcTracksSCDir, "tracks_sc", tmp_dir)
        #print(parcTracksSCDir) # DEBUG
        check_dir(parcTracksSCDir)

    for (i0, troi) in enumerate(d1_roiNames):
        seedROI = troi
        if not bSC:
            trackResDir = os.path.join(parcTracksDir,
                                       "%s_%s_%s" % (hemi, seedROI, maskType))
        else:
            trackResDir = os.path.join(parcTracksSCDir, seedROI)

        check_probtrackx_complete(trackResDir, "seedOnly",
                                  doSeedNorm=True, doSize=True,
                                  logFN=logFN)

        fdt_norm = os.path.join(trackResDir, "fdt_paths_norm.nii.gz")
        t_img = nb.load(fdt_norm)
        t_img_dat = t_img.get_data()

        assert(list(np.shape(t_img_dat)) == list(imgShape))
        t_img_dat = np.ndarray.flatten(t_img_dat)

        for (i1, troi1) in enumerate(d2_roiNames):
            if not bSC:
                connMat[i0, i1] = np.mean(t_img_dat[nzIdx[i1]])
            else:
                if i1 < nROIs_sc:
                    connMat[i0, i1] = np.mean(t_img_dat[sc_nzIdx[i1]])
                else:
                    connMat[i0, i1] = np.mean(t_img_dat[nzIdx[i1 - nROIs_sc]])

    #=== Make symmetric ===#
    if not bSC:
        connMat = 0.5 * (connMat + connMat.T)
    #print(connMat) ## DEBUG

    #=== Write result .mat file ===#
    from scipy.io import savemat

    if not bSC:
        res = {"roiNames": roiNames,
               "connMat": connMat}
    else:
        res = {"d1_roiNames": d1_roiNames,
               "d2_roiNames": d2_roiNames,
               "connMat": connMat}

    savemat(connFN, res)
    print("connFN = " + connFN)
    check_file(connFN, logFN=logFN)

    info_log("Connectivity matrix and associated data were saved at: %s"
             % (connFN), logFN=logFN)
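# Example usage of generate_conn_mat (a minimal sketch; the directory layout
# and ROI tuples below are hypothetical and mirror the conventions used by
# the functions above):
#
#   roiList = [("vPMC", "", "Y"), ("PT", "", "Y"), ("aINS", "", "N")]
#   sc_roiList = ["Left-Thalamus-Proper", "Right-Thalamus-Proper"]
#   generate_conn_mat(roiList, sc_roiList,
#                     "/data/S01/annot/aparc12/gm",
#                     "/data/S01/dmri/tracks/gm",
#                     "lh", False, "gm",
#                     "/data/S01/dmri/connmat_sc_lh.mat",
#                     logFN="/data/S01/logs/conn_mat.log")
#
# With sc_roiList provided, the rows of connMat are the subcortical seed ROIs
# and the columns are the subcortical plus cortical targets; pass
# sc_roiList=None to get a symmetric cortical-only matrix.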