import os
import argparse
import numpy as np
import nibabel as nib
from nipy.labs import as_volume_img
from scipy.stats import scoreatpercentile

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Normalize DWI intensity for group fiber tractography.')
    parser.add_argument('-i', dest='dwi_file') # 4D DWI Nifti
    dwi_path = parser.parse_args().dwi_file
    output_path, dwi_file = os.path.split(dwi_path)
    dwi_file = dwi_file.replace(".nii", "_norm.nii")
    dwi_img = as_volume_img(dwi_path)
    dwi = dwi_img.get_data()
    dwi = dwi / float(scoreatpercentile(dwi.ravel(), 99))
    dwi = np.int16(dwi * 1e4)
    nii = nib.Nifti1Image(dwi, dwi_img.affine)
    nib.save(nii, os.path.join(output_path, dwi_file))
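
# A minimal sketch of the 99th-percentile scaling used above, on synthetic data; np.percentile is
# used here as a stand-in for scipy's scoreatpercentile, and all shapes and values are illustrative.
import numpy as np

rng = np.random.default_rng(0)
dwi_toy = rng.gamma(shape=2.0, scale=300.0, size=(8, 8, 8, 5))  # toy 4D "DWI" array

p99 = np.percentile(dwi_toy, 99)      # robust scale estimate that ignores the brightest 1% of voxels
dwi_scaled = dwi_toy / p99 * 1e4      # most voxels now fall in [0, 1e4]
print(round(np.percentile(dwi_scaled, 99), 1))  # 10000.0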
    
Example #2
        graph_left_ipsi = binary dxd matrix with 1 for spatially-connected parcels on left side of brain (.mat)
"""
import os
import numpy as np
import argparse
from nipy.labs import as_volume_img
from scipy import io
from scipy.ndimage import binary_dilation

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Generate adjacency and bilateral graph.')
    parser.add_argument('-i', dest='filepath')
    filepath = parser.parse_args().filepath
    path, afile = os.path.split(filepath)
    afile = afile.replace('.nii','.mat')
    template_img = as_volume_img(filepath)
    template = template_img.get_data()
    rois = np.unique(template)
    rois = rois[1:] # Remove background
    n_rois = np.size(rois)
    n_neigh = 1 # Increase n_neigh to increase robustness to parcellation errors
    
    # Build bilateral connection matrix
    masks = template[np.newaxis, :] == np.arange(n_rois + 1)[:, np.newaxis, np.newaxis, np.newaxis] # Each row = mask of an ROI
    grid = np.mgrid[0:template.shape[0], 0:template.shape[1], 0:template.shape[2]] # Three 3D volumes of x, y, and z coords
    roi_cen = ((grid[np.newaxis, :] * masks[:, np.newaxis, :]).reshape(n_rois + 1, 3, -1).sum(axis=-1)
               / masks.reshape(n_rois + 1, -1).sum(axis=-1).astype(np.float64)[:, np.newaxis])
    roi_cen = roi_cen[1:] # Compute centroid of each ROI
    x_cen = np.mean(grid[0][template > 0]) # x centroid computed at voxel level
    roi_cen_xflip = roi_cen.copy()
    roi_cen_xflip[:, 0] = 2 * x_cen - roi_cen[:, 0] # Find bilateral ROI
    ind_neigh = np.sum((roi_cen[np.newaxis, :] - roi_cen_xflip[:, np.newaxis, :]) ** 2, axis=2).argsort(axis=1)
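
# A hedged cross-check of the vectorized centroid computation above: scipy.ndimage.center_of_mass
# returns the same voxel-space centroids when given the label volume directly. Toy labels only;
# this is not part of the original script.
import numpy as np
from scipy import ndimage

toy = np.zeros((10, 10, 10), dtype=int)
toy[1:4, 1:4, 1:4] = 1                  # parcel 1
toy[6:9, 6:9, 6:9] = 2                  # parcel 2
labels = np.unique(toy)[1:]             # drop the background label 0
cen = np.array(ndimage.center_of_mass(toy > 0, labels=toy, index=labels))
print(cen)                              # one (x, y, z) centroid per parcel, e.g. [2., 2., 2.]
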
Example #3
import os
import numpy as np
import argparse
import nibabel as nib
from nipy.labs import as_volume_img
from scipy.ndimage import binary_dilation

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Binarize tissue mask based on a user-defined threshold (e.g., -t 0.33).')
    parser.add_argument('-i', dest='filename') # Take .nii as input
    parser.add_argument('-t', dest='threshold', default=0.33)
    filepath = parser.parse_args().filename
    thresh = np.float32(parser.parse_args().threshold)
    path, afile = os.path.split(filepath)
    afile = afile.replace('.nii', '_bin.nii')
    vol_img = as_volume_img(filepath)
    vol = vol_img.get_data() > thresh
    vol = binary_dilation(vol, structure=np.ones((2, 2, 2))).astype(np.uint8)
    nii = nib.Nifti1Image(vol, vol_img.affine)
    nib.save(nii, os.path.join(path, afile))
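
# A small hedged demonstration of the dilation step above: with a 2x2x2 structuring element a
# single "on" voxel grows into a 2x2x2 block, which helps close one-voxel gaps in the binarized
# mask. Toy arrays only.
import numpy as np
from scipy.ndimage import binary_dilation

mask = np.zeros((5, 5, 5), dtype=bool)
mask[2, 2, 2] = True
dilated = binary_dilation(mask, structure=np.ones((2, 2, 2)))
print(int(mask.sum()), int(dilated.sum()))  # 1 -> 8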

    
#for sub in subList:
#    tc = io.loadmat(os.path.join(BASE_DIR, sub, "restfMRI/tc_rest_vox.mat"))
#    tc = tc["tc_vox"]
#    pca = PCA(n_components=10)    
#    pca.fit(tc.T)
#    if sub == subList[0]:
#        tc_group = preprocessing.standardize(pca.transform(tc.T))
#    else:
#        tc_group = np.hstack((tc_group, preprocessing.standardize(pca.transform(tc.T))))
#    print("Concatenating subject" + sub + "'s timecourses")
#io.savemat(os.path.join(BASE_DIR, "group/tc_rest_pca_vox.mat"), {"tc_group": tc_group})
tc_group = io.loadmat(os.path.join(BASE_DIR, "group/tc_rest_pca_vox.mat"))
tc_group = tc_group["tc_group"]

# Load Freesurfer template
brain_img = as_volume_img("/volatile/bernardng/templates/freesurfer/cort_subcort_333.nii")
brain = brain_img.get_data()
dim = np.shape(brain)
rois = np.unique(brain)
rois = rois[1:] # Remove background
n_rois = np.shape(rois)[0]
n_vox = np.sum(brain != 0) # Number of voxels within ROIs in Freesurfer template

# Spatially smoothing the group timecourses
tc_group = tc_group.reshape((dim[0], dim[1], dim[2], -1))
n_tpts = tc_group.shape[-1]
for t in np.arange(n_tpts):
    tc_group[:, :, :, t] = gaussian_filter(tc_group[:, :, :, t], sigma=5)
tc_group = tc_group.reshape((-1, n_tpts))

# Perform parcellation on smoothed PCA-ed timecourses for each ROI
    # Standardizing pca-ed time courses
    tc_std = np.std(tc_pca, axis=1)
    ind = tc_std > 1e-16
    tc_pca[ind, :] = tc_pca[ind, :] - tc_pca[ind, :].mean(axis=1)[:, np.newaxis]
    tc_pca[ind, :] = tc_pca[ind, :] / tc_pca[ind, :].std(axis=1)[:, np.newaxis]

    # Concatenate time courses across subjects
    if sub == subList[0]:
        tc_group = tc_pca
    else:
        tc_group = np.hstack((tc_group, tc_pca))
    print ("Concatenating subject" + sub + "'s timecourses")
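
# The standardization block above recurs in several of these excerpts: demean each voxel
# timecourse and divide by its standard deviation, skipping near-constant rows to avoid division
# by zero. A hedged sketch of the same logic as a helper; the function name is illustrative.
import numpy as np

def standardize_rows(tc, eps=1e-16):
    # Demean and variance-normalize each row, leaving near-constant rows untouched.
    tc = np.array(tc, dtype=float)
    std = tc.std(axis=1)
    ind = std > eps
    tc[ind] = (tc[ind] - tc[ind].mean(axis=1)[:, np.newaxis]) / std[ind][:, np.newaxis]
    return tc

print(standardize_rows([[1., 2., 3., 4.], [5., 5., 5., 5.]]))  # second row stays constant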

# Load Freesurfer template
brain_img = as_volume_img(ANAT_DIR)
brain = brain_img.get_data()
dim = np.shape(brain)
rois = np.unique(brain)
rois = rois[1:]  # Remove background
n_rois = np.shape(rois)[0]
n_vox = np.sum(brain != 0)  # Number of voxels within ROIs in Freesurfer template

# Spatially smoothing the group timecourses
tc_group = tc_group.reshape((dim[0], dim[1], dim[2], -1))
n_tpts = tc_group.shape[-1]
for t in np.arange(n_tpts):
    tc_group[:, :, :, t] = gaussian_filter(tc_group[:, :, :, t], sigma=2.5)
tc_group = tc_group.reshape((-1, n_tpts))
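
# The smoothing loop above filters each time point in turn; gaussian_filter can equivalently be
# called once on the whole 4D array with sigma=0 along the time axis. A hedged sketch on toy data
# (these truncated excerpts omit the `from scipy.ndimage import gaussian_filter` import they rely on).
import numpy as np
from scipy.ndimage import gaussian_filter

rng = np.random.default_rng(0)
toy = rng.standard_normal((6, 6, 6, 4))                   # (x, y, z, time)
looped = toy.copy()
for t in range(toy.shape[-1]):
    looped[..., t] = gaussian_filter(toy[..., t], sigma=2.5)
single = gaussian_filter(toy, sigma=(2.5, 2.5, 2.5, 0))   # sigma=0 leaves the time axis untouched
print(np.allclose(looped, single))                        # True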

# Perform parcellation on smoothed PCA-ed timecourses for each ROI
Example #6
    
    # Standardizing pca-ed time courses
    tc_std = np.std(tc_pca, axis=1)
    ind = tc_std > 1e-16
    tc_pca[ind, :] = tc_pca[ind, :] - tc_pca[ind, :].mean(axis=1)[:, np.newaxis]
    tc_pca[ind, :] = tc_pca[ind, :] / tc_pca[ind, :].std(axis=1)[:, np.newaxis]

    # Concatenate time courses across subjects
    if sub == subList[0]:
        tc_group = tc_pca
    else:
        tc_group = np.hstack((tc_group, tc_pca))
    print("Concatenating subject" + sub + "'s timecourses")

# Load Freesurfer template
brain_img = as_volume_img(ANAT_DIR)

# Run relabel_disconnected_rois.py

brain = brain_img.get_data()
dim = np.shape(brain)
rois = np.unique(brain)
rois = rois[1:] # Remove background
n_rois = np.shape(rois)[0]
n_vox = np.sum(brain != 0) # Number of voxels within ROIs in Freesurfer template

# Spatially smoothing the group timecourses
tc_group = tc_group.reshape((dim[0], dim[1], dim[2], -1))
n_tpts = tc_group.shape[-1]
for t in np.arange(n_tpts):
    tc_group[:,:,:,t] = gaussian_filter(tc_group[:,:,:,t], sigma=1.5)
Example #7
        # Add shifted motion regressors
        for i in np.array([-1, 1]):
            regressors = np.hstack(
                (regressors, np.roll(motion_confounds, i, axis=0)))
    else:
        regressors = io.loadmat(reg_file)
        regressors = regressors['SPM'][0, 0].xX[0, 0].X  # Contains task and SHIFTED versions of motion regressors
#        # Data from other than IMAGEN database might not have shifted motion regressors
#        motion_confounds = regressors[:, n_cond:]
#        for i in np.array([-1, 1]):
#            regressors = np.hstack((regressors, np.roll(motion_confounds, i, axis=0)))

    print "Extracting time series..."
    # Extract timeseries
    tc_img = as_volume_img(tc_file)
    tc = tc_img.get_data()
    tc_dim = tc.shape
    n_tpts = tc_dim[3]
    tc = tc.reshape((-1, n_tpts)).T
    tc[np.isnan(tc)] = 0  # Remove NAN's

    print "Resampling tissue masks to EPI resolution..."
    # Load tissue data objects
    gm_img = as_volume_img(gm_file)
    gm_img = gm_img.resampled_to_img(tc_img)
    gm = gm_img.get_data()
    gm[gm < 0] = 0
    wm_img = as_volume_img(wm_file)
    wm_img = wm_img.resampled_to_img(tc_img)
    wm = wm_img.get_data()
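
# The excerpt above assembles task and (shifted) motion regressors and resamples tissue maps to
# the EPI grid; the preproc call in Example #8 then removes such confounds from the voxel
# timecourses. A hedged sketch of confound removal by ordinary least squares on toy data; the
# original preproc implementation is not shown in these excerpts.
import numpy as np

rng = np.random.default_rng(0)
n_tpts, n_vox, n_conf = 100, 20, 3
confounds = rng.standard_normal((n_tpts, n_conf))
tc = rng.standard_normal((n_tpts, n_vox)) + confounds @ rng.standard_normal((n_conf, n_vox))

X = np.column_stack([confounds, np.ones(n_tpts)])         # confounds plus an intercept
beta, *_ = np.linalg.lstsq(X, tc, rcond=None)
tc_clean = tc - X @ beta                                   # residuals are orthogonal to X
print(float(np.abs(X[:, :-1].T @ tc_clean).max()))         # ~0
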
Example #8
s = 1

# Constants
BASE_DIR = "/volatile/bernardng/data/sepideh/preprocessedBN/"
TR = 2.4
subList = np.loadtxt(os.path.join(BASE_DIR, "subListTruncated"), dtype='str')
for sub in [subList[s]]:
    # Define path to data
    tc_file = os.path.join(BASE_DIR, sub, "rsfMRI", "wbold_audiospont_1.hdr")
    gm_file = os.path.join(BASE_DIR, sub, "anat", "wc1anat_" + sub + "_3T_neurospin.hdr")
    wm_file = os.path.join(BASE_DIR, sub, "anat", "wc2anat_" + sub + "_3T_neurospin.hdr")
    csf_file = os.path.join(BASE_DIR, sub, "anat", "wc3anat_" + sub + "_3T_neurospin.hdr")
    motion_confounds = np.loadtxt(os.path.join(BASE_DIR, sub, "rsfMRI", "rp_bold_audiospont_1_0001.txt"))
    template_file = "/volatile/bernardng/templates/yeo_sulci_merged/Yeo_cort_sulci_subcort_MNI152_3mm.nii"
    # Load data objects
    tc_img = as_volume_img(tc_file)
    gm_img = as_volume_img(gm_file)
    wm_img = as_volume_img(wm_file)
    csf_img = as_volume_img(csf_file)
    template_img = as_volume_img(template_file)
    # Temporal filtering and removing WM and CSF signal   
    tc, tc_roi = preproc(tc_img, gm_img, wm_img, csf_img, motion_confounds, template_img=template_img, tr=TR)
    
    # Graphical LASSO
    
#    glasso = GraphLassoCV(verbose=1, n_refinements=5, alphas=5, n_jobs=1)
#    glasso.fit(tc_roi)
#    cov_ = glasso.covariance_
#    prec_ = glasso.precision_
    
Example #9
Input:  template_file = location of parcel template (.nii)
Output: parcel_centroid = centroid of each parcel
"""
import os
import numpy as np
import argparse
from nipy.labs import as_volume_img
from scipy import io

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Compute parcel centroid.')
    parser.add_argument('-i', dest='filepath')
    filepath = parser.parse_args().filepath
    path, afile = os.path.split(filepath)
    afile = afile.replace('.nii','_coords.mat')
    template_img = as_volume_img(filepath)
    template = template_img.get_data()
    rois = np.unique(template)
    rois = rois[1:] # Remove background
    n_rois = np.size(rois)
    
    masks = template[np.newaxis, :] == np.arange(n_rois + 1)[:, np.newaxis, np.newaxis, np.newaxis] # Each row = mask of an ROI
    grid = np.mgrid[0:template.shape[0], 0:template.shape[1], 0:template.shape[2]] # Three 3D volumes of x, y, and z coords
    roi_cen = ((grid[np.newaxis, :] * masks[:, np.newaxis, :]).reshape(n_rois + 1, 3, -1).sum(axis=-1)
               / masks.reshape(n_rois + 1, -1).sum(axis=-1).astype(np.float64)[:, np.newaxis])
    roi_cen = roi_cen[1:] # Compute centroid of each ROI
    roi_cen = np.vstack((roi_cen.T, np.ones(n_rois)))
    parcel_cen_mni = np.dot(template_img.affine, roi_cen)
    parcel_cen_mni = parcel_cen_mni[0:3].T
    io.savemat(os.path.join(path, afile), {"coords": parcel_cen_mni})
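
# The script above maps voxel-space centroids to MNI millimetre coordinates by appending a row of
# ones and multiplying by the image affine. nibabel's apply_affine performs the same homogeneous
# transform; a hedged equivalence check on a toy affine and toy centroids.
import numpy as np
from nibabel.affines import apply_affine

aff = np.array([[3., 0., 0., -90.], [0., 3., 0., -126.], [0., 0., 3., -72.], [0., 0., 0., 1.]])
cen = np.array([[10., 20., 15.], [30., 25., 12.]])        # toy voxel-space centroids
manual = np.dot(aff, np.vstack((cen.T, np.ones(len(cen)))))[0:3].T
print(np.allclose(manual, apply_affine(aff, cen)))        # True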

Example #10
import os
import numpy as np
from scipy import io
from nipy.labs import as_volume_img

BASE_DIR = "/volatile/bernardng/data/imagen/"
TR = 2.2
#subList = np.loadtxt(os.path.join(BASE_DIR, "subjectLists/subjectList.txt"), dtype='str')
subList = np.loadtxt(os.path.join(BASE_DIR, "subjectLists/facesList.txt"), dtype='str')
data_type = 0 # 0 = task, # 1 = rest

# Load parcel template
template = io.loadmat(os.path.join(BASE_DIR, "group/parcel500refined.mat"))
template = template['template'].ravel()
label = np.unique(template)
brain_img = as_volume_img("/volatile/bernardng/templates/spm8/rgrey.nii") # For resample the tissue maps
for sub in subList:
    print str("Subject" + sub)
    # Load preprocessed voxel timecourses    
    if data_type:    
        tc = io.loadmat(os.path.join(BASE_DIR, sub, "restfMRI/tc_rest_vox.mat"))
    else:
#        tc = io.loadmat(os.path.join(BASE_DIR, sub, "gcafMRI/tc_task_vox.mat"))
        tc = io.loadmat(os.path.join(BASE_DIR, sub, "facesfMRI/tc_task_vox.mat"))
    tc = tc["tc_vox"]
    # Generate tissue mask
    gm_file = os.path.join(BASE_DIR, sub, "anat", "gmMask.nii")
    gm_img = as_volume_img(gm_file)
    gm_img = gm_img.resampled_to_img(brain_img)
    gm = gm_img.get_data()
    wm_file = os.path.join(BASE_DIR, sub, "anat", "wmMask.nii")
Example #11
n_parcels = 500.0

# Change path to files
TC_PATH = "tc_vox.mat"
REF_PATH = "wea000000459848s007a001.nii.gz" 
GM_PATH = "../Maps/wc1mprage000000459848.nii.gz"
WM_PATH = "../Maps/wc2mprage000000459848.nii.gz"
CSF_PATH = "../Maps/wc3mprage000000459848.nii.gz"
PARCEL_PATH = "parcel500.nii"

# Load subject timecourse
tc = io.loadmat(TC_PATH)
tc = tc["tc"].T
        
# Generate dilated GM mask
ref_img = as_volume_img(REF_PATH) # For resampling to 3mm
ref = ref_img.get_data()
gm_img = as_volume_img(GM_PATH)
gm = gm_img.get_data()
#wm_img = as_volume_img(WM_PATH)
#wm = wm_img.get_data()
#csf_img = as_volume_img(CSF_PATH)
#csf = csf_img.get_data()
#probTotal = gm + wm + csf
#dim = np.shape(probTotal)
#ind = probTotal > 0
#gm[ind] = gm[ind] / probTotal[ind]
#wm[ind] = wm[ind] / probTotal[ind]
#csf[ind] = csf[ind] / probTotal[ind]
#tissue = np.array([gm.ravel(), wm.ravel(), csf.ravel()])
#tissue_mask = tissue.argmax(axis=0) + 1
    
    # Standardizing pca-ed time courses
    tc_std = np.std(tc_pca, axis=1)
    ind = tc_std > 1e-16
    tc_pca[ind, :] = tc_pca[ind, :] - tc_pca[ind, :].mean(axis=1)[:, np.newaxis]
    tc_pca[ind, :] = tc_pca[ind, :] / tc_pca[ind, :].std(axis=1)[:, np.newaxis]

    # Concatenate time courses across subjects
    if sub == subList[0]:
        tc_group = tc_pca
    else:
        tc_group = np.hstack((tc_group, tc_pca))
    print("Concatenating subject" + sub + "'s timecourses")

# Generate dilated GM mask
ref_img = as_volume_img(REF_DIR) # For resampling to 3mm
ref = ref_img.get_data()
gm_img = as_volume_img(GM_DIR)
gm = gm_img.get_data()
wm_img = as_volume_img(WM_DIR)
wm = wm_img.get_data()
csf_img = as_volume_img(CSF_DIR)
csf = csf_img.get_data()
probTotal = gm + wm + csf
dim = np.shape(probTotal)
ind = probTotal > 0
gm[ind] = gm[ind] / probTotal[ind]
wm[ind] = wm[ind] / probTotal[ind]
csf[ind] = csf[ind] / probTotal[ind]
tissue = np.array([gm.ravel(), wm.ravel(), csf.ravel()])
tissue_mask = tissue.argmax(axis=0) + 1
Example #13
# Concatenating PCA-ed voxel timecourses across subjects
for sub in subList:
    tc = io.loadmat(os.path.join(BASE_DIR, sub, "restfMRI/tc_rest_vox.mat"))
    tc = tc["tc_vox"]
    pca = PCA(n_components=10)
    pca.fit(tc.T)
    if sub == subList[0]:
        tc_group = preprocessing.standardize(pca.transform(tc.T))
    else:
        tc_group = np.hstack(
            (tc_group, preprocessing.standardize(pca.transform(tc.T))))
    print("Concatenating subject" + sub + "'s timecourses")
#io.savemat(os.path.join(BASE_DIR, "group/tc_rest_pca_vox.mat"), {"tc_group": tc_group})

# Perform parcellation on PCA-ed timecourses
brain_img = as_volume_img("/volatile/bernardng/templates/spm8/rgrey.nii")
brain = brain_img.get_data()
dim = np.shape(brain)
brain = brain > 0.2  # Generate brain mask
brain = mask_utils.largest_cc(brain)
mem = Memory(cachedir='.', verbose=1)
# Define connectivity based on brain mask
A = grid_to_graph(n_x=brain.shape[0],
                  n_y=brain.shape[1],
                  n_z=brain.shape[2],
                  mask=brain)
# Create ward object
ward = WardAgglomeration(n_clusters=500, connectivity=A, memory=mem)
tc_group = tc_group.reshape((dim[0], dim[1], dim[2], -1))
n_tpts = tc_group.shape[-1]
for t in np.arange(n_tpts):
Example #14
"""
Resample input volume to resolution of reference
"""
import os
import nibabel as nib
import numpy as np
import argparse
from nipy.labs import as_volume_img

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Resample input volume to resolution of reference (-t 0 = continuous interpolation; -t 1 = nearest neighbor).')
    parser.add_argument('-i', dest='input') # Takes .nii as input
    parser.add_argument('-r', dest='reference')
    parser.add_argument('-t', dest='interp_type', default=0)
    input_path = parser.parse_args().input
    ref_path = parser.parse_args().reference
    interp_type = np.float32(parser.parse_args().interp_type)
    if interp_type == 0:
        interp = 'continuous'
    elif interp_type == 1:
        interp = 'nearest'
    path, afile = os.path.split(input_path)
    afile = afile.replace('.nii', '_rs.nii')
    vol_img = as_volume_img(input_path)
    ref_img = as_volume_img(ref_path)
    vol_img = vol_img.resampled_to_img(ref_img, interpolation=interp)
    vol = vol_img.get_data()
    nii = nib.Nifti1Image(vol, ref_img.get_affine())
    nib.save(nii, os.path.join(path, afile))
n_parcels = 500.0

# Change path to files
TC_PATH = "tc_vox.mat"
REF_PATH = "wea000000459848s007a001.nii.gz"
GM_PATH = "../Maps/wc1mprage000000459848.nii.gz"
WM_PATH = "../Maps/wc2mprage000000459848.nii.gz"
CSF_PATH = "../Maps/wc3mprage000000459848.nii.gz"
PARCEL_PATH = "parcel500.nii"

# Load subject timecourse
tc = io.loadmat(TC_PATH)
tc = tc["tc"].T

# Generate dilated GM mask
ref_img = as_volume_img(REF_PATH)  # For resampling to 3mm
ref = ref_img.get_data()
gm_img = as_volume_img(GM_PATH)
gm = gm_img.get_data()
# wm_img = as_volume_img(WM_PATH)
# wm = wm_img.get_data()
# csf_img = as_volume_img(CSF_PATH)
# csf = csf_img.get_data()
# probTotal = gm + wm + csf
# dim = np.shape(probTotal)
# ind = probTotal > 0
# gm[ind] = gm[ind] / probTotal[ind]
# wm[ind] = wm[ind] / probTotal[ind]
# csf[ind] = csf[ind] / probTotal[ind]
# tissue = np.array([gm.ravel(), wm.ravel(), csf.ravel()])
# tissue_mask = tissue.argmax(axis=0) + 1
Example #16
import os
import argparse
import numpy as np
import nibabel as nib
from nipy.labs import as_volume_img

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Put B0 volumes at the top.')
    parser.add_argument('-i', dest='dwi_file')  # 4D DWI Nifti
    parser.add_argument('-b', dest='bval_file')  # b-value txt file
    parser.add_argument('-d', dest='bvec_file')  # gradient table
    dwi_path = parser.parse_args().dwi_file
    bval_path = parser.parse_args().bval_file
    bvec_path = parser.parse_args().bvec_file
    dwi_img = as_volume_img(dwi_path)
    dwi = dwi_img.get_data()
    bval = np.loadtxt(bval_path)
    bvec = np.loadtxt(bvec_path)

    b0_ave = np.mean(dwi[:, :, :, bval == 0], axis=3)
    dwi_reorder = np.concatenate(
        (b0_ave[:, :, :, np.newaxis], dwi[:, :, :, bval != 0]), axis=3)
    bvec_reorder = np.array([0, 0, 0])
    bvec_reorder = np.vstack((bvec_reorder, bvec[bval != 0]))
    bval_reorder = np.array([0])
    bval_reorder = np.hstack((bval_reorder, bval[bval != 0]))

    nii = nib.Nifti1Image(dwi_reorder, dwi_img.affine)
    nib.save(nii, dwi_path)
    np.savetxt(bvec_path, bvec_reorder, fmt='%1.6f')
    np.savetxt(bval_path, bval_reorder, fmt='%1.0f')
Example #17
"""
Create average volume for generating brain mask and aligning fMRI to dMRI
"""
import os
import nibabel as nib
import numpy as np
import argparse
from nipy.labs import as_volume_img

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Compute average of a 4D volume.')
    parser.add_argument('-i', dest='filename')  # Takes .nii as input
    filepath = parser.parse_args().filename
    path, afile = os.path.split(filepath)
    afile = afile.replace('.nii', '_ave.nii')
    vol_img = as_volume_img(filepath)
    vol = vol_img.get_data()
    mean_vol = np.mean(vol, 3)
    nii = nib.Nifti1Image(mean_vol, vol_img.get_affine())
    nib.save(nii, os.path.join(path, afile))
Example #18
    parser.add_argument('-tc', dest='tc_file')
    parser.add_argument('-gm', dest='gm_file')
    parser.add_argument('-wm', dest='wm_file')
    parser.add_argument('-csf', dest='csf_file')
    parser.add_argument('-t', dest='template_file')
    tc_file = parser.parse_args().tc_file
    gm_file = parser.parse_args().gm_file
    wm_file = parser.parse_args().wm_file
    csf_file = parser.parse_args().csf_file
    template_file = parser.parse_args().template_file
    path, afile = os.path.split(tc_file)
    _, template_name = os.path.split(template_file)
    template_name = template_name.replace('.nii', '')

    # Load parcel template
    template_img = as_volume_img(template_file)
    template = template_img.get_data().ravel()
    label = np.unique(template)

    # Load preprocessed voxel timecourses
    tc = io.loadmat(tc_file)
    tc = tc["tc"]

    # Generate tissue mask
    gm_img = as_volume_img(gm_file)
    gm_img = gm_img.resampled_to_img(template_img)
    gm = gm_img.get_data()
    gm[gm < 0] = 0
    wm_img = as_volume_img(wm_file)
    wm_img = wm_img.resampled_to_img(template_img)
    wm = wm_img.get_data()
Example #19
from scipy import stats
from scipy import signal
from scipy import linalg
from nipy.labs import mask as mask_utils
from nipy.labs import as_volume_img
from nipy.labs import viz

BASE_DIR = "/volatile/bernardng/data/sepideh/testSubject"
# Path to warped greymatter probabilistic mask
gm_file = os.path.join(BASE_DIR, "t1mri", "wc1sga070108233-0012-00001-000160-01.hdr")
# Path to randomly chosen EPI volume as reference
epi_ref_file = os.path.join(BASE_DIR, "fmri", "swfga070108233-0004-00815-000815-01.hdr")
# Paths to all EPI volumes with name matching wildcard
epi_files = sorted(glob.glob(os.path.join(BASE_DIR, 'fmri', 'swf*.hdr')))
# Load gm_img and epi_img objects
gm_img = as_volume_img(gm_file)
epi_ref_img = as_volume_img(epi_ref_file)
# Resample tissue mask to grid of epi
gm_img = gm_img.resampled_to_img(epi_ref_img)
# Extract tissue mask
gm = gm_img.get_data()
# Normalize the mask to [0,1]
gm -= gm.min()
gm /= gm.max()
# Threshold tissue mask
gm_mask = (gm > .5)
# Find largest connected component
gm_mask = mask_utils.largest_cc(gm_mask)

# Extract graymatter voxel timecourses
time_series_gm, header_gm = mask_utils.series_from_mask(epi_files, gm_mask)
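
# series_from_mask loads each EPI volume and keeps only the voxels inside the boolean mask. A
# hedged sketch of the same idea with plain nibabel on in-memory toy images; file names, shapes
# and the stacking orientation here are illustrative.
import numpy as np
import nibabel as nib

affine = np.eye(4)
epi_imgs = [nib.Nifti1Image(np.random.rand(4, 4, 4).astype(np.float32), affine) for _ in range(5)]
gm_mask = np.zeros((4, 4, 4), dtype=bool)
gm_mask[1:3, 1:3, 1:3] = True
series = np.column_stack([img.get_fdata()[gm_mask] for img in epi_imgs])
print(series.shape)  # (8, 5): masked voxels x time points
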
# Concatenating PCA-ed voxel timecourses across subjects
for sub in subList:
    tc = io.loadmat(os.path.join(BASE_DIR, sub, "restfMRI/tc_rest_vox.mat"))
    tc = tc["tc_vox"]
    pca = PCA(n_components=10)    
    pca.fit(tc.T)
    if sub == subList[0]:
        tc_group = preprocessing.standardize(pca.transform(tc.T))
    else:
        tc_group = np.hstack((tc_group, preprocessing.standardize(pca.transform(tc.T))))
    print("Concatenating subject" + sub + "'s timecourses")
#io.savemat(os.path.join(BASE_DIR, "group/tc_rest_pca_vox.mat"), {"tc_group": tc_group})

# Perform parcellation on PCA-ed timecourses
brain_img = as_volume_img("/volatile/bernardng/templates/spm8/rgrey.nii")
brain = brain_img.get_data()
dim = np.shape(brain)
brain = brain > 0.2 # Generate brain mask
brain = mask_utils.largest_cc(brain)
mem = Memory(cachedir='.', verbose=1)
# Define connectivity based on brain mask
A = grid_to_graph(n_x=brain.shape[0], n_y=brain.shape[1], n_z=brain.shape[2], mask=brain)
# Create ward object
ward = WardAgglomeration(n_clusters=500, connectivity=A, memory=mem)
tc_group = tc_group.reshape((dim[0], dim[1], dim[2], -1))
n_tpts = tc_group.shape[-1]
for t in np.arange(n_tpts):
    tc_group[:,:,:,t] = gaussian_filter(tc_group[:,:,:,t], sigma=5)
tc_group = tc_group.reshape((-1, n_tpts))
tc_group = tc_group[brain.ravel()==1, :]
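
# The excerpt above (and Example #13) builds a voxel-adjacency graph with grid_to_graph and
# clusters voxel timecourses into 500 spatially contiguous parcels with Ward agglomeration.
# WardAgglomeration comes from an old scikit-learn; current releases expose the same idea as
# FeatureAgglomeration. A hedged, self-contained sketch on a toy mask with illustrative parameters.
import numpy as np
from sklearn.cluster import FeatureAgglomeration
from sklearn.feature_extraction.image import grid_to_graph

rng = np.random.default_rng(0)
brain_toy = np.zeros((8, 8, 8), dtype=bool)
brain_toy[1:7, 1:7, 1:7] = True                           # toy brain mask (216 voxels)
tc_toy = rng.standard_normal((30, int(brain_toy.sum())))  # time x voxel

A_toy = grid_to_graph(n_x=8, n_y=8, n_z=8, mask=brain_toy)  # keeps clusters spatially contiguous
ward_toy = FeatureAgglomeration(n_clusters=20, connectivity=A_toy, linkage='ward').fit(tc_toy)

parcels = np.zeros(brain_toy.shape, dtype=int)
parcels[brain_toy] = ward_toy.labels_ + 1                 # label 0 stays background
print(np.unique(parcels).size - 1)                        # 20 parcels
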
Example #21
import numpy as np
from scipy import io
from nipy.labs import as_volume_img

BASE_DIR = "/volatile/bernardng/data/imagen/"
TR = 2.2
#subList = np.loadtxt(os.path.join(BASE_DIR, "subjectLists/subjectList.txt"), dtype='str')
subList = np.loadtxt(os.path.join(BASE_DIR, "subjectLists/facesList.txt"),
                     dtype='str')
data_type = 0  # 0 = task, # 1 = rest

# Load parcel template
template = io.loadmat(os.path.join(BASE_DIR, "group/parcel500refined.mat"))
template = template['template'].ravel()
label = np.unique(template)
brain_img = as_volume_img("/volatile/bernardng/templates/spm8/rgrey.nii"
                          )  # For resample the tissue maps
for sub in subList:
    print str("Subject" + sub)
    # Load preprocessed voxel timecourses
    if data_type:
        tc = io.loadmat(os.path.join(BASE_DIR, sub,
                                     "restfMRI/tc_rest_vox.mat"))
    else:
        #        tc = io.loadmat(os.path.join(BASE_DIR, sub, "gcafMRI/tc_task_vox.mat"))
        tc = io.loadmat(
            os.path.join(BASE_DIR, sub, "facesfMRI/tc_task_vox.mat"))
    tc = tc["tc_vox"]
    # Generate tissue mask
    gm_file = os.path.join(BASE_DIR, sub, "anat", "gmMask.nii")
    gm_img = as_volume_img(gm_file)
    gm_img = gm_img.resampled_to_img(brain_img)