Example #1
# This snippet assumes tpl_dir, space, roi, afni_dir, and subject_meta are
# defined elsewhere in the original script; imports needed by the snippet:
from os.path import basename, join
import numpy as np
from gifti_io import read_gifti

# Loop through subjects and extract ROI average time series
for hemi in ['L', 'R']:

    # Get the ROI mask
    roi_fn = join(tpl_dir, f'tpl-{space}_hemi-{hemi}_desc-{roi}_mask.npy')
    roi_mask = np.load(roi_fn)

    # Loop through subjects
    for subject in subject_meta:   
        subject_dir = join(afni_dir, subject, 'func')

        # Grab BOLD filenames (and filter for hemisphere)
        bold_fns = subject_meta[subject]['bold'][space]['preproc']
        bold_fns = [bold_fn for bold_fn in bold_fns
                    if f'hemi-{hemi}' in bold_fn]

        # Loop through BOLD images and extract ROI
        for bold_fn in bold_fns:
            bold_map = read_gifti(bold_fn)
            roi_avg = np.nanmean(bold_map[:, roi_mask == 1], axis=1)
            assert bold_map.shape[0] == roi_avg.shape[0]

            roi_1D = join(subject_dir,
                          basename(bold_fn).replace(
                              '.func.gii',
                              f'_roi-{roi}_desc-mean_timeseries.1D'))
            np.savetxt(roi_1D, roi_avg[None, :], delimiter=' ', fmt='%f')

            print(f"Extracted average {roi} time series for {subject}"
                   f"\n  {basename(roi_1D)}")
Example #2
# Name guard so cSRM is only computed when this file is run as a script
if __name__ == '__main__':

    # Load dictionary of input filenames and parameters
    with open('data/metadata.json') as f:
        metadata = json.load(f)

    # Stories to include (all subjects are used by default)
    stories = [
        'pieman', 'prettymouth', 'milkyway', 'slumlordreach', 'notthefall',
        '21styear', 'pieman (PNI)', 'bronx (PNI)', 'black', 'forgot'
    ]

    # Load the surface parcellation
    atlas = {
        'lh': read_gifti('data/MMP_fsaverage6.lh.gii')[0],
        'rh': read_gifti('data/MMP_fsaverage6.rh.gii')[0]
    }
    parcel_labels = {
        'lh': np.unique(atlas['lh'])[1:],
        'rh': np.unique(atlas['rh'])[1:]
    }

    # Select story halves for training and test
    train_half, test_half = 1, 2

    # Load in first-half training surface data
    target_data = load_split_data(metadata,
                                  stories=stories,
                                  subjects=None,
                                  half=train_half)
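
    # Illustrative sketch (not in the original excerpt): the cSRM computation
    # itself is not shown here; as a rough stand-in, a standard SRM can be fit
    # with BrainIAK on toy data shaped like per-subject training matrices
    # (vertices x TRs arrays, one per subject)
    from brainiak.funcalign.srm import SRM
    toy_train = [np.random.randn(1000, 300) for _ in range(5)]
    srm = SRM(n_iter=10, features=50)
    srm.fit(toy_train)
    print(srm.s_.shape)  # shared response: (features, TRs) -> (50, 300)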
Example #3
from os.path import join
import json
import numpy as np
from gifti_io import read_gifti, write_gifti

hemi = 'R'

# Load in HCP-MMP1 parcellation on fsaverage6
deriv_dir = '/jukebox/hasson/snastase/narratives/derivatives'
tpl_dir = join(deriv_dir, 'afni', 'tpl-fsaverage6')
mmp_fn = join(tpl_dir, f'tpl-fsaverage6_hemi-{hemi}_desc-MMP_dseg.label.gii')
mmp = read_gifti(mmp_fn)[0]

# Load in ROI labels
base_dir = '/jukebox/hasson/snastase/isc-confounds'
with open(join(base_dir, 'MMP_ROIs.json')) as f:
    rois = json.load(f)

roi_colors = {'EAC': rois['EAC']['A1'], 'V1': rois['EVC']['V1']}

# Reduce early visual cortex (EVC) to a V1-only ROI
# (V1 is label 1 in the HCP-MMP1 parcellation)
rois['V1'] = {'V1': 1}
rois.pop('EVC')

# Create separate masks
masks = {}
for roi in rois:
    mask = np.zeros(mmp.shape)
    for area in rois[roi]:
        mask[mmp == rois[roi][area]] = 1

    masks[roi] = mask

    # The original call is truncated here; the output filename below is an
    # assumed completion following the repository's desc-{roi}_mask naming
    mask_fn = join(tpl_dir, f'tpl-fsaverage6_hemi-{hemi}_desc-{roi}_mask.label.gii')
    write_gifti(mask, mask_fn, mmp_fn)
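
# Illustrative sanity check (not in the original script): report how many
# vertices each ROI mask covers
for roi in masks:
    print(f"{roi} ({hemi}): {int(masks[roi].sum())} vertices")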
Example #4
                    # Excerpt picks up mid-statement; the assignment and
                    # sorting wrapper below are an assumed reconstruction
                    bold_fns = sorted(
                        glob(
                            join(data_dir,
                                 (f'{subject}_task-{task}_*space-{space}_'
                                  f'hemi-{hemi}_desc-clean.func.gii'))))

                    # Grab all runs in case of multiple runs
                    for bold_fn in bold_fns:

                        if exclude and exclude_scan(bold_fn, scan_exclude):
                            print(f"Excluding {basename(bold_fn)}!")
                            continue

                        else:

                            # Strip comments and load in data as numpy array
                            subj_data = read_gifti(bold_fn)

                            # Trim data based on event onset and duration
                            subj_data = subj_data[onset:offset, :]
                            subj_data = subj_data[initial_trim:, :]

                            # Z-score input time series
                            subj_data = zscore(subj_data, axis=0)

                            subject_list.append(subject)
                            run_list.append(basename(bold_fn))
                            data.append(subj_data)
                            print(f"Loaded {subtask} {subject} "
                                  f"({hemi}) for ISC analysis")

                data = np.dstack(data)
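
                # Illustrative follow-up (not in the original excerpt): a
                # minimal leave-one-out ISC over the stacked array, assuming
                # `data` is TRs x vertices x subjects and already z-scored
                n_TRs, n_vertices, n_subjects = data.shape
                isc_maps = np.zeros((n_subjects, n_vertices))
                for s in range(n_subjects):
                    left_out = zscore(data[..., s], axis=0)
                    others = zscore(np.mean(np.delete(data, s, axis=2), axis=2),
                                    axis=0)
                    # Column-wise Pearson r via population z-scores
                    isc_maps[s] = np.sum(left_out * others, axis=0) / n_TRs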
Example #5
def split_data(metadata, stories=None, subjects=None,
               hemisphere=None, zscore_data=True,
               mask=None, roi=None, save_files=True):

    # By default grab all stories in metadata
    stories = check_keys(metadata, keys=stories)

    # Loop through stories
    for story in stories:

        # By default just grab all subjects in metadata
        subject_list = check_keys(metadata[story]['data'],
                                  keys=subjects, subkey=story)

        # Use data trims to find midpoint
        data_trims = metadata[story]['data_trims']
        n_TRs = metadata[story]['n_TRs']
        midpoint = (n_TRs - data_trims[0] - data_trims[1]) // 2

        # Loop through subjects and split data
        for subject in subject_list:

            # By default grab both hemispheres
            hemis = check_keys(metadata[story]['data'][subject],
                               keys=hemisphere)

            # One or both hemispheres 
            for hemi in hemis:

                # Load in data from GIfTI
                data_fn = metadata[story]['data'][subject][hemi]
                surf_data = read_gifti(data_fn)

                # Optionally mask
                if mask and roi:
                    surf_data = surf_data[:, mask[hemi]]

                # Trim data
                assert surf_data.shape[0] == n_TRs, ("TR mismatch! "
                    f"Expected {n_TRs}, but got {surf_data.shape[0]}")
                surf_data = surf_data[data_trims[0]:(
                    -data_trims[1] or None), :]

                half1_data = surf_data[:midpoint, :]
                half2_data = surf_data[midpoint:, :]

                if zscore_data:
                    half1_data = zscore(half1_data, axis=0)
                    half2_data = zscore(half2_data, axis=0)

                if save_files:
                    if mask and roi:
                        half1_fn = (f'data/{subject}_task-{story}_'
                                    f'half-1_{roi}_{hemi}.npy')
                        half2_fn = (f'data/{subject}_task-{story}_'
                                    f'half-2_{roi}_{hemi}.npy')

                    else:
                        half1_fn = (f'data/{subject}_task-{story}_'
                                    f'half-1_{hemi}.npy')
                        half2_fn = (f'data/{subject}_task-{story}_'
                                    f'half-2_{hemi}.npy')

                    np.save(half1_fn, half1_data)
                    np.save(half2_fn, half2_data)

            print(f"Saved split-half data for subject '{subject}' "
                  f"and story '{story}'")
Example #6
mask_dir = join(deriv_dir, afni_pipe, f'tpl-{space}')
if not exists(mask_dir):
    makedirs(mask_dir)

# Pull in any subject's FreeSurfer parcellation
# All subjects in fsaverage6 space have the same medial NaNs
subject = 'sub-001'
task = 'tunnel'

# Check against known number of medial wall vertices
n_medial = {'L': 3486, 'R': 3491}

for hemi in ['L', 'R']:
    fs6_fn = join(deriv_dir, 'fmriprep', subject, 'func',
                  f'{subject}_task-{task}_space-{space}_hemi-{hemi}.func.gii')
    fs6 = read_gifti(fs6_fn)[0]

    # Get the medial wall NaNs output by fMRIPrep
    medial_vs = np.isnan(fs6)
    assert np.sum(medial_vs) == n_medial[hemi]
    print(f"Excluding {np.sum(medial_vs)} {hemi} medial wall vertices")

    # Negate to get a cortex-only mask
    cortical_vs = (~medial_vs).astype(float)

    mask_fn = join(mask_dir, f'tpl-{space}_hemi-{hemi}_desc-cortex_mask.gii')
    write_gifti(cortical_vs, mask_fn, fs6_fn)

    # Make a 1D file for SUMA's SurfSmooth -c_mask
    mask_1D = mask_fn.replace('.gii', '.1D')
    with open(mask_1D, 'w') as f:
        # The original block is truncated here; writing one mask value per
        # vertex is an assumed completion for the SurfSmooth mask
        np.savetxt(f, cortical_vs, fmt='%d')
Example #7

# Compute mean across all input images
chdir(afni_dir)
for hemi in ['L', 'R']:
    # Wildcard pattern below is expanded by the shell when passed to 3dMean
    input_fns = join(afni_dir, 'sub-*', 'func',
                     f'sub-*_task-*_space-{space}_'
                     f'hemi-{hemi}_desc-tsnr.func.gii')
    mean_fn = join(afni_dir, f'group_space-{space}_hemi-{hemi}_'
                             'desc-mean_tsnr.gii')
    run(f'3dMean -verbose -prefix {mean_fn} {input_fns}', shell=True)
    print(f"Finished computing mean tSNR for {hemi} hemisphere")
    
    
# Compute median across all input images
for hemi in ['L', 'R']:
    input_fns = glob(join(afni_dir, 'sub-*', 'func',
                          f'sub-*_task-*_space-{space}_'
                          f'hemi-{hemi}_desc-tsnr.func.gii'))
    tsnr_stack = []
    for input_fn in input_fns:
        tsnr_stack.append(read_gifti(input_fn))
    tsnr_stack = np.vstack(tsnr_stack)
    print(f"Loaded all {tsnr_stack.shape[0]} tSNR maps")
    
    tsnr_median = np.median(tsnr_stack, axis=0)
    median_fn = join(afni_dir, f'group_space-{space}_hemi-{hemi}_'
                               'desc-median_tsnr.gii')

    write_gifti(tsnr_median, median_fn, input_fns[0])
    print(f"Finished computing median tSNR for {hemi} hemisphere")
Example #8
import json
import numpy as np
from gifti_io import read_gifti, write_gifti

hemi = 'rh'

# Load in HCP-MMP1 parcellation on fsaverage6
mmp = read_gifti(f'data/MMP_fsaverage6.{hemi}.gii')[0]

# Load in ROI labels
with open('data/MMP_ROIs.json') as f:
    rois = json.load(f)

roi_colors = {
    'EAC': rois['EAC']['A1'],
    'AAC': rois['AAC']['STSdp'],
    'PCC': rois['PCC']['POS2'],
    'PMC': rois['PCC']['POS2'],
    'TPOJ': rois['TPOJ']['TPOJ1']
}

# Create separate masks
masks = {}
for roi in rois:
    mask = np.zeros(mmp.shape)
    for area in rois[roi]:
        mask[mmp == rois[roi][area]] = 1
    masks[roi] = mask

    write_gifti(mask, f'data/MMP_{roi}_fsaverage6.{hemi}.gii',
                f'data/MMP_fsaverage6.{hemi}.gii')
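
# Illustrative sanity check (not in the original script): confirm the ROI
# masks are nonempty and do not overlap on the surface
stacked = np.vstack([masks[roi] for roi in masks])
print("Vertices per ROI:",
      dict(zip(masks.keys(), stacked.sum(axis=1).astype(int).tolist())))
print("Max ROIs sharing a vertex:", int(stacked.sum(axis=0).max()))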