Ejemplo n.º 1
0
def extract_brainstem_roi(sub, epi_dir, atlas_dir, out_dir):
    '''
    Load EPIs and masks and resample the EPIs to the masks.

    Parameters:
        sub: subject number (used to build 'sub-{}' BIDS identifiers)
        epi_dir: root of the fmriprep-preprocessed EPI directory tree
        atlas_dir: directory holding the per-subject atlas masks
        out_dir: output directory (created if missing)

    Output: Masked EPIs and weight file per atlas
    '''
    instruct = 'instructed*T1w*prepro*denoise'  # unused while instructed runs are disabled below
    infer = 'infer*T1w*prepro*denoise'
    slu.mkdir_p(out_dir)
    e = re.EPI(sub, out_dir=out_dir)
    # Load the denoised inference-task EPIs from both scanning sessions.
    e.load_epi('{1}/sub-{0}/fmriprep/sub-{0}/ses-3/func/'.format(sub, epi_dir),
               identifier=infer)
    e.load_epi('{1}/sub-{0}/fmriprep/sub-{0}/ses-2/func/'.format(sub, epi_dir),
               identifier=infer)
    # NOTE(review): loading of the instructed-task EPIs is currently disabled.
    # The original parked this dead code in a no-op string literal; kept here
    # as comments so it can be restored deliberately:
    # e.load_epi('{1}/sub-{0}/fmriprep/sub-{0}/ses-2/func/'.format(sub, epi_dir),
    #            identifier=instruct)
    # e.load_epi('{1}/sub-{0}/fmriprep/sub-{0}/ses-3/func/'.format(sub, epi_dir),
    #            identifier=instruct)
    print('{} loaded'.format(sub))
    # Multi-ROI atlas: map volume index within the CIT atlas to nucleus name.
    e.load_mask(expanduser('{1}/sub-{0}'.format(sub, atlas_dir)),
                mult_roi_atlases={'CIT': {
                    2: 'NAc',
                    6: 'SNc',
                    10: 'VTA'
                }})
    e.resample_masks()
    print('{} resampled'.format(sub))
    e.mask()
    print('{} masked'.format(sub))
    e.save()
Ejemplo n.º 2
0
 def merge(self):
     '''
     Merge pupil, behavior and fMRI data into one master frame and save it.

     Tags each constituent frame with a three-level column MultiIndex
     (source, type, name), concatenates them column-wise, aligns the
     pupil/behavior block and the fMRI block on (trial_id, run), stores the
     result in self.master and writes it to
     choice_epochs_<subject>_<session>.csv under <flex_dir>/pupil/choice_epochs.

     NOTE(review): the column assignments below mutate the frames held on
     self (gratingpupil, choicepupil, parameters, points, choice_behavior,
     roi_task_evoked) in place.
     '''
     grating = self.gratingpupil
     choice = self.choicepupil
     paras = self.parameters
     points = self.points
     choices = self.choice_behavior
     # Tag every column with (source, type, name) so the heterogeneous
     # frames can be concatenated side by side and addressed hierarchically.
     grating.columns = pd.MultiIndex.from_product([['pupil'], ['gratinglock'], range(grating.shape[1])], names=['source', 'type', 'name'])
     choice.columns = pd.MultiIndex.from_product([['pupil'], ['choicelock'], range(choice.shape[1])], names=['source', 'type', 'name'])
     paras.columns = pd.MultiIndex.from_product([['pupil'], ['parameters'], paras.columns], names=['source', 'type', 'name'])
     choices.columns = pd.MultiIndex.from_product([['behavior'], ['parameters'], choices.columns], names=['source', 'type', 'name'])
     points.columns = pd.MultiIndex.from_product([['behavior'], ['points'], range(points.shape[1])], names=['source', 'type', 'name'])
     singles = [grating, choice, choices, points, paras]
     master = pd.concat(singles, axis=1)
     # Index the pupil/behavior block by trial and run for the later merge.
     master = master.set_index([master.pupil.parameters.trial_id, master.pupil.parameters.run])
     singles = []
     for key, frame in self.roi_task_evoked.items():
         frame.columns = pd.MultiIndex.from_product([['fmri'], [key], frame.columns], names=['source', 'type', 'name'])
         singles.append(frame)
     fmri = pd.concat(singles, axis=1)
     # trial_id/run are taken from the AAN_DR ROI frame; assumes all ROI
     # frames share the same trial ordering -- TODO confirm.
     fmri = fmri.set_index([fmri.fmri.AAN_DR.trial_id, fmri.fmri.AAN_DR.run])
     # Merge on the shared reset-index columns, then restore the index.
     merge = pd.merge(fmri.reset_index(), master.reset_index()).set_index(['trial_id', 'run'])
     self.master = merge
     out_dir = join(self.flex_dir, 'pupil', 'choice_epochs')
     slu.mkdir_p(out_dir)
     self.master.to_csv(join(out_dir, 'choice_epochs_{0}_{1}.csv'.format(self.subject, self.session)))
Ejemplo n.º 3
0
 def __init__(self, sub, ses, bids_path, out_path):
     '''
     Store subject/session identifiers and paths; ensure the output
     directory exists.
     '''
     self.sub = sub
     self.ses = ses
     # BIDS-style labels derived from the numeric identifiers.
     self.subject = 'sub-{}'.format(sub)
     self.session = 'ses-{}'.format(ses)
     self.bids_path = bids_path
     self.out_path = out_path
     slurm.mkdir_p(self.out_path)
Ejemplo n.º 4
0
def hummel_ex(sub, ses):
    '''
    Run the voxel-wise regression pipeline for one subject/session on the
    Hummel cluster (hard-coded work-directory paths).
    '''
    out_dir = '/work/faty014/FLEXRULE/fmri/voxel_denoise_debug2'
    epi_dir = '/work/faty014/FLEXRULE/fmri'
    behav_dir = '/work/faty014/FLEXRULE/behavior/behav_fmri_aligned'
    slu.mkdir_p(out_dir)
    subject_model = VoxelSubject(sub, ses, epi_dir, out_dir, behav_dir)
    # Fit voxel-wise regressions, then project the maps onto the surface.
    subject_model.linreg_voxel()
    subject_model.vol_2surf()
Ejemplo n.º 5
0
    def Output(self, dir='SubjectLevel'):
        '''
        Save all computed per-subject attributes to disk.

        Iterates over the (name, attribute) pairs exposed by self.__iter__()
        and writes each known attribute family to HDF5 (or NIfTI for
        'VoxelReg') under <flex_dir>/<dir>/<subject>/, creating that
        directory if needed.

        dir: subfolder of flex_dir to write into (default 'SubjectLevel').
             NOTE(review): shadows the builtin `dir`; kept unchanged for
             keyword-caller compatibility.
        '''
        print('Output')
        output_dir = join(self.flex_dir, dir, self.subject)
        slu.mkdir_p(output_dir)
        for name, attribute in self.__iter__():
            # Frames nested as {session: {run: frame}} -> one HDF file per
            # (name, session), one key per run.
            if name in [
                    'BehavFrame', 'BehavAligned', 'PupilFrame', 'CortRois',
                    'BrainstemRois', 'ChoiceEpochs'
            ]:
                for session in attribute.keys():
                    for run in attribute[session].keys():
                        print('Saving', name, session, run)
                        attribute[session][run].to_hdf(join(
                            output_dir,
                            '{0}_{1}_{2}.hdf'.format(name, self.subject,
                                                     session)),
                                                       key=run)

            # One frame per session -> session name doubles as the HDF key.
            elif name == 'CleanEpochs':
                for session in attribute.keys():
                    print('Saving', name, session)
                    attribute[session].to_hdf(join(
                        output_dir,
                        '{0}_{1}_{2}.hdf'.format(name, self.subject, session)),
                                              key=session)
            # Nested {session: {task: {parameter: content}}}; content is a
            # NIfTI image (VoxelReg) or {hemisphere: frame} (SurfaceTxt).
            elif name in ['VoxelReg', 'SurfaceTxt']:
                for session in attribute.keys():
                    for task in attribute[session].keys():
                        for parameter, content in attribute[session][
                                task].items():
                            print('Saving', name, session, task, parameter)
                            if name == 'VoxelReg':
                                content.to_filename(
                                    join(
                                        output_dir,
                                        '{0}_{1}_{2}_{3}_{4}.nii.gz'.format(
                                            name, self.subject, session,
                                            parameter, task)))
                            elif name == 'SurfaceTxt':
                                for hemisphere, cont in content.items():
                                    cont.to_hdf(join(
                                        output_dir,
                                        '{0}_{1}_{2}_{3}_{4}.hdf'.format(
                                            name, self.subject, session,
                                            parameter, hemisphere)),
                                                key=task)
            # {session: {task: frame}} -> one file per session, key per task.
            elif name == 'DesignMatrix':
                for session in attribute.keys():
                    for task in attribute[session].keys():
                        attribute[session][task].to_hdf(join(
                            output_dir,
                            '{0}_{1}_{2}.hdf'.format(name, self.subject,
                                                     session)),
                                                        key=task)
Ejemplo n.º 6
0
def extract_brainstem(sub, flex_dir, task):
    '''
    Compute atlas-weighted average regression coefficients in the brainstem.

    Loads every VoxelReg NIfTI of the given subject/task from SubjectLevel5,
    weights the first frame (coef_) of each image with every brainstem atlas
    and appends the weighted averages to
    <flex_dir>/fmri/brainstem_regression5/brainstem_coefs_<task>.hdf
    (HDF key = subject).

    NOTE(review): relies on the module-level `atlases` list and on the
    'VoxelReg..._ses-X_<parameter><task>...' filename scheme -- the string
    slicing below fails silently if that scheme changes.
    '''
    # Label table of the CIT168 subcortical atlas (row index -> nucleus).
    cit = pd.read_table(join(
        flex_dir, 'fmri', 'atlases',
        'CIT168_RL_Subcortical_Nuclei/CIT168_Reinf_Learn_v1/labels.txt'),
                        sep='  ',
                        header=None)
    subject = 'sub-{}'.format(sub)
    files = glob(
        join(flex_dir, 'SubjectLevel5', subject, 'VoxelReg*{}*'.format(task)))
    l_coef_ = []
    for file in files:
        nifti = nib.load(file)
        # Slice the 'ses-X' token and the parameter name out of the path.
        session = file[file.find('_ses-') + 1:file.find('_ses-') + 6]
        parameter = file[file.find(session) + 5:file.find(task)]
        for a in atlases:
            atlas = nib.load(
                join(flex_dir, 'fmri', 'atlases',
                     '{0}/{1}_T1w_{0}.nii.gz'.format(subject, a)))
            # Resample the atlas into the space/shape of the regression map.
            atlas = resample_img(atlas,
                                 nifti.affine,
                                 target_shape=nifti.shape[0:3])
            if a == 'CIT168_MNI':
                # Probabilistic multi-nucleus atlas: one weighted average per
                # nucleus, with the weight map normalized to sum to 1.
                for i in range(16):
                    atlasdata = atlas.get_data()[:, :, :, i] / atlas.get_data(
                    )[:, :, :, i].sum()
                    coef_ = np.multiply(nifti.get_data()[:, :, :, 0],
                                        atlasdata[:, :, :]).sum()
                    l_coef_.append({
                        'subject': subject,
                        'session': session,
                        'atlas': cit.iloc[i, 1].replace(' ', ''),
                        'parameter': parameter,
                        'task': task,
                        'coef_': coef_
                    })
            else:
                # Single-ROI atlas: normalize the whole weight map.
                atlasdata = atlas.get_data() / atlas.get_data().sum()
                coef_ = np.multiply(nifti.get_data()[:, :, :, 0],
                                    atlasdata[:, :, :, 0]).sum()
                l_coef_.append({
                    'subject': subject,
                    'session': session,
                    'atlas': a,
                    'parameter': parameter,
                    'task': task,
                    'coef_': coef_
                })
    out_dir = join(flex_dir, 'fmri', 'brainstem_regression5')
    slu.mkdir_p(out_dir)
    pd.DataFrame(l_coef_).to_hdf(join(out_dir,
                                      'brainstem_coefs_{}.hdf'.format(task)),
                                 key=subject)
Ejemplo n.º 7
0
 def vol_2surf(self, radius=.3):
     '''
     Project each voxel-regression volume onto the cortical surface.

     For every parameter image in self.voxel_regressions and each
     hemisphere, sample the volume along the subject's pial surface
     (nilearn vol_to_surf) and write the per-vertex statistics to
     <out_dir>/surface_textures/<subject>_<session>_<param>_<hemisphere>.csv.

     radius: sampling radius in mm passed to vol_to_surf (default .3).
     '''
     # Hoisted out of the loops: the target directory is loop-invariant,
     # so it only needs to be created once.
     slu.mkdir_p(join(self.out_dir, 'surface_textures'))
     for param, img in self.voxel_regressions.items():
         for hemisphere in ['L', 'R']:
             pial = join(
                 self.epi_dir, 'completed_preprocessed', self.subject,
                 'fmriprep', self.subject, 'anat',
                 '{0}_T1w_pial.{1}.surf.gii'.format(self.subject,
                                                    hemisphere))
             surface = vol_to_surf(img, pial, radius=radius, kind='line')
             self.surface_textures.append(surface)
             pd.DataFrame(surface, columns=['coef_', 'intercept_', 'r2_score', 'mean_squared_error']).\
                 to_csv(join(self.out_dir, 'surface_textures', '{0}_{1}_{2}_{3}.csv'.format(self.subject, self.session, param, hemisphere)))
Ejemplo n.º 8
0
def execute(sub):
    '''
    Full ROI-extraction pipeline for a single subject: brainstem ROIs,
    concatenation, then weighted cortical ROI averages per session/run.
    '''
    slu.mkdir_p(out_dir)
    extract_brainstem_roi(sub, epi_dir, atlas_dir, out_dir)
    concat_single_rois(sub, out_dir)
    for session in sessions:
        for run in runs:
            try:
                frame = extract_cortical_roi(sub,
                                             session,
                                             run,
                                             epi_dir,
                                             combined=False)
                # Add one weighted-average column per brainstem atlas.
                for atlas in atlases:
                    print(sub, session, run, atlas)
                    frame[atlas] = weighted_average(sub, session, run, atlas,
                                                    out_dir)
                target = join(
                    out_dir, 'weighted',
                    '{0}_{1}_{2}_weighted_rois.csv'.format(sub, session, run))
                frame.to_csv(target)
            except IndexError:
                # Missing/short data for this session-run combination.
                print('error {}'.format(sub))
Ejemplo n.º 9
0
                                                  multioutput='raw_values')
    df['intercept'] = linreg.intercept_
    noise = predict - linreg.intercept_
    denoise = d2 - noise
    new_shape = np.stack(
        [denoise[i, :].reshape(shape[0:3]) for i in range(denoise.shape[0])],
        -1)
    new_image = nib.Nifti1Image(new_shape, affine=nifti.affine)
    new_image.to_filename(
        join(
            epi_dir, subject, 'fmriprep', subject, session, 'func',
            '{0}_{1}_task-{2}_bold_space-T1w_preproc_denoise.nii.gz'.format(
                subject, session, run)))
    df.to_csv(
        join(fmri_dir,
             '{0}_{1}_{2}_denoising.csv'.format(subject, session, run)))


def execute(sub):
    '''
    Denoise every session/run combination of one subject, skipping
    combinations whose input files do not exist.
    '''
    for ses, run in itertools.product(['ses-2', 'ses-3'], runs):
        try:
            denoise(sub, ses, run, epi_dir, fmri_dir)
        except FileNotFoundError:
            print('{0} {1} {2} file not found'.format(sub, ses, run))
        else:
            print('{0} {1} {2} succesful'.format(sub, ses, run))


if __name__ == '__main__':
    # Entry point: ensure the output directory exists, then denoise the
    # subject given as the first command-line argument.
    slu.mkdir_p(fmri_dir)
    execute(sys.argv[1])
Ejemplo n.º 10
0
import pandas as pd
import numpy as np
from decim import glaze_model as gm
from os.path import join, expanduser
from scipy.interpolate import interp1d
from decim import slurm_submit as slu
import matplotlib.pyplot as plt
from joblib import Memory
# Choose a joblib cache location: the cluster work directory when running
# as the cluster user (home == /home/faty014), otherwise ~/joblib_cache.
if expanduser('~') == '/home/faty014':
    cachedir = expanduser('/work/faty014/joblib_cache')
else:
    cachedir = expanduser('~/joblib_cache')
slu.mkdir_p(cachedir)
# NOTE(review): `cachedir=` is the legacy joblib keyword (renamed to
# `location=` in joblib >= 0.12) -- confirm the pinned joblib version.
memory = Memory(cachedir=cachedir, verbose=0)
'''
INPUT: Behavioral data from .tsv files in BIDS-Format
OUTPUT: Pandas data frame with the following columns
    - event: point / choice onset / response / stimulus onset
    - onset: time of event
    - value: either point location or choice
    - belief: Glaze Belief
    - gen_side: active distribution
    - obj belief
    - stim_id
    - rule response

To execute, make sure to set:
    - bids_mr: where is the raw data? up-to-date version?
    - outpath: where to store the output DFs?
    - summary: summary-file of stan-fits
    - subject-loop, session-loop
Ejemplo n.º 11
0
import pandas as pd
import numpy as np
from decim import pupil_frame as pf
from os.path import join, expanduser
from glob import glob
from decim import slurm_submit as slurm

# Raw BIDS data location and destination directory for the pupil frames.
bids_mr = '/Volumes/flxrl/fmri/bids_mr/'
outpath = expanduser('~/Flexrule/fmri/analyses/pupil_dataframes_310518')

for sub in range(1, 23):
    subject = 'sub-{}'.format(sub)
    savepath = join(outpath, subject)
    slurm.mkdir_p(savepath)
    for ses in range(1, 4):
        session = 'ses-{}'.format(ses)
        files = glob(join(bids_mr, subject, session, '*', '*inference*.edf'))
        if len(files) == 0:
            pass
        else:
            for file in files:
                run = file[file.find('inference'):file.find('_phys')]
                raw = pf.Pupilframe(subject, session, None, bids_mr, bids=True)
                raw.basicframe(directory=[file])
                raw.gaze_angle()
                raw.all_artifacts()
                raw.small_fragments()
                raw.interpol()
                raw.filter()
                raw.z_score()
                raw.pupil_frame.to_csv(
Ejemplo n.º 12
0
from os.path import join
from glob import glob
import decim.slurm_submit as slu
import sys
from multiprocessing import Pool

# Run labels of the inference and instructed tasks within a session.
runs = [
    'inference_run-4', 'inference_run-5', 'inference_run-6',
    'instructed_run-7', 'instructed_run-8'
]
# Local (laptop) input/output paths and the cluster output path.
data_dir = '/Volumes/flxrl/fmri/bids_mr'
out_dir = '/Users/kenohagena/Desktop/behav_fmri_aligned3'
hummel_out = '/work/faty014/FLEXRULE/behavior/test'

# slu.mkdir_p(out_dir)
slu.mkdir_p(hummel_out)


def hrf(t):
    '''
    A hemodynamic response function evaluated at the time points t.

    Computes the gamma-shaped response t**8.6 * exp(-t / 0.547), prepends
    an equal-length block of zeros (so convolution stays causal) and
    normalizes the result to unit sum.
    '''
    response = t ** 8.6 * np.exp(-t / 0.547)
    padded = np.concatenate((0 * response, response))
    return padded / padded.sum()


def make_bold(evidence, dt=0.25):
    '''
    Convolve with haemodynamic response function.
    '''
Ejemplo n.º 13
0
def climag():
    '''
    Directory configuration for the climag host.

    Creates the output directory if needed and returns the tuple
    (epi_dir, atlas_dir, out_dir).
    '''
    out_dir = '/home/khagena/FLEXRULE/fmri/test_roi_extract'
    slu.mkdir_p(out_dir)
    epi_dir = '/home/khagena/FLEXRULE/fmri/completed_preprocessed'
    atlas_dir = '/home/khagena/FLEXRULE/fmri/atlases'
    return epi_dir, atlas_dir, out_dir
Ejemplo n.º 14
0
    def linreg_voxel(self):
        '''
        Concatenate runwise BOLD- and behavioral timeseries per subject-session.
        Regress each voxel on each behavioral parameter.

        Return one Nifti per session, subject & parameter with four frames:
            coef_, intercept_, r2_score, mean_squared_error

        Each image is stored in self.voxel_regressions[param] and written to
        <out_dir>/voxel_regressions/<subject>_<session>_<param>.nii.gz.
        '''
        session_nifti = []
        session_behav = []
        for run in runs:
            nifti = nib.load(
                join(
                    self.epi_dir, 'completed_preprocessed', self.subject,
                    'fmriprep', self.subject, self.session, 'func',
                    '{0}_{1}_task-{2}_bold_space-T1w_preproc_denoise.nii.gz'.
                    format(self.subject, self.session, run)))
            behav = pd.read_hdf(join(
                self.behav_dir,
                'beh_regressors_{0}_{1}.hdf'.format(self.subject,
                                                    self.session)),
                                key=run)
            shape = nifti.get_data().shape
            data = nifti.get_data()
            # Flatten each volume: rows = time points, columns = voxels.
            d2 = np.stack(
                [data[:, :, :, i].ravel() for i in range(data.shape[-1])])
            # Truncate BOLD or behavior so both series have equal length.
            if len(d2) > len(behav):
                d2 = d2[0:len(behav)]
            elif len(d2) < len(behav):
                behav = behav.iloc[0:len(d2)]
            session_behav.append(behav)
            session_nifti.append(pd.DataFrame(d2))
        session_nifti = pd.concat(session_nifti, ignore_index=True)
        session_behav = pd.concat(session_behav, ignore_index=True)
        # Z-Score behavior and voxels
        session_nifti = (session_nifti -
                         session_nifti.mean()) / session_nifti.std()
        session_behav = (session_behav -
                         session_behav.mean()) / session_behav.std()
        assert session_behav.shape[0] == session_nifti.shape[0]
        # NOTE(review): `behav`, `shape` and `nifti` below come from the
        # LAST loop iteration; assumes columns, volume shape and affine are
        # identical across runs -- confirm against the preprocessing.
        self.parameters = behav.columns
        for param in self.parameters:
            linreg = LinearRegression()
            # Single-regressor fit of every voxel on this parameter.
            linreg.fit(session_behav[param].values.reshape(-1, 1),
                       session_nifti)
            predict = linreg.predict(session_behav[param].values.reshape(
                -1, 1))
            # Stack per-voxel statistics: coef_, intercept_, r2, MSE.
            reg_result = np.concatenate(([linreg.coef_.flatten()], [
                linreg.intercept_
            ], [r2_score(session_nifti, predict, multioutput='raw_values')], [
                mean_squared_error(
                    session_nifti, predict, multioutput='raw_values')
            ]),
                                        axis=0)
            # Fold the flat voxel axis back into the 3-D grid; the four
            # statistics become the 4th dimension.
            new_shape = np.stack([
                reg_result[i, :].reshape(shape[0:3])
                for i in range(reg_result.shape[0])
            ], -1)
            new_image = nib.Nifti1Image(new_shape, affine=nifti.affine)
            self.voxel_regressions[param] = new_image
            slu.mkdir_p(join(self.out_dir, 'voxel_regressions'))
            new_image.to_filename(
                join(
                    self.out_dir, 'voxel_regressions',
                    '{0}_{1}_{2}.nii.gz'.format(self.subject, self.session,
                                                param)))
Ejemplo n.º 15
0
        pupil_interpolated_bp = signal.filtfilt(blp, alp, pupil_interpolated_hp)

        self.pupil_frame['bp_interpol'] = pupil_interpolated_bp

    def z_score(self):
        '''
        Z-score the band-pass-interpolated pupil trace over the session.

        Adds a 'biz' column to self.pupil_frame holding the standardized
        bp_interpol signal (zero mean, unit sample standard deviation).
        '''
        signal = self.pupil_frame['bp_interpol']
        self.pupil_frame['biz'] = (signal - signal.mean()) / signal.std()


if __name__ == '__main__':
    # Batch-process pupil data: subject 22, sessions 2-3, runs 0-2.
    for sub in [22]:
        for ses in [2, 3]:
            for ri in [0, 1, 2]:
                try:
                    flex_dir = '/Volumes/flxrl/FLEXRULE/'
                    out_dir = join(flex_dir, 'pupil', 'linear_pupilframes')
                    slu.mkdir_p(out_dir)
                    p = Pupilframe(sub, ses, ri, flex_dir)
                    # Standard preprocessing chain: build frame, gaze angle,
                    # artifact/fragment flagging, interpolation, band-pass
                    # filtering, z-scoring.
                    p.basicframe()
                    p.gaze_angle()
                    p.all_artifacts()
                    p.small_fragments()
                    p.interpol()
                    p.filter()
                    p.z_score()
                    p.pupil_frame.to_csv(join(out_dir, 'pupilframe_{}_{}_{}.csv'.format(p.subject, p.session, p.run)))
                except RuntimeError:
                    # Skip runs whose raw data cannot be processed.
                    continue