Example 1
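Across-session decoding: PRF parameters fitted on session 1 are used to decode log-numerosity from single-trial volumes of session 2, with leave-one-run-out selection of the best-fitting voxels.
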
def main(subject,
         session,
         n_voxels=250,
         bids_folder='/data',
         mask='wang15_ips'):

    session1 = session[:2] + '1'
    session2 = session[:2] + '2'

    pars = get_prf_parameters_volume(subject,
                                     session1,
                                     cross_validated=False,
                                     mask=mask,
                                     bids_folder=bids_folder).astype(
                                         np.float32)

    behavior = get_task_behavior(subject, session2, bids_folder)
    data = get_single_trial_volume(subject,
                                   session2,
                                   bids_folder=bids_folder,
                                   mask=mask).astype(np.float32)
    print(data)

    paradigm = behavior[['log(n1)']].astype(np.float32)
    paradigm.index = data.index
    print(paradigm)

    pdfs = []
    runs = range(1, 9)

    for test_run in runs:

        test_data = data.xs(test_run, level='run').copy()
        test_paradigm = paradigm.xs(test_run, level='run').copy()
        train_data = data.drop(test_run, level='run').copy()
        train_paradigm = paradigm.drop(test_run, level='run').copy()

        model = GaussianPRF(parameters=pars, paradigm=train_paradigm)
        parfitter = ParameterFitter(model, train_data, train_paradigm)

        new_pars = parfitter.refine_baseline_and_amplitude(pars)
        new_pars = parfitter.fit(init_pars=new_pars, fixed_pars=['mu', 'sd'])
        print(new_pars)
        model.parameters = new_pars.astype(np.float32)

        pred = model.predict()
        r2 = get_rsq(train_data, pred)
        print(r2.describe())
        r2_mask = r2.sort_values(ascending=False).index[:n_voxels]

        train_data = train_data[r2_mask]
        test_data = test_data[r2_mask]

        print(r2.loc[r2_mask])
        model.apply_mask(r2_mask)

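        # NOTE: `stimulus_range` is not defined in this excerpt; it is presumably a
        # module-level constant (e.g. the same log-spaced grid as `bins` below).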
        model.init_pseudoWWT(stimulus_range, model.parameters)

        residfit = ResidualFitter(model, train_data,
                                  train_paradigm['log(n1)'].astype(np.float32))

        omega, dof = residfit.fit(init_sigma2=10.0,
                                  method='t',
                                  max_n_iterations=10000)

        print('DOF', dof)

        bins = np.linspace(np.log(5), np.log(80), 150,
                           endpoint=True).astype(np.float32)

        pdf = model.get_stimulus_pdf(test_data,
                                     bins,
                                     model.parameters,
                                     omega=omega,
                                     dof=dof)

        print(pdf)
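        # Posterior mean of the decoded stimulus: expectation of the bin values under the pdf.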
        E = (pdf * pdf.columns).sum(1) / pdf.sum(1)

        print(pd.concat((E, test_paradigm['log(n1)']), axis=1))
        print(pingouin.corr(E, test_paradigm['log(n1)']))

        pdfs.append(pdf)

    pdfs = pd.concat(pdfs)

    target_dir = op.join(bids_folder, 'derivatives',
                         'decoded_pdfs.volume.across_session')
    target_dir = op.join(target_dir, f'sub-{subject}', 'func')

    if not op.exists(target_dir):
        os.makedirs(target_dir)

    target_fn = op.join(
        target_dir,
        f'sub-{subject}_ses-{session2}_mask-{mask}_nvoxels-{n_voxels}_space-T1w_pars.tsv'
    )
    pdfs.to_csv(target_fn, sep='\t')
Example 2
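Cross-validated surface fit: for each held-out run, a Gaussian receptive-field model is fitted on the remaining runs and the parameter and R² maps are written as GIFTI files, then resampled from fsaverage6 to fsaverage.
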
def main(subject, sourcedata, trialwise, clip=(-100, 100)):

    derivatives = op.join(sourcedata, 'derivatives')

    if trialwise:
        layout = BIDSLayout(op.join(derivatives, 'glm_stim1_trialwise_surf'),
                            validate=False)
    else:
        layout = BIDSLayout(op.join(derivatives, 'glm_stim1_surf'),
                            validate=False)

    for hemi in ['L', 'R']:
        pes = layout.get(subject=subject, suffix=hemi)

        print(pes)

        df = []
        for pe in pes:
            d = pd.DataFrame(
                np.clip(surface.load_surf_data(pe.path).T, clip[0], clip[1]))
            d['run'] = pe.run
            d['number'] = np.log([5, 7, 10, 14, 20, 28])
            df.append(d)

        df = pd.concat(df).set_index(['run', 'number'])

        mask = ~df.isnull().any(0)

        print('fitting {} time series'.format(mask.sum()))

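        # Leave-one-run-out cross-validation: fit on the remaining runs, evaluate on the held-out run.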
        for run in df.index.unique('run'):

            train = df.drop(run)
            test = df.loc[run]

            model = GaussianReceptiveFieldModel()
            costs, parameters, predictions = model.optimize(
                train.index.get_level_values('number').values,
                train.loc[:, mask].values)

            base_dir = op.join(derivatives, 'modelfit_surf_cv',
                               f'sub-{subject}', 'func')

            if not op.exists(base_dir):
                os.makedirs(base_dir)

            parameters.columns = df.loc[:, mask].columns

            pars_df = pd.DataFrame(columns=df.columns)
            pars_df = pd.concat((pars_df, parameters))

            par_fn = op.join(
                base_dir,
                f'sub-{subject}_space-fsaverage6_desc-pars_hemi-{hemi}_cvrun-{run}.func.gii'
            )

            nb.gifti.GiftiImage(
                header=nb.load(pe.path).header,
                darrays=[
                    nb.gifti.GiftiDataArray(data=p.astype(float))
                    for _, p in pars_df.iterrows()
                ]).to_filename(par_fn)

            transformer = SurfaceTransform(source_subject='fsaverage6',
                                           target_subject='fsaverage',
                                           hemi={
                                               'L': 'lh',
                                               'R': 'rh'
                                           }[hemi])

            transformer.inputs.source_file = par_fn
            transformer.inputs.out_file = par_fn.replace(
                'fsaverage6', 'fsaverage')
            # Disable on MAC OS X (SIP problem)
            transformer.run()

            r2 = get_rsq(test.loc[:, mask].values,
                         predictions.values[:len(test), :]).to_frame('r2').T
            r2.columns = test.loc[:, mask].columns
            r2_df = pd.DataFrame(columns=test.columns)
            r2_df = pd.concat((r2_df, r2), axis=0)

            r2_fn = op.join(
                base_dir,
                f'sub-{subject}_space-fsaverage6_desc-r2_hemi-{hemi}_cvrun-{run}.func.gii'
            )

            nb.gifti.GiftiImage(
                header=nb.load(pe.path).header,
                darrays=[
                    nb.gifti.GiftiDataArray(data=r.astype(float))
                    for _, r in r2_df.iterrows()
                ]).to_filename(r2_fn)

            transformer.inputs.source_file = r2_fn
            transformer.inputs.out_file = r2_fn.replace(
                'fsaverage6', 'fsaverage')
            # Disable on MAC OS X (SIP problem)
            transformer.run()
Example 3
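Cross-validated Gaussian PRF fit on the native surface: grid search plus gradient-based refinement per held-out run; R², cross-validated R², and parameter maps are written to fsnative and transformed to fsaverage.
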
def main(subject, session, bids_folder='/data/ds-risk', smoothed=False):

    key = 'glm_stim1_surf'
    target_dir = 'encoding_model.cv'

    if smoothed:
        key += '.smoothed'
        target_dir += '.smoothed'

    target_dir = get_target_dir(subject, session, bids_folder, target_dir)

    paradigm = [
        pd.read_csv(op.join(
            bids_folder, f'sub-{subject}', f'ses-{session}', 'func',
            f'sub-{subject}_ses-{session}_task-task_run-{run}_events.tsv'),
                    sep='\t') for run in range(1, 9)
    ]
    paradigm = pd.concat(paradigm, keys=range(1, 9), names=['run'])
    paradigm = paradigm[paradigm.trial_type == 'stimulus 1'].set_index(
        ['trial_nr'], append=True)
    paradigm = paradigm[['n1']]
    paradigm['n1'] = np.log(paradigm['n1'])

    model = GaussianPRF()
    # Set up the grid for the initial search: candidate preferred numerosities (mus)
    # and tuning widths (sds) in log space; amplitude and baseline are held fixed.
    mus = np.log(np.linspace(5, 80, 120, dtype=np.float32))
    sds = np.log(np.linspace(2, 30, 120, dtype=np.float32))
    amplitudes = np.array([1.], dtype=np.float32)
    baselines = np.array([0], dtype=np.float32)

    for hemi in ['L', 'R']:

        data = surface.load_surf_data(
            op.join(
                bids_folder, 'derivatives', key, f'sub-{subject}',
                f'ses-{session}', 'func',
                f'sub-{subject}_ses-{session}_task-task_space-fsnative_desc-stims1_hemi-{hemi}.pe.gii'
            )).T

        data = pd.DataFrame(data, index=paradigm.index)

        for test_run in range(1, 9):

            test_data = data.loc[test_run].copy()
            test_paradigm = paradigm.loc[test_run].copy()
            print(test_data, test_paradigm)
            train_data = data.drop(test_run, level='run').copy()
            train_paradigm = paradigm.drop(test_run, level='run').copy()
            print(train_data, train_paradigm)

            optimizer = ParameterFitter(model, train_data, train_paradigm)

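            # Coarse-to-fine fit: grid search, refine baseline/amplitude, then gradient-based optimization.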
            grid_parameters = optimizer.fit_grid(mus,
                                                 sds,
                                                 amplitudes,
                                                 baselines,
                                                 use_correlation_cost=True)
            grid_parameters = optimizer.refine_baseline_and_amplitude(
                grid_parameters, n_iterations=2)

            optimizer.fit(init_pars=grid_parameters,
                          learning_rate=.05,
                          store_intermediate_parameters=False,
                          max_n_iterations=10000,
                          r2_atol=0.00001)

            target_fn = op.join(
                target_dir,
                f'sub-{subject}_ses-{session}_run-{test_run}_desc-r2.optim_space-fsnative_hemi-{hemi}.func.gii'
            )
            write_gifti(subject, session, bids_folder, 'fsnative',
                        pd.concat([optimizer.r2], keys=[hemi], names=['hemi']),
                        target_fn)
            transform_data(target_fn,
                           f'sub-{subject}',
                           bids_folder,
                           target_subject='fsaverage')

            target_fn = op.join(
                target_dir,
                f'sub-{subject}_ses-{session}_run-{test_run}_desc-cvr2.optim_space-fsnative_hemi-{hemi}.func.gii'
            )
            cv_r2 = get_rsq(
                test_data,
                model.predict(parameters=optimizer.estimated_parameters,
                              paradigm=test_paradigm.astype(np.float32)))
            print(cv_r2)
            write_gifti(subject, session, bids_folder, 'fsnative',
                        pd.concat([cv_r2], keys=[hemi], names=['hemi']),
                        target_fn)
            transform_data(target_fn,
                           f'sub-{subject}',
                           bids_folder,
                           target_subject='fsaverage')

            for par, values in optimizer.estimated_parameters.T.iterrows():
                print(values)
                target_fn = op.join(
                    target_dir,
                    f'sub-{subject}_ses-{session}_run-{test_run}_desc-{par}.optim_space-fsnative_hemi-{hemi}.func.gii'
                )
                write_gifti(subject, session, bids_folder, 'fsnative',
                            pd.concat([values], keys=[hemi], names=['hemi']),
                            target_fn)
                transform_data(target_fn,
                               f'sub-{subject}',
                               bids_folder,
                               target_subject='fsaverage')
Example 4
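Within-session decoding from single-trial volumes using cross-validated PRF parameters, keeping the n_voxels best-fitting voxels per held-out run.
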
def main(subject,
         session,
         smoothed,
         pca_confounds,
         n_voxels=1000,
         bids_folder='/data',
         mask='wang15_ips'):

    target_dir = op.join(bids_folder, 'derivatives', 'decoded_pdfs.volume')

    if smoothed:
        target_dir += '.smoothed'

    if pca_confounds:
        target_dir += '.pca_confounds'

    target_dir = op.join(target_dir, f'sub-{subject}', 'func')

    if not op.exists(target_dir):
        os.makedirs(target_dir)

    sub = Subject(subject, bids_folder)
    paradigm = sub.get_behavior(sessions=session, drop_no_responses=False)
    paradigm['log(n1)'] = np.log(paradigm['n1'])
    paradigm = paradigm.droplevel(['subject', 'session'])

    data = get_single_trial_volume(subject,
                                   session,
                                   bids_folder=bids_folder,
                                   mask=mask,
                                   smoothed=smoothed,
                                   pca_confounds=pca_confounds).astype(
                                       np.float32)
    data.index = paradigm.index
    print(data)

    pdfs = []
    runs = range(1, 9)

    for test_run in runs:

        test_data = data.loc[test_run].copy()
        test_paradigm = paradigm.loc[test_run].copy()
        train_data = data.drop(test_run, level='run').copy()
        train_paradigm = paradigm.drop(test_run, level='run').copy()

        pars = get_prf_parameters_volume(subject,
                                         session,
                                         cross_validated=True,
                                         smoothed=smoothed,
                                         pca_confounds=pca_confounds,
                                         run=test_run,
                                         mask=mask,
                                         bids_folder=bids_folder)
        # pars = get_prf_parameters_volume(subject, session, cross_validated=False,  mask=mask, bids_folder=bids_folder)
        print(pars)

        model = GaussianPRF(parameters=pars)
        pred = model.predict(
            paradigm=train_paradigm['log(n1)'].astype(np.float32))

        r2 = get_rsq(train_data, pred)
        print(r2.describe())
        r2_mask = r2.sort_values(ascending=False).index[:n_voxels]

        train_data = train_data[r2_mask]
        test_data = test_data[r2_mask]

        print(r2.loc[r2_mask])
        model.apply_mask(r2_mask)

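        # NOTE: `stimulus_range` is assumed to be a module-level constant; it is not defined in this excerpt.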
        model.init_pseudoWWT(stimulus_range, model.parameters)
        residfit = ResidualFitter(model, train_data,
                                  train_paradigm['log(n1)'].astype(np.float32))

        omega, dof = residfit.fit(init_sigma2=10.0,
                                  method='t',
                                  max_n_iterations=10000)

        print('DOF', dof)

        bins = stimulus_range.astype(np.float32)

        pdf = model.get_stimulus_pdf(test_data,
                                     bins,
                                     model.parameters,
                                     omega=omega,
                                     dof=dof)

        print(pdf)
        E = (pdf * pdf.columns).sum(1) / pdf.sum(1)

        print(pd.concat((E, test_paradigm['log(n1)']), axis=1))
        print(pingouin.corr(E, test_paradigm['log(n1)']))

        pdfs.append(pdf)

    pdfs = pd.concat(pdfs)

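    # NOTE: `space` is assumed to be a module-level constant (e.g. 'T1w'); it is not defined in this excerpt.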
    target_fn = op.join(
        target_dir,
        f'sub-{subject}_ses-{session}_mask-{mask}_nvoxels-{n_voxels}_space-{space}_pars.tsv'
    )
    pdfs.to_csv(target_fn, sep='\t')
Example 5
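Minimal simulation: generate noisy responses from a Gaussian receptive-field model with known parameters, re-fit the model, and plot predictions against the simulated data.
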
from braincoder.decoders import GaussianReceptiveFieldModel
from braincoder.utils import get_rsq
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

model = GaussianReceptiveFieldModel()
n_voxels = 10

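# Per-voxel parameters, one row per voxel: presumably (mu, sd, amplitude, baseline).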
parameters = np.vstack((np.linspace(-10, 10, n_voxels), np.ones(n_voxels) * 5,
                        np.arange(1, n_voxels + 1), np.ones(n_voxels) * .2)).T
parameters = parameters.astype(np.float32)

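# Simulate noisy responses to a simple 1D paradigm, then try to recover the parameters.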
paradigm = np.repeat(np.arange(-10, 11, 1), 1)
simulated_data = model.simulate(parameters, paradigm, noise=.5)

costs, pars, predictions = model.optimize(paradigm.copy(),
                                          simulated_data,
                                          ftol=1e-12)

r2 = get_rsq(simulated_data, predictions)

plt.gcf()
plt.plot(paradigm, predictions)
plt.plot(paradigm, simulated_data, marker='+', lw=0)
plt.show()
Example 6
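Surface encoding-model fit without cross-validation (trialwise or run-wise GLM estimates); parameter and R² maps are written as GIFTI files and resampled from fsaverage6 to fsaverage.
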
def main(subject, sourcedata, trialwise, clip=(-100, 100)):

    derivatives = op.join(sourcedata, 'derivatives')

    if trialwise:
        layout = BIDSLayout(op.join(derivatives, 'glm_stim1_trialwise_surf'),
                            validate=False)
    else:
        layout = BIDSLayout(op.join(derivatives, 'glm_stim1_surf'),
                            validate=False)

    for hemi in ['L', 'R']:
        pes = layout.get(subject=subject, suffix=hemi)

        print(pes)

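        # Log-numerosity paradigm, one value per parameter estimate; the trialwise GLM
        # repeats each numerosity 6 times per run.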
        if trialwise:
            paradigm = np.log(
                pd.Series(
                    np.repeat([5, 7, 10, 14, 20, 28], 6).tolist() * len(pes)))
        else:
            paradigm = np.log(pd.Series([5, 7, 10, 14, 20, 28] * len(pes)))

        df = []
        for pe in pes:
            d = pd.DataFrame(
                np.clip(surface.load_surf_data(pe.path).T, clip[0], clip[1]))
            df.append(d)

        df = pd.concat(df)

        mask = ~df.isnull().any(0)
        # mask = mask & (np.random.rand(df.shape[1]) < 0.001)

        print('fitting {} time series'.format(mask.sum()))

        model = GaussianReceptiveFieldModel()
        costs, parameters, predictions = model.optimize(
            paradigm.values.ravel(), df.loc[:, mask].values)

        if trialwise:
            base_dir = op.join(derivatives, 'modelfit_trialwise_surf',
                               f'sub-{subject}', 'func')
        else:
            base_dir = op.join(derivatives, 'modelfit_surf', f'sub-{subject}',
                               'func')

        if not op.exists(base_dir):
            os.makedirs(base_dir)

        parameters.columns = df.loc[:, mask].columns

        pars_df = pd.DataFrame(columns=df.columns)
        pars_df = pd.concat((pars_df, parameters))

        par_fn = op.join(
            base_dir,
            f'sub-{subject}_space-fsaverage6_desc-pars_hemi-{hemi}.func.gii')

        nb.gifti.GiftiImage(header=nb.load(pe.path).header,
                            darrays=[
                                nb.gifti.GiftiDataArray(data=p.astype(float))
                                for _, p in pars_df.iterrows()
                            ]).to_filename(par_fn)

        transformer = SurfaceTransform(source_subject='fsaverage6',
                                       target_subject='fsaverage',
                                       hemi={
                                           'L': 'lh',
                                           'R': 'rh'
                                       }[hemi])

        transformer.inputs.source_file = par_fn
        transformer.inputs.out_file = par_fn.replace('fsaverage6', 'fsaverage')
        # Disable on MAC OS X (SIP problem)
        transformer.run()

        r2 = get_rsq(df.loc[:, mask].values, predictions).to_frame('r2').T
        r2.columns = df.loc[:, mask].columns
        r2_df = pd.DataFrame(columns=df.columns)
        r2_df = pd.concat((r2_df, r2), axis=0)

        r2_fn = op.join(
            base_dir,
            f'sub-{subject}_space-fsaverage6_desc-r2_hemi-{hemi}.func.gii')

        nb.gifti.GiftiImage(header=nb.load(pe.path).header,
                            darrays=[
                                nb.gifti.GiftiDataArray(data=r.astype(float))
                                for _, r in r2_df.iterrows()
                            ]).to_filename(r2_fn)

        transformer.inputs.source_file = r2_fn
        transformer.inputs.out_file = r2_fn.replace('fsaverage6', 'fsaverage')
        # Disable on MAC OS X (SIP problem)
        transformer.run()
Example 7
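Leave-one-run-out decoding with a binned stimulus model; this snippet assumes `df`, `model`, `n_voxels`, `lambd`, and `progressbar` are defined earlier in the script.
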
results = []

for test_run in range(1, 9):

    train = df.drop(test_run).copy()
    test = df.loc[test_run].copy()

    train_values = train.index.get_level_values('Val').astype(np.float32)
    test_values = test.index.get_level_values('Val').astype(np.float32)

    costs, pars, pred = model.fit_parameters(train_values,
                                             train,
                                             patience=100,
                                             progressbar=progressbar)

    r2 = get_rsq(df, pred)
    mask = r2.sort_values(ascending=False).iloc[:n_voxels].index
    model.apply_mask(mask)

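    # Decode via a binned model: the stimulus range (here [0, 1]) is discretized into 150 bins.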
    bins = np.linspace(0, 1, 150, endpoint=True)
    bin_model = model.to_bin_model(bins)

    bin_model.fit_residuals(train_values, train.loc[:, mask], lambd=lambd)

    pdf, map_, sd, (lower_ci, higher_ci) = bin_model.get_stimulus_posterior(
        test.loc[:, mask], stimulus_range=bins, normalize=True)

    r = pd.concat((pdf, map_), axis=1, keys=['pdf', 'pars'])
    results.append(r)

    print(f'RUN {test_run}, r={ss.pearsonr(map_, test_values)[0]:.02f}')
Example 8
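Within-session decoding from single-trial surface data, analogous to Example 4, keeping the n_verts best-fitting vertices per held-out run.
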
def main(subject,
         session,
         smoothed,
         n_verts=100,
         bids_folder='/data',
         mask='wang15_ips'):

    target_dir = op.join(bids_folder, 'derivatives', 'decoded_pdfs')

    if smoothed:
        target_dir += '.smoothed'

    target_dir = op.join(target_dir, f'sub-{subject}', 'func')

    if not op.exists(target_dir):
        os.makedirs(target_dir)

    paradigm = [
        pd.read_csv(op.join(
            bids_folder, f'sub-{subject}', f'ses-{session}', 'func',
            f'sub-{subject}_ses-{session}_task-task_run-{run}_events.tsv'),
                    sep='\t') for run in range(1, 9)
    ]
    paradigm = pd.concat(paradigm, keys=range(1, 9),
                         names=['run']).droplevel(1)
    paradigm = paradigm[paradigm.trial_type == 'stimulus 1'].set_index(
        'trial_nr', append=True)

    paradigm['log(n1)'] = np.log(paradigm['n1'])
    print(paradigm)

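    # NOTE: `space` is not defined in this excerpt; it is presumably a module-level constant.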
    data = get_single_trial_surf_data(subject,
                                      session,
                                      bids_folder,
                                      mask=mask,
                                      smoothed=smoothed,
                                      space=space)
    data.index = paradigm.index

    # np.random.seed(666)
    # resample_mask = np.random.choice(data.columns, n_verts)
    # data = data[resample_mask].astype(np.float32)

    pdfs = []
    runs = range(1, 9)

    for test_run in runs:

        test_data = data.loc[test_run].copy()
        test_paradigm = paradigm.loc[test_run].copy()
        train_data = data.drop(test_run, level='run').copy()
        train_paradigm = paradigm.drop(test_run, level='run').copy()

        pars = get_prf_parameters(subject,
                                  session,
                                  run=test_run,
                                  mask=mask,
                                  bids_folder=bids_folder,
                                  smoothed=smoothed,
                                  space=space)

        # pars = pars.loc[resample_mask]

        model = GaussianPRF(parameters=pars)
        pred = model.predict(
            paradigm=train_paradigm['log(n1)'].astype(np.float32))

        r2 = get_rsq(train_data, pred)
        print(r2.describe())
        print(r2.sort_values(ascending=False))
        r2_mask = r2.sort_values(ascending=False).index[:n_verts]
        model.apply_mask(r2_mask)

        train_data = train_data[r2_mask].astype(np.float32)
        test_data = test_data[r2_mask].astype(np.float32)

        print(model.parameters)
        print(train_data)

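        # NOTE: `stimulus_range` is assumed to be a module-level constant; it is not defined in this excerpt.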
        model.init_pseudoWWT(stimulus_range, model.parameters)
        residfit = ResidualFitter(model, train_data,
                                  train_paradigm['log(n1)'].astype(np.float32))

        omega, dof = residfit.fit(init_sigma2=10.0,
                                  method='t',
                                  max_n_iterations=10000)

        print('DOF', dof)

        bins = np.linspace(np.log(5), np.log(80), 150,
                           endpoint=True).astype(np.float32)

        pdf = model.get_stimulus_pdf(test_data,
                                     bins,
                                     model.parameters,
                                     omega=omega,
                                     dof=dof)

        print(pdf)
        E = (pdf * pdf.columns).sum(1) / pdf.sum(1)

        print(pd.concat((E, test_paradigm['log(n1)']), axis=1))
        print(pingouin.corr(E, test_paradigm['log(n1)']))

        pdfs.append(pdf)

    pdfs = pd.concat(pdfs)

    target_fn = op.join(
        target_dir,
        f'sub-{subject}_ses-{session}_mask-{mask}_nverts-{n_verts}_space-{space}_pars.tsv'
    )
    pdfs.to_csv(target_fn, sep='\t')