Example 1
def test_first_level_model_design_creation():
    # Test processing of FMRI inputs
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 10),)
        mask, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # basic test based on basic_paradigm and glover hrf
        t_r = 10.0
        slice_time_ref = 0.
        events = basic_paradigm()
        model = FirstLevelModel(t_r, slice_time_ref, mask=mask,
                                drift_model='polynomial', drift_order=3)
        model = model.fit(func_img, events)
        frame1, X1, names1 = check_design_matrix(model.design_matrices_[0])
        # check design computation is identical
        n_scans = func_img.get_data().shape[3]
        start_time = slice_time_ref * t_r
        end_time = (n_scans - 1 + slice_time_ref) * t_r
        frame_times = np.linspace(start_time, end_time, n_scans)
        design = make_first_level_design_matrix(frame_times, events,
                                                drift_model='polynomial', drift_order=3)
        frame2, X2, names2 = check_design_matrix(design)
        assert_array_equal(frame1, frame2)
        assert_array_equal(X1, X2)
        assert_array_equal(names1, names2)
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del FUNCFILE, mask, model, func_img
Example 2
def test_high_level_glm_with_paths():
    # New API
    shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 14)), 3
    with InTemporaryDirectory():
        mask_file, fmri_files, design_files = _write_fake_fmri_data(shapes, rk)
        multi_session_model = FirstLevelModel(mask=None).fit(
            fmri_files, design_matrices=design_files)
        z_image = multi_session_model.compute_contrast(np.eye(rk)[1])
        assert_array_equal(z_image.affine, load(mask_file).affine)
        assert_true(z_image.get_data().std() < 3.)
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del z_image, fmri_files, multi_session_model
Example 3
def test_high_level_glm_one_session():
    # New API
    shapes, rk = [(7, 8, 9, 15)], 3
    mask, fmri_data, design_matrices = _generate_fake_fmri_data(shapes, rk)

    single_session_model = FirstLevelModel(mask=None).fit(
        fmri_data[0], design_matrices=design_matrices[0])
    assert_true(isinstance(single_session_model.masker_.mask_img_,
                           Nifti1Image))

    single_session_model = FirstLevelModel(mask=mask).fit(
        fmri_data[0], design_matrices=design_matrices[0])
    z1 = single_session_model.compute_contrast(np.eye(rk)[:1])
    assert_true(isinstance(z1, Nifti1Image))
Example 4
def test_high_level_glm_with_data():
    # New API
    with InTemporaryDirectory():
        shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 16)), 3
        mask, fmri_data, design_matrices = _write_fake_fmri_data(shapes, rk)
        multi_session_model = FirstLevelModel(mask=None).fit(
            fmri_data, design_matrices=design_matrices)
        n_voxels = multi_session_model.masker_.mask_img_.get_data().sum()
        z_image = multi_session_model.compute_contrast(np.eye(rk)[1])
        assert_equal(np.sum(z_image.get_data() != 0), n_voxels)
        assert_true(z_image.get_data().std() < 3.)
        
        # with mask
        multi_session_model = FirstLevelModel(mask=mask).fit(
            fmri_data, design_matrices=design_matrices)
        z_image = multi_session_model.compute_contrast(
            np.eye(rk)[:2], output_type='z_score')
        p_value = multi_session_model.compute_contrast(
            np.eye(rk)[:2], output_type='p_value')
        stat_image = multi_session_model.compute_contrast(
            np.eye(rk)[:2], output_type='stat')
        effect_image = multi_session_model.compute_contrast(
            np.eye(rk)[:2], output_type='effect_size')
        variance_image = multi_session_model.compute_contrast(
            np.eye(rk)[:2], output_type='effect_variance')
        assert_array_equal(z_image.get_data() == 0., load(mask).get_data() == 0.)
        assert_true(
                (variance_image.get_data()[load(mask).get_data() > 0] > .001).all())
        
        all_images = multi_session_model.compute_contrast(
                np.eye(rk)[:2], output_type='all')
        
        assert_array_equal(all_images['z_score'].get_data(), z_image.get_data())
        assert_array_equal(all_images['p_value'].get_data(), p_value.get_data())
        assert_array_equal(all_images['stat'].get_data(), stat_image.get_data())
        assert_array_equal(all_images['effect_size'].get_data(), effect_image.get_data())
        assert_array_equal(all_images['effect_variance'].get_data(), variance_image.get_data())
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del (all_images,
             design_matrices,
             effect_image,
             fmri_data,
             mask,
             multi_session_model,
             n_voxels,
             p_value,
             rk,
             shapes,
             stat_image,
             variance_image,
             z_image,
         )
Example 5
def test_high_level_glm_null_contrasts():
    # test that contrast computation is resilient to 0 values.
    # new API
    shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 19)), 3
    mask, fmri_data, design_matrices = _generate_fake_fmri_data(shapes, rk)

    multi_session_model = FirstLevelModel(mask=None).fit(
        fmri_data, design_matrices=design_matrices)
    single_session_model = FirstLevelModel(mask=None).fit(
        fmri_data[0], design_matrices=design_matrices[0])
    z1 = multi_session_model.compute_contrast([np.eye(rk)[:1],
                                               np.zeros((1, rk))],
                                              output_type='stat')
    z2 = single_session_model.compute_contrast(np.eye(rk)[:1],
                                               output_type='stat')
    np.testing.assert_almost_equal(z1.get_data(), z2.get_data())
Example 6
def test_first_level_model_glm_computation():
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 10),)
        mask, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # basic test based on basic_paradigm and glover hrf
        t_r = 10.0
        slice_time_ref = 0.
        events = basic_paradigm()
        # Ordinary Least Squares case
        model = FirstLevelModel(t_r, slice_time_ref, mask_img=mask,
                                drift_model='polynomial', drift_order=3,
                                minimize_memory=False)
        model = model.fit(func_img, events)

        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del mask, FUNCFILE, func_img, model
Example 7
def test_first_level_glm_computation_with_memory_caching():
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 10),)
        mask, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # initialize FirstLevelModel with memory option enabled
        t_r = 10.0
        slice_time_ref = 0.
        events = basic_paradigm()
        # Ordinary Least Squares case
        model = FirstLevelModel(t_r, slice_time_ref, mask=mask,
                                drift_model='polynomial', drift_order=3,
                                memory='nilearn_cache', memory_level=1,
                                minimize_memory=False)
        model.fit(func_img, events)
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del mask, func_img, FUNCFILE, model
Example 8
def test_first_level_models_with_no_signal_scaling():
    """
    Test to ensure that the FirstLevelModel works correctly with
    signal_scaling=False. In particular, that the derived theta is correct for
    a constant design matrix with a single-valued fMRI image.
    """
    shapes, rk = [(3, 1, 1, 2)], 1
    fmri_data = list()
    design_matrices = list()
    design_matrices.append(pd.DataFrame(np.ones((shapes[0][-1], rk)),
                                        columns=list('abcdefghijklmnopqrstuvwxyz')[:rk]))
    first_level_model = FirstLevelModel(mask=False, noise_model='ols', signal_scaling=False)
    fmri_data.append(Nifti1Image(np.zeros((1, 1, 1, 2)) + 6, np.eye(4)))

    first_level_model.fit(fmri_data, design_matrices=design_matrices)
    # trivial test of signal_scaling value
    assert_true(first_level_model.signal_scaling is False)
    # assert that our design matrix has one constant
    assert_true(first_level_model.design_matrices_[0].equals(
        pd.DataFrame([1.0, 1.0], columns=['a'])))
    # assert that we only have one theta as there is only one voxel in our image
    assert_true(first_level_model.results_[0][0].theta.shape == (1, 1))
    # assert that the theta is equal to the one voxel value
    assert_almost_equal(first_level_model.results_[0][0].theta[0, 0], 6.0, 2)
Example 9
def test_fmri_inputs():
    # Test processing of FMRI inputs
    with InTemporaryDirectory():
        # prepare fake data
        p, q = 80, 10
        X = np.random.randn(p, q)
        shapes = ((7, 8, 9, 10), )
        mask, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        T = func_img.shape[-1]
        des = pd.DataFrame(np.ones((T, 1)), columns=['a'])
        des_fname = 'design.csv'
        des.to_csv(des_fname)

        # prepare correct input first level models
        flm = FirstLevelModel(subject_label='01').fit(FUNCFILE,
                                                      design_matrices=des)
        flms = [flm, flm, flm]
        # prepare correct input dataframe and lists
        shapes = ((7, 8, 9, 1), )
        _, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]

        dfcols = ['subject_label', 'map_name', 'effects_map_path']
        dfrows = [['01', 'a', FUNCFILE], ['02', 'a', FUNCFILE],
                  ['03', 'a', FUNCFILE]]
        niidf = pd.DataFrame(dfrows, columns=dfcols)
        niimgs = [FUNCFILE, FUNCFILE, FUNCFILE]
        niimg_4d = concat_imgs(niimgs)
        confounds = pd.DataFrame([['01', 1], ['02', 2], ['03', 3]],
                                 columns=['subject_label', 'conf1'])
        sdes = pd.DataFrame(X[:3, :3], columns=['intercept', 'b', 'c'])

        # smoke tests with correct input
        # First level models as input
        SecondLevelModel(mask_img=mask).fit(flms)
        SecondLevelModel().fit(flms)
        # Note: the following one creates a singular design matrix
        SecondLevelModel().fit(flms, confounds)
        SecondLevelModel().fit(flms, None, sdes)
        # dataframes as input
        SecondLevelModel().fit(niidf)
        SecondLevelModel().fit(niidf, confounds)
        SecondLevelModel().fit(niidf, confounds, sdes)
        SecondLevelModel().fit(niidf, None, sdes)
        # niimgs as input
        SecondLevelModel().fit(niimgs, None, sdes)
        # 4d niimg as input
        SecondLevelModel().fit(niimg_4d, None, sdes)

        # test wrong input errors
        # test first level model requirements
        assert_raises(ValueError, SecondLevelModel().fit, flm)
        assert_raises(ValueError, SecondLevelModel().fit, [flm])
        # test dataframe requirements
        assert_raises(ValueError,
                      SecondLevelModel().fit, niidf['subject_label'])
        # test niimgs requirements
        assert_raises(ValueError, SecondLevelModel().fit, niimgs)
        assert_raises(ValueError,
                      SecondLevelModel().fit, niimgs + [[]], confounds)
        # test first_level_conditions, confounds, and design
        assert_raises(ValueError, SecondLevelModel().fit, flms, ['', []])
        assert_raises(ValueError, SecondLevelModel().fit, flms, [])
        assert_raises(ValueError,
                      SecondLevelModel().fit, flms, confounds['conf1'])
        assert_raises(ValueError, SecondLevelModel().fit, flms, None, [])
Example 10
mean_img_ = mean_img(fmri_img[0])

#########################################################################
# The design matrices were pre-computed, we simply put them in a list of DataFrames
design_files = [data['design_matrix1'], data['design_matrix2']]
import pandas as pd
import numpy as np
design_matrices = [pd.DataFrame(np.load(df)['X']) for df in design_files]

#########################################################################
# GLM estimation
# ----------------------------------
# GLM specification. Note that the mask was provided in the dataset. So we use it.

from nistats.first_level_model import FirstLevelModel
fmri_glm = FirstLevelModel(mask=data['mask'], minimize_memory=True)

#########################################################################
# GLM fitting
fmri_glm = fmri_glm.fit(fmri_img, design_matrices=design_matrices)

#########################################################################
# Compute fixed effects of the two runs and compute related images
# For this, we first define the contrasts as we would do for a single session
n_columns = design_matrices[0].shape[1]

def pad_vector(contrast_, n_columns):
    """A small routine to append zeros in contrast vectors"""
    return np.hstack((contrast_, np.zeros(n_columns - len(contrast_))))
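
# Usage sketch (added for illustration): pad_vector appends zeros so that a
# short condition contrast matches the design-matrix width; the weights and
# the 'DSt_minus_SSt' name mirror the contrast built explicitly with
# np.hstack in Example 19.
contrasts = {'DSt_minus_SSt': pad_vector([-1, -1, 1, 1], n_columns)}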

#########################################################################
Example 11
contrast_matrix = np.eye(design_matrix.shape[1])
contrasts = dict([(column, contrast_matrix[i])
                  for i, column in enumerate(design_matrix.columns)])
#########################################################################
# Instead in this example we define more interesting contrasts
contrasts = {
    'faces-scrambled': contrasts['faces'] - contrasts['scrambled'],
    'scrambled-faces': -contrasts['faces'] + contrasts['scrambled'],
    'effects_of_interest': np.vstack(
        (contrasts['faces'], contrasts['scrambled']))
}

#########################################################################
# Fit GLM
print('Fitting a GLM')
fmri_glm = FirstLevelModel(tr, slice_time_ref)
fmri_glm = fmri_glm.fit(fmri_img, design_matrices=design_matrices)

#########################################################################
# Compute contrast maps
print('Computing contrasts')
from nilearn import plotting

for contrast_id, contrast_val in contrasts.items():
    print("\tcontrast id: %s" % contrast_id)
    z_map = fmri_glm.compute_contrast(contrast_val, output_type='z_score')
    plotting.plot_stat_map(z_map,
                           bg_img=mean_image,
                           threshold=3.0,
                           display_mode='z',
                           cut_coords=3,
Example 12
#

t_r = 2.4
events_file = data['events']
import pandas as pd
events = pd.read_table(events_file)

###############################################################################
# Running a basic model
# ---------------------
#
# First specify a linear model.
# The fit() method creates the design matrix and the beta maps.
#
from nistats.first_level_model import FirstLevelModel
first_level_model = FirstLevelModel(t_r)
first_level_model = first_level_model.fit(fmri_img, events=events)
design_matrix = first_level_model.design_matrices_[0]

#########################################################################
# Let us take a look at the design matrix: it has 10 main columns corresponding to 10 experimental conditions, followed by 3 columns describing low-frequency signals (drifts) and a constant regressor.
from nistats.reporting import plot_design_matrix
plot_design_matrix(design_matrix)
import matplotlib.pyplot as plt
plt.show()

#########################################################################
# Specification of the contrasts.
#
# For this, let's create a function that, given the design matrix,
# generates the corresponding contrasts.  This will be useful to
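#
# A minimal sketch of such a function (added for illustration; the function
# name is hypothetical): it returns one canonical contrast vector per
# design-matrix column, the same identity-matrix pattern used in the other
# examples in this collection.
import numpy as np

def make_column_contrasts(design_matrix):
    """Map each design-matrix column to its canonical contrast vector."""
    contrast_matrix = np.eye(design_matrix.shape[1])
    return dict((column, contrast_matrix[i])
                for i, column in enumerate(design_matrix.columns))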
Example 13
# load events
events = pd.read_table(subject_data['events'])

#########################################################################
# Fit model
# ---------
# Note that `minimize_memory` is set to `False` so that `FirstLevelModel`
# stores the residuals.
# `signal_scaling` is set to False, so we keep the same scaling as the
# original data in `fmri_img`.
from nistats.first_level_model import FirstLevelModel

fmri_glm = FirstLevelModel(t_r=7,
                           drift_model='cosine',
                           signal_scaling=False,
                           mask_img=mask,
                           minimize_memory=False)

fmri_glm = fmri_glm.fit(fmri_img, events)

#########################################################################
# Calculate and plot contrast
# ---------------------------
from nilearn import plotting

z_map = fmri_glm.compute_contrast('active - rest')

plotting.plot_stat_map(z_map, bg_img=mean_img, threshold=3.1)

#########################################################################
Example 14
def test_first_level_model_contrast_computation():
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 10), )
        mask, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # basic test based on basic_paradigm and glover hrf
        t_r = 10.0
        slice_time_ref = 0.
        events = basic_paradigm()
        # Ordinary Least Squares case
        model = FirstLevelModel(t_r,
                                slice_time_ref,
                                mask_img=mask,
                                drift_model='polynomial',
                                drift_order=3,
                                minimize_memory=False)
        c1, c2, cnull = np.eye(7)[0], np.eye(7)[1], np.zeros(7)
        # asking for contrast before model fit gives error
        assert_raises(ValueError, model.compute_contrast, c1)
        # fit model
        model = model.fit([func_img, func_img], [events, events])
        # smoke test for different contrasts in fixed effects
        model.compute_contrast([c1, c2])
        # smoke test for same contrast in fixed effects
        model.compute_contrast([c2, c2])
        # smoke test for contrast that will be repeated
        model.compute_contrast(c2)
        model.compute_contrast(c2, 'F')
        model.compute_contrast(c2, 't', 'z_score')
        model.compute_contrast(c2, 't', 'stat')
        model.compute_contrast(c2, 't', 'p_value')
        model.compute_contrast(c2, None, 'effect_size')
        model.compute_contrast(c2, None, 'effect_variance')
        # formula should work (passing variable name directly)
        model.compute_contrast('c0')
        model.compute_contrast('c1')
        model.compute_contrast('c2')
        # smoke test for one null contrast in group
        model.compute_contrast([c2, cnull])
        # only passing null contrasts should give back a value error
        assert_raises(ValueError, model.compute_contrast, cnull)
        assert_raises(ValueError, model.compute_contrast, [cnull, cnull])
        # passing wrong parameters
        assert_raises(ValueError, model.compute_contrast, [])
        assert_raises(ValueError, model.compute_contrast, [c1, []])
        assert_raises(ValueError, model.compute_contrast, c1, '', '')
        assert_raises(ValueError, model.compute_contrast, c1, '', [])
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del func_img, FUNCFILE, model
Example 15
def main(sourcedata, derivatives, subject, session, tmp_dir):

    sourcedata_layout = BIDSLayout(sourcedata)
    sourcedata_df = sourcedata_layout.as_data_frame()
    events = sourcedata_df[(sourcedata_df['type'] == 'events')
                           & (sourcedata_df['subject'] == subject) &
                           (sourcedata_df['session'] == session)]

    derivatives_layout = BIDSLayout(os.path.join(derivatives, 'spynoza'))
    derivatives_df = derivatives_layout.as_data_frame()
    bold = derivatives_df[(derivatives_df['type'] == 'preproc')
                          & (derivatives_df['subject'] == subject) &
                          (derivatives_df['session'] == session)]

    confounds = derivatives_df[(derivatives_df['type'] == 'confounds')
                               & (derivatives_df['subject'] == subject) &
                               (derivatives_df['session'] == session)]

    print(derivatives_df.type.unique())

    mask = derivatives_layout.get(subject=subject,
                                  session=session,
                                  type='mask',
                                  return_type='file')[0]

    df = events.merge(bold,
                      on=['subject', 'session', 'run'],
                      suffixes=('_events', '_bold'))

    confounds = confounds.rename(columns={'path': 'confounds'})
    df = df.merge(confounds[['subject', 'session', 'run', 'confounds']])

    models = []
    for ix, row in df.iterrows():

        results_dir = os.path.join(derivatives, 'modelfitting', 'glm3',
                                   'sub-{}'.format(row['subject']))
        if 'session' in row:
            results_dir = os.path.join(results_dir,
                                       'ses-{}'.format(row['session']))

        os.makedirs(results_dir, exist_ok=True)

        confounds = pd.read_table(row.confounds).fillna(method='bfill')

        print('Fitting {}'.format(row['path_bold']))
        model = FirstLevelModel(t_r=4, mask=mask)
        paradigm = pd.read_table(row['path_events'])
        model.fit(row['path_bold'], paradigm, confounds=confounds)

        left_right = model.compute_contrast('eye_L - eye_R',
                                            output_type='z_score')
        left_right.to_filename(
            os.path.join(
                results_dir,
                'sub-{}_ses-{}_run-{}_left_over_right_zmap.nii.gz'.format(
                    row['subject'], row['session'], row['run'])))
        left_right = model.compute_contrast('eye_L - eye_R',
                                            output_type='effect_size')
        left_right.to_filename(
            os.path.join(
                results_dir,
                'sub-{}_ses-{}_run-{}_left_over_right_psc.nii.gz'.format(
                    row['subject'], row['session'], row['run'])))
        models.append(model)

    second_level_model = SecondLevelModel(mask=mask)
    second_level_model.fit(models)

    left_right_group = second_level_model.compute_contrast(
        first_level_contrast='eye_L - eye_R', output_type='z_score')
    left_right_group.to_filename(
        os.path.join(
            results_dir, 'sub-{}_ses-{}_left_over_right_zmap.nii.gz'.format(
                row['subject'], row['session'])))

    left_right_group = second_level_model.compute_contrast(
        first_level_contrast='eye_L - eye_R', output_type='effect_size')
    left_right_group.to_filename(
        os.path.join(
            results_dir,
            'sub-{}_ses-{}_left_over_right_effect_size.nii.gz'.format(
                row['subject'], row['session'])))

    left_right_group = second_level_model.compute_contrast(
        first_level_contrast='eye_L - eye_R', output_type='z_score')
    left_right_group.to_filename(
        os.path.join(
            results_dir, 'sub-{}_ses-{}_left_over_right_zmap.nii.gz'.format(
                row['subject'], row['session'])))
Example 16
                                 memory_level=1,
                                 verbose=0)
seed_time_series = seed_masker.fit_transform(adhd_dataset.func[0])
frametimes = np.linspace(0, (n_scans - 1) * t_r, n_scans)
design_matrix = make_first_level_design_matrix(frametimes,
                                               hrf_model='spm',
                                               add_regs=seed_time_series,
                                               add_reg_names=["pcc_seed"])
dmn_contrast = np.array([1] + [0] * (design_matrix.shape[1] - 1))
contrasts = {'seed_based_glm': dmn_contrast}

#########################################################################
# Perform first level analysis
# ----------------------------
# Setup and fit GLM
first_level_model = FirstLevelModel(t_r=t_r, slice_time_ref=slice_time_ref)
first_level_model = first_level_model.fit(run_imgs=adhd_dataset.func[0],
                                          design_matrices=design_matrix)

#########################################################################
# contrast estimation
print('Contrast seed_based_glm computed.')
z_map = first_level_model.compute_contrast(contrasts['seed_based_glm'],
                                           output_type='z_score')

# Saving snapshots of the contrasts
filename = 'dmn_z_map.png'
display = plotting.plot_stat_map(z_map,
                                 threshold=3.0,
                                 title='Seed based GLM',
                                 cut_coords=pcc_coords)
Example 17
def test_fmri_inputs_for_non_parametric_inference():
    # Test processing of FMRI inputs
    with InTemporaryDirectory():
        # prepare fake data
        p, q = 80, 10
        X = np.random.randn(p, q)
        shapes = ((7, 8, 9, 10), )
        mask, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        T = func_img.shape[-1]
        des = pd.DataFrame(np.ones((T, 1)), columns=['a'])
        des_fname = 'design.csv'
        des.to_csv(des_fname)

        # prepare correct input first level models
        flm = FirstLevelModel(subject_label='01').fit(FUNCFILE,
                                                      design_matrices=des)
        # prepare correct input dataframe and lists
        shapes = ((7, 8, 9, 1), )
        _, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]

        dfcols = ['subject_label', 'map_name', 'effects_map_path']
        dfrows = [['01', 'a', FUNCFILE], ['02', 'a', FUNCFILE],
                  ['03', 'a', FUNCFILE]]
        niidf = pd.DataFrame(dfrows, columns=dfcols)
        niimgs = [FUNCFILE, FUNCFILE, FUNCFILE]
        niimg_4d = concat_imgs(niimgs)
        confounds = pd.DataFrame([['01', 1], ['02', 2], ['03', 3]],
                                 columns=['subject_label', 'conf1'])
        sdes = pd.DataFrame(X[:3, :3], columns=['intercept', 'b', 'c'])

        # test missing second-level contrast
        # niimgs as input
        with pytest.raises(ValueError):
            non_parametric_inference(niimgs, None, sdes)
        with pytest.raises(ValueError):
            non_parametric_inference(niimgs, confounds, sdes)
        # 4d niimg as input
        with pytest.raises(ValueError):
            non_parametric_inference(niimg_4d, None, sdes)

        # test wrong input errors
        # test first level model
        with pytest.raises(ValueError):
            non_parametric_inference(flm)
        # test list of less than two niimgs
        with pytest.raises(ValueError):
            non_parametric_inference([FUNCFILE])
        # test dataframe
        with pytest.raises(ValueError):
            non_parametric_inference(niidf)
        # test niimgs requirements
        with pytest.raises(ValueError):
            non_parametric_inference(niimgs)
        with pytest.raises(ValueError):
            non_parametric_inference(niimgs + [[]], confounds)
        with pytest.raises(ValueError):
            non_parametric_inference([FUNCFILE])
        # test other objects
        with pytest.raises(ValueError):
            non_parametric_inference('random string object')
        del X, FUNCFILE, func_img
Example 18
from nistats.first_level_model import FirstLevelModel

###############################################################################
# Parameters of the first-level model
#
# * t_r=7(s) is the time of repetition of acquisitions
# * noise_model='ar1' specifies the noise covariance model: a lag-1 dependence
# * standardize=False means that we do not want to rescale the time
# series to mean 0, variance 1
# * hrf_model='spm' means that we rely on the SPM "canonical hrf"
# model (without time or dispersion derivatives)
# * drift_model='cosine' means that we model the signal drifts as slow oscillating time functions
# * period_cut=160(s) defines the cutoff period of the drift model (the inverse of the cutoff frequency).
fmri_glm = FirstLevelModel(t_r=7,
                           noise_model='ar1',
                           standardize=False,
                           hrf_model='spm',
                           drift_model='cosine',
                           period_cut=160)

###############################################################################
# Now that we have specified the model, we can run it on the fMRI image
fmri_glm = fmri_glm.fit(fmri_img, events)

###############################################################################
# One can inspect the design matrix (rows represent time, and
# columns contain the predictors).
design_matrix = fmri_glm.design_matrices_[0]

###############################################################################
# Formally, we have taken the first design matrix, because the model is
# implicitly meant to handle multiple runs.
Example 19
mean_img_ = mean_img(fmri_img[0])

#########################################################################
# The design matrices were pre-computed, we simply put them in a list of DataFrames
design_files = [data['design_matrix1'], data['design_matrix2']]
import pandas as pd
import numpy as np
design_matrices = [pd.DataFrame(np.load(df)['X']) for df in design_files]

#########################################################################
# GLM estimation
# ----------------------------------
# GLM specification. Note that the mask was provided in the dataset. So we use it.

from nistats.first_level_model import FirstLevelModel
fmri_glm = FirstLevelModel(mask_img=data['mask'], smoothing_fwhm=5,
                           minimize_memory=True)

#########################################################################
# Compute fixed effects of the two runs and compute related images
# For this, we first define the contrasts as we would do for a single session
n_columns = design_matrices[0].shape[1]
contrast_val = np.hstack(([-1, -1, 1, 1], np.zeros(n_columns - 4)))

#########################################################################
# Statistics for the first session
from nilearn import plotting
cut_coords = [-129, -126, 49]
contrast_id = 'DSt_minus_SSt'

fmri_glm = fmri_glm.fit(fmri_img[0], design_matrices=design_matrices[0])
summary_statistics_session1 = fmri_glm.compute_contrast(
Example 20
def process_subject(inputpath, subjid, dtx_mat, outputpath):
    subjglm = op.join(outputpath, "cache", "glm_{}".format(subjid))
    subjid = str(subjid)
    if op.isfile(subjglm):
        print('WARNING: Loading already saved model {}'.format(subjglm))
        fmri_glm = load(subjglm)  # the model has already been estimated
    else:
        # else, we create and estimate it
        print('Creating model for subject %s' % subjid)
        print("Searching for " +
              op.join(inputpath, subjid, "run?_medn_afw.nii.gz"))
        imgs = sorted(
            glob.glob(op.join(inputpath, subjid, "run?_medn_afw.nii.gz")))
        if len(imgs) != 9:
            print("WARNING: %s does not have 9 sessions. We skip it." % subjid)
            return

        fmri_glm = FirstLevelModel(
            t_r=2.0,
            hrf_model='spm',
            # mask='mask_ICV.nii',
            noise_model='ar1',
            period_cut=128.0,
            smoothing_fwhm=0,
            minimize_memory=True,
            # memory='/mnt/ephemeral/cache',
            memory=None,
            verbose=2,
            n_jobs=1)

        # creating and estimating the model
        fmri_glm = fmri_glm.fit(imgs, design_matrices=dtx_mat)
        # saving it as a pickle object
        dump(fmri_glm, subjglm)

    # creating the maps for each individual predictor
    # this assumes the same predictors for each session
    print('Computing contrasts for subject %s' % subjid)
    contrasts = {}
    con_names = [i for i in dtx_mat[0].columns]
    ncon = len(con_names)
    con = np.eye(ncon)
    for i, name in enumerate(con_names):
        contrasts[name] = con[i, :]

    for name, val in contrasts.items():
        z_map = fmri_glm.compute_contrast(val, output_type='z_score')
        eff_map = fmri_glm.compute_contrast(val, output_type='effect_size')
        #std_map = fmri_glm.compute_contrast(val, output_type='stddev')
        nib.save(z_map,
                 op.join(outputpath, '%s_%s_zmap.nii.gz' % (name, subjid)))
        nib.save(eff_map,
                 op.join(outputpath, '%s_%s_effsize.nii.gz' % (name, subjid)))
        display = None
        display = plot_glass_brain(z_map,
                                   display_mode='lzry',
                                   threshold=3.1,
                                   colorbar=True,
                                   title=name)
        display.savefig(
            op.join(outputpath, '%s_%s_glassbrain.png' % (name, subjid)))
        display.close()
Example 21
# Create the design matrix
import numpy as np
import matplotlib.pyplot as plt
import nibabel
from nistats.design_matrix import make_design_matrix, plot_design_matrix
tr = 2.5
n_scans = nibabel.load(func_file).get_data().shape[-1]
frametimes = np.arange(0, n_scans * tr, tr)
design_matrix = make_design_matrix(frametimes, paradigm)
plot_design_matrix(design_matrix)
plt.tight_layout()

# Fit GLM
print('Fitting a GLM')
from nistats.first_level_model import FirstLevelModel
fmri_glm = FirstLevelModel(tr)
fmri_glm = fmri_glm.fit(func_file, design_matrices=design_matrix)

# Specify the contrasts
contrasts = {}
n_columns = len(design_matrix.columns)
for n, name in enumerate(design_matrix.columns[:3]):
    contrasts[name] = np.zeros((n_columns, ))
    contrasts[name][n] = 1
contrasts['[motor audio] left - right'] = \
    contrasts['motor_audio_left'] - contrasts['motor_audio_right']

# Compute contrast maps
from nilearn import plotting
for contrast_id, contrast_val in contrasts.items():
    z_map = fmri_glm.compute_contrast(contrast_val,
Example 22
    mkdir(write_dir)

#########################################################################
# Prepare data and analysis parameters
# --------------------------------------
data = datasets.fetch_fiac_first_level()
fmri_img = [data['func1'], data['func2']]
mean_img_ = mean_img(fmri_img[0])
design_files = [data['design_matrix1'], data['design_matrix2']]
design_matrices = [pd.DataFrame(np.load(df)['X']) for df in design_files]

#########################################################################
# GLM estimation
# ----------------------------------
# GLM specification
fmri_glm = FirstLevelModel(mask=data['mask'], minimize_memory=True)

#########################################################################
# GLM fitting
fmri_glm = fmri_glm.fit(fmri_img, design_matrices=design_matrices)

#########################################################################
# compute fixed effects of the two runs and compute related images
n_columns = design_matrices[0].shape[1]


def pad_vector(contrast_, n_columns):
    return np.hstack((contrast_, np.zeros(n_columns - len(contrast_))))


contrasts = {
Example 23
def test_high_level_glm_with_data():
    # New API
    with InTemporaryDirectory():
        shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 16)), 3
        mask, fmri_data, design_matrices = _write_fake_fmri_data(shapes, rk)
        multi_session_model = FirstLevelModel(mask_img=None).fit(
            fmri_data, design_matrices=design_matrices)
        n_voxels = get_data(multi_session_model.masker_.mask_img_).sum()
        z_image = multi_session_model.compute_contrast(np.eye(rk)[1])
        assert_equal(np.sum(get_data(z_image) != 0), n_voxels)
        assert_true(get_data(z_image).std() < 3.)
        # with mask
        multi_session_model = FirstLevelModel(mask_img=mask).fit(
            fmri_data, design_matrices=design_matrices)
        z_image = multi_session_model.compute_contrast(np.eye(rk)[:2],
                                                       output_type='z_score')
        p_value = multi_session_model.compute_contrast(np.eye(rk)[:2],
                                                       output_type='p_value')
        stat_image = multi_session_model.compute_contrast(np.eye(rk)[:2],
                                                          output_type='stat')
        effect_image = multi_session_model.compute_contrast(
            np.eye(rk)[:2], output_type='effect_size')
        variance_image = multi_session_model.compute_contrast(
            np.eye(rk)[:2], output_type='effect_variance')
        assert_array_equal(get_data(z_image) == 0., get_data(load(mask)) == 0.)
        assert_true(
            (get_data(variance_image)[get_data(load(mask)) > 0] > .001).all())
        all_images = multi_session_model.compute_contrast(np.eye(rk)[:2],
                                                          output_type='all')
        assert_array_equal(get_data(all_images['z_score']), get_data(z_image))
        assert_array_equal(get_data(all_images['p_value']), get_data(p_value))
        assert_array_equal(get_data(all_images['stat']), get_data(stat_image))
        assert_array_equal(get_data(all_images['effect_size']),
                           get_data(effect_image))
        assert_array_equal(get_data(all_images['effect_variance']),
                           get_data(variance_image))
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del (all_images, design_matrices, effect_image, fmri_data, mask,
             multi_session_model, n_voxels, p_value, rk, shapes, stat_image,
             variance_image, z_image)
Example 24
slice_time_ref = t_r / 12

# Prepare data
paradigm_file = "/hpc/banco/bastien.c/data/inter_tva/sub-01/paradigms/" \
                "usub-01_task-localizer-best_bold.tsv"
paradigm = pd.read_csv(paradigm_file, sep='\t', index_col=None)
fmri_img = "/hpc/banco/InterTVA/virginia/analyse_pilot/sub-01/" \
           "func/session1/swusub-01_task-localizer-best_bold.nii"
output = "/hpc/banco/bastien.c/data/inter_tva/sub-01/output/" \
         "swusub-01_localizer-best_bold_nistat_ex/con"

#########################################################################
# Perform first level analysis
# ----------------------------
# Setup and fit GLM
first_level_model = FirstLevelModel(t_r, slice_time_ref,
                                    hrf_model='glover + derivative', verbose=2)
first_level_model = first_level_model.fit(fmri_img, paradigm)

#########################################################################
# Estimate contrasts
# ------------------
# Specify the contrasts
design_matrix = first_level_model.design_matrices_[0]
contrast_matrix = np.eye(design_matrix.shape[1])
contrasts = dict([(column, contrast_matrix[i])
                  for i, column in enumerate(design_matrix.columns)])

#########################################################################
# Short list of more relevant contrasts
contrasts = {
    "all_minus_silence": (contrasts["speech"] + contrasts["non_speech"]
Example 25
def test_fmri_inputs():
    # Test processing of FMRI inputs
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 10), )
        mask, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        T = func_img.shape[-1]
        conf = pd.DataFrame([0, 0])
        des = pd.DataFrame(np.ones((T, 1)), columns=[''])
        des_fname = 'design.csv'
        des.to_csv(des_fname)
        for fi in func_img, FUNCFILE:
            for d in des, des_fname:
                FirstLevelModel().fit(fi, design_matrices=d)
                FirstLevelModel(mask_img=None).fit([fi], design_matrices=d)
                FirstLevelModel(mask_img=mask).fit(fi, design_matrices=[d])
                FirstLevelModel(mask_img=mask).fit([fi], design_matrices=[d])
                FirstLevelModel(mask_img=mask).fit([fi, fi],
                                                   design_matrices=[d, d])
                FirstLevelModel(mask_img=None).fit((fi, fi),
                                                   design_matrices=(d, d))
                assert_raises(ValueError,
                              FirstLevelModel(mask_img=None).fit, [fi, fi], d)
                assert_raises(ValueError,
                              FirstLevelModel(mask_img=None).fit, fi, [d, d])
                # At least paradigms or design have to be given
                assert_raises(ValueError,
                              FirstLevelModel(mask_img=None).fit, fi)
                # If paradigms are given then both tr and slice time ref are
                # required
                assert_raises(ValueError,
                              FirstLevelModel(mask_img=None).fit, fi, d)
                assert_raises(ValueError,
                              FirstLevelModel(mask_img=None, t_r=1.0).fit, fi,
                              d)
                assert_raises(
                    ValueError,
                    FirstLevelModel(mask_img=None, slice_time_ref=0.).fit, fi,
                    d)
            # confounds rows do not match n_scans
            assert_raises(ValueError,
                          FirstLevelModel(mask_img=None).fit, fi, d, conf)
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del fi, func_img, mask, d, des, FUNCFILE, _
Example 26
#########################################################################
# The next solution is to try Finite Impulse Response (FIR) models: we just
# say that the hrf is an arbitrary function that lags behind the
# stimulus onset.  In the present case, given that the number of
# conditions is high, we should use a simple FIR model.
#
# Concretely, we set `hrf_model` to 'fir' and `fir_delays` to [1, 2,
# 3] (scans), corresponding to a 3-step function on the [1 * t_r, 4 *
# t_r] seconds interval.
#

from nistats.first_level_model import FirstLevelModel
from nistats.reporting import plot_design_matrix, plot_contrast_matrix

first_level_model = FirstLevelModel(t_r, hrf_model='fir', fir_delays=[1, 2, 3])
first_level_model = first_level_model.fit(fmri_img, events=events)
design_matrix = first_level_model.design_matrices_[0]
plot_design_matrix(design_matrix)

#########################################################################
# We have to adapt contrast specification. We characterize the BOLD
# response by the sum across the three time lags. It's a bit hairy,
# sorry, but this is the price to pay for flexibility...

import numpy as np

contrast_matrix = np.eye(design_matrix.shape[1])
contrasts = dict([(column, contrast_matrix[i])
                  for i, column in enumerate(design_matrix.columns)])
conditions = events.trial_type.unique()
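
#########################################################################
# Sketch of the adapted contrasts (added for illustration): sum, for each
# condition, the canonical vectors of its delayed FIR regressors. The
# assumption here is that the FIR columns are named after the condition they
# model, so a prefix match groups the delays of one condition together.
for condition in conditions:
    fir_columns = [name for name in design_matrix.columns
                   if name.startswith(condition)]
    contrasts[condition] = np.sum([contrasts[name] for name in fir_columns],
                                  axis=0)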
Example 27
            new_events.append(
                [row['onset'], row['duration'], 'r' + row['trial_type'][1:]])
        else:
            new_events.append(
                [row['onset'], row['duration'], row['trial_type']])

    df = pd.DataFrame(new_events,
                      columns=['onset', 'duration', 'trial_type'])
    # make sure only relevant columns are present
    all_events.append(df)

# Do GLM

fmri_glm = FirstLevelModel(t_r=TR,
                           noise_model='ar1',
                           standardize=False,
                           hrf_model='glover',
                           n_jobs=4)

all_sg_soma = [output_dir + filenames_sg[s] for s in range(len(filenames_sg))]

print('Fitting GLM...')

fmri_glm = fmri_glm.fit(all_sg_soma, events=all_events, confounds=all_confs)

design_matrix = fmri_glm.design_matrices_[0]

# Compute z-score of contrasts

print('Computing contrasts')
Example 28
        'duration':
        np.cumsum(settings['flicker_experiment']['durations'] *
                  settings['flicker_experiment']['repeats'])
    })

    flickers['event_type'] = flickers['frequency'].apply(lambda x: 'on'
                                                         if x != 0 else 'off')

    confounds = pd.concat(
        (pd.read_table(row.confounds), pd.read_table(row.compcorr)),
        axis=1).fillna(method='bfill')

    t_r = events[events['event_type'] == 'pulse'].onset.diff().mean()

    model = FirstLevelModel(t_r=t_r,
                            signal_scaling=False,
                            subject_label=int(run),
                            mask=mask)

    paradigm = pd.concat((button_presses, flickers), axis=0, ignore_index=True)
    paradigm['trial_type'] = paradigm['event_type']
    paradigm = paradigm[paradigm.trial_type != 'off']

    model.fit(row['data'], paradigm, confounds=confounds)

    response = model.compute_contrast('response', output_type='z_score')
    response.to_filename(
        os.path.join(
            results_dir,
            'sub-{subject}_ses-{session}_task-flicker_run-{run:02d}_response_right_zmap.nii.gz'
            .format(**locals())))
Example 29
# load dataset
dataset = load_camcan_rest(data_dir=CAMCAN_PREPROCESSED,
                           patients_excluded=CAMCAN_PATIENTS_EXCLUDED)

for (subject_id, func, motion) in zip(dataset.subject_id, dataset.func,
                                      dataset.motion):
    print(subject_id)
    events_path = join(CAMCAN_PREPROCESSED, subject_id, 'func',
                       '%s_task-SMT_events.tsv' % subject_id)

    # First-level GLM
    flm = FirstLevelModel(t_r=1.97,
                          mask=MASK_IMG,
                          smoothing_fwhm=8,
                          verbose=1,
                          memory_level=1,
                          memory=CACHE_MAPS,
                          subject_label=subject_id,
                          n_jobs=10)
    flm.fit(run_imgs=func,
            events=pd.read_csv(events_path, sep='\t'),
            confounds=pd.DataFrame(np.loadtxt(motion)))

    # Prepare contrasts
    contrasts = {}
    contrast_matrix = np.eye(flm.design_matrices_[0].shape[1])
    contrasts = dict([
        (column, contrast_matrix[i])
        for i, column in enumerate(flm.design_matrices_[0].columns[:5])
    ])
    # 'AudOnly', 'AudVid1200',  'AudVid300',  'AudVid600',    'VidOnly'
Example 30
# define the effects of interest contrast, a 2-dimensional contrast
# spanning the two conditions.

contrasts = {
    'faces-scrambled': basic_contrasts['faces'] - basic_contrasts['scrambled'],
    'scrambled-faces': -basic_contrasts['faces'] + basic_contrasts['scrambled'],
    'effects_of_interest': np.vstack((basic_contrasts['faces'],
                                      basic_contrasts['scrambled']))
    }

#########################################################################
# Fit the GLM -- 2 sessions.
# Imports for GLM, then specify and fit.
from nistats.first_level_model import FirstLevelModel
print('Fitting a GLM')
fmri_glm = FirstLevelModel()
fmri_glm = fmri_glm.fit(fmri_img, design_matrices=design_matrices)

#########################################################################
# Compute contrast-related statistical maps (in z-scale), and plot them
print('Computing contrasts')
from nilearn import plotting

# Iterate on contrasts
for contrast_id, contrast_val in contrasts.items():
    print("\tcontrast id: %s" % contrast_id)
    # compute the contrasts
    z_map = fmri_glm.compute_contrast(
        contrast_val, output_type='z_score')
    # plot the contrasts as soon as they're generated
    # the display is overlaid on the mean fMRI image
Example 31
                                 high_pass=0.01, t_r=2.,
                                 memory='nilearn_cache',
                                 memory_level=1, verbose=0)
seed_time_series = seed_masker.fit_transform(adhd_dataset.func[0])
frametimes = np.linspace(0, (n_scans - 1) * t_r, n_scans)
design_matrix = make_first_level_design_matrix(frametimes, hrf_model='spm',
                                               add_regs=seed_time_series,
                                               add_reg_names=["pcc_seed"])
dmn_contrast = np.array([1] + [0]*(design_matrix.shape[1]-1))
contrasts = {'seed_based_glm': dmn_contrast}

#########################################################################
# Perform first level analysis
# ----------------------------
# Setup and fit GLM
first_level_model = FirstLevelModel(t_r=t_r, slice_time_ref=slice_time_ref)
first_level_model = first_level_model.fit(run_imgs=adhd_dataset.func[0],
                                          design_matrices=design_matrix)

#########################################################################
# contrast estimation
print('Contrast seed_based_glm computed.')
z_map = first_level_model.compute_contrast(contrasts['seed_based_glm'],
                                           output_type='z_score')

# Saving snapshots of the contrasts
filename = 'dmn_z_map.png'
display = plotting.plot_stat_map(z_map, threshold=3.0, title='Seed based GLM',
                                 cut_coords=pcc_coords)
display.add_markers(marker_coords=[pcc_coords], marker_color='g', marker_size=300)
display.savefig(filename)
Example 32
def test_first_level_model_contrast_computation():
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 10),)
        mask, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # basic test based on basic_paradigm and glover hrf
        t_r = 10.0
        slice_time_ref = 0.
        events = basic_paradigm()
        # Ordinary Least Squares case
        model = FirstLevelModel(t_r, slice_time_ref, mask=mask,
                                drift_model='polynomial', drift_order=3,
                                minimize_memory=False)
        c1, c2, cnull = np.eye(7)[0], np.eye(7)[1], np.zeros(7)
        # asking for contrast before model fit gives error
        assert_raises(ValueError, model.compute_contrast, c1)
        # fit model
        model = model.fit([func_img, func_img], [events, events])
        # smoke test for different contrasts in fixed effects
        model.compute_contrast([c1, c2])
        # smoke test for same contrast in fixed effects
        model.compute_contrast([c2, c2])
        # smoke test for contrast that will be repeated
        model.compute_contrast(c2)
        model.compute_contrast(c2, 'F')
        model.compute_contrast(c2, 't', 'z_score')
        model.compute_contrast(c2, 't', 'stat')
        model.compute_contrast(c2, 't', 'p_value')
        model.compute_contrast(c2, None, 'effect_size')
        model.compute_contrast(c2, None, 'effect_variance')
        # formula should work (passing variable name directly)
        model.compute_contrast('c0')
        model.compute_contrast('c1')
        model.compute_contrast('c2')
        # smoke test for one null contrast in group
        model.compute_contrast([c2, cnull])
        # only passing null contrasts should give back a value error
        assert_raises(ValueError, model.compute_contrast, cnull)
        assert_raises(ValueError, model.compute_contrast, [cnull, cnull])
        # passing wrong parameters
        assert_raises(ValueError, model.compute_contrast, [])
        assert_raises(ValueError, model.compute_contrast, [c1, []])
        assert_raises(ValueError, model.compute_contrast, c1, '', '')
        assert_raises(ValueError, model.compute_contrast, c1, '', [])
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del func_img, FUNCFILE, model
Example 33
#

t_r = 2.4
events_file = data['events']
import pandas as pd
events = pd.read_table(events_file)

###############################################################################
# Running a basic model
# ---------------------
#
# First specify a linear model.
# The fit() method creates the design matrix and the beta maps.
#
from nistats.first_level_model import FirstLevelModel
first_level_model = FirstLevelModel(t_r)
first_level_model = first_level_model.fit(fmri_img, events=events)
design_matrix = first_level_model.design_matrices_[0]

#########################################################################
# Let us take a look at the design matrix: it has 10 main columns corresponding to 10 experimental conditions, followed by 3 columns describing low-frequency signals (drifts) and a constant regressor.
from nistats.reporting import plot_design_matrix
plot_design_matrix(design_matrix)
import matplotlib.pyplot as plt
plt.show()

#########################################################################
# Specification of the contrasts.
# 
# For this, let's create a function that, given the design matrix,
# generates the corresponding contrasts.  This will be useful to
Example 34
            images.append(op.join(derivatives, ds, 'fmriprep', 'sub-{subject}', 'func',
                                  'sub-{subject}_task-randomdotmotion_run-{run}_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz').format(**locals()))
            confounds.append(pd.read_table(op.join(derivatives, ds, 'fmriprep', 'sub-{subject}', 'func',
                                  'sub-{subject}_task-randomdotmotion_run-{run}_desc-confounds_regressors.tsv').format(**locals())))

            behavior.append(pd.read_table(op.join(derivatives, ds, 'event_files',
                                             'sub-{subject}_task-randomdotmotion_run-{run}_events.tsv').format(**locals())))
            behavior[-1]['duration'] = None
            behavior[-1]['onset'] += shift

        confounds = [c[include].fillna(method='bfill') for c in confounds]

        model = FirstLevelModel(t_r=3,
                                mask=masks[0],
                                drift_model=None, # Already done by fmriprep
                                smoothing_fwhm=5.0,
                                hrf_model='spm + derivative',
                                n_jobs=10,
                                subject_label='{}.{}'.format(ds, subject))

        model.fit(images, 
                  behavior,
                  confounds)

        models.append(model)

    mask = fsl.Info.standard_image('MNI152_T1_2mm_brain_mask.nii.gz')

    confounds = pd.read_pickle(op.join(derivatives, 'all_subjectwise_parameters.pkl'))
    confounds = confounds[['ddm difficulty_effect', 'ddm z_cue_regressor']]
    confounds = confounds.groupby('dataset').transform(lambda x: (x - x.mean())/ x.std())
Example 35
            dm_diff = design_matrix_size + (par_offset -
                                            6) - design_matrix.shape[1]
            if dm_diff:
                for d in range(1, dm_diff + 1):
                    design_matrix['rand_%02d' % d] = np.repeat(
                        1.0, len(design_matrix))

            # Put the design matrices in a list
            design_matrices.append(design_matrix)

        # Create GLM
        fmri_glm = FirstLevelModel(t_r=tr,
                                   slice_time_ref=slice_time_ref,
                                   hrf_model=hrf_model,
                                   drift_model=drift_model,
                                   period_cut=period_cut,
                                   standardize=False,
                                   noise_model='ar1',
                                   n_jobs=-1)

        # Estimate GLM
        fmri_glm = fmri_glm.fit(imgs_smooth, design_matrices=design_matrices)
        design_matrix = fmri_glm.design_matrices_[0]

        # Specify contrasts
        contrast_list = {}
        dm_size = design_matrix.shape[1]

        contrast_av_a = np.zeros(dm_size)
        contrast_av_a[0] = 0.3333333
        contrast_av_a[1] = 0.3333333
Example 36
# * noise_model='ar1' specifies the noise covariance model: a lag-1 dependence
# * standardize=False means that we do not want to rescale the time
# series to mean 0, variance 1
# * hrf_model='spm' means that we rely on the SPM "canonical hrf"
# model (without time or dispersion derivatives)
# * drift_model='cosine' means that we model the signal drifts as slow oscillating time functions
# * period_cut=160(s) defines the cutoff period of the drift model (the inverse of the cutoff frequency).
from nistats.first_level_model import FirstLevelModel
from nilearn.image import high_variance_confounds
from nistats.reporting import plot_design_matrix

confounds = pd.DataFrame(high_variance_confounds(fmri_img, percentile=1))
fmri_glm = FirstLevelModel(t_r=2.5,
                           noise_model='ar1',
                           standardize=False,
                           hrf_model='spm',
                           drift_model='cosine',
                           period_cut=160,
                           smoothing_fwhm=smoothing)

fmri_glm = fmri_glm.fit(fmri_img, events, confounds=confounds)
design_matrix = fmri_glm.design_matrices_[0]
# Save the design matrix image to disk
if not os.path.exists(outdir): os.mkdir(outdir)
plot_design_matrix(design_matrix,
                   output_file=join(
                       outdir,
                       subject + '_block_%s_' % '_'.join(map(str, run_order)) +
                       '_design_matrix.png'))
plt.close()
print('Design matrix plot saved to: ' + join(
Esempio n. 37
0
def first_level(subject):
    subject_id = subject['subject_id']
    data_dir = subject['output_dir']
    subject_session_output_dir = os.path.join(data_dir, 'res_stats')
    if not os.path.exists(subject_session_output_dir):
        os.makedirs(subject_session_output_dir)

    design_matrices = []

    for e, i in enumerate(subject['func']):
        
        # Parameters
        tr = subject['TR']
        drift_model = None
        hrf_model = 'spm'  # hemodynamic response function
        hfcut = 128.
        fwhm = [5, 5, 5]
        n_scans = nibabel.load(subject['func'][e]).shape[3]
 
        # Preparation of paradigm
        events_file = subject['onset'][e]
        paradigm = paradigm_contrasts.localizer_paradigm(events_file)
        
        # Motion parameter
        motion_path = subject['realignment_parameters'][e]
        motion_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
        motion = np.loadtxt(motion_path)
        
        
        # Build design matrix
        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
        design_matrix = make_first_level_design_matrix(
                frametimes, paradigm, hrf_model=hrf_model, drift_model=drift_model,
                high_pass=hfcut, add_regs=motion,
                add_reg_names=motion_names)
        _, dmtx, names = check_design_matrix(design_matrix)
        design_matrices.append(design_matrix)
        #print(names)
    
    # Specify contrasts
    contrasts = paradigm_contrasts.localizer_contrasts(design_matrix)

    # GLM Analysis
    print('Fitting a GLM (this takes time)...')    
    
    # For mask_img, use either False or the mask of the T1 MNI template:
    # the mask computed by default from the fMRI data is not always correct.
    # For a specific mask, try this:
    # mask_path = os.path.join(subject_session_output_dir, "mask.nii.gz")
    # mask = compute_epi_mask(fmri_f)
    # nibabel.save(mask, mask_path)
    # mask_images.append(compute_epi_mask(mask))
    
    fmri_glm = FirstLevelModel(mask_img=False, t_r=tr,
                               smoothing_fwhm=fwhm).fit(subject['func'],
                                                        design_matrices=design_matrices)

    # compute contrasts
    z_maps = {}
    for contrast_id, contrast_val in contrasts.items():
        print("\tcontrast id: %s" % contrast_id)

        # store stat maps to disk
        for map_type in ['z_score', 'stat', 'effect_size', 'effect_variance']:
            stat_map = fmri_glm.compute_contrast(
                contrast_val, output_type=map_type)
            map_dir = os.path.join(
                subject_session_output_dir, '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir, '%s.nii.gz' % contrast_id)
            print("\t\tWriting %s ..." % map_path)
            stat_map.to_filename(map_path)

            # collect zmaps for contrasts we're interested in
            if map_type == 'z_score':
                z_maps[contrast_id] = map_path

    anat_img = glob.glob(os.path.join(data_dir, 'anat/wsub*T1w.nii.gz'))[0]
    stats_report_filename = os.path.join(
        subject_session_output_dir, 'report_stats.html')

    report = make_glm_report(fmri_glm,
                             contrasts,
                             threshold=3.0,
                             bg_img=anat_img,
                             cluster_threshold=15,
                             title="GLM for subject %s" % subject_id,
                             )
    report.save_as_html(stats_report_filename)
                
    return z_maps
Esempio n. 38
0
def main(derivatives, ds):

    if ds == 'ds-01':
        subjects = ['{:02d}'.format(s) for s in range(1, 20)]
    elif ds == 'ds-02':
        subjects = ['{:02d}'.format(s) for s in range(1, 16)]
        subjects.pop(3)  # Remove 4

    models = []

    for subject in subjects:
        print('subject {}'.format(subject))
        runs = ['{:02d}'.format(i) for i in range(1, 4)]
        if ds == 'ds-01':
            if subject == '06':
                runs = ['{:02d}'.format(i) for i in range(1, 3)]
        elif ds == 'ds-02':
            if subject == '07':
                runs = ['{:02d}'.format(i) for i in range(1, 3)]

        include = [
            u'dvars', u'framewise_displacement', u'a_comp_cor_00',
            u'a_comp_cor_01', u'a_comp_cor_02', u'a_comp_cor_03',
            u'a_comp_cor_04', u'a_comp_cor_05', u'cosine00', u'cosine01',
            u'cosine02', u'cosine03', u'cosine04', u'cosine05', u'cosine06',
            u'cosine07', u'cosine08', u'cosine09', u'cosine10', u'cosine11',
            u'cosine12', u'cosine13', u'cosine14', u'cosine15', u'trans_x',
            u'trans_y', u'trans_z', u'rot_x', u'rot_y', u'rot_z'
        ]

        images = []
        confounds = []
        behavior = []
        masks = []

        for run in runs:
            masks.append(
                op.join(
                    derivatives, ds, 'fmriprep', 'sub-{subject}', 'func',
                    'sub-{subject}_task-randomdotmotion_run-{run}_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz'
                ).format(**locals()))

            images.append(
                op.join(
                    derivatives, ds, 'fmriprep', 'sub-{subject}', 'func',
                    'sub-{subject}_task-randomdotmotion_run-{run}_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz'
                ).format(**locals()))
            confounds.append(
                pd.read_table(
                    op.join(
                        derivatives, ds, 'fmriprep', 'sub-{subject}', 'func',
                        'sub-{subject}_task-randomdotmotion_run-{run}_desc-confounds_regressors.tsv'
                    ).format(**locals())))

            behavior.append(
                pd.read_table(
                    op.join(
                        derivatives, ds, 'event_files',
                        'sub-{subject}_task-randomdotmotion_run-{run}_events.tsv'
                    ).format(**locals())))
            behavior[-1]['duration'] = None

        confounds = [c[include].fillna(method='bfill') for c in confounds]

        model = FirstLevelModel(
            t_r=3,
            mask=masks[0],
            drift_model=None,  # Already done by fmriprep
            smoothing_fwhm=5.0,
            hrf_model='spm + derivative',
            n_jobs=10,
            subject_label=subject)

        model.fit(images, behavior, confounds)

        print(model.design_matrices_[0].columns)

        difficulty = model.compute_contrast('hard - easy',
                                            output_type='z_score')
        left_right_cue = model.compute_contrast('cue_left - cue_right',
                                                output_type='z_score')
        left_right_response = model.compute_contrast(
            'response_left - response_right', output_type='z_score')
        error = model.compute_contrast('error', output_type='z_score')
        cue = model.compute_contrast('cue_left + cue_right - 2 * cue_neutral')

        template = op.join(derivatives, ds, 'glm', 'individual_zmaps',
                           'sub-{subject}_desc-{contrast}_contrast.nii.gz')

        difficulty.to_filename(
            template.format(subject=subject, contrast='difficulty'))
        left_right_cue.to_filename(
            template.format(subject=subject, contrast='left_right_cue'))
        left_right_response.to_filename(
            template.format(subject=subject, contrast='left_right_response'))
        error.to_filename(template.format(subject=subject, contrast='error'))
        cue.to_filename(template.format(subject=subject, contrast='cue'))

        models.append(model)

    mask = fsl.Info.standard_image('MNI152_T1_2mm_brain_mask.nii.gz')
    model2 = SecondLevelModel(mask)

    model2.fit(models)

    difficulty = model2.compute_contrast(first_level_contrast='hard - easy',
                                         output_type='z_score')
    left_right_cue = model2.compute_contrast(
        first_level_contrast='cue_left - cue_right', output_type='z_score')
    left_right_response = model2.compute_contrast(
        first_level_contrast='response_left - response_right',
        output_type='z_score')
    error = model2.compute_contrast(first_level_contrast='error',
                                    output_type='z_score')
    cue = model2.compute_contrast(
        first_level_contrast='cue_left + cue_right - 2 * cue_neutral',
        output_type='z_score')

    template = op.join(derivatives, ds, 'glm',
                       'sub-{subject}_desc-{contrast}_contrast.nii.gz')
    difficulty.to_filename(
        template.format(subject='group', contrast='difficulty'))
    left_right_cue.to_filename(
        template.format(subject='group', contrast='left_right_cue'))
    left_right_response.to_filename(
        template.format(subject='group', contrast='left_right_response'))
    error.to_filename(template.format(subject='group', contrast='error'))
    cue.to_filename(template.format(subject='group', contrast='cue'))
Esempio n. 39
0
# Create the design matrix
import numpy as np
import matplotlib.pyplot as plt
import nibabel
from nistats.design_matrix import make_design_matrix
from nistats.reporting import plot_design_matrix
tr = 2.5
n_scans = nibabel.load(func_file).get_data().shape[-1]
frametimes = np.arange(0, n_scans * tr, tr)
design_matrix = make_design_matrix(frametimes, paradigm)
plot_design_matrix(design_matrix)
plt.tight_layout()

# Fit GLM
print('Fitting a GLM')
from nistats.first_level_model import FirstLevelModel
fmri_glm = FirstLevelModel(tr)
fmri_glm = fmri_glm.fit(func_file, design_matrices=design_matrix)

# Specify the contrasts
contrasts = {}
n_columns = len(design_matrix.columns)
for n, name in enumerate(design_matrix.columns[:3]):
    contrasts[name] = np.zeros((n_columns,))
    contrasts[name][n] = 1
contrasts['[motor audio] left - right'] = \
    contrasts['motor_audio_left'] - contrasts['motor_audio_right']

# Compute contrast maps
from nilearn import plotting
for contrast_id, contrast_val in contrasts.items():
    z_map = fmri_glm.compute_contrast(
Esempio n. 40
0
def main(sourcedata, derivatives, subject, session, tmp_dir):

    sourcedata_layout = BIDSLayout(sourcedata)
    sourcedata_df = sourcedata_layout.as_data_frame()
    events = sourcedata_df[(sourcedata_df['type'] == 'events')
                           & (sourcedata_df['subject'] == subject) &
                           (sourcedata_df['session'] == session)]

    derivatives_layout = BIDSLayout(os.path.join(derivatives, 'spynoza'))
    derivatives_df = derivatives_layout.as_data_frame()
    bold = derivatives_df[(derivatives_df['type'] == 'preproc')
                          & (derivatives_df['subject'] == subject) &
                          (derivatives_df['session'] == session)]

    mask = derivatives_layout.get(subject=subject,
                                  session=session,
                                  type='mask',
                                  return_type='file')[0]

    mask = image.math_img('(im > .5).astype(int)', im=mask)
    print(mask)

    row = bold.iloc[0]

    results_dir = os.path.join(derivatives, 'modelfitting_av',
                               'sub-{}'.format(row['subject']))
    os.makedirs(results_dir, exist_ok=True)

    av_bold_fn = os.path.join(
        results_dir,
        'sub-{}_ses-{}_bold_average.nii.gz'.format(row['subject'],
                                                   row['session']))
    av_bold = average_over_runs(bold.path.tolist(), output_filename=av_bold_fn)

    av_bold = image.math_img('(av_bold / av_bold.mean(-1)[..., np.newaxis])',
                             av_bold=av_bold)
    av_bold.to_filename(av_bold_fn)

    model = FirstLevelModel(t_r=4, mask=mask, drift_model=None)

    paradigm = pd.read_table(events.iloc[0]['path'])
    paradigm_short = paradigm.copy()
    paradigm_short['duration'] = 1
    paradigm_short['trial_type'] = paradigm_short['trial_type'].map(
        lambda x: '{}_instant'.format(x))
    paradigm = pd.concat((paradigm, paradigm_short))
    model.fit(av_bold, paradigm)

    left_right = model.compute_contrast('eye_L - eye_R', output_type='z_score')
    left_right.to_filename(
        os.path.join(
            results_dir, 'sub-{}_ses-{}_left_over_right_zmap.nii.gz'.format(
                row['subject'],
                row['session'],
            )))
    left_right = model.compute_contrast('eye_L - eye_R',
                                        output_type='effect_size')
    left_right.to_filename(
        os.path.join(
            results_dir, 'sub-{}_ses-{}_left_over_right_psc.nii.gz'.format(
                row['subject'], row['session'])))

    eye_l_instant = model.compute_contrast('eye_L_instant',
                                           output_type='z_score')
    eye_l_instant.to_filename(
        os.path.join(
            results_dir, 'sub-{}_ses-{}_eye_l_instant_zmap.nii.gz'.format(
                row['subject'],
                row['session'],
            )))
    eye_l_instant = model.compute_contrast('eye_L_instant',
                                           output_type='effect_size')
    eye_l_instant.to_filename(
        os.path.join(
            results_dir,
            'sub-{}_ses-{}_eye_l_instant_effect_size.nii.gz'.format(
                row['subject'],
                row['session'],
            )))
    left_right_both = model.compute_contrast(
        '(eye_L + eye_L_instant) - (eye_R + eye_R_instant)',
        output_type='z_score')
    left_right_both.to_filename(
        os.path.join(
            results_dir,
            'sub-{}_ses-{}_left_over_right_both_zmap.nii.gz'.format(
                row['subject'],
                row['session'],
            )))
    left_right_fast = model.compute_contrast('eye_L_instant - eye_R_instant',
                                             output_type='z_score')
    left_right_fast.to_filename(
        os.path.join(
            results_dir, 'sub-{}_ses-{}_left_over_right_instant.nii.gz'.format(
                row['subject'],
                row['session'],
            )))

    eye_r_instant = model.compute_contrast('eye_R_instant',
                                           output_type='z_score')
    eye_r_instant.to_filename(
        os.path.join(
            results_dir, 'sub-{}_ses-{}_eye_r_instant_zmap.nii.gz'.format(
                row['subject'],
                row['session'],
            )))
    eye_r_instant = model.compute_contrast('eye_R_instant',
                                           output_type='effect_size')
    eye_r_instant.to_filename(
        os.path.join(
            results_dir,
            'sub-{}_ses-{}_eye_R_instant_effect_size.nii.gz'.format(
                row['subject'],
                row['session'],
            )))
# It is now time to create and estimate a ``FirstLevelModel`` object that will generate the *design matrix* using the information provided by the ``events`` object.

from nistats.first_level_model import FirstLevelModel

###############################################################################
# Parameters of the first-level model
#
# * t_r=7(s) is the time of repetition of acquisitions
# * noise_model='ar1' specifies the noise covariance model: a lag-1 dependence
# * standardize=False means that we do not want to rescale the time series to mean 0, variance 1
# * hrf_model='spm' means that we rely on the SPM "canonical hrf" model (without time or dispersion derivatives)
# * drift_model='cosine' means that we model the signal drifts as slow oscillating time functions
# * period_cut=160(s) defines the cutoff period of the drift high-pass filter (i.e. the inverse of the cutoff frequency).
fmri_glm = FirstLevelModel(t_r=7,
                           noise_model='ar1',
                           standardize=False,
                           hrf_model='spm',
                           drift_model='cosine',
                           period_cut=160)

###############################################################################
# Now that we have specified the model, we can run it on the fMRI image
fmri_glm = fmri_glm.fit(fmri_img, events)

###############################################################################
# One can inspect the design matrix (rows represent time, and
# columns contain the predictors).
design_matrix = fmri_glm.design_matrices_[0]

###############################################################################
# Formally, we have taken the first design matrix, because the model is
# implicitly meant to handle multiple runs.
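
###############################################################################
# A minimal sketch of the multi-run case, assuming two hypothetical run images
# ``fmri_img_run1``/``fmri_img_run2`` and event tables ``events_run1``/
# ``events_run2`` (none of which are defined above): fitting a list of runs
# yields one design matrix per run in ``design_matrices_``.
from nistats.reporting import plot_design_matrix

multi_run_glm = fmri_glm.fit([fmri_img_run1, fmri_img_run2],
                             [events_run1, events_run2])
for run_design in multi_run_glm.design_matrices_:
    # one design matrix per run, in the order the runs were passed to fit()
    plot_design_matrix(run_design)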
Esempio n. 42
0
#########################################################################
# Construct design matrix
frame_times = np.linspace(0, (n_scans - 1) * tr, n_scans)
drift_model = 'Cosine'
period_cut = 4. * epoch_duration
hrf_model = 'glover + derivative'

#########################################################################
# Perform GLM analysis
# ------------------------------
# Fit GLM
print('\r\nFitting a GLM (this takes time) ..')
fmri_glm = FirstLevelModel(tr,
                           slice_time_ref,
                           noise_model='ar1',
                           standardize=False,
                           hrf_model=hrf_model,
                           drift_model=drift_model,
                           period_cut=period_cut)
fmri_glm = fmri_glm.fit(fmri_img, paradigm)

#########################################################################
# We could easily specify basic contrasts (Betas of the GLM model)
design_matrix = fmri_glm.design_matrices_[0]
contrast_matrix = np.eye(design_matrix.shape[1])
contrasts = dict([(column, contrast_matrix[i])
                  for i, column in enumerate(design_matrix.columns)])

#########################################################################
# For this example, let's instead specify one interesting contrast
contrasts = {'active-rest': contrasts['active'] - contrasts['rest']}
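
#########################################################################
# As a usage sketch, assuming the ``fmri_glm`` fitted above: the contrast
# vector can then be turned into a statistical map with ``compute_contrast``
# (a z-score map is requested here).
z_map = fmri_glm.compute_contrast(contrasts['active-rest'],
                                  output_type='z_score')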