def report_flm_fiac():  # pragma: no cover
    data = datasets.func.fetch_fiac_first_level()
    fmri_img = [data['func1'], data['func2']]

    from nilearn.image import mean_img
    mean_img_ = mean_img(fmri_img[0])

    design_files = [data['design_matrix1'], data['design_matrix2']]
    design_matrices = [pd.DataFrame(np.load(df)['X']) for df in design_files]

    fmri_glm = FirstLevelModel(mask_img=data['mask'], minimize_memory=True)
    fmri_glm = fmri_glm.fit(fmri_img, design_matrices=design_matrices)

    n_columns = design_matrices[0].shape[1]

    contrasts = {
        'SStSSp_minus_DStDSp': _pad_vector([1, 0, 0, -1], n_columns),
        'DStDSp_minus_SStSSp': _pad_vector([-1, 0, 0, 1], n_columns),
        'DSt_minus_SSt': _pad_vector([-1, -1, 1, 1], n_columns),
        'DSp_minus_SSp': _pad_vector([-1, 1, -1, 1], n_columns),
        'DSt_minus_SSt_for_DSp': _pad_vector([0, -1, 0, 1], n_columns),
        'DSp_minus_SSp_for_DSt': _pad_vector([0, 0, -1, 1], n_columns),
        'Deactivation': _pad_vector([-1, -1, -1, -1, 4], n_columns),
        'Effects_of_interest': np.eye(n_columns)[:5]
    }
    report = make_glm_report(
        fmri_glm,
        contrasts,
        bg_img=mean_img_,
        height_control='fdr',
    )
    output_filename = 'generated_report_flm_fiac.html'
    output_filepath = os.path.join(REPORTS_DIR, output_filename)
    report.save_as_html(output_filepath)
    report.get_iframe()
Example #2
def test_first_level_model_design_creation():
    # Test processing of FMRI inputs
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 10), )
        mask, FUNCFILE, _ = write_fake_fmri_data_and_design(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # basic test based on basic_paradigm and glover hrf
        t_r = 10.0
        slice_time_ref = 0.
        events = basic_paradigm()
        model = FirstLevelModel(t_r,
                                slice_time_ref,
                                mask_img=mask,
                                drift_model='polynomial',
                                drift_order=3)
        model = model.fit(func_img, events)
        frame1, X1, names1 = check_design_matrix(model.design_matrices_[0])
        # check design computation is identical
        n_scans = get_data(func_img).shape[3]
        start_time = slice_time_ref * t_r
        end_time = (n_scans - 1 + slice_time_ref) * t_r
        frame_times = np.linspace(start_time, end_time, n_scans)
        design = make_first_level_design_matrix(frame_times,
                                                events,
                                                drift_model='polynomial',
                                                drift_order=3)
        frame2, X2, names2 = check_design_matrix(design)
        assert_array_equal(frame1, frame2)
        assert_array_equal(X1, X2)
        assert_array_equal(names1, names2)
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del FUNCFILE, mask, model, func_img
Example #3
def test_first_level_models_with_no_signal_scaling():
    """
    test to ensure that the FirstLevelModel works correctly with a
    signal_scaling==False. In particular, that derived theta are correct for a
    constant design matrix with a single valued fmri image
    """
    shapes, rk = [(3, 1, 1, 2)], 1
    fmri_data = list()
    design_matrices = list()
    design_matrices.append(
        pd.DataFrame(np.ones((shapes[0][-1], rk)),
                     columns=list('abcdefghijklmnopqrstuvwxyz')[:rk]))
    first_level_model = FirstLevelModel(mask_img=False,
                                        noise_model='ols',
                                        signal_scaling=False)
    fmri_data.append(Nifti1Image(np.zeros((1, 1, 1, 2)) + 6, np.eye(4)))

    first_level_model.fit(fmri_data, design_matrices=design_matrices)
    # trivial test of signal_scaling value
    assert first_level_model.signal_scaling is False
    # assert that our design matrix has one constant
    assert first_level_model.design_matrices_[0].equals(
        pd.DataFrame([1.0, 1.0], columns=['a']))
    # assert that we only have one theta as there is only one voxel in our image
    assert first_level_model.results_[0][0].theta.shape == (1, 1)
    # assert that the theta is equal to the one voxel value
    assert_almost_equal(first_level_model.results_[0][0].theta[0, 0], 6.0, 2)
Example #4
def test_first_level_model_predictions_r_square():
    shapes, rk = [(10, 10, 10, 25)], 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes, rk)

    for i in range(len(design_matrices)):
        design_matrices[i].iloc[:, 0] = 1

    model = FirstLevelModel(mask_img=mask,
                            signal_scaling=False,
                            minimize_memory=False,
                            noise_model='ols')
    model.fit(fmri_data, design_matrices=design_matrices)

    pred = model.predicted[0]
    data = fmri_data[0]
    r_square_3d = model.r_square[0]

    y_predicted = model.masker_.transform(pred)
    y_measured = model.masker_.transform(data)

    assert_almost_equal(np.mean(y_predicted - y_measured), 0)

    r_square_2d = model.masker_.transform(r_square_3d)
    assert_array_less(0., r_square_2d)
Example #5
def test_explicit_fixed_effects():
    """ tests the fixed effects performed manually/explicitly"""
    with InTemporaryDirectory():
        shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 16)), 3
        mask, fmri_data, design_matrices = write_fake_fmri_data_and_design(
            shapes, rk)
        contrast = np.eye(rk)[1]
        # session 1
        multi_session_model = FirstLevelModel(mask_img=mask).fit(
            fmri_data[0], design_matrices=design_matrices[:1])
        dic1 = multi_session_model.compute_contrast(contrast,
                                                    output_type='all')

        # session 2
        multi_session_model.fit(fmri_data[1],
                                design_matrices=design_matrices[1:])
        dic2 = multi_session_model.compute_contrast(contrast,
                                                    output_type='all')

        # fixed effects model
        multi_session_model.fit(fmri_data, design_matrices=design_matrices)
        fixed_fx_dic = multi_session_model.compute_contrast(contrast,
                                                            output_type='all')

        # manual version
        contrasts = [dic1['effect_size'], dic2['effect_size']]
        variance = [dic1['effect_variance'], dic2['effect_variance']]
        (
            fixed_fx_contrast,
            fixed_fx_variance,
            fixed_fx_stat,
        ) = compute_fixed_effects(contrasts, variance, mask)

        assert_almost_equal(fixed_fx_contrast.get_data(),
                            fixed_fx_dic['effect_size'].get_data())
        assert_almost_equal(fixed_fx_variance.get_data(),
                            fixed_fx_dic['effect_variance'].get_data())
        assert_almost_equal(fixed_fx_stat.get_data(),
                            fixed_fx_dic['stat'].get_data())

        # test without mask variable
        (
            fixed_fx_contrast,
            fixed_fx_variance,
            fixed_fx_stat,
        ) = compute_fixed_effects(contrasts, variance)
        assert_almost_equal(fixed_fx_contrast.get_data(),
                            fixed_fx_dic['effect_size'].get_data())
        assert_almost_equal(fixed_fx_variance.get_data(),
                            fixed_fx_dic['effect_variance'].get_data())
        assert_almost_equal(fixed_fx_stat.get_data(),
                            fixed_fx_dic['stat'].get_data())

        # ensure that using unbalanced effects size and variance images
        # raises an error
        with pytest.raises(ValueError):
            compute_fixed_effects(contrasts * 2, variance, mask)
        del mask, multi_session_model
Example #6
def test_high_level_glm_with_paths():
    shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 14)), 3
    with InTemporaryDirectory():
        mask_file, fmri_files, design_files = write_fake_fmri_data_and_design(
            shapes, rk)
        multi_session_model = FirstLevelModel(mask_img=None).fit(
            fmri_files, design_matrices=design_files)
        z_image = multi_session_model.compute_contrast(np.eye(rk)[1])
        assert_array_equal(z_image.affine, load(mask_file).affine)
        assert get_data(z_image).std() < 3.
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del z_image, fmri_files, multi_session_model
Example #7
def test_high_level_glm_one_session():
    shapes, rk = [(7, 8, 9, 15)], 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes, rk)

    single_session_model = FirstLevelModel(mask_img=None).fit(
        fmri_data[0], design_matrices=design_matrices[0])
    assert isinstance(single_session_model.masker_.mask_img_, Nifti1Image)

    single_session_model = FirstLevelModel(mask_img=mask).fit(
        fmri_data[0], design_matrices=design_matrices[0])
    z1 = single_session_model.compute_contrast(np.eye(rk)[:1])
    assert isinstance(z1, Nifti1Image)
Example #8
def test_first_level_model_residuals():
    shapes, rk = [(10, 10, 10, 100)], 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes, rk)

    for i in range(len(design_matrices)):
        design_matrices[i].iloc[:, 0] = 1

    model = FirstLevelModel(mask_img=mask,
                            minimize_memory=False,
                            noise_model='ols')
    model.fit(fmri_data, design_matrices=design_matrices)

    residuals = model.residuals[0]
    mean_residuals = model.masker_.transform(residuals).mean(0)
    assert_array_almost_equal(mean_residuals, 0)
Example #9
def test_flm_reporting():
    with InTemporaryDirectory():
        shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 16)), 3
        mask, fmri_data, design_matrices = write_fake_fmri_data_and_design(shapes, rk)
        flm = FirstLevelModel(mask_img=mask).fit(
            fmri_data, design_matrices=design_matrices)
        contrast = np.eye(3)[1]
        report_flm = glmr.make_glm_report(flm, contrast, plot_type='glass',
                                          height_control=None,
                                          min_distance=15,
                                          alpha=0.001, threshold=2.78,
                                          )
        '''
        Catch & raise UnicodeEncodeError in HTMLDocument.get_iframe().

        Python 2's limited unicode support causes HTMLDocument.get_iframe()
        to mishandle certain unicode characters, like the Greek alpha symbol,
        and raise this error.
        Calling HTMLDocument.get_iframe() here causes the tests to fail on
        Python 2, alerting us if such a situation arises due to future
        modifications.
        '''
        report_iframe = report_flm.get_iframe()
        # So flake8 doesn't complain about not using variable (F841)
        report_iframe
        del mask, flm, fmri_data
def report_flm_adhd_dmn():  # pragma: no cover
    t_r = 2.
    slice_time_ref = 0.
    n_scans = 176
    pcc_coords = (0, -53, 26)
    adhd_dataset = nilearn.datasets.fetch_adhd(n_subjects=1)
    seed_masker = NiftiSpheresMasker([pcc_coords],
                                     radius=10,
                                     detrend=True,
                                     standardize=True,
                                     low_pass=0.1,
                                     high_pass=0.01,
                                     t_r=2.,
                                     memory='nilearn_cache',
                                     memory_level=1,
                                     verbose=0)
    seed_time_series = seed_masker.fit_transform(adhd_dataset.func[0])
    frametimes = np.linspace(0, (n_scans - 1) * t_r, n_scans)
    design_matrix = make_first_level_design_matrix(frametimes,
                                                   hrf_model='spm',
                                                   add_regs=seed_time_series,
                                                   add_reg_names=["pcc_seed"])
    dmn_contrast = np.array([1] + [0] * (design_matrix.shape[1] - 1))
    contrasts = {'seed_based_glm': dmn_contrast}

    first_level_model = FirstLevelModel(t_r=t_r, slice_time_ref=slice_time_ref)
    first_level_model = first_level_model.fit(run_imgs=adhd_dataset.func[0],
                                              design_matrices=design_matrix)

    report = make_glm_report(
        first_level_model,
        contrasts=contrasts,
        title='ADHD DMN Report',
        cluster_threshold=15,
        height_control='bonferroni',
        min_distance=8.,
        plot_type='glass',
        report_dims=(1200, 'a'),
    )
    output_filename = 'generated_report_flm_adhd_dmn.html'
    output_filepath = os.path.join(REPORTS_DIR, output_filename)
    report.save_as_html(output_filepath)
    report.get_iframe()
Example #11
def test_make_headings_with_contrasts_none_title_custom():
    model = FirstLevelModel()
    test_input = (None,
                  'Custom Title for report',
                  model,
                  )
    expected_output = ('Custom Title for report',
                       'Custom Title for report',
                       'First Level Model',
                       )
    actual_output = glmr._make_headings(*test_input)
    assert actual_output == expected_output
Example #12
def test_first_level_model_glm_computation():
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 10), )
        mask, FUNCFILE, _ = write_fake_fmri_data_and_design(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # basic test based on basic_paradigm and glover hrf
        t_r = 10.0
        slice_time_ref = 0.
        events = basic_paradigm()
        # Ordinary Least Squares case
        model = FirstLevelModel(t_r,
                                slice_time_ref,
                                mask_img=mask,
                                drift_model='polynomial',
                                drift_order=3,
                                minimize_memory=False)
        model = model.fit(func_img, events)

        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del mask, FUNCFILE, func_img, model
Example #13
def test_high_level_glm_different_design_matrices():
    # test that one can estimate a contrast when design matrices are different
    shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 19)), 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes, rk)

    # add a column to the second design matrix
    design_matrices[1]['new'] = np.ones((19, 1))

    # Fit a glm with two sessions and design matrices
    multi_session_model = FirstLevelModel(mask_img=mask).fit(
        fmri_data, design_matrices=design_matrices)
    z_joint = multi_session_model.compute_contrast(
        [np.eye(rk)[:1], np.eye(rk + 1)[:1]], output_type='effect_size')
    assert z_joint.shape == (7, 8, 7)

    # compare the estimated effects to separately fitted models
    model1 = FirstLevelModel(mask_img=mask).fit(
        fmri_data[0], design_matrices=design_matrices[0])
    z1 = model1.compute_contrast(np.eye(rk)[:1], output_type='effect_size')
    model2 = FirstLevelModel(mask_img=mask).fit(
        fmri_data[1], design_matrices=design_matrices[1])
    z2 = model2.compute_contrast(np.eye(rk + 1)[:1], output_type='effect_size')
    assert_almost_equal(z1.get_data() + z2.get_data(), 2 * z_joint.get_data())
Example #14
def test_high_level_glm_null_contrasts():
    # test that contrast computation is resilient to 0 values.
    shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 19)), 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes, rk)

    multi_session_model = FirstLevelModel(mask_img=None).fit(
        fmri_data, design_matrices=design_matrices)
    single_session_model = FirstLevelModel(mask_img=None).fit(
        fmri_data[0], design_matrices=design_matrices[0])
    z1 = multi_session_model.compute_contrast(
        [np.eye(rk)[:1], np.zeros((1, rk))], output_type='stat')
    z2 = single_session_model.compute_contrast(np.eye(rk)[:1],
                                               output_type='stat')
    np.testing.assert_almost_equal(get_data(z1), get_data(z2))
Example #15
mean_img_ = mean_img(fmri_img[0])

#########################################################################
# The design matrices were pre-computed; we simply put them in a list of DataFrames.
design_files = [data['design_matrix1'], data['design_matrix2']]
import pandas as pd
import numpy as np
design_matrices = [pd.DataFrame(np.load(df)['X']) for df in design_files]

#########################################################################
# GLM estimation
# ----------------------------------
# GLM specification. Note that the mask was provided in the dataset, so we use it.

from nilearn.stats.first_level_model import FirstLevelModel
fmri_glm = FirstLevelModel(mask_img=data['mask'], minimize_memory=True)

#########################################################################
# Let's fit the GLM.
fmri_glm = fmri_glm.fit(fmri_img, design_matrices=design_matrices)

#########################################################################
# Compute fixed effects of the two runs and compute related images.
# For this, we first define the contrasts as we would do for a single session.
n_columns = design_matrices[0].shape[1]


def pad_vector(contrast_, n_columns):
    """A small routine to append zeros in contrast vectors"""
    return np.hstack((contrast_, np.zeros(n_columns - len(contrast_))))
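#########################################################################
# A minimal usage sketch, reusing the 'DSt_minus_SSt' contrast of Example #1;
# this is an illustration and the contrasts actually estimated in the
# original script may differ.
contrast_val = pad_vector([-1, -1, 1, 1], n_columns)
z_map = fmri_glm.compute_contrast(contrast_val, output_type='z_score')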
Example #16
                                 memory_level=1,
                                 verbose=0)
seed_time_series = seed_masker.fit_transform(adhd_dataset.func[0])
frametimes = np.linspace(0, (n_scans - 1) * t_r, n_scans)
design_matrix = make_first_level_design_matrix(frametimes,
                                               hrf_model='spm',
                                               add_regs=seed_time_series,
                                               add_reg_names=["pcc_seed"])
dmn_contrast = np.array([1] + [0] * (design_matrix.shape[1] - 1))
contrasts = {'seed_based_glm': dmn_contrast}

#########################################################################
# Perform first level analysis
# ----------------------------
# Setup and fit GLM.
first_level_model = FirstLevelModel(t_r=t_r, slice_time_ref=slice_time_ref)
first_level_model = first_level_model.fit(run_imgs=adhd_dataset.func[0],
                                          design_matrices=design_matrix)

#########################################################################
# Estimate the contrast.
print('Contrast seed_based_glm computed.')
z_map = first_level_model.compute_contrast(contrasts['seed_based_glm'],
                                           output_type='z_score')

# Saving snapshots of the contrasts
filename = 'dmn_z_map.png'
display = plotting.plot_stat_map(z_map,
                                 threshold=3.0,
                                 title='Seed based GLM',
                                 cut_coords=pcc_coords)
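
# The original example presumably writes this snapshot to disk; nilearn
# display objects expose a savefig method, so a likely completion is:
display.savefig(filename)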
# load events
events = pd.read_table(subject_data['events'])

#########################################################################
# Fit model
# ---------
# Note that `minimize_memory` is set to `False` so that `FirstLevelModel`
# stores the residuals.
# `signal_scaling` is set to False, so we keep the same scaling as the
# original data in `fmri_img`.
from nilearn.stats.first_level_model import FirstLevelModel

fmri_glm = FirstLevelModel(t_r=7,
                           drift_model='cosine',
                           signal_scaling=False,
                           mask_img=mask,
                           minimize_memory=False)

fmri_glm = fmri_glm.fit(fmri_img, events)
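
#########################################################################
# Because `minimize_memory` is False, the run-wise residuals are kept on the
# fitted model. A minimal inspection sketch, mirroring the test in Example #8
# (this access is an illustration, not part of the original snippet):
resid = fmri_glm.residuals[0]
mean_resid = fmri_glm.masker_.transform(resid).mean(0)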

#########################################################################
# Calculate and plot contrast
# ---------------------------
from nilearn import plotting

z_map = fmri_glm.compute_contrast('active - rest')

plotting.plot_stat_map(z_map, bg_img=mean_img, threshold=3.1)

#########################################################################
Example #18
def test_first_level_model_contrast_computation():
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 10), )
        mask, FUNCFILE, _ = write_fake_fmri_data_and_design(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # basic test based on basic_paradigm and glover hrf
        t_r = 10.0
        slice_time_ref = 0.
        events = basic_paradigm()
        # Ordinary Least Squares case
        model = FirstLevelModel(t_r,
                                slice_time_ref,
                                mask_img=mask,
                                drift_model='polynomial',
                                drift_order=3,
                                minimize_memory=False)
        c1, c2, cnull = np.eye(7)[0], np.eye(7)[1], np.zeros(7)
        # asking for contrast before model fit gives error
        with pytest.raises(ValueError):
            model.compute_contrast(c1)
        # fit model
        model = model.fit([func_img, func_img], [events, events])
        # smoke test for different contrasts in fixed effects
        model.compute_contrast([c1, c2])
        # smoke test for same contrast in fixed effects
        model.compute_contrast([c2, c2])
        # smoke test for contrast that will be repeated
        model.compute_contrast(c2)
        model.compute_contrast(c2, 'F')
        model.compute_contrast(c2, 't', 'z_score')
        model.compute_contrast(c2, 't', 'stat')
        model.compute_contrast(c2, 't', 'p_value')
        model.compute_contrast(c2, None, 'effect_size')
        model.compute_contrast(c2, None, 'effect_variance')
        # formula should work (passing variable name directly)
        model.compute_contrast('c0')
        model.compute_contrast('c1')
        model.compute_contrast('c2')
        # smoke test for one null contrast in group
        model.compute_contrast([c2, cnull])
        # only passing null contrasts should give back a value error
        with pytest.raises(ValueError):
            model.compute_contrast(cnull)
        with pytest.raises(ValueError):
            model.compute_contrast([cnull, cnull])
        # passing wrong parameters
        with pytest.raises(ValueError):
            model.compute_contrast([])
        with pytest.raises(ValueError):
            model.compute_contrast([c1, []])
        with pytest.raises(ValueError):
            model.compute_contrast(c1, '', '')
        with pytest.raises(ValueError):
            model.compute_contrast(c1, '', [])
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del func_img, FUNCFILE, model
Example #19
#########################################################################
# The design matrices were pre-computed; we simply put them in a list of DataFrames.
design_files = [data['design_matrix1'], data['design_matrix2']]
import pandas as pd
import numpy as np
design_matrices = [pd.DataFrame(np.load(df)['X']) for df in design_files]

#########################################################################
# GLM estimation
# ----------------------------------
# GLM specification. Note that the mask was provided in the dataset, so we use it.

from nilearn.stats.first_level_model import FirstLevelModel
fmri_glm = FirstLevelModel(mask_img=data['mask'],
                           smoothing_fwhm=5,
                           minimize_memory=True)

#########################################################################
# Compute fixed effects of the two runs and compute related images
# For this, we first define the contrasts as we would do for a single session
n_columns = design_matrices[0].shape[1]
contrast_val = np.hstack(([-1, -1, 1, 1], np.zeros(n_columns - 4)))

#########################################################################
# Statistics for the first session
from nilearn import plotting
cut_coords = [-129, -126, 49]
contrast_id = 'DSt_minus_SSt'

fmri_glm = fmri_glm.fit(fmri_img[0], design_matrices=design_matrices[0])
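
#########################################################################
# A sketch of the session-wise statistics announced above (the plotting style
# mirrors the other examples on this page; the exact options used in the
# original script may differ):
z_map_session1 = fmri_glm.compute_contrast(contrast_val, output_type='z_score')
plotting.plot_stat_map(z_map_session1, threshold=3.0, cut_coords=cut_coords,
                       title='%s, first session' % contrast_id)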
Example #20
def test_fmri_inputs_for_non_parametric_inference():
    # Test processing of FMRI inputs
    with InTemporaryDirectory():
        # prepare fake data
        p, q = 80, 10
        X = np.random.randn(p, q)
        shapes = ((7, 8, 9, 10),)
        mask, FUNCFILE, _ = write_fake_fmri_data_and_design(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        T = func_img.shape[-1]
        des = pd.DataFrame(np.ones((T, 1)), columns=['a'])
        des_fname = 'design.csv'
        des.to_csv(des_fname)

        # prepare correct input first level models
        flm = FirstLevelModel(subject_label='01').fit(FUNCFILE,
                                                      design_matrices=des)
        # prepare correct input dataframe and lists
        shapes = ((7, 8, 9, 1),)
        _, FUNCFILE, _ = write_fake_fmri_data_and_design(shapes)
        FUNCFILE = FUNCFILE[0]

        dfcols = ['subject_label', 'map_name', 'effects_map_path']
        dfrows = [['01', 'a', FUNCFILE], ['02', 'a', FUNCFILE],
                  ['03', 'a', FUNCFILE]]
        niidf = pd.DataFrame(dfrows, columns=dfcols)
        niimgs = [FUNCFILE, FUNCFILE, FUNCFILE]
        niimg_4d = concat_imgs(niimgs)
        confounds = pd.DataFrame([['01', 1], ['02', 2], ['03', 3]],
                                 columns=['subject_label', 'conf1'])
        sdes = pd.DataFrame(X[:3, :3], columns=['intercept', 'b', 'c'])

        # test missing second-level contrast
        # niimgs as input
        with pytest.raises(ValueError):
            non_parametric_inference(niimgs, None, sdes)
        with pytest.raises(ValueError):
            non_parametric_inference(niimgs, confounds, sdes)
        # 4d niimg as input
        with pytest.raises(ValueError):
            non_parametric_inference(niimg_4d, None, sdes)

        # test wrong input errors
        # test first level model
        with pytest.raises(ValueError):
            non_parametric_inference(flm)
        # test list of less than two niimgs
        with pytest.raises(ValueError):
            non_parametric_inference([FUNCFILE])
        # test dataframe
        with pytest.raises(ValueError):
            non_parametric_inference(niidf)
        # test niimgs requirements
        with pytest.raises(ValueError):
            non_parametric_inference(niimgs)
        with pytest.raises(ValueError):
            non_parametric_inference(niimgs + [[]], confounds)
        with pytest.raises(ValueError):
            non_parametric_inference([FUNCFILE])
        # test other objects
        with pytest.raises(ValueError):
            non_parametric_inference('random string object')
        del X, FUNCFILE, func_img
Example #21
# spanning the two conditions.

contrasts = {
    'faces-scrambled':
    basic_contrasts['faces'] - basic_contrasts['scrambled'],
    'scrambled-faces':
    -basic_contrasts['faces'] + basic_contrasts['scrambled'],
    'effects_of_interest':
    np.vstack((basic_contrasts['faces'], basic_contrasts['scrambled']))
}

#########################################################################
# Fit the GLM for the 2 sessions by specifying a FirstLevelModel and then fitting it.
from nilearn.stats.first_level_model import FirstLevelModel
print('Fitting a GLM')
fmri_glm = FirstLevelModel()
fmri_glm = fmri_glm.fit(fmri_img, design_matrices=design_matrices)

#########################################################################
# Now we can compute contrast-related statistical maps (in z-scale), and plot them.
print('Computing contrasts')
from nilearn import plotting

# Iterate on contrasts
for contrast_id, contrast_val in contrasts.items():
    print("\tcontrast id: %s" % contrast_id)
    # compute the contrasts
    z_map = fmri_glm.compute_contrast(contrast_val, output_type='z_score')
    # plot the contrasts as soon as they're generated
    # the display is overlaid on the mean fMRI image
    # a threshold of 3.0 is used; more sophisticated choices are possible
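    # A sketch of the plotting step described above; `mean_img_` (the mean
    # fMRI image used as background) is assumed to have been computed earlier
    # in the original script, as in Example #15:
    plotting.plot_stat_map(z_map, bg_img=mean_img_, threshold=3.0,
                           title=contrast_id)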
Example #22
def test_high_level_glm_with_data():
    with InTemporaryDirectory():
        shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 16)), 3
        mask, fmri_data, design_matrices = write_fake_fmri_data_and_design(
            shapes, rk)
        multi_session_model = FirstLevelModel(mask_img=None).fit(
            fmri_data, design_matrices=design_matrices)
        n_voxels = get_data(multi_session_model.masker_.mask_img_).sum()
        z_image = multi_session_model.compute_contrast(np.eye(rk)[1])
        assert np.sum(get_data(z_image) != 0) == n_voxels
        assert get_data(z_image).std() < 3.
        # with mask
        multi_session_model = FirstLevelModel(mask_img=mask).fit(
            fmri_data, design_matrices=design_matrices)
        z_image = multi_session_model.compute_contrast(np.eye(rk)[:2],
                                                       output_type='z_score')
        p_value = multi_session_model.compute_contrast(np.eye(rk)[:2],
                                                       output_type='p_value')
        stat_image = multi_session_model.compute_contrast(np.eye(rk)[:2],
                                                          output_type='stat')
        effect_image = multi_session_model.compute_contrast(
            np.eye(rk)[:2], output_type='effect_size')
        variance_image = multi_session_model.compute_contrast(
            np.eye(rk)[:2], output_type='effect_variance')
        assert_array_equal(get_data(z_image) == 0., get_data(load(mask)) == 0.)
        assert (get_data(variance_image)[get_data(load(mask)) > 0] >
                .001).all()
        all_images = multi_session_model.compute_contrast(np.eye(rk)[:2],
                                                          output_type='all')
        assert_array_equal(get_data(all_images['z_score']), get_data(z_image))
        assert_array_equal(get_data(all_images['p_value']), get_data(p_value))
        assert_array_equal(get_data(all_images['stat']), get_data(stat_image))
        assert_array_equal(get_data(all_images['effect_size']),
                           get_data(effect_image))
        assert_array_equal(get_data(all_images['effect_variance']),
                           get_data(variance_image))
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del (all_images, design_matrices, effect_image, fmri_data, mask,
             multi_session_model, n_voxels, p_value, rk, shapes, stat_image,
             variance_image, z_image)
Example #23
#########################################################################
# The next solution is to try Finite Impulse Response (FIR) models: we just
# say that the hrf is an arbitrary function that lags behind the
# stimulus onset.  In the present case, given that the number of
# conditions is high, we should use a simple FIR model.
#
# Concretely, we set `hrf_model` to 'fir' and `fir_delays` to [1, 2,
# 3] (scans), corresponding to a 3-step function on the [1 * t_r, 4 *
# t_r] seconds interval.
#

from nilearn.stats.first_level_model import FirstLevelModel
from nilearn.reporting import plot_design_matrix, plot_contrast_matrix

first_level_model = FirstLevelModel(t_r, hrf_model='fir', fir_delays=[1, 2, 3])
first_level_model = first_level_model.fit(fmri_img, events=events)
design_matrix = first_level_model.design_matrices_[0]
plot_design_matrix(design_matrix)

#########################################################################
# We have to adapt the contrast specification: we characterize the BOLD
# response by the sum across the three time lags. It's a bit hairy, sorry,
# but this is the price to pay for flexibility (a sketch of the summation
# follows the contrast dictionary below).

import numpy as np

contrast_matrix = np.eye(design_matrix.shape[1])
contrasts = dict([(column, contrast_matrix[i])
                  for i, column in enumerate(design_matrix.columns)])
conditions = events.trial_type.unique()
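
#########################################################################
# A sketch of the summation announced above: for each condition, sum the
# contrasts of its FIR-lag regressors. The column-name matching rule is an
# assumption (nilearn names FIR columns with the condition label followed by
# a delay suffix); adapt it if your naming differs.
for condition in conditions:
    lag_contrasts = [contrast_matrix[i]
                     for i, name in enumerate(design_matrix.columns)
                     if name.startswith(condition)]
    contrasts[condition] = np.sum(lag_contrasts, axis=0)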
Example #24
def test_fmri_inputs():
    # Test processing of FMRI inputs
    with InTemporaryDirectory():
        # prepare fake data
        p, q = 80, 10
        X = np.random.randn(p, q)
        shapes = ((7, 8, 9, 10),)
        mask, FUNCFILE, _ = write_fake_fmri_data_and_design(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        T = func_img.shape[-1]
        des = pd.DataFrame(np.ones((T, 1)), columns=['a'])
        des_fname = 'design.csv'
        des.to_csv(des_fname)

        # prepare correct input first level models
        flm = FirstLevelModel(subject_label='01').fit(FUNCFILE,
                                                      design_matrices=des)
        flms = [flm, flm, flm]
        # prepare correct input dataframe and lists
        shapes = ((7, 8, 9, 1),)
        _, FUNCFILE, _ = write_fake_fmri_data_and_design(shapes)
        FUNCFILE = FUNCFILE[0]

        dfcols = ['subject_label', 'map_name', 'effects_map_path']
        dfrows = [['01', 'a', FUNCFILE], ['02', 'a', FUNCFILE],
                  ['03', 'a', FUNCFILE]]
        niidf = pd.DataFrame(dfrows, columns=dfcols)
        niimgs = [FUNCFILE, FUNCFILE, FUNCFILE]
        niimg_4d = concat_imgs(niimgs)
        confounds = pd.DataFrame([['01', 1], ['02', 2], ['03', 3]],
                                 columns=['subject_label', 'conf1'])
        sdes = pd.DataFrame(X[:3, :3], columns=['intercept', 'b', 'c'])

        # smoke tests with correct input
        # First level models as input
        SecondLevelModel(mask_img=mask).fit(flms)
        SecondLevelModel().fit(flms)
        # Note : the following one creates a singular design matrix
        SecondLevelModel().fit(flms, confounds)
        SecondLevelModel().fit(flms, None, sdes)
        # dataframes as input
        SecondLevelModel().fit(niidf)
        SecondLevelModel().fit(niidf, confounds)
        SecondLevelModel().fit(niidf, confounds, sdes)
        SecondLevelModel().fit(niidf, None, sdes)
        # niimgs as input
        SecondLevelModel().fit(niimgs, None, sdes)
        # 4d niimg as input
        SecondLevelModel().fit(niimg_4d, None, sdes)

        # test wrong input errors
        # test first level model requirements
        with pytest.raises(ValueError):
            SecondLevelModel().fit(flm)
        with pytest.raises(ValueError):
            SecondLevelModel().fit([flm])
        # test dataframe requirements
        with pytest.raises(ValueError):
            SecondLevelModel().fit(niidf['subject_label'])
        # test niimgs requirements
        with pytest.raises(ValueError):
            SecondLevelModel().fit(niimgs)
        with pytest.raises(ValueError):
            SecondLevelModel().fit(niimgs + [[]],
                                   confounds)
        # test first_level_conditions, confounds, and design
        with pytest.raises(ValueError):
            SecondLevelModel().fit(flms, ['', []])
        with pytest.raises(ValueError):
            SecondLevelModel().fit(flms, [])
        with pytest.raises(ValueError):
            SecondLevelModel().fit(flms, confounds['conf1'])
        with pytest.raises(ValueError):
            SecondLevelModel().fit(flms, None, [])
##############################################################################
# Instantiate and run FirstLevelModel
# -----------------------------------
from nilearn.image import index_img
from nilearn.stats.first_level_model import FirstLevelModel

# we are going to generate a list of z-maps together with their session and condition index
z_maps = []
condition_idx = []
session_idx = []

# Instantiate the glm
glm = FirstLevelModel(t_r=TR,
                      mask_img=haxby_dataset.mask,
                      high_pass=.008,
                      smoothing_fwhm=4,
                      memory='nilearn_cache')

##############################################################################
# Run the glm on data from each session
# -------------------------------------
for session in unique_sessions:
    # grab the fmri data for that particular session
    fmri_session = index_img(func_filename, sessions == session)

    # fit the glm
    glm.fit(fmri_session, events=events[session])

    # set up contrasts: one per condition
    conditions = events[session].trial_type.unique()
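
    # A likely continuation of this loop (hedged sketch): compute a z-map per
    # condition and record which session and condition it came from, using the
    # lists created above. The exact bookkeeping in the original script may
    # differ.
    for condition_ in conditions:
        z_maps.append(glm.compute_contrast(condition_))
        condition_idx.append(condition_)
        session_idx.append(session)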
Example #26
def test_fmri_inputs():
    # Test processing of FMRI inputs
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 10), )
        mask, FUNCFILE, _ = write_fake_fmri_data_and_design(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        T = func_img.shape[-1]
        conf = pd.DataFrame([0, 0])
        des = pd.DataFrame(np.ones((T, 1)), columns=[''])
        des_fname = 'design.csv'
        des.to_csv(des_fname)
        for fi in func_img, FUNCFILE:
            for d in des, des_fname:
                FirstLevelModel().fit(fi, design_matrices=d)
                FirstLevelModel(mask_img=None).fit([fi], design_matrices=d)
                FirstLevelModel(mask_img=mask).fit(fi, design_matrices=[d])
                FirstLevelModel(mask_img=mask).fit([fi], design_matrices=[d])
                FirstLevelModel(mask_img=mask).fit([fi, fi],
                                                   design_matrices=[d, d])
                FirstLevelModel(mask_img=None).fit((fi, fi),
                                                   design_matrices=(d, d))
                with pytest.raises(ValueError):
                    FirstLevelModel(mask_img=None).fit([fi, fi], d)
                with pytest.raises(ValueError):
                    FirstLevelModel(mask_img=None).fit(fi, [d, d])
                # At least events (a paradigm) or a design matrix has to be given
                with pytest.raises(ValueError):
                    FirstLevelModel(mask_img=None).fit(fi)
                # If paradigms are given, then both t_r and slice_time_ref are
                # required
                with pytest.raises(ValueError):
                    FirstLevelModel(mask_img=None).fit(fi, d)
                with pytest.raises(ValueError):
                    FirstLevelModel(mask_img=None, t_r=1.0).fit(fi, d)
                with pytest.raises(ValueError):
                    FirstLevelModel(mask_img=None,
                                    slice_time_ref=0.).fit(fi, d)
            # confounds rows do not match n_scans
            with pytest.raises(ValueError):
                FirstLevelModel(mask_img=None).fit(fi, d, conf)
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del fi, func_img, mask, d, des, FUNCFILE, _
# It is now time to create and estimate a ``FirstLevelModel`` object that will
# generate the *design matrix* using the information provided by the
# ``events`` object.

from nilearn.stats.first_level_model import FirstLevelModel

###############################################################################
# Parameters of the first-level model
#
# * t_r=7(s) is the time of repetition of acquisitions
# * noise_model='ar1' specifies the noise covariance model: a lag-1 dependence
# * standardize=False means that we do not want to rescale the time series to mean 0, variance 1
# * hrf_model='spm' means that we rely on the SPM "canonical hrf" model (without time or dispersion derivatives)
# * drift_model='cosine' means that we model the signal drifts as slow oscillating time functions
# * high_pass=0.01(Hz) defines the cutoff frequency (inverse of the time period).
fmri_glm = FirstLevelModel(t_r=7,
                           noise_model='ar1',
                           standardize=False,
                           hrf_model='spm',
                           drift_model='cosine',
                           high_pass=.01)

###############################################################################
# Now that we have specified the model, we can run it on the fMRI image
fmri_glm = fmri_glm.fit(fmri_img, events)

###############################################################################
# One can inspect the design matrix (rows represent time, and
# columns contain the predictors).
design_matrix = fmri_glm.design_matrices_[0]
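
###############################################################################
# One way to visualize it; `plot_design_matrix` is used the same way in the
# later examples on this page (this plotting step is an illustration, not
# part of the original snippet).
from nilearn.reporting import plot_design_matrix
import matplotlib.pyplot as plt
plot_design_matrix(design_matrix)
plt.show()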

###############################################################################
# Formally, we have taken the first design matrix, because the model is
# implicitly meant to handle multiple runs.
#

t_r = 2.4
events_file = data['events']
import pandas as pd
events = pd.read_table(events_file)

###############################################################################
# Running a basic model
# ---------------------
#
# First we specify a linear model.
# The .fit() method of FirstLevelModel creates the design matrix and the beta
# maps.
#
from nilearn.stats.first_level_model import FirstLevelModel
first_level_model = FirstLevelModel(t_r)
first_level_model = first_level_model.fit(fmri_img, events=events)
design_matrix = first_level_model.design_matrices_[0]

#########################################################################
# Let us take a look at the design matrix: it has 10 main columns corresponding to 10 experimental conditions, followed by 3 columns describing low-frequency signals (drifts) and a constant regressor.
from nilearn.reporting import plot_design_matrix
plot_design_matrix(design_matrix)
import matplotlib.pyplot as plt
plt.show()

#########################################################################
# Specification of the contrasts.
#
# For this, let's create a function that, given the design matrix,
# generates the corresponding contrasts.  This will be useful to