def test_first_level_model_predictions_r_square():
    shapes, rk = [(10, 10, 10, 25)], 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes, rk)

    for i in range(len(design_matrices)):
        design_matrices[i].iloc[:, 0] = 1

    model = FirstLevelModel(mask_img=mask,
                            signal_scaling=False,
                            minimize_memory=False,
                            noise_model='ols')
    model.fit(fmri_data, design_matrices=design_matrices)

    pred = model.predicted[0]
    data = fmri_data[0]
    r_square_3d = model.r_square[0]

    y_predicted = model.masker_.transform(pred)
    y_measured = model.masker_.transform(data)

    assert_almost_equal(np.mean(y_predicted - y_measured), 0)

    r_square_2d = model.masker_.transform(r_square_3d)
    assert_array_less(0., r_square_2d)
def test_first_level_models_with_no_signal_scaling():
    """
    test to ensure that the FirstLevelModel works correctly with a
    signal_scaling==False. In particular, that derived theta are correct for a
    constant design matrix with a single valued fmri image
    """
    shapes, rk = [(3, 1, 1, 2)], 1
    fmri_data = list()
    design_matrices = list()
    design_matrices.append(
        pd.DataFrame(np.ones((shapes[0][-1], rk)),
                     columns=list('abcdefghijklmnopqrstuvwxyz')[:rk]))
    first_level_model = FirstLevelModel(mask_img=False,
                                        noise_model='ols',
                                        signal_scaling=False)
    fmri_data.append(Nifti1Image(np.zeros((1, 1, 1, 2)) + 6, np.eye(4)))

    first_level_model.fit(fmri_data, design_matrices=design_matrices)
    # trivial test of signal_scaling value
    assert first_level_model.signal_scaling is False
    # assert that our design matrix has one constant
    assert first_level_model.design_matrices_[0].equals(
        pd.DataFrame([1.0, 1.0], columns=['a']))
    # assert that we only have one theta as there is only one voxel in our image
    assert first_level_model.results_[0][0].theta.shape == (1, 1)
    # assert that the theta is equal to the one voxel value
    assert_almost_equal(first_level_model.results_[0][0].theta[0, 0], 6.0, 2)
def test_explicit_fixed_effects():
    """ tests the fixed effects performed manually/explicitly"""
    with InTemporaryDirectory():
        shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 16)), 3
        mask, fmri_data, design_matrices = write_fake_fmri_data_and_design(
            shapes, rk)
        contrast = np.eye(rk)[1]
        # session 1
        multi_session_model = FirstLevelModel(mask_img=mask).fit(
            fmri_data[0], design_matrices=design_matrices[:1])
        dic1 = multi_session_model.compute_contrast(contrast,
                                                    output_type='all')

        # session 2
        multi_session_model.fit(fmri_data[1],
                                design_matrices=design_matrices[1:])
        dic2 = multi_session_model.compute_contrast(contrast,
                                                    output_type='all')

        # fixed effects model
        multi_session_model.fit(fmri_data, design_matrices=design_matrices)
        fixed_fx_dic = multi_session_model.compute_contrast(contrast,
                                                            output_type='all')

        # manual version
        contrasts = [dic1['effect_size'], dic2['effect_size']]
        variance = [dic1['effect_variance'], dic2['effect_variance']]
        (
            fixed_fx_contrast,
            fixed_fx_variance,
            fixed_fx_stat,
        ) = compute_fixed_effects(contrasts, variance, mask)

        assert_almost_equal(fixed_fx_contrast.get_data(),
                            fixed_fx_dic['effect_size'].get_data())
        assert_almost_equal(fixed_fx_variance.get_data(),
                            fixed_fx_dic['effect_variance'].get_data())
        assert_almost_equal(fixed_fx_stat.get_data(),
                            fixed_fx_dic['stat'].get_data())

        # test without mask variable
        (
            fixed_fx_contrast,
            fixed_fx_variance,
            fixed_fx_stat,
        ) = compute_fixed_effects(contrasts, variance)
        assert_almost_equal(fixed_fx_contrast.get_data(),
                            fixed_fx_dic['effect_size'].get_data())
        assert_almost_equal(fixed_fx_variance.get_data(),
                            fixed_fx_dic['effect_variance'].get_data())
        assert_almost_equal(fixed_fx_stat.get_data(),
                            fixed_fx_dic['stat'].get_data())

        # ensure that using unbalanced effects size and variance images
        # raises an error
        with pytest.raises(ValueError):
            compute_fixed_effects(contrasts * 2, variance, mask)
        del mask, multi_session_model
def test_first_level_model_residuals():
    shapes, rk = [(10, 10, 10, 100)], 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes, rk)

    for i in range(len(design_matrices)):
        design_matrices[i].iloc[:, 0] = 1

    model = FirstLevelModel(mask_img=mask,
                            minimize_memory=False,
                            noise_model='ols')
    model.fit(fmri_data, design_matrices=design_matrices)

    residuals = model.residuals[0]
    mean_residuals = model.masker_.transform(residuals).mean(0)
    assert_array_almost_equal(mean_residuals, 0)
def test_first_level_model_design_creation():
    # Test processing of FMRI inputs
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 10), )
        mask, FUNCFILE, _ = write_fake_fmri_data_and_design(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # basic test based on basic_paradigm and glover hrf
        t_r = 10.0
        slice_time_ref = 0.
        events = basic_paradigm()
        model = FirstLevelModel(t_r,
                                slice_time_ref,
                                mask_img=mask,
                                drift_model='polynomial',
                                drift_order=3)
        model = model.fit(func_img, events)
        frame1, X1, names1 = check_design_matrix(model.design_matrices_[0])
        # check design computation is identical
        n_scans = get_data(func_img).shape[3]
        start_time = slice_time_ref * t_r
        end_time = (n_scans - 1 + slice_time_ref) * t_r
        frame_times = np.linspace(start_time, end_time, n_scans)
        design = make_first_level_design_matrix(frame_times,
                                                events,
                                                drift_model='polynomial',
                                                drift_order=3)
        frame2, X2, names2 = check_design_matrix(design)
        assert_array_equal(frame1, frame2)
        assert_array_equal(X1, X2)
        assert_array_equal(names1, names2)
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del FUNCFILE, mask, model, func_img
def report_flm_fiac():  # pragma: no cover
    data = datasets.func.fetch_fiac_first_level()
    fmri_img = [data['func1'], data['func2']]

    from nilearn.image import mean_img
    mean_img_ = mean_img(fmri_img[0])

    design_files = [data['design_matrix1'], data['design_matrix2']]
    design_matrices = [pd.DataFrame(np.load(df)['X']) for df in design_files]

    fmri_glm = FirstLevelModel(mask_img=data['mask'], minimize_memory=True)
    fmri_glm = fmri_glm.fit(fmri_img, design_matrices=design_matrices)

    n_columns = design_matrices[0].shape[1]

    contrasts = {
        'SStSSp_minus_DStDSp': _pad_vector([1, 0, 0, -1], n_columns),
        'DStDSp_minus_SStSSp': _pad_vector([-1, 0, 0, 1], n_columns),
        'DSt_minus_SSt': _pad_vector([-1, -1, 1, 1], n_columns),
        'DSp_minus_SSp': _pad_vector([-1, 1, -1, 1], n_columns),
        'DSt_minus_SSt_for_DSp': _pad_vector([0, -1, 0, 1], n_columns),
        'DSp_minus_SSp_for_DSt': _pad_vector([0, 0, -1, 1], n_columns),
        'Deactivation': _pad_vector([-1, -1, -1, -1, 4], n_columns),
        'Effects_of_interest': np.eye(n_columns)[:5]
    }
    report = make_glm_report(
        fmri_glm,
        contrasts,
        bg_img=mean_img_,
        height_control='fdr',
    )
    output_filename = 'generated_report_flm_fiac.html'
    output_filepath = os.path.join(REPORTS_DIR, output_filename)
    report.save_as_html(output_filepath)
    report.get_iframe()
def test_first_level_model_contrast_computation():
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 10), )
        mask, FUNCFILE, _ = write_fake_fmri_data_and_design(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # basic test based on basic_paradigm and glover hrf
        t_r = 10.0
        slice_time_ref = 0.
        events = basic_paradigm()
        # Ordinary Least Squares case
        model = FirstLevelModel(t_r,
                                slice_time_ref,
                                mask_img=mask,
                                drift_model='polynomial',
                                drift_order=3,
                                minimize_memory=False)
        c1, c2, cnull = np.eye(7)[0], np.eye(7)[1], np.zeros(7)
        # asking for contrast before model fit gives error
        with pytest.raises(ValueError):
            model.compute_contrast(c1)
        # fit model
        model = model.fit([func_img, func_img], [events, events])
        # smoke test for different contrasts in fixed effects
        model.compute_contrast([c1, c2])
        # smoke test for same contrast in fixed effects
        model.compute_contrast([c2, c2])
        # smoke test for contrast that will be repeated
        model.compute_contrast(c2)
        model.compute_contrast(c2, 'F')
        model.compute_contrast(c2, 't', 'z_score')
        model.compute_contrast(c2, 't', 'stat')
        model.compute_contrast(c2, 't', 'p_value')
        model.compute_contrast(c2, None, 'effect_size')
        model.compute_contrast(c2, None, 'effect_variance')
        # formula should work (passing the variable name directly)
        model.compute_contrast('c0')
        model.compute_contrast('c1')
        model.compute_contrast('c2')
        # smoke test for one null contrast in group
        model.compute_contrast([c2, cnull])
        # only passing null contrasts should give back a value error
        with pytest.raises(ValueError):
            model.compute_contrast(cnull)
        with pytest.raises(ValueError):
            model.compute_contrast([cnull, cnull])
        # passing wrong parameters
        with pytest.raises(ValueError):
            model.compute_contrast([])
        with pytest.raises(ValueError):
            model.compute_contrast([c1, []])
        with pytest.raises(ValueError):
            model.compute_contrast(c1, '', '')
        with pytest.raises(ValueError):
            model.compute_contrast(c1, '', [])
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del func_img, FUNCFILE, model
def test_first_level_glm_computation_with_memory_caching():
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 10), )
        mask, FUNCFILE, _ = write_fake_fmri_data_and_design(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # initialize FirstLevelModel with memory option enabled
        t_r = 10.0
        slice_time_ref = 0.
        events = basic_paradigm()
        # Ordinary Least Squares case
        model = FirstLevelModel(t_r,
                                slice_time_ref,
                                mask_img=mask,
                                drift_model='polynomial',
                                drift_order=3,
                                memory='nilearn_cache',
                                memory_level=1,
                                minimize_memory=False)
        model.fit(func_img, events)
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del mask, func_img, FUNCFILE, model
def report_flm_adhd_dmn():  # pragma: no cover
    t_r = 2.
    slice_time_ref = 0.
    n_scans = 176
    pcc_coords = (0, -53, 26)
    adhd_dataset = nilearn.datasets.fetch_adhd(n_subjects=1)
    seed_masker = NiftiSpheresMasker([pcc_coords],
                                     radius=10,
                                     detrend=True,
                                     standardize=True,
                                     low_pass=0.1,
                                     high_pass=0.01,
                                     t_r=2.,
                                     memory='nilearn_cache',
                                     memory_level=1,
                                     verbose=0)
    seed_time_series = seed_masker.fit_transform(adhd_dataset.func[0])
    frametimes = np.linspace(0, (n_scans - 1) * t_r, n_scans)
    design_matrix = make_first_level_design_matrix(frametimes,
                                                   hrf_model='spm',
                                                   add_regs=seed_time_series,
                                                   add_reg_names=["pcc_seed"])
    dmn_contrast = np.array([1] + [0] * (design_matrix.shape[1] - 1))
    contrasts = {'seed_based_glm': dmn_contrast}

    first_level_model = FirstLevelModel(t_r=t_r, slice_time_ref=slice_time_ref)
    first_level_model = first_level_model.fit(run_imgs=adhd_dataset.func[0],
                                              design_matrices=design_matrix)

    report = make_glm_report(
        first_level_model,
        contrasts=contrasts,
        title='ADHD DMN Report',
        cluster_threshold=15,
        height_control='bonferroni',
        min_distance=8.,
        plot_type='glass',
        report_dims=(1200, 'a'),
    )
    output_filename = 'generated_report_flm_adhd_dmn.html'
    output_filepath = os.path.join(REPORTS_DIR, output_filename)
    report.save_as_html(output_filepath)
    report.get_iframe()
# * t_r=7 (s) is the repetition time of the acquisitions
# * noise_model='ar1' specifies the noise covariance model: a lag-1 dependence
# * standardize=False means that we do not want to rescale the time series to mean 0, variance 1
# * hrf_model='spm' means that we rely on the SPM "canonical hrf" model (without time or dispersion derivatives)
# * drift_model='cosine' means that we model the signal drifts as slowly oscillating time functions
# * high_pass=0.01 (Hz) defines the cutoff frequency (inverse of the time period).
fmri_glm = FirstLevelModel(t_r=7,
                           noise_model='ar1',
                           standardize=False,
                           hrf_model='spm',
                           drift_model='cosine',
                           high_pass=.01)

###############################################################################
# Now that we have specified the model, we can run it on the fMRI image
fmri_glm = fmri_glm.fit(fmri_img, events)

###############################################################################
# One can inspect the design matrix (rows represent time, and
# columns contain the predictors).
design_matrix = fmri_glm.design_matrices_[0]

###############################################################################
# Formally, we have taken the first design matrix, because the model is
# implicitly meant to handle multiple runs.
from nilearn.reporting import plot_design_matrix
plot_design_matrix(design_matrix)
import matplotlib.pyplot as plt
plt.show()
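
###############################################################################
# To make the multi-run behaviour concrete (an illustrative aside, not part of
# the original tutorial): when lists of run images and event files are passed
# to ``fit``, one design matrix is built per run. Here we simply reuse the
# same run twice.
fmri_glm_two_runs = FirstLevelModel(t_r=7,
                                    noise_model='ar1',
                                    standardize=False,
                                    hrf_model='spm',
                                    drift_model='cosine',
                                    high_pass=.01)
fmri_glm_two_runs = fmri_glm_two_runs.fit([fmri_img, fmri_img],
                                          [events, events])
print('Number of design matrices: %d'
      % len(fmri_glm_two_runs.design_matrices_))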

###############################################################################
t_r = 2.4
events_file = data['events']
import pandas as pd
events = pd.read_table(events_file)

###############################################################################
# Running a basic model
# ---------------------
#
# First we specify a linear model.
# The .fit() method of FirstLevelModel creates the design matrix and the
# beta maps.
#
from nilearn.stats.first_level_model import FirstLevelModel
first_level_model = FirstLevelModel(t_r)
first_level_model = first_level_model.fit(fmri_img, events=events)
design_matrix = first_level_model.design_matrices_[0]

#########################################################################
# Let us take a look at the design matrix: it has 10 main columns
# corresponding to 10 experimental conditions, followed by 3 columns
# describing low-frequency signals (drifts) and a constant regressor.
from nilearn.reporting import plot_design_matrix
plot_design_matrix(design_matrix)
import matplotlib.pyplot as plt
plt.show()

#########################################################################
# Specification of the contrasts.
#
# For this, let's create a function that, given the design matrix,
# generates the corresponding contrasts.  This will be useful to
# repeat contrast specification when we change the design matrix.
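#
# A minimal sketch of such a function (the helper name below is illustrative,
# not the original example's code): build one elementary contrast per
# design-matrix column.
import numpy as np


def basic_contrasts_from_design(design_matrix):
    """Return a dict mapping each design-matrix column to its contrast."""
    contrast_matrix = np.eye(design_matrix.shape[1])
    return dict((column, contrast_matrix[i])
                for i, column in enumerate(design_matrix.columns))


contrasts = basic_contrasts_from_design(design_matrix)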
#########################################################################
# The next solution is to try Finite Impulse Response (FIR) models: we just
# say that the hrf is an arbitrary function that lags behind the
# stimulus onset.  In the present case, given that the number of
# conditions is high, we should use a simple FIR model.
#
# Concretely, we set `hrf_model` to 'fir' and `fir_delays` to [1, 2,
# 3] (scans), corresponding to a 3-step function on the [1 * t_r, 4 *
# t_r] seconds interval.
#

from nilearn.stats.first_level_model import FirstLevelModel
from nilearn.reporting import plot_design_matrix, plot_contrast_matrix

first_level_model = FirstLevelModel(t_r, hrf_model='fir', fir_delays=[1, 2, 3])
first_level_model = first_level_model.fit(fmri_img, events=events)
design_matrix = first_level_model.design_matrices_[0]
plot_design_matrix(design_matrix)

#########################################################################
# We have to adapt contrast specification. We characterize the BOLD
# response by the sum across the three time lags. It's a bit hairy,
# sorry, but this is the price to pay for flexibility...

import numpy as np

contrast_matrix = np.eye(design_matrix.shape[1])
contrasts = dict([(column, contrast_matrix[i])
                  for i, column in enumerate(design_matrix.columns)])
conditions = events.trial_type.unique()
for condition in conditions:
    # assumed reconstruction of the truncated loop body: sum the elementary
    # contrasts of the FIR regressors belonging to each condition, i.e. the
    # columns whose names start with the condition label
    contrasts[condition] = np.sum(
        [contrasts[name] for name in design_matrix.columns
         if name.startswith(condition)], 0)
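
#########################################################################
# As an optional check (an illustrative aside, not part of the original
# snippet), the plot_contrast_matrix helper imported above can show how one
# of the summed contrasts weights the design-matrix columns.
plot_contrast_matrix(contrasts[conditions[0]], design_matrix)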
contrasts = {
    'faces-scrambled':
    basic_contrasts['faces'] - basic_contrasts['scrambled'],
    'scrambled-faces':
    -basic_contrasts['faces'] + basic_contrasts['scrambled'],
    'effects_of_interest':
    np.vstack((basic_contrasts['faces'], basic_contrasts['scrambled']))
}

#########################################################################
# Fit the GLM for the 2 sessions by specifying a FirstLevelModel and then
# fitting it.
from nilearn.stats.first_level_model import FirstLevelModel
print('Fitting a GLM')
fmri_glm = FirstLevelModel()
fmri_glm = fmri_glm.fit(fmri_img, design_matrices=design_matrices)

#########################################################################
# Now we can compute contrast-related statistical maps (in z-scale), and plot them.
print('Computing contrasts')
from nilearn import plotting

# Iterate on contrasts
for contrast_id, contrast_val in contrasts.items():
    print("\tcontrast id: %s" % contrast_id)
    # compute the contrasts
    z_map = fmri_glm.compute_contrast(contrast_val, output_type='z_score')
    # plot the contrasts as soon as they're generated
    # the display is overlaid on the mean fMRI image
    # a threshold of 3.0 is used, more sophisticated choices are possible
    # minimal completion of the truncated call, using the threshold noted above
    plotting.plot_stat_map(z_map,
                           threshold=3.0,
                           title=contrast_id)

#########################################################################
# Instantiate the model used for the two-run analysis below. Only the
# smoothing and memory arguments of the original call survive here, so this
# is a minimal reconstruction.
fmri_glm = FirstLevelModel(smoothing_fwhm=5,
                           minimize_memory=True)

#########################################################################
# Compute fixed effects of the two runs and compute related images
# For this, we first define the contrasts as we would do for a single session
n_columns = design_matrices[0].shape[1]
contrast_val = np.hstack(([-1, -1, 1, 1], np.zeros(n_columns - 4)))

#########################################################################
# Statistics for the first session
from nilearn import plotting
cut_coords = [-129, -126, 49]
contrast_id = 'DSt_minus_SSt'

fmri_glm = fmri_glm.fit(fmri_img[0], design_matrices=design_matrices[0])
summary_statistics_session1 = fmri_glm.compute_contrast(contrast_val,
                                                        output_type='all')
plotting.plot_stat_map(summary_statistics_session1['z_score'],
                       bg_img=mean_img_,
                       threshold=3.0,
                       cut_coords=cut_coords,
                       title='{0}, first session'.format(contrast_id))

#########################################################################
# Statistics for the second session

fmri_glm = fmri_glm.fit(fmri_img[1], design_matrices=design_matrices[1])
summary_statistics_session2 = fmri_glm.compute_contrast(contrast_val,
                                                        output_type='all')
plotting.plot_stat_map(summary_statistics_session2['z_score'],
                       bg_img=mean_img_,
                       threshold=3.0,
                       cut_coords=cut_coords,
                       title='{0}, second session'.format(contrast_id))
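
#########################################################################
# Fixed effects of the two runs: one way to obtain them (a sketch consistent
# with the calls used elsewhere in this file, not necessarily the original
# example's code) is to refit the model on both runs at once and request all
# outputs for the contrast.
fmri_glm = fmri_glm.fit(fmri_img, design_matrices=design_matrices)
fixed_fx_stats = fmri_glm.compute_contrast(contrast_val, output_type='all')
plotting.plot_stat_map(fixed_fx_stats['z_score'],
                       bg_img=mean_img_,
                       threshold=3.0,
                       cut_coords=cut_coords,
                       title='{0}, fixed effects'.format(contrast_id))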

#########################################################################
# Extract the seed time series
# (parameters and seed masker as in the report_flm_adhd_dmn example above)
t_r = 2.
slice_time_ref = 0.
n_scans = 176
pcc_coords = (0, -53, 26)
adhd_dataset = nilearn.datasets.fetch_adhd(n_subjects=1)
seed_masker = NiftiSpheresMasker([pcc_coords],
                                 radius=10,
                                 detrend=True,
                                 standardize=True,
                                 low_pass=0.1,
                                 high_pass=0.01,
                                 t_r=2.,
                                 memory='nilearn_cache',
                                 memory_level=1,
                                 verbose=0)
seed_time_series = seed_masker.fit_transform(adhd_dataset.func[0])
frametimes = np.linspace(0, (n_scans - 1) * t_r, n_scans)
design_matrix = make_first_level_design_matrix(frametimes,
                                               hrf_model='spm',
                                               add_regs=seed_time_series,
                                               add_reg_names=["pcc_seed"])
dmn_contrast = np.array([1] + [0] * (design_matrix.shape[1] - 1))
contrasts = {'seed_based_glm': dmn_contrast}

#########################################################################
# Perform first level analysis
# ----------------------------
# Setup and fit GLM.
first_level_model = FirstLevelModel(t_r=t_r, slice_time_ref=slice_time_ref)
first_level_model = first_level_model.fit(run_imgs=adhd_dataset.func[0],
                                          design_matrices=design_matrix)

#########################################################################
# Estimate the contrast.
print('Contrast seed_based_glm computed.')
z_map = first_level_model.compute_contrast(contrasts['seed_based_glm'],
                                           output_type='z_score')

# Saving snapshots of the contrasts
filename = 'dmn_z_map.png'
display = plotting.plot_stat_map(z_map,
                                 threshold=3.0,
                                 title='Seed based GLM',
                                 cut_coords=pcc_coords)
display.add_markers(marker_coords=[pcc_coords],
                    marker_color='g')
display.savefig(filename)  # save the snapshot announced above

# Instantiate the glm
glm = FirstLevelModel(t_r=TR,
                      mask_img=haxby_dataset.mask,
                      high_pass=.008,
                      smoothing_fwhm=4,
                      memory='nilearn_cache')

##############################################################################
# Run the glm on data from each session
# -------------------------------------
for session in unique_sessions:
    # grab the fmri data for that particular session
    fmri_session = index_img(func_filename, sessions == session)

    # fit the glm
    glm.fit(fmri_session, events=events[session])

    # set up contrasts: one per condition
    conditions = events[session].trial_type.unique()
    for condition_ in conditions:
        z_maps.append(glm.compute_contrast(condition_))
        condition_idx.append(condition_)
        session_idx.append(session)

#########################################################################
# Generating a report
# -------------------
# Since we have already computed the FirstLevelModel
# and have the contrast, we can quickly create a summary report.
from nilearn.image import mean_img
from nilearn.reporting import make_glm_report
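
#########################################################################
# A minimal sketch of the report step (the contrast shown and the output
# filename are illustrative assumptions): pass the fitted model, a contrast,
# and a background image to make_glm_report, then save the report to disk.
mean_img_ = mean_img(func_filename)
report = make_glm_report(glm,
                         contrasts=conditions[0],
                         bg_img=mean_img_)
report.save_as_html('glm_report_haxby.html')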