def report_slm_oasis():  # pragma: no cover
    n_subjects = 5  # using more subjects requires more memory
    oasis_dataset = nilearn.datasets.fetch_oasis_vbm(n_subjects=n_subjects)
    # Resample the mask, since it has a different resolution than the gray matter maps
    mask_img = resample_to_img(
        nilearn.datasets.fetch_icbm152_brain_gm_mask(),
        oasis_dataset.gray_matter_maps[0],
        interpolation='nearest',
    )
    design_matrix = _make_design_matrix_slm_oasis(oasis_dataset, n_subjects)
    second_level_model = SecondLevelModel(smoothing_fwhm=2.0,
                                          mask_img=mask_img)
    second_level_model.fit(oasis_dataset.gray_matter_maps,
                           design_matrix=design_matrix)

    contrast = [[1, 0, 0], [0, 1, 0]]
    report = make_glm_report(
        model=second_level_model,
        contrasts=contrast,
        bg_img=nilearn.datasets.fetch_icbm152_2009()['t1'],
        height_control=None,
    )
    output_filename = 'generated_report_slm_oasis.html'
    output_filepath = os.path.join(REPORTS_DIR, output_filename)
    report.save_as_html(output_filepath)
    report.get_iframe()
Example 2
def test_high_level_glm_with_paths():
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 1), )
        mask, FUNCFILE, _ = write_fake_fmri_data_and_design(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # Ordinary Least Squares case
        model = SecondLevelModel(mask_img=mask)
        # asking for contrast before model fit gives error
        with pytest.raises(ValueError):
            model.compute_contrast([])
        # fit model
        Y = [func_img] * 4
        X = pd.DataFrame([[1]] * 4, columns=['intercept'])
        model = model.fit(Y, design_matrix=X)
        c1 = np.eye(len(model.design_matrix_.columns))[0]
        z_image = model.compute_contrast(c1, output_type='z_score')
        assert isinstance(z_image, Nifti1Image)
        assert_array_equal(z_image.affine, load(mask).affine)

        # try with target_shape
        target_shape = (10, 10, 10)
        target_affine = np.eye(4)
        target_affine[0, 3] = 1
        model = SecondLevelModel(mask_img=mask,
                                 target_shape=target_shape,
                                 target_affine=target_affine)
        z_image = model.fit(Y, design_matrix=X).compute_contrast(c1)
        assert_array_equal(z_image.shape, target_shape)
        assert_array_equal(z_image.affine, target_affine)

        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del Y, FUNCFILE, func_img, model
Example 3
def test_slm_reporting():
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 1), )
        mask, FUNCFILE, _ = write_fake_fmri_data_and_design(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        model = SecondLevelModel()
        Y = [func_img] * 4
        X = pd.DataFrame([[1]] * 4, columns=['intercept'])
        model = model.fit(Y, design_matrix=X)
        c1 = np.eye(len(model.design_matrix_.columns))[0]
        report_slm = glmr.make_glm_report(model, c1)
        # catches & raises UnicodeEncodeError in HTMLDocument.get_iframe()
        report_iframe = report_slm.get_iframe()
        # So flake8 doesn't complain about not using variable (F841)
        report_iframe
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del Y, FUNCFILE, func_img, model
Example 4
def test_make_headings_with_contrasts_title_none():
    model = SecondLevelModel()
    test_input = ({
        'contrast_0': [0, 0, 1],
        'contrast_1': [0, 1, 1],
    }, None, model)
    expected_output = (
        'Report: Second Level Model for contrast_0, contrast_1',
        'Statistical Report for contrast_0, contrast_1',
        'Second Level Model',
    )
    actual_output = glmr._make_headings(*test_input)
    assert actual_output == expected_output
Example 5
def test_make_headings_with_contrasts_title_custom():
    model = SecondLevelModel()
    test_input = ({'contrast_0': [0, 0, 1],
                   'contrast_1': [0, 1, 1],
                   },
                  'Custom Title for report',
                  model,
                  )
    expected_output = ('Custom Title for report',
                       'Custom Title for report',
                       'Second Level Model',
                       )
    actual_output = glmr._make_headings(*test_input)
    assert actual_output == expected_output
Example 6
def test_second_level_glm_computation():
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 1), )
        mask, FUNCFILE, _ = write_fake_fmri_data_and_design(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # Ordinary Least Squares case
        model = SecondLevelModel(mask_img=mask)
        Y = [func_img] * 4
        X = pd.DataFrame([[1]] * 4, columns=['intercept'])

        model = model.fit(Y, design_matrix=X)
        model.compute_contrast()
        labels1 = model.labels_
        results1 = model.results_

        labels2, results2 = run_glm(model.masker_.transform(Y), X.values,
                                    'ols')
        assert_almost_equal(labels1, labels2, decimal=1)
        assert len(results1) == len(results2)
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del func_img, FUNCFILE, model, X, Y
Example 7
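# ``attribute`` is assumed to be supplied by a pytest parametrization over the
# voxelwise attributes (e.g. 'residuals'); that parametrization is not shown here.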
def test_second_level_voxelwise_attribute(attribute):
    """Smoke test for voxelwise attributes for SecondLevelModel."""
    shapes = ((7, 8, 9, 1), )
    mask, fmri_data, _ = generate_fake_fmri_data_and_design(shapes)
    model = SecondLevelModel(mask_img=mask, minimize_memory=False)
    Y = fmri_data * 4
    X = pd.DataFrame([[1]] * 4, columns=['intercept'])
    model.fit(Y, design_matrix=X)
    model.compute_contrast()
    getattr(model, attribute)
Example 8
def test_second_level_residuals():
    """Tests residuals computation for SecondLevelModel."""
    shapes = ((7, 8, 9, 1), )
    mask, fmri_data, _ = generate_fake_fmri_data_and_design(shapes)
    model = SecondLevelModel(mask_img=mask, minimize_memory=False)
    Y = fmri_data * 4
    X = pd.DataFrame([[1]] * 4, columns=['intercept'])
    model.fit(Y, design_matrix=X)
    model.compute_contrast()
    assert isinstance(model.residuals, Nifti1Image)
    assert model.residuals.shape == (7, 8, 9, 4)
    mean_residuals = model.masker_.transform(model.residuals).mean(0)
    assert_array_almost_equal(mean_residuals, 0)
Example 9
def test_second_level_contrast_computation_with_memory_caching():
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 1), )
        mask, FUNCFILE, _ = write_fake_fmri_data_and_design(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # Ordinary Least Squares case
        model = SecondLevelModel(mask_img=mask, memory='nilearn_cache')
        # fit model
        Y = [func_img] * 4
        X = pd.DataFrame([[1]] * 4, columns=['intercept'])
        model = model.fit(Y, design_matrix=X)
        ncol = len(model.design_matrix_.columns)
        c1 = np.eye(ncol)[0, :]
        # test memory caching for compute_contrast
        model.compute_contrast(c1, output_type='z_score')
        # or simply pass nothing
        model.compute_contrast()
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del func_img, FUNCFILE, model, X, Y
Example 10
def test_second_level_contrast_computation():
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 1), )
        mask, FUNCFILE, _ = write_fake_fmri_data_and_design(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # Ordinary Least Squares case
        model = SecondLevelModel(mask_img=mask)
        # asking for contrast before model fit gives error
        with pytest.raises(ValueError):
            model.compute_contrast('intercept')
        # fit model
        Y = [func_img] * 4
        X = pd.DataFrame([[1]] * 4, columns=['intercept'])
        model = model.fit(Y, design_matrix=X)
        ncol = len(model.design_matrix_.columns)
        c1, cnull = np.eye(ncol)[0, :], np.zeros(ncol)
        # smoke test for different contrasts in fixed effects
        model.compute_contrast(c1)
        z_image = model.compute_contrast(c1, output_type='z_score')
        stat_image = model.compute_contrast(c1, output_type='stat')
        p_image = model.compute_contrast(c1, output_type='p_value')
        effect_image = model.compute_contrast(c1, output_type='effect_size')
        variance_image = \
            model.compute_contrast(c1, output_type='effect_variance')

        # Test output_type='all', and verify images are equivalent
        all_images = model.compute_contrast(c1, output_type='all')
        assert_array_equal(get_data(all_images['z_score']), get_data(z_image))
        assert_array_equal(get_data(all_images['stat']), get_data(stat_image))
        assert_array_equal(get_data(all_images['p_value']), get_data(p_image))
        assert_array_equal(get_data(all_images['effect_size']),
                           get_data(effect_image))
        assert_array_equal(get_data(all_images['effect_variance']),
                           get_data(variance_image))

        # formula should work (passing variable name directly)
        model.compute_contrast('intercept')
        # or simply pass nothing
        model.compute_contrast()
        # passing null contrast should give back a value error
        with pytest.raises(ValueError):
            model.compute_contrast(cnull)
        # passing wrong parameters
        with pytest.raises(ValueError):
            model.compute_contrast([])
        with pytest.raises(ValueError):
            model.compute_contrast(c1, None, '')
        with pytest.raises(ValueError):
            model.compute_contrast(c1, None, [])
        with pytest.raises(ValueError):
            model.compute_contrast(c1, None, None, '')
        # check that passing no explicit contrast when the design
        # matrix has more than one column raises an error
        rng = np.random.RandomState(42)
        X = pd.DataFrame(rng.uniform(size=(4, 2)), columns=["r1", "r2"])
        model = model.fit(Y, design_matrix=X)
        with pytest.raises(ValueError):
            model.compute_contrast(None)
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del func_img, FUNCFILE, model, X, Y
Example 11
# and plot the designs.
from nilearn.plotting import plot_design_matrix
_, (ax_unpaired,
    ax_paired) = plt.subplots(1, 2, gridspec_kw={'width_ratios': [1, 17]})
plot_design_matrix(unpaired_design_matrix, rescale=False, ax=ax_unpaired)
plot_design_matrix(paired_design_matrix, rescale=False, ax=ax_paired)
ax_unpaired.set_title('unpaired design', fontsize=12)
ax_paired.set_title('paired design', fontsize=12)
plt.tight_layout()
plotting.show()

##########################################################################
# We specify the analysis models and fit them.
from nilearn.glm.second_level import SecondLevelModel

second_level_model_unpaired = SecondLevelModel().fit(
    second_level_input, design_matrix=unpaired_design_matrix)

second_level_model_paired = SecondLevelModel().fit(
    second_level_input, design_matrix=paired_design_matrix)

##########################################################################
# Estimating the contrast is simple. To do so, we provide the column
# name of the design matrix. The argument 'output_type' is set to return all
# available outputs so that we can compare differences in the effect size,
# variance, and z-score.
stat_maps_unpaired = second_level_model_unpaired.compute_contrast(
    'vertical vs horizontal', output_type='all')

stat_maps_paired = second_level_model_paired.compute_contrast(
    'vertical vs horizontal', output_type='all')
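
##########################################################################
# A minimal sketch (assuming the fits above succeeded) of how the maps in the
# returned dictionaries could be compared visually:
from nilearn import plotting

# Sketch: glass-brain views of the paired model's z-score and effect-size maps.
plotting.plot_glass_brain(stat_maps_paired['z_score'], colorbar=True,
                          title='paired design: z-score')
plotting.plot_glass_brain(stat_maps_paired['effect_size'], colorbar=True,
                          title='paired design: effect size')
plotting.show()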
Example 12
def test_fmri_inputs():
    # Test processing of FMRI inputs
    with InTemporaryDirectory():
        # prepare fake data
        rng = np.random.RandomState(42)
        p, q = 80, 10
        X = rng.standard_normal(size=(p, q))
        shapes = ((7, 8, 9, 10), )
        mask, FUNCFILE, _ = write_fake_fmri_data_and_design(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        T = func_img.shape[-1]
        des = pd.DataFrame(np.ones((T, 1)), columns=['a'])
        des_fname = 'design.csv'
        des.to_csv(des_fname)

        # prepare correct input first level models
        flm = FirstLevelModel(subject_label='01').fit(FUNCFILE,
                                                      design_matrices=des)
        flms = [flm, flm, flm]
        # prepare correct input dataframe and lists
        shapes = ((7, 8, 9, 1), )
        _, FUNCFILE, _ = write_fake_fmri_data_and_design(shapes)
        FUNCFILE = FUNCFILE[0]

        dfcols = ['subject_label', 'map_name', 'effects_map_path']
        dfrows = [['01', 'a', FUNCFILE], ['02', 'a', FUNCFILE],
                  ['03', 'a', FUNCFILE]]
        niidf = pd.DataFrame(dfrows, columns=dfcols)
        niimgs = [FUNCFILE, FUNCFILE, FUNCFILE]
        niimg_4d = concat_imgs(niimgs)
        confounds = pd.DataFrame([['01', 1], ['02', 2], ['03', 3]],
                                 columns=['subject_label', 'conf1'])
        sdes = pd.DataFrame(X[:3, :3], columns=['intercept', 'b', 'c'])

        # smoke tests with correct input
        # First level models as input
        SecondLevelModel(mask_img=mask).fit(flms)
        SecondLevelModel().fit(flms)
        # Note: the following call creates a singular design matrix
        SecondLevelModel().fit(flms, confounds)
        SecondLevelModel().fit(flms, None, sdes)
        # dataframes as input
        SecondLevelModel().fit(niidf)
        SecondLevelModel().fit(niidf, confounds)
        SecondLevelModel().fit(niidf, confounds, sdes)
        SecondLevelModel().fit(niidf, None, sdes)
        # niimgs as input
        SecondLevelModel().fit(niimgs, None, sdes)
        # 4d niimg as input
        SecondLevelModel().fit(niimg_4d, None, sdes)

        # test wrong input errors
        # test first level model requirements
        with pytest.raises(ValueError):
            SecondLevelModel().fit(flm)
        with pytest.raises(ValueError):
            SecondLevelModel().fit([flm])
        # test dataframe requirements
        with pytest.raises(ValueError):
            SecondLevelModel().fit(niidf['subject_label'])
        # test niimgs requirements
        with pytest.raises(ValueError):
            SecondLevelModel().fit(niimgs)
        with pytest.raises(ValueError):
            SecondLevelModel().fit(niimgs + [[]], confounds)
        # test first_level_conditions, confounds, and design
        with pytest.raises(ValueError):
            SecondLevelModel().fit(flms, ['', []])
        with pytest.raises(ValueError):
            SecondLevelModel().fit(flms, [])
        with pytest.raises(ValueError):
            SecondLevelModel().fit(flms, confounds['conf1'])
        with pytest.raises(ValueError):
            SecondLevelModel().fit(flms, None, [])
Example 13
                             columns=['age', 'sex', 'intercept'])

#############################################################################
# Let's plot the design matrix.
from nilearn.plotting import plot_design_matrix

ax = plot_design_matrix(design_matrix)
ax.set_title('Second level design matrix', fontsize=12)
ax.set_ylabel('maps')

##########################################################################
# Next, we specify and fit the second-level model. A small amount of smoothing
# is applied when the data are loaded, to improve the statistical behavior.

from nilearn.glm.second_level import SecondLevelModel
second_level_model = SecondLevelModel(smoothing_fwhm=2.0, mask_img=mask_img)
second_level_model.fit(gray_matter_map_filenames,
                       design_matrix=design_matrix)

##########################################################################
# Estimating the contrast is very simple. We can just provide the column
# name of the design matrix.
z_map = second_level_model.compute_contrast(second_level_contrast=[1, 0, 0],
                                            output_type='z_score')

###########################################################################
# We threshold the second level contrast at the FDR-corrected p < .05 level
# and plot it.
from nilearn import plotting
from nilearn.glm import threshold_stats_img
_, threshold = threshold_stats_img(
    z_map, alpha=.05, height_control='fdr')
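
##########################################################################
# A minimal plotting sketch, assuming the z_map and threshold computed above
# (the title is illustrative):
plotting.plot_stat_map(
    z_map, threshold=threshold, colorbar=True, display_mode='z',
    title='Second-level contrast (FDR < .05)')
plotting.show()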
Example 14
fig.suptitle('subjects z_map language network (unc p<0.001)')
plotting.show()

#########################################################################
# Second level model estimation
# -----------------------------
# We just have to provide the list of fitted FirstLevelModel objects
# to the SecondLevelModel object for estimation. We can do this because
# all subjects share a similar design matrix (same variables reflected in
# column names).
from nilearn.glm.second_level import SecondLevelModel
second_level_input = models

#########################################################################
# Note that we apply a smoothing of 8mm.
second_level_model = SecondLevelModel(smoothing_fwhm=8.0)
second_level_model = second_level_model.fit(second_level_input)

#########################################################################
# Computing contrasts at the second level is as simple as at the first level.
# Since we are not providing confounders, we are performing a one-sample test
# at the second level with the images determined by the specified first-level
# contrast.
zmap = second_level_model.compute_contrast(
    first_level_contrast='language-string')

#########################################################################
# The group level contrast reveals a left lateralized fronto-temporal
# language network.
plotting.plot_glass_brain(zmap,
                          colorbar=True,
Example 15
############################################################################
# We then assemble these into a design matrix and
design_matrix = pd.DataFrame(np.hstack(
    (condition_effect[:, np.newaxis], subject_effect)),
                             columns=['vertical vs horizontal'] + subjects)

############################################################################
# plot the design_matrix.
from nilearn.plotting import plot_design_matrix
plot_design_matrix(design_matrix)

############################################################################
# We formally specify the analysis model and fit it.
from nilearn.glm.second_level import SecondLevelModel
second_level_model = SecondLevelModel().fit(second_level_input,
                                            design_matrix=design_matrix)

##########################################################################
# Estimating the contrast is very simple. We can just provide the column
# name of the design matrix.
z_map = second_level_model.compute_contrast('vertical vs horizontal',
                                            output_type='z_score')

###########################################################################
# We threshold the second level contrast and plot it.
threshold = 3.1  # corresponds to p < .001, uncorrected
display = plotting.plot_glass_brain(
    z_map,
    threshold=threshold,
    colorbar=True,
    plot_abs=False,
Example 16
#########################################################################
# Perform the second level analysis
# ----------------------------------
#
# First, we define a design matrix for the model. As the model is trivial
# (one-sample test), the design matrix is just one column with ones.
import pandas as pd

design_matrix = pd.DataFrame([1] * n_samples, columns=['intercept'])

#########################################################################
# Next, we specify and estimate the model.
from nilearn.glm.second_level import SecondLevelModel

second_level_model = SecondLevelModel().fit(cmap_filenames,
                                            design_matrix=design_matrix)

#########################################################################
# Compute the only possible contrast: the one-sample test. Since there
# is only one possible contrast, we don't need to specify it in detail.
z_map = second_level_model.compute_contrast(output_type='z_score')

#########################################################################
# Threshold the resulting map without multiple comparisons correction,
# abs(z) > 3.29 (equivalent to p < 0.001), cluster size > 10 voxels.
from nilearn.image import threshold_img

thresholded_map = threshold_img(
    z_map,
    threshold=3.29,
    cluster_threshold=10,
Example 17
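# ``attribute`` is assumed to come from a pytest parametrization over the
# voxelwise attributes (e.g. 'residuals'), as in the smoke test above.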
def test_second_level_voxelwise_attribute_errors(attribute):
    """Tests that an error is raised when trying to access
    voxelwise attributes before fitting the model, before
    computing a contrast, and when not setting
    ``minimize_memory`` to ``True``.
    """
    shapes = ((7, 8, 9, 1), )
    mask, fmri_data, _ = generate_fake_fmri_data_and_design(shapes)
    model = SecondLevelModel(mask_img=mask, minimize_memory=False)
    with pytest.raises(ValueError, match="The model has no results."):
        getattr(model, attribute)
    Y = fmri_data * 4
    X = pd.DataFrame([[1]] * 4, columns=['intercept'])
    model.fit(Y, design_matrix=X)
    with pytest.raises(ValueError, match="The model has no results."):
        getattr(model, attribute)
    with pytest.raises(ValueError, match="attribute must be one of"):
        model._get_voxelwise_model_attribute("foo", True)
    model = SecondLevelModel(mask_img=mask, minimize_memory=True)
    model.fit(Y, design_matrix=X)
    model.compute_contrast()
    with pytest.raises(ValueError, match="To access voxelwise attributes"):
        getattr(model, attribute)
Example 18
plt.show()

############################################################################
# Estimate second level model
# ---------------------------
# We define the input maps and the design matrix for the second level model
# and fit it.
import pandas as pd
second_level_input = data['cmaps']
design_matrix = pd.DataFrame([1] * len(second_level_input),
                             columns=['intercept'])

############################################################################
# Model specification and fit.
from nilearn.glm.second_level import SecondLevelModel
second_level_model = SecondLevelModel(smoothing_fwhm=8.0)
second_level_model = second_level_model.fit(second_level_input,
                                            design_matrix=design_matrix)

##########################################################################
# Estimating the :term:`contrast` is very simple. Since the design matrix
# contains a single intercept column, we do not need to specify the contrast
# explicitly.
z_map = second_level_model.compute_contrast(output_type='z_score')

###########################################################################
# We threshold the second level contrast at uncorrected p < 0.001 and plot it.
from scipy.stats import norm
p_val = 0.001
p001_unc = norm.isf(p_val)
display = plotting.plot_glass_brain(
    z_map, threshold=p001_unc, colorbar=True, display_mode='z', plot_abs=False,
Example 19
############################################################################
# Estimate second level model
# ---------------------------
# We define the input maps and the design matrix for the second level model
# and fit it.
import pandas as pd

design_matrix = pd.DataFrame(np.hstack((tested_var, np.ones_like(tested_var))),
                             columns=['fluency', 'intercept'])

###########################################################################
# Fit of the second-level model
from nilearn.glm.second_level import SecondLevelModel

model = SecondLevelModel(smoothing_fwhm=5.0)
model.fit(contrast_map_filenames, design_matrix=design_matrix)

##########################################################################
# Estimating the contrast is very simple. We can just provide the column
# name of the design matrix.
z_map = model.compute_contrast('fluency', output_type='z_score')

###########################################################################
# We compute the FDR-corrected (p < 0.05) threshold for these data
from nilearn.glm import threshold_stats_img

_, threshold = threshold_stats_img(z_map, alpha=.05, height_control='fdr')

###########################################################################
# Let us plot the second level contrast at the computed threshold
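# A minimal sketch of that plotting step, assuming the z_map and threshold
# computed above (the title is illustrative):
from nilearn import plotting

plotting.plot_stat_map(z_map, threshold=threshold, colorbar=True,
                       title='fluency effect (FDR < .05)')
plotting.show()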