# Imports needed by the test snippets below; the exact import paths for the
# private test helper and NiftiMasker may differ across nilearn versions.
import numpy as np
import pandas as pd
import pytest
from nibabel import Nifti1Image, load
from nibabel.tmpdirs import InTemporaryDirectory
from numpy.testing import assert_array_equal

from nilearn.image import concat_imgs, get_data
from nilearn.maskers import NiftiMasker
from nilearn.glm.first_level import FirstLevelModel
from nilearn.glm.second_level import non_parametric_inference
from nilearn._utils.data_gen import write_fake_fmri_data_and_design


def test_high_level_non_parametric_inference_with_paths():
    with InTemporaryDirectory():
        n_perm = 100
        shapes = ((7, 8, 9, 1), )
        mask, FUNCFILE, _ = write_fake_fmri_data_and_design(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        Y = [func_img] * 4
        X = pd.DataFrame([[1]] * 4, columns=['intercept'])
        c1 = np.eye(len(X.columns))[0]
        neg_log_pvals_img = non_parametric_inference(Y,
                                                     design_matrix=X,
                                                     second_level_contrast=c1,
                                                     mask=mask,
                                                     n_perm=n_perm)
        neg_log_pvals = get_data(neg_log_pvals_img)

        assert isinstance(neg_log_pvals_img, Nifti1Image)
        assert_array_equal(neg_log_pvals_img.affine, load(mask).affine)

        assert np.all(neg_log_pvals <= -np.log10(1.0 / (n_perm + 1)))
        assert np.all(0 <= neg_log_pvals)
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory
        del X, Y, FUNCFILE, func_img, neg_log_pvals_img
def test_non_parametric_inference_cluster_level():
    """Test non-parametric inference with cluster-level inference."""
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 1), )
        mask, FUNCFILE, _ = write_fake_fmri_data_and_design(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)

        Y = [func_img] * 4
        X = pd.DataFrame([[1]] * 4, columns=['intercept'])

        out = non_parametric_inference(
            Y,
            design_matrix=X,
            mask=mask,
            n_perm=10,
            threshold=0.001,
        )
        assert isinstance(out, dict)
        assert "t" in out.keys()
        assert "logp_max_t" in out.keys()
        assert "logp_max_size" in out.keys()
        assert "logp_max_mass" in out.keys()

        assert get_data(out["logp_max_t"]).shape == shapes[0][:3]
        del func_img, FUNCFILE, out, X, Y
def test_high_level_non_parametric_inference_with_paths():
    with InTemporaryDirectory():
        n_perm = 100
        shapes = ((7, 8, 9, 1), )
        mask, FUNCFILE, _ = write_fake_fmri_data_and_design(shapes)
        FUNCFILE = FUNCFILE[0]
        df_input = pd.DataFrame({
            'subject_label': [f'sub-{i}' for i in range(4)],
            'effects_map_path': [FUNCFILE] * 4,
            'map_name': [FUNCFILE] * 4
        })
        func_img = load(FUNCFILE)
        Y = [func_img] * 4
        X = pd.DataFrame([[1]] * 4, columns=['intercept'])
        c1 = np.eye(len(X.columns))[0]
        neg_log_pvals_imgs = [
            non_parametric_inference(second_level_input,
                                     design_matrix=X,
                                     second_level_contrast=c1,
                                     first_level_contrast=FUNCFILE,
                                     mask=mask,
                                     n_perm=n_perm,
                                     verbose=1)
            for second_level_input in [Y, df_input]
        ]
        assert all(
            [isinstance(img, Nifti1Image) for img in neg_log_pvals_imgs])
        for img in neg_log_pvals_imgs:
            assert_array_equal(img.affine, load(mask).affine)
        neg_log_pvals_list = [get_data(i) for i in neg_log_pvals_imgs]
        for neg_log_pvals in neg_log_pvals_list:
            assert np.all(neg_log_pvals <= -np.log10(1.0 / (n_perm + 1)))
            assert np.all(0 <= neg_log_pvals)

        masker = NiftiMasker(mask, smoothing_fwhm=2.0)
        with pytest.warns(UserWarning,
                          match="Parameter smoothing_fwhm "
                          "of the masker overridden"):
            non_parametric_inference(Y,
                                     design_matrix=X,
                                     second_level_contrast=c1,
                                     smoothing_fwhm=3.0,
                                     mask=masker,
                                     n_perm=n_perm)
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory
        del X, Y, FUNCFILE, func_img, neg_log_pvals_imgs
def test_non_parametric_inference_permutation_computation():
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 1), )
        mask, FUNCFILE, _ = write_fake_fmri_data_and_design(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)

        Y = [func_img] * 4
        X = pd.DataFrame([[1]] * 4, columns=['intercept'])

        neg_log_pvals_img = non_parametric_inference(Y,
                                                     design_matrix=X,
                                                     mask=mask,
                                                     n_perm=100)

        assert get_data(neg_log_pvals_img).shape == shapes[0][:3]
        del func_img, FUNCFILE, neg_log_pvals_img, X, Y
###########################################################################
# Since we are plotting negative log10 p-values and using a threshold of 1,
# this corresponds to corrected p-values below 10% (FWER < 10%). This
# threshold is more conservative than the previous one.
threshold = 1
title = ('Group left-right button press: \n'
         'parametric test (FWER < 10%)')
display = plotting.plot_glass_brain(
    neg_log_pval, colorbar=True, display_mode='z', plot_abs=False, vmax=3,
    cut_coords=cut_coords, threshold=threshold, title=title)
plotting.show()

###########################################################################
# Now, we compute the (corrected) p-values with a permutation test.
from nilearn.glm.second_level import non_parametric_inference
neg_log_pvals_permuted_ols_unmasked = \
    non_parametric_inference(second_level_input,
                             design_matrix=design_matrix,
                             model_intercept=True, n_perm=1000,
                             two_sided_test=False,
                             smoothing_fwhm=8.0, n_jobs=1)

###########################################################################
# Let us plot the (corrected) negative log p-values for the nonparametric test.
title = ('Group left-right button press: \n'
         'permutation test (FWER < 10%)')
display = plotting.plot_glass_brain(
    neg_log_pvals_permuted_ols_unmasked, colorbar=True, vmax=3,
    display_mode='z', plot_abs=False, cut_coords=cut_coords,
    threshold=threshold, title=title)
plotting.show()

# The neg-log p-values obtained with nonparametric testing are capped at 3
# since the number of permutations is 1e3.
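
# A quick check of that cap (a minimal sketch, not part of the original
# example): the smallest p-value reachable with ``n_perm`` permutations is
# 1 / (n_perm + 1), so the corresponding neg-log10 value is the ceiling.
import numpy as np

n_perm = 1000
print(-np.log10(1.0 / (n_perm + 1)))  # ~3.0004, so the map saturates near 3
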
def test_non_parametric_inference_contrast_computation():
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 1), )
        mask, FUNCFILE, _ = write_fake_fmri_data_and_design(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # passing None as second_level_input raises an error
        with pytest.raises(ValueError):
            non_parametric_inference(None, None, None, 'intercept', mask)
        # prepare valid inputs
        Y = [func_img] * 4
        X = pd.DataFrame([[1]] * 4, columns=['intercept'])
        # the call should work without an explicit second-level contrast
        neg_log_pvals_img = non_parametric_inference(Y,
                                                     design_matrix=X,
                                                     mask=mask,
                                                     n_perm=100)

        ncol = len(X.columns)
        c1, cnull = np.eye(ncol)[0, :], np.zeros(ncol)
        # the call should work with an explicit second-level contrast
        neg_log_pvals_img = non_parametric_inference(Y,
                                                     design_matrix=X,
                                                     second_level_contrast=c1,
                                                     mask=mask,
                                                     n_perm=100)
        # the call should work when passing the variable name directly
        neg_log_pvals_img = \
            non_parametric_inference(Y, design_matrix=X,
                                     second_level_contrast='intercept',
                                     mask=mask, n_perm=100)

        # passing a null contrast should raise a ValueError
        with pytest.raises(ValueError):
            non_parametric_inference(Y, X, cnull, 'intercept', mask)
        # passing wrong parameters
        with pytest.raises(ValueError):
            non_parametric_inference(Y, X, [], 'intercept', mask)
        # check that passing no explicit contrast when the design
        # matrix has more than one column raises an error
        rng = np.random.RandomState(42)
        X = pd.DataFrame(rng.uniform(size=(4, 2)), columns=["r1", "r2"])
        with pytest.raises(ValueError):
            non_parametric_inference(Y, X, None)
        del func_img, FUNCFILE, neg_log_pvals_img, X, Y
def test_fmri_inputs_for_non_parametric_inference():
    # Test processing of FMRI inputs
    with InTemporaryDirectory():
        # prepare fake data
        rng = np.random.RandomState(42)
        p, q = 80, 10
        X = rng.standard_normal(size=(p, q))
        shapes = ((7, 8, 9, 10), )
        mask, FUNCFILE, _ = write_fake_fmri_data_and_design(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        T = func_img.shape[-1]
        des = pd.DataFrame(np.ones((T, 1)), columns=['a'])
        des_fname = 'design.csv'
        des.to_csv(des_fname)

        # prepare correct input first level models
        flm = FirstLevelModel(subject_label='01').fit(FUNCFILE,
                                                      design_matrices=des)
        # prepare correct input dataframe and lists
        shapes = ((7, 8, 9, 1), )
        _, FUNCFILE, _ = write_fake_fmri_data_and_design(shapes)
        FUNCFILE = FUNCFILE[0]

        dfcols = ['subject_label', 'map_name', 'effects_map_path']
        dfrows = [['01', 'a', FUNCFILE], ['02', 'a', FUNCFILE],
                  ['03', 'a', FUNCFILE]]
        niidf = pd.DataFrame(dfrows, columns=dfcols)
        niimgs = [FUNCFILE, FUNCFILE, FUNCFILE]
        niimg_4d = concat_imgs(niimgs)
        confounds = pd.DataFrame([['01', 1], ['02', 2], ['03', 3]],
                                 columns=['subject_label', 'conf1'])
        sdes = pd.DataFrame(X[:3, :3], columns=['intercept', 'b', 'c'])

        # test missing second-level contrast
        # niimgs as input
        with pytest.raises(ValueError):
            non_parametric_inference(niimgs, None, sdes)
        with pytest.raises(ValueError):
            non_parametric_inference(niimgs, confounds, sdes)
        # 4d niimg as input
        with pytest.raises(ValueError):
            non_parametric_inference(niimg_4d, None, sdes)

        # test wrong input errors
        # test first level model
        with pytest.raises(ValueError):
            non_parametric_inference(flm)
        # test list of fewer than two niimgs
        with pytest.raises(ValueError):
            non_parametric_inference([FUNCFILE])
        # test dataframe
        with pytest.raises(ValueError):
            non_parametric_inference(niidf)
        # test niimgs requirements
        with pytest.raises(ValueError):
            non_parametric_inference(niimgs)
        with pytest.raises(ValueError):
            non_parametric_inference(niimgs + [[]], confounds)
        with pytest.raises(ValueError):
            non_parametric_inference([FUNCFILE])
        # test other objects
        with pytest.raises(ValueError):
            non_parametric_inference('random string object')
        del X, FUNCFILE, func_img
###########################################################################
# .. note::
#   Providing a ``threshold`` value to ``non_parametric_inference`` enables
#   cluster-level inference.
#   Performing cluster-level inference will increase the computation time of
#   the permutation procedure.
#   Increasing the number of parallel jobs (``n_jobs``) can reduce the time
#   cost.
#
# .. hint::
#   If you wish to only run voxel-level correction, set ``threshold`` to None
#   (the default).
from nilearn.glm.second_level import non_parametric_inference

out_dict = non_parametric_inference(
    second_level_input,
    design_matrix=design_matrix,
    model_intercept=True,
    n_perm=500,  # 500 for the sake of time. Ideally, this should be 10,000.
    two_sided_test=False,
    smoothing_fwhm=8.0,
    n_jobs=1,
    threshold=0.001,
)
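
# For comparison, a minimal sketch of the voxel-level-only variant mentioned
# in the hint above: with ``threshold`` left at its default (None),
# ``non_parametric_inference`` returns a single neg-log10 p-value image
# instead of a dictionary. It reuses the ``second_level_input`` and
# ``design_matrix`` objects passed to the call above.
neg_log_pvals_voxel_img = non_parametric_inference(
    second_level_input,
    design_matrix=design_matrix,
    model_intercept=True,
    n_perm=500,
    two_sided_test=False,
    smoothing_fwhm=8.0,
    n_jobs=1,
)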

###############################################################################
# Let us plot the (corrected) negative log p-values for both tests.
#
# We will use a negative log10 p-value threshold of 1, which corresponds to
# p < 0.1. This threshold indicates that there is less than a 10% probability
# of making a single false discovery (i.e., a 90% chance of making no false
# discovery at all). It is much more conservative than an uncorrected
# threshold, but still more liberal than a typical corrected threshold for
# this kind of analysis, which tends to be ~0.05.
#
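# Below is a minimal sketch of how these maps could be displayed, reusing the
# glass-brain settings shown earlier in this document and assuming the
# ``out_dict`` returned by ``non_parametric_inference`` above.
from nilearn import plotting

threshold = 1  # -log10(0.1): corrected p < 0.1
for img_name in ('logp_max_t', 'logp_max_size', 'logp_max_mass'):
    plotting.plot_glass_brain(
        out_dict[img_name],
        colorbar=True,
        display_mode='z',
        plot_abs=False,
        threshold=threshold,
        title=f'permutation test: {img_name} (FWER < 10%)',
    )
plotting.show()
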
title = ('Group-level association between motor activity and reading: \n'
         'neg-log of parametric corrected p-values (FWER < 10%)')
plotting.plot_stat_map(neg_log_pval,
                       colorbar=True,
                       cut_coords=cut_coords,
                       threshold=threshold,
                       title=title)
plotting.show()

##############################################################################
# Computing the (corrected) negative log p-values with permutation test
from nilearn.glm.second_level import non_parametric_inference
neg_log_pvals_permuted_ols_unmasked = \
    non_parametric_inference(contrast_map_filenames,
                             design_matrix=design_matrix,
                             second_level_contrast='fluency',
                             model_intercept=True, n_perm=1000,
                             two_sided_test=False, mask=None,
                             smoothing_fwhm=5.0, n_jobs=1)

###########################################################################
# Let us plot the (corrected) negative log p-values
title = ('Group-level association between motor activity and reading: \n'
         'neg-log of non-parametric corrected p-values (FWER < 10%)')
plotting.plot_stat_map(neg_log_pvals_permuted_ols_unmasked,
                       colorbar=True,
                       cut_coords=cut_coords,
                       threshold=threshold,
                       title=title)
plotting.show()

# The neg-log p-values obtained with non-parametric testing are capped at 3
# since 1,000 permutations were performed (-log10(1 / 1001) is about 3).