def test_permuted_ols_withcovar(random_state=0):
    """

    """
    rng = check_random_state(random_state)
    # design parameters
    n_samples = 50
    # create design
    target_var = rng.randn(n_samples, 1)
    tested_var = rng.randn(n_samples, 1)
    confounding_vars = rng.randn(n_samples, 2)
    # compute t-scores with linalg or statsmodels
    ref_score = get_tvalue_with_alternative_library(tested_var, target_var,
                                                    confounding_vars)
    # permuted OLS
    _, own_score, _ = permuted_ols(
        tested_var, target_var, confounding_vars, model_intercept=False,
        n_perm=0, random_state=random_state)
    assert_array_almost_equal(ref_score, own_score, decimal=6)

    ### Adds intercept
    # permuted OLS
    _, own_scores_intercept, _ = permuted_ols(
        tested_var, target_var, confounding_vars, model_intercept=True,
        n_perm=0, random_state=random_state)
    # compute t-scores with linalg or statsmodels
    confounding_vars = np.hstack((confounding_vars, np.ones((n_samples, 1))))
    alt_score_intercept = get_tvalue_with_alternative_library(
        tested_var, target_var, confounding_vars)
    assert_array_almost_equal(alt_score_intercept, own_scores_intercept,
                              decimal=6)
def test_permuted_ols_intercept_nocovar_multivariate(random_state=0):
    rng = check_random_state(random_state)
    # design parameters
    n_samples = 50
    n_targets = 10
    # create design
    target_vars = rng.randn(n_samples, n_targets)
    tested_vars = np.ones((n_samples, 1))
    # compute t-scores with linalg or statsmodels
    ref_scores = get_tvalue_with_alternative_library(tested_vars, target_vars)
    # permuted OLS
    _, own_scores, _ = permuted_ols(tested_vars,
                                    target_vars,
                                    confounding_vars=None,
                                    n_perm=0,
                                    random_state=random_state)
    # same thing but with model_intercept=True to check it has no effect
    _, own_scores_intercept, _ = permuted_ols(tested_vars,
                                              target_vars,
                                              confounding_vars=None,
                                              model_intercept=True,
                                              n_perm=0,
                                              random_state=random_state)
    assert_array_almost_equal(ref_scores, own_scores, decimal=6)
    assert_array_almost_equal(own_scores, own_scores_intercept, decimal=6)
def test_permuted_ols_nocovar_warning(random_state=0):
    """
    Ensure that a warning is raised when a given voxel has all zeros.
    """
    rng = check_random_state(random_state)

    # design parameters
    n_samples = 50
    n_descriptors = 10
    n_regressors = 1

    # create design
    target_var = rng.randn(n_samples, n_descriptors)
    tested_var = rng.randn(n_samples, n_regressors)

    # permuted OLS
    _, own_score, _ = permuted_ols(tested_var,
                                   target_var,
                                   model_intercept=False,
                                   n_perm=100,
                                   random_state=random_state)

    # zero out one descriptor (voxel) and test with ravelized tested_var
    target_var[:, 0] = 0

    with pytest.warns(UserWarning):
        _, own_score2, _ = permuted_ols(np.ravel(tested_var),
                                        target_var,
                                        model_intercept=False,
                                        n_perm=100,
                                        random_state=random_state)

    assert np.array_equal(own_score[1:], own_score2[1:])
def test_permuted_ols_intercept_withcovar_multivariate(random_state=0):
    rng = check_random_state(random_state)

    # design parameters
    n_samples = 50
    n_descriptors = 10
    n_regressors = 1
    n_covars = 2

    # create design
    target_vars = rng.randn(n_samples, n_descriptors)
    tested_var = np.ones((n_samples, n_regressors))
    confounding_vars = rng.randn(n_samples, n_covars)

    # compute t-scores with linalg or statsmodels
    ref_scores = get_tvalue_with_alternative_library(tested_var, target_vars,
                                                     confounding_vars)
    assert ref_scores.shape == (n_regressors, n_descriptors)

    # permuted OLS
    _, own_scores, _ = permuted_ols(
        tested_var, target_vars, confounding_vars, n_perm=0,
        random_state=random_state)
    assert own_scores.shape == (n_regressors, n_descriptors)
    assert_almost_equal(ref_scores, own_scores, decimal=6)

    # same thing but with model_intercept=True to check it has no effect
    _, own_scores_intercept, _ = permuted_ols(
        tested_var, target_vars, confounding_vars, model_intercept=True,
        n_perm=0, random_state=random_state)
    assert own_scores_intercept.shape == (n_regressors, n_descriptors)
    assert_array_almost_equal(own_scores, own_scores_intercept, decimal=6)
def test_permuted_ols_nocovar_multivariate(random_state=0):
    """Test permuted_ols with multiple tested variates and no covariate.

    It is equivalent to fitting several models with only one tested variate.

    """
    rng = check_random_state(random_state)
    # design parameters
    n_samples = 50
    n_targets = 10
    n_regressors = 2
    # create design
    target_vars = rng.randn(n_samples, n_targets)
    tested_var = rng.randn(n_samples, n_regressors)
    # compute t-scores with linalg or statsmodels
    ref_scores = get_tvalue_with_alternative_library(tested_var, target_vars)
    # permuted OLS
    _, own_scores, _ = permuted_ols(
        tested_var, target_vars, model_intercept=False,
        n_perm=0, random_state=random_state)
    assert_array_almost_equal(ref_scores, own_scores, decimal=6)

    ### Adds intercept (should be equivalent to centering variates)
    # permuted OLS
    _, own_scores_intercept, _ = permuted_ols(
        tested_var, target_vars, model_intercept=True,
        n_perm=0, random_state=random_state)
    target_vars -= target_vars.mean(0)
    tested_var -= tested_var.mean(0)
    # compute t-scores with linalg or statsmodels
    ref_scores_intercept = get_tvalue_with_alternative_library(
            tested_var, target_vars, np.ones((n_samples, 1)))
    assert_array_almost_equal(ref_scores_intercept, own_scores_intercept,
                              decimal=6)
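
The equivalence stated in the docstring above can be spelled out directly. A minimal sketch (the helper name is hypothetical, not part of the test suite): fit one tested variate at a time and compare each row of the joint fit.

def _check_single_variate_equivalence(tested_var, target_vars, random_state=0):
    # Hypothetical helper: permuted_ols tests each variate independently,
    # so the joint fit should match per-variate fits row by row.
    _, joint_scores, _ = permuted_ols(
        tested_var, target_vars, model_intercept=False,
        n_perm=0, random_state=random_state)
    for i in range(tested_var.shape[1]):
        _, single_scores, _ = permuted_ols(
            tested_var[:, i:i + 1], target_vars, model_intercept=False,
            n_perm=0, random_state=random_state)
        assert_array_almost_equal(joint_scores[i], single_scores[0],
                                  decimal=6)
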
def test_permuted_ols_nocovar(random_state=0):
    rng = check_random_state(random_state)
    # design parameters
    n_samples = 50
    # create design
    target_var = rng.randn(n_samples, 1)
    tested_var = rng.randn(n_samples, 1)
    # compute t-scores with linalg or statsmodels
    ref_score = get_tvalue_with_alternative_library(tested_var, target_var)
    # permuted OLS
    _, own_score, _ = permuted_ols(
        tested_var, target_var, model_intercept=False,
        n_perm=0, random_state=random_state)
    assert_array_almost_equal(ref_score, own_score, decimal=6)

    # test with ravelized tested_var
    _, own_score, _ = permuted_ols(
        np.ravel(tested_var), target_var, model_intercept=False,
        n_perm=0, random_state=random_state)
    assert_array_almost_equal(ref_score, own_score, decimal=6)

    ### Adds intercept (should be equivalent to centering variates)
    # permuted OLS
    _, own_score_intercept, _ = permuted_ols(
        tested_var, target_var, model_intercept=True,
        n_perm=0, random_state=random_state)
    target_var -= target_var.mean(0)
    tested_var -= tested_var.mean(0)
    # compute t-scores with linalg or statsmodels
    ref_score_intercept = get_tvalue_with_alternative_library(
        tested_var, target_var, np.ones((n_samples, 1)))
    assert_array_almost_equal(ref_score_intercept, own_score_intercept,
                              decimal=6)
def test_sided_test(random_state=0):
    """Check that a positive effect is always better recovered with one-sided.
    """
    rng = check_random_state(random_state)

    # design parameters
    n_samples = 50
    n_descriptors = 100
    n_regressors = 1

    # create design
    target_var = rng.randn(n_samples, n_descriptors)
    tested_var = rng.randn(n_samples, n_regressors)

    # permuted OLS
    # one-sided
    neg_log_pvals_onesided, _, _ = permuted_ols(
        tested_var, target_var, model_intercept=False,
        two_sided_test=False, n_perm=100, random_state=random_state)
    assert neg_log_pvals_onesided.shape == (n_regressors, n_descriptors)

    # two-sided
    neg_log_pvals_twosided, _, _ = permuted_ols(
        tested_var, target_var, model_intercept=False,
        two_sided_test=True, n_perm=100, random_state=random_state)
    assert neg_log_pvals_twosided.shape == (n_regressors, n_descriptors)

    positive_effect_location = neg_log_pvals_onesided > 1
    assert_equal(
        np.sum(neg_log_pvals_twosided[positive_effect_location]
               - neg_log_pvals_onesided[positive_effect_location] > 0),
        0)
def test_permuted_ols_intercept_nocovar(random_state=0):
    rng = check_random_state(random_state)

    # design parameters
    n_samples = 50
    n_descriptors = 10
    n_regressors = 1

    # create design
    tested_var = np.ones((n_samples, n_regressors))
    target_var = rng.randn(n_samples, n_descriptors)

    # compute t-scores with linalg or statsmodels
    t_val_ref = get_tvalue_with_alternative_library(tested_var, target_var)
    assert t_val_ref.shape == (n_regressors, n_descriptors)

    # permuted OLS
    neg_log_pvals, orig_scores, _ = permuted_ols(
        tested_var, target_var, confounding_vars=None, n_perm=10,
        random_state=random_state)
    assert neg_log_pvals.shape == (n_regressors, n_descriptors)
    assert orig_scores.shape == (n_regressors, n_descriptors)
    assert_array_less(neg_log_pvals, 1.)  # ensure sign swap is correctly done
    assert_array_almost_equal(t_val_ref, orig_scores, decimal=6)

    # same thing but with model_intercept=True to check it has no effect
    _, orig_scores_addintercept, _ = permuted_ols(
        tested_var, target_var, confounding_vars=None, model_intercept=True,
        n_perm=0, random_state=random_state)
    assert orig_scores_addintercept.shape == (n_regressors, n_descriptors)
    assert_array_almost_equal(t_val_ref, orig_scores_addintercept, decimal=6)
def test_permuted_ols_intercept_sklearn_nocovar_multivariate(random_state=0):
    rng = check_random_state(random_state)
    # design parameters
    n_samples = 50
    n_targets = 10
    # create design
    target_vars = rng.randn(n_samples, n_targets)
    tested_var = np.ones((n_samples, 1))
    # scikit-learn F-scores
    fvals = np.empty((n_targets, 1))
    for i in range(n_targets):
        fvals[i], _ = f_regression(target_vars[:, i], tested_var, center=False)
    # permuted OLS
    _, orig_scores, _ = permuted_ols(tested_var,
                                     target_vars,
                                     confounding_vars=None,
                                     n_perm=0,
                                     random_state=random_state)
    # same thing but with model_intercept=True to check it has no effect
    _, orig_scores_addintercept, _ = permuted_ols(tested_var,
                                                  target_vars,
                                                  confounding_vars=None,
                                                  model_intercept=True,
                                                  n_perm=0,
                                                  random_state=random_state)
    assert_array_almost_equal(fvals, orig_scores, decimal=6)
    assert_array_almost_equal(orig_scores, orig_scores_addintercept, decimal=6)
def test_sided_test2(random_state=0):
    """Check that two-sided can actually recover positive and negative effects.
    """
    # create design
    target_var1 = np.arange(0, 10).reshape((-1, 1))  # positive effect
    target_var = np.hstack((target_var1, - target_var1))

    tested_var = np.arange(0, 20, 2)
    # permuted OLS
    # one-sided
    neg_log_pvals_onesided, _, _ = permuted_ols(
        tested_var, target_var, model_intercept=False,
        two_sided_test=False, n_perm=100, random_state=random_state)
    # one-sided (other side)
    neg_log_pvals_onesided2, _, _ = permuted_ols(
        tested_var, -target_var, model_intercept=False,
        two_sided_test=False, n_perm=100, random_state=random_state)
    # two-sided
    neg_log_pvals_twosided, _, _ = permuted_ols(
        tested_var, target_var, model_intercept=False,
        two_sided_test=True, n_perm=100, random_state=random_state)

    assert_array_almost_equal(neg_log_pvals_onesided[0],
                              neg_log_pvals_onesided2[0][::-1])
    assert_array_almost_equal(neg_log_pvals_onesided + neg_log_pvals_onesided2,
                              neg_log_pvals_twosided)
def test_permuted_ols_intercept_statsmodels_withcovar(random_state=0):
    """

    This test has a statsmodels dependance. There seems to be no simple,
    alternative way to perform a F-test on a linear model including
    covariates.

    """
    try:
        from statsmodels.regression.linear_model import OLS
    except ImportError:
        warnings.warn("Statsmodels is required to run this test")
        raise nose.SkipTest

    rng = check_random_state(random_state)
    # design parameters
    n_samples = 50
    # create design
    target_var = rng.randn(n_samples, 1)
    tested_var = np.ones((n_samples, 1))
    confounding_vars = rng.randn(n_samples, 2)
    # statsmodels OLS
    ols = OLS(target_var, np.hstack((tested_var, confounding_vars))).fit()
    fvals = ols.f_test([[1.0, 0.0, 0.0]]).fvalue
    # permuted OLS
    _, orig_scores, _ = permuted_ols(
        tested_var, target_var, confounding_vars, n_perm=0,
        random_state=random_state)
    # same thing but with model_intercept=True to check it has no effect
    _, orig_scores_addintercept, _ = permuted_ols(
        tested_var, target_var, confounding_vars, model_intercept=True,
        n_perm=0, random_state=random_state)
    assert_array_almost_equal(fvals, orig_scores, decimal=6)
    assert_array_almost_equal(orig_scores, orig_scores_addintercept, decimal=6)
def test_permuted_ols_withcovar_multivariate(random_state=0):
    """Test permuted_ols with multiple tested variates and covariates.

    It is equivalent to fitting several models with only one tested variate.

    """
    rng = check_random_state(random_state)
    # design parameters
    n_samples = 50
    n_targets = 10
    n_covars = 2
    # create design
    target_vars = rng.randn(n_samples, n_targets)
    tested_var = rng.randn(n_samples, 1)
    confounding_vars = rng.randn(n_samples, n_covars)
    # compute t-scores with linalg or statsmodels
    ref_scores = get_tvalue_with_alternative_library(tested_var, target_vars,
                                                     confounding_vars)
    # permuted OLS
    _, own_scores, _ = permuted_ols(
        tested_var, target_vars, confounding_vars, model_intercept=False,
        n_perm=0, random_state=random_state)
    assert_almost_equal(ref_scores, own_scores, decimal=6)

    ### Adds intercept
    # permuted OLS
    _, own_scores_intercept, _ = permuted_ols(
        tested_var, target_vars, confounding_vars, model_intercept=True,
        n_perm=0, random_state=random_state)
    # compute t-scores with linalg or statsmodels
    confounding_vars = np.hstack((confounding_vars, np.ones((n_samples, 1))))
    ref_scores_intercept = get_tvalue_with_alternative_library(
        tested_var, target_vars, confounding_vars)
    assert_array_almost_equal(ref_scores_intercept,
                              own_scores_intercept, decimal=6)
def test_permuted_ols_sklearn_nocovar(random_state=0):
    rng = check_random_state(random_state)
    # design parameters
    n_samples = 50
    # create design
    target_var = rng.randn(n_samples, 1)
    tested_var = rng.randn(n_samples, 1)
    # scikit-learn F-score
    fvals, _ = f_regression(target_var, tested_var, center=False)
    # permuted OLS
    _, orig_scores, _ = permuted_ols(
        tested_var, target_var, model_intercept=False,
        n_perm=0, random_state=random_state)
    assert_array_almost_equal([fvals], orig_scores, decimal=6)

    # test with ravelized tested_var
    _, orig_scores, _ = permuted_ols(
        np.ravel(tested_var), target_var, model_intercept=False,
        n_perm=0, random_state=random_state)
    assert_array_almost_equal([fvals], orig_scores, decimal=6)

    ### Adds intercept (should be equivalent to centering variates)
    # permuted OLS
    _, orig_scores_addintercept, _ = permuted_ols(
        tested_var, target_var, model_intercept=True,
        n_perm=0, random_state=random_state)
    target_var -= target_var.mean(0)
    tested_var -= tested_var.mean(0)
    # scikit-learn F-score
    fvals_addintercept, _ = f_regression(target_var, tested_var, center=True)
    assert_array_almost_equal([fvals_addintercept], orig_scores_addintercept,
                              decimal=6)
def test_permuted_ols_intercept_sklearn_nocovar(random_state=0):
    rng = check_random_state(random_state)
    # design parameters
    n_samples = 50
    # create design
    target_var = rng.randn(n_samples, 1)
    tested_var = np.ones((n_samples, 1))
    # scikit-learn F-score
    fvals, _ = f_regression(target_var, tested_var, center=False)
    # permuted OLS
    neg_log_pvals, orig_scores, _ = permuted_ols(tested_var,
                                                 target_var,
                                                 confounding_vars=None,
                                                 n_perm=10,
                                                 random_state=random_state)
    assert_array_less(neg_log_pvals, 1.)  # ensure sign swap is correctly done
    # same thing but with model_intercept=True to check it has no effect
    _, orig_scores_addintercept, _ = permuted_ols(tested_var,
                                                  target_var,
                                                  confounding_vars=None,
                                                  model_intercept=True,
                                                  n_perm=0,
                                                  random_state=random_state)
    assert_array_almost_equal([fvals], orig_scores, decimal=6)
    assert_array_almost_equal(orig_scores, orig_scores_addintercept, decimal=6)
def test_permuted_ols_statsmodels_withcovar_multivariate(random_state=0):
    """Test permuted_ols with multiple tested variates and covariates.

    It is equivalent to fitting several models with only one tested variate.

    This test has a statsmodels dependance. There seems to be no simple,
    alternative way to perform a F-test on a linear model including
    covariates.

    """
    try:
        from statsmodels.regression.linear_model import OLS
    except ImportError:
        warnings.warn("Statsmodels is required to run this test")
        raise nose.SkipTest

    rng = check_random_state(random_state)
    # design parameters
    n_samples = 50
    n_targets = 10
    n_covars = 2
    # create design
    target_vars = rng.randn(n_samples, n_targets)
    tested_var = rng.randn(n_samples, 1)
    confounding_vars = rng.randn(n_samples, n_covars)
    # statsmodels OLS
    fvals = np.empty((n_targets, 1))
    test_matrix = np.array([[1.] + [0.] * n_covars])
    for i in range(n_targets):
        ols = OLS(target_vars[:, i], np.hstack((tested_var, confounding_vars)))
        fvals[i] = ols.fit().f_test(test_matrix).fvalue[0][0]
    # permuted OLS
    _, orig_scores, _ = permuted_ols(tested_var,
                                     target_vars,
                                     confounding_vars,
                                     model_intercept=False,
                                     n_perm=0,
                                     random_state=random_state)
    assert_almost_equal(fvals, orig_scores, decimal=6)

    ### Adds intercept
    # permuted OLS
    _, orig_scores_addintercept, _ = permuted_ols(tested_var,
                                                  target_vars,
                                                  confounding_vars,
                                                  model_intercept=True,
                                                  n_perm=0,
                                                  random_state=random_state)
    # statsmodels OLS
    confounding_vars = np.hstack((confounding_vars, np.ones((n_samples, 1))))
    fvals_addintercept = np.empty((n_targets, 1))
    test_matrix = np.array([[1.] + [0.] * (n_covars + 1)])
    for i in range(n_targets):
        ols = OLS(target_vars[:, i], np.hstack((tested_var, confounding_vars)))
        fvals_addintercept[i] = ols.fit().f_test(test_matrix).fvalue[0][0]
    assert_array_almost_equal(fvals_addintercept,
                              orig_scores_addintercept,
                              decimal=6)
def test_permuted_ols_statsmodels_withcovar(random_state=0):
    """

    This test has a statsmodels dependance. There seems to be no simple,
    alternative way to perform a F-test on a linear model including
    covariates.

    """
    try:
        from statsmodels.regression.linear_model import OLS
    except ImportError:
        warnings.warn("Statsmodels is required to run this test")
        raise nose.SkipTest

    rng = check_random_state(random_state)
    # design parameters
    n_samples = 50
    # create design
    target_var = rng.randn(n_samples, 1)
    tested_var = rng.randn(n_samples, 1)
    confounding_vars = rng.randn(n_samples, 2)
    # statsmodels OLS
    ols = OLS(target_var, np.hstack((tested_var, confounding_vars))).fit()
    fvals = ols.f_test([[1., 0., 0.]]).fvalue
    # permuted OLS
    _, orig_scores, _ = permuted_ols(tested_var,
                                     target_var,
                                     confounding_vars,
                                     model_intercept=False,
                                     n_perm=0,
                                     random_state=random_state)
    assert_array_almost_equal(fvals, orig_scores, decimal=6)

    ### Adds intercept
    # permuted OLS
    _, orig_scores_addintercept, _ = permuted_ols(tested_var,
                                                  target_var,
                                                  confounding_vars,
                                                  model_intercept=True,
                                                  n_perm=0,
                                                  random_state=random_state)
    # statsmodels OLS
    confounding_vars = np.hstack((confounding_vars, np.ones((n_samples, 1))))
    ols = OLS(target_var, np.hstack((tested_var, confounding_vars))).fit()
    fvals_addintercept = ols.f_test([[1., 0., 0., 0.]]).fvalue
    assert_array_almost_equal(fvals_addintercept,
                              orig_scores_addintercept,
                              decimal=6)
    def _fit(self, dataset):
        self.dataset = dataset
        # Use intercept as explanatory variable
        self.parameters_["tested_vars"] = np.ones(
            (self.inputs_["z_maps"].shape[0], 1))
        self.parameters_["confounding_vars"] = None

        _, t_map, _ = permuted_ols(
            self.parameters_["tested_vars"],
            self.inputs_["z_maps"],
            confounding_vars=self.parameters_["confounding_vars"],
            model_intercept=False,  # modeled by tested_vars
            n_perm=0,
            two_sided_test=self.two_sided,
            random_state=42,
            n_jobs=1,
            verbose=0,
        )

        # Convert t to z, preserving signs
        dof = self.parameters_["tested_vars"].shape[0] - self.parameters_[
            "tested_vars"].shape[1]
        z_map = t_to_z(t_map, dof)
        images = {
            "t": _boolean_unmask(t_map.squeeze(),
                                 self.inputs_["aggressive_mask"]),
            "z": _boolean_unmask(z_map.squeeze(),
                                 self.inputs_["aggressive_mask"]),
        }
        return images
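
The t-to-z conversion above preserves signs by matching one-tailed tail probabilities. A minimal scipy-based sketch of that mapping, assuming this is how the real t_to_z helper behaves:

import numpy as np
from scipy import stats

def t_to_z_sketch(t_map, dof):
    # Map each t-value to the z-value with the same one-tailed survival
    # probability, keeping the original sign.
    p = stats.t.sf(np.abs(t_map), dof)
    z = stats.norm.isf(p)
    return z * np.sign(t_map)
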
    def correct_fwe_montecarlo(self, result, n_iters=10000, n_cores=-1):
        """
        Perform FWE correction using the max-value permutation method.
        Only call this method from within a Corrector.

        Parameters
        ----------
        result : :obj:`nimare.results.MetaResult`
            Result object from an ALE meta-analysis.
        n_iters : :obj:`int`, optional
            The number of iterations to run in estimating the null distribution.
            Default is 10000.
        n_cores : :obj:`int`, optional
            Number of cores to use for parallelization.
            If <=0, defaults to using all available cores. Default is -1.

        Returns
        -------
        images : :obj:`dict`
            Dictionary of 1D arrays corresponding to masked images generated by
            the correction procedure. The following arrays are generated by
            this method: 'z_vthresh', 'p_level-voxel', 'z_level-voxel', and
            'logp_level-cluster'.

        See Also
        --------
        nimare.correct.FWECorrector : The Corrector from which to call this method.
        nilearn.mass_univariate.permuted_ols : The function used for this IBMA.

        Examples
        --------
        >>> meta = PermutedOLS()
        >>> result = meta.fit(dset)
        >>> corrector = FWECorrector(method='montecarlo',
        ...                          n_iters=5, n_cores=1)
        >>> cresult = corrector.transform(result)
        """
        n_cores = self._check_ncores(n_cores)

        log_p_map, t_map, _ = permuted_ols(
            self.parameters_["tested_vars"],
            self.inputs_["z_maps"],
            confounding_vars=self.parameters_["confounding_vars"],
            model_intercept=False,  # modeled by tested_vars
            n_perm=n_iters,
            two_sided_test=self.two_sided,
            random_state=42,
            n_jobs=n_cores,
            verbose=0,
        )

        # Fill complete maps
        p_map = np.power(10.0, -log_p_map)

        # Convert p to z, preserving signs
        sign = np.sign(t_map)
        sign[sign == 0] = 1
        z_map = p_to_z(p_map, tail="two") * sign
        images = {"logp_level-voxel": log_p_map, "z_level-voxel": z_map}
        return images
def calc_depth_epi_image_from_power(eeg):
    """ Calculate t-map image analagous to Brainstorm demo

    Parameters
    ----------
    seizure : EEG | dict
        baseline and seizure EEG data
    montage : MNE DigMontage
        EEG montage

    Returns
    -------
    ndarray
        t values
    """

    base = eeg['baseline']['ex_power'].T
    seiz = eeg['seizure']['ex_power'].T
    data = np.concatenate((base, seiz))
    size = base.shape[0]
    labels = np.zeros(2 * size, dtype=int)
    labels[size:] = 1
    __, t_scores, _ = permuted_ols(
        labels,
        data,
        # + intercept as a covariate by default
        n_perm=10000,
        two_sided_test=True,
        n_jobs=2)  # can be changed to use more CPUs

    return t_scores
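
A hypothetical usage sketch with synthetic power arrays; the eeg layout (channels x time windows under 'ex_power') is inferred from the function body, and real data would come from the surrounding pipeline:

import numpy as np

rng = np.random.RandomState(0)
eeg = {
    'baseline': {'ex_power': rng.randn(20, 40)},  # (n_channels, n_windows)
    'seizure': {'ex_power': rng.randn(20, 40) + 1.0},  # shifted to mimic an effect
}
t_scores = calc_depth_epi_image_from_power(eeg)  # one t-value per channel
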
def test_permuted_ols_check_h0_noeffect_signswap(random_state=0):
    rng = check_random_state(random_state)
    # design parameters
    n_samples = 100
    # create dummy design with no effect
    target_var = rng.randn(n_samples, 1)
    tested_var = np.ones((n_samples, 1))
    # permuted OLS
    # We check that h0 is close to the theoretical distribution, which is
    # known for this simple design (= t(n_samples - dof)).
    perm_ranges = [10, 100, 1000]  # test various number of permutations
    all_kstest_pvals = []
    # we compute the mean squared error between the cumulative density
    # functions as a proof of consistency of the permutation algorithm
    all_mse = []
    for i, n_perm in enumerate(np.repeat(perm_ranges, 10)):
        pval, orig_scores, h0 = permuted_ols(
            tested_var, target_var, model_intercept=False,
            n_perm=n_perm, two_sided_test=False, random_state=i)
        assert_equal(h0.size, n_perm)
        # Kolmogorov-Smirnov test
        kstest_pval = stats.kstest(h0, stats.t(n_samples).cdf)[1]
        all_kstest_pvals.append(kstest_pval)
        mse = np.mean(
            (stats.t(n_samples).cdf(np.sort(h0))
             - np.linspace(0, 1, h0.size + 1)[1:]) ** 2)
        all_mse.append(mse)
    all_kstest_pvals = np.array(all_kstest_pvals).reshape(
        (len(perm_ranges), -1))
    all_mse = np.array(all_mse).reshape((len(perm_ranges), -1))
    # check that a difference between distributions is not rejected by KS test
    assert_array_less(0.01 / (len(perm_ranges) * 10.), all_kstest_pvals)
    # consistency of the algorithm: the more permutations, the less the MSE
    assert_array_less(np.diff(all_mse.mean(1)), 0)
def test_permuted_ols_nocovar_warning(random_state=0):
    """Ensure that a warning is raised when a given voxel has all zeros.

    This test also checks that an invalid n_jobs value will raise a ValueError.
    """
    rng = check_random_state(random_state)

    # design parameters
    n_samples = 50
    n_descriptors = 10
    n_regressors = 1

    # create design
    target_var = rng.randn(n_samples, n_descriptors)
    tested_var = rng.randn(n_samples, n_regressors)

    # permuted OLS
    _, own_score, _ = permuted_ols(tested_var,
                                   target_var,
                                   model_intercept=False,
                                   n_perm=100,
                                   random_state=random_state)

    # zero out one descriptor (voxel) and test with ravelized tested_var
    target_var[:, 0] = 0

    with pytest.warns(UserWarning):
        _, own_score2, _ = permuted_ols(np.ravel(tested_var),
                                        target_var,
                                        model_intercept=False,
                                        n_perm=100,
                                        random_state=random_state)

    assert np.array_equal(own_score[1:], own_score2[1:])

    # Ensure that passing an unacceptable n_jobs value will raise a ValueError
    with pytest.raises(ValueError):
        permuted_ols(
            tested_var,
            target_var,
            model_intercept=False,
            n_perm=100,
            n_jobs=0,  # not allowed
            random_state=random_state,
        )
def test_sided_test(random_state=0):
    rng = check_random_state(random_state)
    # design parameters
    n_samples = 50
    # create design
    target_var = rng.randn(n_samples, 100)
    tested_var = rng.randn(n_samples, 1)
    # permuted OLS
    # one-sided
    neg_log_pvals_onesided, _, _ = permuted_ols(
        tested_var, target_var, model_intercept=False,
        two_sided_test=False, n_perm=100, random_state=random_state)
    # two-sided
    neg_log_pvals_twosided, _, _ = permuted_ols(
        tested_var, target_var, model_intercept=False,
        two_sided_test=True, n_perm=100, random_state=random_state)
    assert_equal(
        np.sum(neg_log_pvals_twosided - neg_log_pvals_onesided > 0), 0)
def test_permuted_ols_sklearn_nocovar_multivariate(random_state=0):
    """Test permuted_ols with multiple tested variates and no covariate.

    It is equivalent to fitting several models with only one tested variate.

    """
    rng = check_random_state(random_state)
    # design parameters
    n_samples = 50
    n_targets = 10
    n_regressors = 2
    # create design
    target_vars = rng.randn(n_samples, n_targets)
    tested_var = rng.randn(n_samples, n_regressors)
    # scikit-learn F-scores
    fvals = np.empty((n_targets, n_regressors))
    for i in range(n_targets):
        fvals[i], _ = f_regression(tested_var, target_vars[:, i], center=False)
    # permuted OLS
    _, orig_scores, _ = permuted_ols(tested_var,
                                     target_vars,
                                     model_intercept=False,
                                     n_perm=0,
                                     random_state=random_state)
    assert_array_almost_equal(fvals, orig_scores, decimal=6)

    ### Adds intercept (should be equivalent to centering variates)
    # permuted OLS
    _, orig_scores_addintercept, _ = permuted_ols(tested_var,
                                                  target_vars,
                                                  model_intercept=True,
                                                  n_perm=0,
                                                  random_state=random_state)
    target_vars -= target_vars.mean(0)
    tested_var -= tested_var.mean(0)
    # scikit-learn F-score
    fvals_addintercept = np.empty((n_targets, n_regressors))
    for i in range(n_targets):
        fvals_addintercept[i], _ = f_regression(tested_var,
                                                target_vars[:, i],
                                                center=True)
    assert_array_almost_equal(fvals_addintercept,
                              orig_scores_addintercept,
                              decimal=6)
def test_permuted_ols_intercept_nocovar(random_state=0):
    rng = check_random_state(random_state)
    # design parameters
    n_samples = 50
    # create design
    target_var = rng.randn(n_samples, 1)
    tested_var = np.ones((n_samples, 1))
    # compute t-scores with linalg or statsmodels
    t_val_ref = get_tvalue_with_alternative_library(tested_var, target_var)
    # permuted OLS
    neg_log_pvals, orig_scores, _ = permuted_ols(
        tested_var, target_var, confounding_vars=None, n_perm=10,
        random_state=random_state)
    assert_array_less(neg_log_pvals, 1.)  # ensure sign swap is correctly done
    # same thing but with model_intercept=True to check it has no effect
    _, orig_scores_addintercept, _ = permuted_ols(
        tested_var, target_var, confounding_vars=None, model_intercept=True,
        n_perm=0, random_state=random_state)
    assert_array_almost_equal(t_val_ref, orig_scores, decimal=6)
    assert_array_almost_equal(orig_scores, orig_scores_addintercept, decimal=6)
def test_permuted_ols_intercept_statsmodels_withcovar(random_state=0):
    rng = check_random_state(random_state)
    # design parameters
    n_samples = 50
    # create design
    target_var = rng.randn(n_samples, 1)
    tested_var = np.ones((n_samples, 1))
    confounding_vars = rng.randn(n_samples, 2)
    # compute t-scores with linalg or statsmodels
    ref_scores = get_tvalue_with_alternative_library(tested_var, target_var,
                                                     confounding_vars)
    # permuted OLS
    _, own_scores, _ = permuted_ols(
        tested_var, target_var, confounding_vars, n_perm=0,
        random_state=random_state)
    # same thing but with model_intercept=True to check it has no effect
    _, own_scores_intercept, _ = permuted_ols(
        tested_var, target_var, confounding_vars, model_intercept=True,
        n_perm=0, random_state=random_state)
    assert_array_almost_equal(ref_scores, own_scores, decimal=6)
    assert_array_almost_equal(ref_scores, own_scores_intercept, decimal=6)
def peaks2maps_workflow(sleuth_file,
                        output_dir=None,
                        prefix=None,
                        n_iters=10000):
    """Run the peaks2maps workflow.

    .. deprecated:: 0.0.11
        `peaks2maps_workflow` will be removed in NiMARE 0.0.13.

    """
    LGR.info("Loading coordinates...")
    dset = convert_sleuth_to_dataset(sleuth_file)

    LGR.info("Reconstructing unthresholded maps...")
    k = Peaks2MapsKernel(resample_to_mask=False)
    imgs = k.transform(dset, return_type="image")

    mask_img = resample_to_img(dset.mask, imgs[0], interpolation="nearest")
    z_data = apply_mask(imgs, mask_img)

    LGR.info("Estimating the null distribution...")
    log_p_map, t_map, _ = permuted_ols(
        np.ones((z_data.shape[0], 1)),
        z_data,
        confounding_vars=None,
        model_intercept=False,  # modeled by tested_vars
        n_perm=n_iters,
        two_sided_test=True,
        random_state=42,
        n_jobs=1,
        verbose=0,
    )
    res = {"logp": log_p_map, "t": t_map}

    res = MetaResult(permuted_ols, maps=res, mask=mask_img)

    if output_dir is None:
        output_dir = os.path.dirname(os.path.abspath(sleuth_file))
    else:
        pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)

    if prefix is None:
        base = os.path.basename(sleuth_file)
        prefix, _ = os.path.splitext(base)
        prefix += "_"

    LGR.info("Saving output maps...")
    res.save_maps(output_dir=output_dir, prefix=prefix)
def calc_epi_image_from_power(eeg, mri, D=3, dt=0.2):
    """Create source image from power data

    Parameters
    ----------
    eeg : EEG | dict
        eeg data
    mri : string
        path to file containing MRI

    Returns
    -------
    Nifti1Image
        Image where voxel values represent corresponding t-values
    """
    eeg['baseline']['img'], eeg['seizure']['img'] = \
        map_seeg_data(eeg, mri)
    base_img = eeg['baseline']['img']
    seiz_img = eeg['seizure']['img']

    nifti_masker = NiftiMasker()
    base_masked = nifti_masker.fit_transform(base_img)
    seiz_masked = nifti_masker.transform(seiz_img)
    data = np.concatenate((base_masked, seiz_masked))
    steps = int(D / dt)
    labels = np.zeros(2 * steps, dtype=int)
    labels[steps:] = 1
    __, t_scores, _ = permuted_ols(
        labels,
        data,
        # + intercept as a covariate by default
        n_perm=10000,
        two_sided_test=True,
        n_jobs=2)  # can be changed to use more CPUs

    return nifti_masker.inverse_transform(t_scores)
print "Nifti masker"
# remove features with zero between-subject variance
images_masked = nifti_masker.fit_transform(images)
images_masked[:, images_masked.var(0) < 0.01] = 0.
# final masking
new_images = nifti_masker.inverse_transform(images_masked)
images_masked = nifti_masker.fit_transform(new_images)
n_samples, n_features = images_masked.shape
print n_samples, "subjects, ", n_features, "features"

### Perform massively univariate analysis with permuted OLS ###################
print "Massively univariate model"
neg_log_pvals, all_scores, _ = permuted_ols(
    cdr, images_masked, age,  # + intercept as a covariate by default
    n_perm=1000,
    n_jobs=-1)  # can be changed to use more CPUs
neg_log_pvals_unmasked = nifti_masker.inverse_transform(
    neg_log_pvals).get_data()[..., 0]

### Show results
print "Plotting results"
# background anat
mean_anat = smooth_img(images[0], FWHM).get_data()
for img in images[1:]:
    mean_anat += smooth_img(img, FWHM).get_data()
mean_anat /= float(len(images))
ref_img = nibabel.load(images[0])
picked_slice = 36
vmin = -np.log10(0.1)  # 10% corrected
plt.figure(figsize=(5, 4))
fmri_masked = nifti_masker.fit_transform(contrast_map_filenames)

### Anova (parametric F-scores) ###############################################
from nilearn._utils.fixes import f_regression
_, pvals_anova = f_regression(fmri_masked, tested_var, center=True)
pvals_anova *= fmri_masked.shape[1]
pvals_anova[np.isnan(pvals_anova)] = 1
pvals_anova[pvals_anova > 1] = 1
neg_log_pvals_anova = - np.log10(pvals_anova)
neg_log_pvals_anova_unmasked = nifti_masker.inverse_transform(
    neg_log_pvals_anova)

### Perform massively univariate analysis with permuted OLS ###################
neg_log_pvals_permuted_ols, _, _ = permuted_ols(
    tested_var, fmri_masked,
    model_intercept=True,
    n_perm=5000,  # 5,000 for the sake of time. Ideally, this should be 10,000
    n_jobs=1)  # can be changed to use more CPUs
neg_log_pvals_permuted_ols_unmasked = nifti_masker.inverse_transform(
    np.ravel(neg_log_pvals_permuted_ols))

### Visualization #############################################################
from nilearn.plotting import plot_stat_map

# Various plotting parameters
z_slice = 12  # plotted slice
from nilearn.image.resampling import coord_transform
affine = neg_log_pvals_anova_unmasked.get_affine()
_, _, k_slice = coord_transform(0, 0, z_slice,
                                linalg.inv(affine))
k_slice = round(k_slice)
# remove features with too low between-subject variance
gm_maps_masked = nifti_masker.fit_transform(gray_matter_map_filenames)
gm_maps_masked[:, gm_maps_masked.var(0) < 0.01] = 0.
# final masking
new_images = nifti_masker.inverse_transform(gm_maps_masked)
gm_maps_masked = nifti_masker.fit_transform(new_images)
n_samples, n_features = gm_maps_masked.shape
print n_samples, "subjects, ", n_features, "features"

### Inference with massively univariate model #################################
from nilearn.mass_univariate import permuted_ols

print "Massively univariate model"
neg_log_pvals, all_scores, _ = permuted_ols(
    age,
    gm_maps_masked,  # + intercept as a covariate by default
    n_perm=1000,  # 1,000 for the sake of time. 10,000 is recommended
    two_sided_test=False,  # RPBI does not perform a two-sided test
    n_jobs=1)  # can be changed to use more CPUs
neg_log_pvals_unmasked = nifti_masker.inverse_transform(
    np.ravel(neg_log_pvals))

### Show results
from nilearn.plotting import plot_stat_map
bg_filename = gray_matter_map_filenames[0]
z_slice = 0
from nilearn.image.resampling import coord_transform
affine = neg_log_pvals_unmasked.get_affine()
_, _, k_slice = coord_transform(0, 0, z_slice, linalg.inv(affine))
k_slice = round(k_slice)
threshold = -np.log10(0.1)  # 10% corrected
def non_parametric_inference(
        second_level_input, confounds=None, design_matrix=None,
        second_level_contrast=None, mask=None, smoothing_fwhm=None,
        model_intercept=True, n_perm=10000, two_sided_test=False,
        random_state=None, n_jobs=1, verbose=0):
    """Generate p-values corresponding to the contrasts provided
    based on permutation testing. This fuction reuses the 'permuted_ols'
    function Nilearn.

    Parameters
    ----------
    second_level_input: pandas DataFrame or list of Niimg-like objects.

        If a pandas DataFrame, then they have to contain subject_label,
        map_name and effects_map_path. It can contain multiple maps that
        would be selected during contrast estimation with the argument
        first_level_contrast of the compute_contrast function. The
        DataFrame will be sorted based on the subject_label column to avoid
        order inconsistencies when extracting the maps. So the rows of the
        automatically computed design matrix, if not provided, will
        correspond to the sorted subject_label column.

        If list of Niimg-like objects then this is taken literally as Y
        for the model fit and design_matrix must be provided.

    confounds: pandas DataFrame, optional
        Must contain a subject_label column. All other columns are
        considered as confounds and included in the model. If
        design_matrix is provided then this argument is ignored.
        The resulting second level design matrix uses the same column
        names as in the given DataFrame for confounds. At least two columns
        are expected, "subject_label" and at least one confound.

    design_matrix: pandas DataFrame, optional
        Design matrix to fit the GLM. The number of rows
        in the design matrix must agree with the number of maps derived
        from second_level_input.
        Ensure that the order of maps given by a second_level_input
        list of Niimgs matches the order of the rows in the design matrix.

    second_level_contrast: str or array of shape (n_col), optional
        Where ``n_col`` is the number of columns of the design matrix.
        The default (None) is accepted if the design matrix has a single
        column, in which case the only possible contrast array([1]) is
        applied; when the design matrix has multiple columns, an error is
        raised.

    mask: Niimg-like, NiftiMasker or MultiNiftiMasker object, optional,
        Mask to be used on data. If an instance of masker is passed,
        then its mask will be used. If no mask is given,
        it will be computed automatically by a MultiNiftiMasker with default
        parameters. Automatic mask computation assumes first level imgs have
        already been masked.

    smoothing_fwhm: float, optional
        If smoothing_fwhm is not None, it gives the size in millimeters of the
        spatial smoothing to apply to the signal.

    model_intercept : bool,
      If True, a constant column is added to the confounding variates
      unless the tested variate is already the intercept.

    n_perm : int,
      Number of permutations to perform.
      Permutations are costly but the more are performed, the more precision
      one gets in the p-values estimation.

    two_sided_test : boolean,
      If True, performs an unsigned t-test. Both positive and negative
      effects are considered; the null hypothesis is that the effect is zero.
      If False, only positive effects are considered as relevant. The null
      hypothesis is that the effect is zero or negative.

    random_state : int or None,
      Seed for random number generator, to have the same permutations
      in each computing units.

    n_jobs : int,
      Number of parallel workers.
      If -1 is provided, all CPUs are used.
      A negative number indicates that all the CPUs except (abs(n_jobs) - 1)
      ones will be used.

    verbose: int, optional
        verbosity level (0 means no message).

    Returns
    -------
    neg_log_corrected_pvals_img: Nifti1Image
        The image which contains negative logarithm of the
        corrected p-values
    """
    _check_second_level_input(second_level_input, design_matrix,
                              flm_object=False, df_object=False)
    _check_confounds(confounds)
    _check_design_matrix(design_matrix)

    # Report progress
    t0 = time.time()
    if verbose > 0:
        sys.stderr.write("Fitting second level model...")

    # Select sample map for masker fit and get subjects_label for design
    sample_map = mean_img(second_level_input)

    # Learn the mask. Assume the first level imgs have been masked.
    if not isinstance(mask, NiftiMasker):
        masker = NiftiMasker(
            mask_img=mask, smoothing_fwhm=smoothing_fwhm,
            memory=Memory(None), verbose=max(0, verbose - 1),
            memory_level=1)
    else:
        masker = clone(mask)
        if smoothing_fwhm is not None:
            if getattr(masker, 'smoothing_fwhm') is not None:
                warn('Parameter smoothing_fwhm of the masker overridden')
            setattr(masker, 'smoothing_fwhm', smoothing_fwhm)
    masker.fit(sample_map)

    # Report progress
    if verbose > 0:
        sys.stderr.write("\nComputation of second level model done in "
                         "%i seconds\n" % (time.time() - t0))

    # Check and obtain the contrast
    contrast = _get_contrast(second_level_contrast, design_matrix)

    # Get effect_maps
    effect_maps = _infer_effect_maps(second_level_input, None)

    # Check design matrix and effect maps agree on number of rows
    _check_effect_maps(effect_maps, design_matrix)

    # Obtain tested_var
    if contrast in design_matrix.columns.tolist():
        tested_var = np.asarray(design_matrix[contrast])
    else:
        raise ValueError('second_level_contrast must be a column of the '
                         'design matrix; got %r' % contrast)

    # Mask data
    target_vars = masker.transform(effect_maps)

    # Perform massively univariate analysis with permuted OLS
    neg_log_pvals_permuted_ols, _, _ = permuted_ols(
        tested_var, target_vars, model_intercept=model_intercept,
        n_perm=n_perm, two_sided_test=two_sided_test,
        random_state=random_state, n_jobs=n_jobs, verbose=max(0, verbose - 1))
    neg_log_corrected_pvals_img = masker.inverse_transform(
        np.ravel(neg_log_pvals_permuted_ols))

    return neg_log_corrected_pvals_img
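# A hedged usage sketch of the function defined above. Its def line falls
# outside this excerpt, so the name non_parametric_inference (nilearn's
# public API for this routine) is assumed, and the input file names are
# placeholders, not taken from the original source:
import pandas as pd

second_level_input = ["sub-01_zmap.nii.gz", "sub-02_zmap.nii.gz",
                      "sub-03_zmap.nii.gz"]
design_matrix = pd.DataFrame({'intercept': [1.0] * len(second_level_input)})
neg_log_pvals_img = non_parametric_inference(
    second_level_input,
    design_matrix=design_matrix,
    second_level_contrast='intercept',
    n_perm=1000,
    two_sided_test=False,
    n_jobs=1)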
Example #40
from sklearn.model_selection import cross_val_score
cv_scores = cross_val_score(anova_svr, gm_maps_masked, age)

### Return the corresponding mean prediction accuracy
prediction_accuracy = np.mean(cv_scores)
print("=== ANOVA ===")
print("Prediction accuracy: %f" % prediction_accuracy)
print()

### Inference with massively univariate model #################################
print "Massively univariate model"

### Statistical inference
from nilearn.mass_univariate import permuted_ols
neg_log_pvals, t_scores_original_data, _ = permuted_ols(
    age, gm_maps_masked,  # + intercept as a covariate by default
    n_perm=1000,  # 1,000 in the interest of time; 10000 would be better
    n_jobs=1)  # can be changed to use more CPUs
signed_neg_log_pvals = neg_log_pvals * np.sign(t_scores_original_data)
signed_neg_log_pvals_unmasked = nifti_masker.inverse_transform(
    signed_neg_log_pvals).get_data()

### Show results
# background anat
plt.figure(figsize=(5.5, 5.5))
vmin = -np.log10(0.1)  # 10% corrected
vmax = np.amax(neg_log_pvals)
masked_pvals = np.ma.masked_inside(signed_neg_log_pvals_unmasked,
                                   -vmin, vmin)[..., 0]
print('\n%d detections' % (~masked_pvals.mask[..., picked_slice]).sum())
plt.imshow(np.rot90(background_img[:, :, picked_slice]),
           interpolation='nearest', cmap=plt.cm.gray, vmin=0., vmax=1.)
Example #41
for gr in groups:
    gr1_idx = data[data.DX_Group == gr[0]].index.values
    gr2_idx = data[data.DX_Group == gr[1]].index.values

    gr1_f = x[gr1_idx, :]
    gr2_f = x[gr2_idx, :]
    tval_clustered, pval_clustered = stats.ttest_ind(gr1_f, gr2_f)
    pval_clustered = -np.log10(pval_clustered)

    gr_idx = np.hstack([gr1_idx, gr2_idx])
    gr_f = x[gr_idx, :]
    gr_labels = np.vstack([np.hstack([[1]*len(gr1_idx), [0]*len(gr2_idx)]),
                           np.hstack([[0]*len(gr1_idx), [1]*len(gr2_idx)])]).T

    p_clustered, t_clustered, _ = permuted_ols(gr_labels, gr_f,
                                               n_perm=1000, n_jobs=4,
                                               model_intercept=True)

    t_masked = np.zeros(pet_data_masked.shape[1])
    p_masked = np.zeros(pet_data_masked.shape[1])
    pval_masked = np.zeros(pet_data_masked.shape[1])
    for val in mbk_means_labels_unique:
        t_masked[(mbk_means_labels == val)] = t_clustered[0, val]
        p_masked[(mbk_means_labels == val)] = p_clustered[0, val]
        pval_masked[(mbk_means_labels == val)] = pval_clustered[val]

    tmap = masker.inverse_transform(t_masked)
    pmap = masker.inverse_transform(p_masked)
    pvalmap = masker.inverse_transform(pval_masked)
    header = pmap.get_header()
    header['aux_file'] = 'Hot'
# localizer_dataset = datasets.fetch_localizer_calculation_task(
#     n_subjects=n_samples)
localizer_dataset = datasets.fetch_localizer_contrasts(
    ["calculation vs sentences"],
    n_subjects=n_samples)

### Mask data #################################################################
nifti_masker = NiftiMasker(
    memory='nilearn_cache', memory_level=1)  # cache options
fmri_masked = nifti_masker.fit_transform(localizer_dataset.cmaps)

### Perform massively univariate analysis with permuted OLS ###################
tested_var = np.ones((n_samples, 1), dtype=float)  # intercept
neg_log_pvals, all_scores, h0 = permuted_ols(
    tested_var, fmri_masked, model_intercept=False,
    n_perm=5000,  # 5,000 for the sake of time. 10,000 is recommended
    two_sided_test=False,  # RPBI does not perform a two-sided test
    n_jobs=1)  # can be changed to use more CPUs
neg_log_pvals_unmasked = nifti_masker.inverse_transform(
    np.ravel(neg_log_pvals))

### Randomized Parcellation Based Inference ###################################
neg_log_pvals_rpbi, _, _ = randomized_parcellation_based_inference(
    tested_var, fmri_masked,
    np.asarray(nifti_masker.mask_img_.get_data()).astype(bool),
    n_parcellations=30,  # 30 for the sake of time, 100 is recommended
    n_parcels=1000,
    threshold='auto',
    n_perm=5000,  # 5,000 for the sake of time. 10,000 is recommended
    random_state=0, memory='nilearn_cache', n_jobs=1, verbose=True)
neg_log_pvals_rpbi_unmasked = nifti_masker.inverse_transform(
    np.ravel(neg_log_pvals_rpbi))
nifti_masker = NiftiMasker(
    mask=dataset_files.mask,
    memory='nilearn_cache', memory_level=1)  # cache options
fmri_masked = nifti_masker.fit_transform(dataset_files.func)

### Restrict to faces and houses ##############################################
conditions_encoded, _ = np.loadtxt(
    dataset_files.session_target).astype("int").T
conditions = np.recfromtxt(dataset_files.conditions_target)['f0']
condition_mask = np.logical_or(conditions == 'face', conditions == 'house')
conditions_encoded = conditions_encoded[condition_mask]
fmri_masked = fmri_masked[condition_mask]

### Perform massively univariate analysis with permuted OLS ###################
neg_log_pvals, all_scores, _ = permuted_ols(
    conditions_encoded, fmri_masked,  # + intercept as a covariate by default
    n_perm=10000,
    n_jobs=1)  # can be changed to use more CPUs
neg_log_pvals_unmasked = nifti_masker.inverse_transform(
    neg_log_pvals).get_data()

### scikit-learn F-scores for comparison ######################################
from nilearn._utils.fixes import f_regression
_, pvals_bonferroni = f_regression(
    fmri_masked, conditions_encoded)  # f_regression implicitly adds intercept
pvals_bonferroni *= fmri_masked.shape[1]
pvals_bonferroni[np.isnan(pvals_bonferroni)] = 1
pvals_bonferroni[pvals_bonferroni > 1] = 1
neg_log_pvals_bonferroni = -np.log10(pvals_bonferroni)
neg_log_pvals_bonferroni_unmasked = nifti_masker.inverse_transform(
    neg_log_pvals_bonferroni).get_data()
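# The manual Bonferroni correction above (multiply the p-values by the
# number of tests, then clip to 1) can also be done with statsmodels; a
# minimal sketch, assuming statsmodels is installed (it is not used by the
# original snippet):
import numpy as np
from statsmodels.stats.multitest import multipletests

pvals = np.array([1e-6, 4e-3, 3e-2, 0.5])
reject, pvals_corrected, _, _ = multipletests(pvals, alpha=0.05,
                                              method='bonferroni')
print(pvals_corrected)  # approximately [4.0e-06 1.6e-02 1.2e-01 1.0e+00]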
    gr1_idx = data[data.DX_Group == gr[0]].index.values
    gr2_idx = data[data.DX_Group == gr[1]].index.values

    gr1_f = pet_masked[gr1_idx, :]
    gr2_f = pet_masked[gr2_idx, :]

    gr_idx = np.hstack([gr1_idx, gr2_idx])
    gr_f = pet_masked[gr_idx, :]
    gr_labels = np.vstack([np.hstack([[1]*len(gr1_idx), [0]*len(gr2_idx)]),
                           np.hstack([[0]*len(gr1_idx), [1]*len(gr2_idx)])]).T

    test_var = np.hstack([[1]*len(gr1_idx), [0]*len(gr2_idx)])


    neg_log_pvals, t_scores, _ = permuted_ols(gr_labels, gr_f,
                                              n_perm=1000, n_jobs=6,
                                              model_intercept=True)

    print('RPBI')
    neg_log_pvals_rpbi, _, _ = randomized_parcellation_based_inference(
        test_var, gr_f,  # + intercept as a covariate by default
        np.asarray(masker.mask_img_.get_data()).astype(bool),
        n_parcellations=30,  # 30 for the sake of time, 100 is recommended
        n_parcels=1000,
        threshold='auto',
        n_perm=1000,  # 1,000 for the sake of time. 10,000 is recommended
        n_jobs=6, verbose=100)
    neg_log_pvals_rpbi_unmasked = masker.inverse_transform(
        np.ravel(neg_log_pvals_rpbi))

    tscore = masker.inverse_transform(t_scores[0])
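# Minimal sketch of the two-column group encoding passed to permuted_ols
# above: each row is one subject, column 0 flags membership in the first
# group and column 1 in the second (group sizes here are hypothetical, not
# from the original snippet):
import numpy as np

n1, n2 = 3, 2
gr_labels = np.vstack([np.hstack([[1] * n1, [0] * n2]),
                       np.hstack([[0] * n1, [1] * n2])]).T
print(gr_labels)
# [[1 0]
#  [1 0]
#  [1 0]
#  [0 1]
#  [0 1]]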
Example #45
from sklearn.model_selection import cross_val_score
cv_scores = cross_val_score(anova_svr, gm_maps_masked, age)

### Return the corresponding mean prediction accuracy
prediction_accuracy = np.mean(cv_scores)
print("=== ANOVA ===")
print("Prediction accuracy: %f" % prediction_accuracy)
print()

### Inference with massively univariate model #################################
print "Massively univariate model"

### Statistical inference
from nilearn.mass_univariate import permuted_ols
neg_log_pvals, all_scores, _ = permuted_ols(
    age, gm_maps_masked,  # + intercept as a covariate by default
    n_perm=5000,  # In the interest of time; 10000 would be better
    n_jobs=1)  # can be changed to use more CPUs
neg_log_pvals_unmasked = nifti_masker.inverse_transform(
    neg_log_pvals).get_data()[..., 0]

### Show results
# background anat
plt.figure(figsize=(5, 5))
vmin = -np.log10(0.1)  # 10% corrected
masked_pvals = np.ma.masked_less(neg_log_pvals_unmasked, vmin)
print('\n%d detections' % (~masked_pvals.mask[..., picked_slice]).sum())
plt.imshow(np.rot90(background_img[:, :, picked_slice]),
           interpolation='nearest', cmap=plt.cm.gray, vmin=0., vmax=1.)
im = plt.imshow(np.rot90(masked_pvals[:, :, picked_slice]),
                interpolation='nearest', cmap=plt.cm.autumn,
                vmin=vmin, vmax=np.amax(neg_log_pvals_unmasked))
def test_permuted_ols_check_h0_noeffect_labelswap(random_state=0):
    rng = check_random_state(random_state)
    # design parameters
    n_samples = 100
    # create dummy design with no effect
    target_var = rng.randn(n_samples, 1)
    tested_var = np.arange(n_samples).reshape((-1, 1))
    tested_var_not_centered = tested_var.copy()
    tested_var -= tested_var.mean(0)  # centered
    # permuted OLS
    # We check that h0 is close to the theoretical distribution, which is
    # known for this simple design (= t(n_samples - dof)).
    perm_ranges = [10, 100, 1000]  # test various number of permutations
    # we use two models (with and without intercept modelling)
    all_kstest_pvals = []
    all_kstest_pvals_intercept = []
    all_kstest_pvals_intercept2 = []
    # We compute the Mean Squared Error between the empirical and the
    # theoretical cumulative distribution functions as a check of the
    # consistency of the permutation algorithm.
    all_mse = []
    all_mse_intercept = []
    all_mse_intercept2 = []
    for i, n_perm in enumerate(np.repeat(perm_ranges, 10)):
        ### Case no. 1: no intercept in the model
        pval, orig_scores, h0 = permuted_ols(
            tested_var, target_var, model_intercept=False,
            n_perm=n_perm, two_sided_test=False, random_state=i)
        assert_equal(h0.size, n_perm)
        # Kolmogorov-Smirnov test
        kstest_pval = stats.kstest(h0, stats.t(n_samples - 1).cdf)[1]
        all_kstest_pvals.append(kstest_pval)
        mse = np.mean(
            (stats.t(n_samples - 1).cdf(np.sort(h0))
             - np.linspace(0, 1, h0.size + 1)[1:]) ** 2)
        all_mse.append(mse)
        ### Case no. 2: intercept in the model
        pval, orig_scores, h0 = permuted_ols(
            tested_var, target_var, model_intercept=True,
            n_perm=n_perm, two_sided_test=False, random_state=i)
        assert_array_less(pval, 1.)  # pval should not be significant
        # Kolmogorov-Smirnov test
        kstest_pval = stats.kstest(h0, stats.t(n_samples - 2).cdf)[1]
        all_kstest_pvals_intercept.append(kstest_pval)
        mse = np.mean(
            (stats.t(n_samples - 2).cdf(np.sort(h0))
             - np.linspace(0, 1, h0.size + 1)[1:]) ** 2)
        all_mse_intercept.append(mse)
        ### Case no. 3: intercept in the model, no centering of tested vars
        pval, orig_scores, h0 = permuted_ols(
            tested_var_not_centered, target_var, model_intercept=True,
            n_perm=n_perm, two_sided_test=False, random_state=i)
        assert_array_less(pval, 1.)  # pval should not be significant
        # Kolmogorov-Smirnov test
        kstest_pval = stats.kstest(h0, stats.t(n_samples - 2).cdf)[1]
        all_kstest_pvals_intercept2.append(kstest_pval)
        mse = np.mean(
            (stats.t(n_samples - 2).cdf(np.sort(h0))
             - np.linspace(0, 1, h0.size + 1)[1:]) ** 2)
        all_mse_intercept2.append(mse)
    all_kstest_pvals = np.array(all_kstest_pvals).reshape(
        (len(perm_ranges), -1))
    all_kstest_pvals_intercept = np.array(all_kstest_pvals_intercept).reshape(
        (len(perm_ranges), -1))
    all_kstest_pvals_intercept2 = np.array(
        all_kstest_pvals_intercept2).reshape((len(perm_ranges), -1))
    all_mse = np.array(all_mse).reshape((len(perm_ranges), -1))
    all_mse_intercept = np.array(all_mse_intercept).reshape(
        (len(perm_ranges), -1))
    all_mse_intercept2 = np.array(all_mse_intercept2).reshape(
        (len(perm_ranges), -1))
    # check that the KS test does not reject the hypothesis that h0 follows
    # the expected theoretical distribution
    assert_array_less(0.01, all_kstest_pvals)
    assert_array_less(0.01, all_kstest_pvals_intercept)
    assert_array_less(0.01, all_kstest_pvals_intercept2)
    # consistency of the algorithm: the more permutations, the less the MSE
    assert_array_less(np.diff(all_mse.mean(1)), 0)
    assert_array_less(np.diff(all_mse_intercept.mean(1)), 0)
    assert_array_less(np.diff(all_mse_intercept2.mean(1)), 0)
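# The consistency check above can be seen in isolation: under the null
# hypothesis, a permutation-derived distribution should approach the
# theoretical t distribution as the number of draws grows. A minimal,
# purely illustrative sketch of the same KS comparison on synthetic draws
# (not part of the original test suite):
from scipy import stats

n_samples = 100
dist = stats.t(n_samples - 1)
h0_small = dist.rvs(size=10, random_state=0)
h0_large = dist.rvs(size=10000, random_state=0)
# The KS statistic against the theoretical CDF typically shrinks as the
# number of draws grows.
print("KS statistic, 10 draws:    %f" % stats.kstest(h0_small, dist.cdf)[0])
print("KS statistic, 10000 draws: %f" % stats.kstest(h0_large, dist.cdf)[0])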
    grouped_fmri_masked[2 * s] = fmri_masked[session_house_mask].mean(0)
    grouped_fmri_masked[2 * s + 1] = fmri_masked[session_face_mask].mean(0)
    grouped_conditions_encoded[2 * s] = conditions_encoded[
        session_house_mask][0]
    grouped_conditions_encoded[2 * s + 1] = conditions_encoded[
        session_face_mask][0]

##############################################################################
# Perform massively univariate analysis with permuted OLS
#
# We use a two-sided t-test to compute p-values, but we keep track of the
# effect sign to add it back at the end, so as to observe the signed effect
from nilearn.mass_univariate import permuted_ols
neg_log_pvals, t_scores_original_data, _ = permuted_ols(
    grouped_conditions_encoded, grouped_fmri_masked,
    # + intercept as a covariate by default
    n_perm=10000, two_sided_test=True,
    n_jobs=1)  # can be changed to use more CPUs
signed_neg_log_pvals = neg_log_pvals * np.sign(t_scores_original_data)
signed_neg_log_pvals_unmasked = nifti_masker.inverse_transform(
    signed_neg_log_pvals)

##############################################################################
# scikit-learn F-scores for comparison
#
# The F-test does not allow one to observe the effect sign (purely two-sided)
from nilearn._utils.fixes import f_regression
_, pvals_bonferroni = f_regression(
    grouped_fmri_masked,
    grouped_conditions_encoded)  # f_regression implicitly adds intercept
pvals_bonferroni *= fmri_masked.shape[1]
    session_house_mask = np.logical_and(session_mask,
                                        conditions[condition_mask] == 'house')
    session_face_mask = np.logical_and(session_mask,
                                       conditions[condition_mask] == 'face')
    grouped_fmri_masked[2 * s] = fmri_masked[session_house_mask].mean(0)
    grouped_fmri_masked[2 * s + 1] = fmri_masked[session_face_mask].mean(0)
    grouped_conditions_encoded[2 * s] = conditions_encoded[
        session_house_mask][0]
    grouped_conditions_encoded[2 * s + 1] = conditions_encoded[
        session_face_mask][0]

### Perform massively univariate analysis with permuted OLS ###################
# A one-sided t-test is used here (see two_sided_test=False below) so that
# the results remain comparable with RPBI, which is one-sided only
neg_log_pvals, t_scores_original_data, _ = permuted_ols(
    grouped_conditions_encoded, grouped_fmri_masked,
    # + intercept as a covariate by default
    n_perm=5000,  # 5,000 for the sake of time. 10,000 is recommended
    two_sided_test=False,  # RPBI does not perform a two-sided test
    n_jobs=1)  # can be changed to use more CPUs
neg_log_pvals_unmasked = nifti_masker.inverse_transform(
    neg_log_pvals)

### Randomized Parcellation Based Inference ###################################
neg_log_pvals_rpbi, _, _ = randomized_parcellation_based_inference(
    grouped_conditions_encoded, grouped_fmri_masked,
    # + intercept as a covariate by default
    nifti_masker.mask_img_.get_data().astype(bool),
    n_parcellations=30,  # 30 for the sake of time, 100 is recommended
    n_parcels=1000,
    threshold='auto',
    n_perm=5000,  # 5,000 for the sake of time. 10,000 is recommended
    random_state=0, memory='nilearn_cache')
Example #49
# Imports assumed by this workflow snippet (module locations are hedged:
# get_template and MetaResult are taken to live in NiMARE's utils and
# results modules, as in recent NiMARE releases).
import logging
import os
import pathlib

import numpy as np
from nilearn.masking import apply_mask
from nilearn.mass_univariate import permuted_ols
from nimare.results import MetaResult
from nimare.utils import get_template

LGR = logging.getLogger(__name__)
def conperm_workflow(contrast_images,
                     mask_image=None,
                     output_dir=None,
                     prefix="",
                     n_iters=10000):
    """
    Contrast permutation workflow.
    """
    if mask_image is None:
        target = "mni152_2mm"
        mask_image = get_template(target, mask="brain")

    n_studies = len(contrast_images)
    LGR.info("Loading contrast maps...")
    z_data = apply_mask(contrast_images, mask_image)

    boilerplate = """
A contrast permutation analysis was performed on a sample of {n_studies}
images. A brain mask derived from the MNI 152 template (Fonov et al., 2009;
Fonov et al., 2011) was applied at 2x2x2mm resolution. The sign flipping
method used was implemented as described in Maumet & Nichols (2016), with
{n_iters} iterations used to estimate the null distribution.

References
----------
- Fonov, V., Evans, A. C., Botteron, K., Almli, C. R., McKinstry, R. C.,
Collins, D. L., & Brain Development Cooperative Group. (2011).
Unbiased average age-appropriate atlases for pediatric studies.
Neuroimage, 54(1), 313-327.
- Fonov, V. S., Evans, A. C., McKinstry, R. C., Almli, C. R., & Collins, D. L.
(2009). Unbiased nonlinear average age-appropriate brain templates from birth
to adulthood. NeuroImage, (47), S102.
- Maumet, C., & Nichols, T. E. (2016). Minimal Data Needed for Valid & Accurate
Image-Based fMRI Meta-Analysis. https://doi.org/10.1101/048249
    """

    LGR.info("Performing meta-analysis.")
    log_p_map, t_map, _ = permuted_ols(
        np.ones((z_data.shape[0], 1)),
        z_data,
        confounding_vars=None,
        model_intercept=False,  # modeled by tested_vars
        n_perm=n_iters,
        two_sided_test=True,
        random_state=42,
        n_jobs=1,
        verbose=0,
    )
    res = {
        "logp": log_p_map,
        "t": t_map,
    }
    # permuted_ols stands in for the Estimator in the results object
    res = MetaResult(permuted_ols, mask_image, maps=res)

    boilerplate = boilerplate.format(n_studies=n_studies, n_iters=n_iters)

    if output_dir is None:
        output_dir = os.getcwd()
    else:
        pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)

    LGR.info("Saving output maps...")
    res.save_maps(output_dir=output_dir, prefix=prefix)
    LGR.info("Workflow completed.")
    LGR.info(boilerplate)
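# Hypothetical invocation of the workflow above (file names and prefix are
# placeholders, not taken from the original source):
contrast_images = ["sub-01_con.nii.gz", "sub-02_con.nii.gz",
                   "sub-03_con.nii.gz"]
conperm_workflow(contrast_images, output_dir="conperm_results",
                 prefix="demo_", n_iters=10000)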
Example #50
plt.xlabel("subject")
plt.legend(loc="best")

###############################################################################
# Inference with massively univariate model
# -----------------------------------------
print("Massively univariate model")

nifti_masker = NiftiMasker()
gm_maps_masked = nifti_masker.fit_transform(gray_matter_map_filenames)
data = variance_threshold.fit_transform(gm_maps_masked)

# Statistical inference
from nilearn.mass_univariate import permuted_ols
neg_log_pvals, t_scores_original_data, _ = permuted_ols(
    age,
    data,  # + intercept as a covariate by default
    n_perm=2000,  # 2,000 in the interest of time; 10,000 would be better
    n_jobs=1)  # can be changed to use more CPUs
signed_neg_log_pvals = neg_log_pvals * np.sign(t_scores_original_data)
signed_neg_log_pvals_unmasked = nifti_masker.inverse_transform(
    variance_threshold.inverse_transform(signed_neg_log_pvals))

# Show results
threshold = -np.log10(0.1)  # 10% corrected

fig = plt.figure(figsize=(5.5, 7.5), facecolor='k')

display = plot_stat_map(signed_neg_log_pvals_unmasked,
                        bg_img=bg_filename,
                        threshold=threshold,
                        cmap=plt.cm.RdBu_r)
##############################################################################
# Anova (parametric F-scores)
from sklearn.feature_selection import f_regression
_, pvals_anova = f_regression(fmri_masked, tested_var, center=True)
pvals_anova *= fmri_masked.shape[1]
pvals_anova[np.isnan(pvals_anova)] = 1
pvals_anova[pvals_anova > 1] = 1
neg_log_pvals_anova = -np.log10(pvals_anova)
neg_log_pvals_anova_unmasked = nifti_masker.inverse_transform(
    neg_log_pvals_anova)

##############################################################################
# Perform massively univariate analysis with permuted OLS
neg_log_pvals_permuted_ols, _, _ = permuted_ols(
    tested_var,
    fmri_masked,
    model_intercept=True,
    n_perm=5000,  # 5,000 for the sake of time. Ideally, this should be 10,000
    n_jobs=1)  # can be changed to use more CPUs
neg_log_pvals_permuted_ols_unmasked = nifti_masker.inverse_transform(
    np.ravel(neg_log_pvals_permuted_ols))

##############################################################################
# Visualization
from nilearn.plotting import plot_stat_map, show

# Various plotting parameters
z_slice = 12  # plotted slice

threshold = -np.log10(0.1)  # 10% corrected
vmax = min(np.amax(neg_log_pvals_permuted_ols), np.amax(neg_log_pvals_anova))
    session_house_mask = np.logical_and(session_mask,
                                        conditions[condition_mask] == 'house')
    session_face_mask = np.logical_and(session_mask,
                                       conditions[condition_mask] == 'face')
    grouped_fmri_masked[2 * s] = fmri_masked[session_house_mask].mean(0)
    grouped_fmri_masked[2 * s + 1] = fmri_masked[session_face_mask].mean(0)
    grouped_conditions_encoded[2 * s] = conditions_encoded[
        session_house_mask][0]
    grouped_conditions_encoded[2 * s + 1] = conditions_encoded[
        session_face_mask][0]

### Perform massively univariate analysis with permuted OLS ###################
# We use a two-sided t-test to compute p-values, but we keep track of the
# effect sign to add it back at the end, so as to observe the signed effect
neg_log_pvals, t_scores_original_data, _ = permuted_ols(
    grouped_conditions_encoded, grouped_fmri_masked,
    # + intercept as a covariate by default
    n_perm=10000, two_sided_test=True,
    n_jobs=1)  # can be changed to use more CPUs
signed_neg_log_pvals = neg_log_pvals * np.sign(t_scores_original_data)
signed_neg_log_pvals_unmasked = nifti_masker.inverse_transform(
    signed_neg_log_pvals).get_data()

### scikit-learn F-scores for comparison ######################################
# The F-test does not allow one to observe the effect sign (purely two-sided)
from nilearn._utils.fixes import f_regression
_, pvals_bonferroni = f_regression(
    grouped_fmri_masked,
    grouped_conditions_encoded)  # f_regression implicitly adds intercept
pvals_bonferroni *= fmri_masked.shape[1]
pvals_bonferroni[np.isnan(pvals_bonferroni)] = 1
pvals_bonferroni[pvals_bonferroni > 1] = 1
nifti_masker = NiftiMasker(mask=dataset_files.mask,
                           memory='nilearn_cache',
                           memory_level=1)  # cache options
fmri_masked = nifti_masker.fit_transform(dataset_files.func)

### Restrict to faces and houses ##############################################
conditions_encoded, _ = np.loadtxt(
    dataset_files.session_target).astype("int").T
conditions = np.recfromtxt(dataset_files.conditions_target)['f0']
condition_mask = np.logical_or(conditions == 'face', conditions == 'house')
conditions_encoded = conditions_encoded[condition_mask]
fmri_masked = fmri_masked[condition_mask]

### Perform massively univariate analysis with permuted OLS ###################
neg_log_pvals, all_scores, _ = permuted_ols(
    conditions_encoded,
    fmri_masked,  # + intercept as a covariate by default
    n_perm=10000,
    n_jobs=1)  # can be changed to use more CPUs
neg_log_pvals_unmasked = nifti_masker.inverse_transform(
    neg_log_pvals).get_data()

### scikit-learn F-scores for comparison ######################################
from nilearn._utils.fixes import f_regression
_, pvals_bonferroni = f_regression(
    fmri_masked, conditions_encoded)  # f_regression implicitly adds intercept
pvals_bonferroni *= fmri_masked.shape[1]
pvals_bonferroni[np.isnan(pvals_bonferroni)] = 1
pvals_bonferroni[pvals_bonferroni > 1] = 1
neg_log_pvals_bonferroni = -np.log10(pvals_bonferroni)
neg_log_pvals_bonferroni_unmasked = nifti_masker.inverse_transform(
    neg_log_pvals_bonferroni).get_data()
from nilearn.input_data import NiftiMasker
from nilearn.mass_univariate import permuted_ols

### Load Localizer motor contrast #############################################
n_samples = 20
dataset_files = datasets.fetch_localizer_calculation_task(n_subjects=n_samples)

### Mask data #################################################################
nifti_masker = NiftiMasker(
    memory='nilearn_cache', memory_level=1)  # cache options
fmri_masked = nifti_masker.fit_transform(dataset_files.cmaps)

### Perform massively univariate analysis with permuted OLS ###################
tested_var = np.ones((n_samples, 1), dtype=float)  # intercept
neg_log_pvals, all_scores, h0 = permuted_ols(
    tested_var, fmri_masked, model_intercept=False,
    n_perm=10000,
    n_jobs=1)  # can be changed to use more CPUs
neg_log_pvals_unmasked = nifti_masker.inverse_transform(
    np.ravel(neg_log_pvals))

### scikit-learn F-scores for comparison ######################################
from nilearn._utils.fixes import f_regression
_, pvals_bonferroni = f_regression(fmri_masked, tested_var, center=False)
pvals_bonferroni *= fmri_masked.shape[1]
pvals_bonferroni[np.isnan(pvals_bonferroni)] = 1
pvals_bonferroni[pvals_bonferroni > 1] = 1
neg_log_pvals_bonferroni = -np.log10(pvals_bonferroni)
neg_log_pvals_bonferroni_unmasked = nifti_masker.inverse_transform(
    neg_log_pvals_bonferroni)

### Visualization #############################################################
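# A hedged sketch of the visualization step (plotting parameters are
# illustrative; it mirrors the plot_stat_map pattern used elsewhere in
# these examples):
from nilearn.plotting import plot_stat_map, show

threshold = -np.log10(0.1)  # 10% corrected
plot_stat_map(neg_log_pvals_unmasked, threshold=threshold,
              display_mode='z', cut_coords=[12],
              title='Permuted OLS (negative log10 p-values)')
show()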