Example #1
import numpy as np

from netneurotools import datasets, stats


def test_permtest_pearsonr():
    # fix the seed so the generated data (and thus the p-values) are reproducible
    np.random.seed(12345678)

    # weakly correlated data: expect a small r and a non-significant p
    x, y = datasets.make_correlated_xy(corr=0.1, size=100)
    r, p = stats.permtest_pearsonr(x, y)
    assert np.allclose([r, p], [0.10032564626876286, 0.3046953046953047])

    x, y = datasets.make_correlated_xy(corr=0.5, size=100)
    r, p = stats.permtest_pearsonr(x, y)
    assert np.allclose([r, p], [0.500040365781984, 0.000999000999000999])

    # a 2D second input correlates ``x`` against each column separately
    z = x + np.random.normal(loc=1, size=100)
    r, p = stats.permtest_pearsonr(x, np.column_stack([y, z]))
    assert np.allclose(r, np.array([0.50004037, 0.25843187]))
    assert np.allclose(p, np.array([0.000999, 0.01098901]))

    # paired 2D inputs are correlated column-by-column
    a, b = datasets.make_correlated_xy(corr=0.9, size=100)
    r, p = stats.permtest_pearsonr(np.column_stack([x, a]),
                                   np.column_stack([y, b]))
    assert np.allclose(r, np.array([0.50004037, 0.89927523]))
    assert np.allclose(p, np.array([0.000999, 0.000999]))
Example #2
# (i.e., only 727 out of the 1000 parcels were assigned in the first rotation),
# and (2) the distance from the original parcels to the re-assigned parcels has
# increased substantially relative to the `fsaverage6` data.
#
# This latter point makes sense: our parcellation provides a much sparser
# sampling of the cortical surface, so naturally parcels will be farther away
# from one another. The first issue of parcel re-assignment, however, is a bit
# more problematic. At the vertex level, where we are densely sampling the
# surface, it may not matter much that some vertices are re-assigned multiple
# times. But parcels are more "independent" units, and losing up to 300 of them
# on each rotation may not be desirable.
#
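# As a quick sanity check, we can count how many parcels actually get assigned
# on each rotation (a minimal sketch, assuming ``spins`` is the
# (n_parcels, n_rotations) array of re-assignment indices generated above):

import numpy as np

# number of unique parcel assignments per rotation
n_assigned = [len(np.unique(spins[:, i])) for i in range(spins.shape[1])]
print('parcels assigned per rotation: {}-{}'.format(min(n_assigned),
                                                    max(n_assigned)))

###############################################################################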
# Nonetheless, we will use these spins to generate a spatial
# permutation-derived p-value for the correlation of our original data:

r, p = nnstats.permtest_pearsonr(rsq, grad, resamples=spins, seed=1234)
print('r = {:.2f}, p = {:.4g}'.format(r, p))

###############################################################################
# (Note that the minimum p-value attainable from a permutation test is ``1 /
# (n_perm + 1)``.)
#
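# For example, with 1000 permutations the smallest attainable p-value is
# 1 / 1001 ≈ 0.000999 (a quick check of that arithmetic, assuming the default
# ``n_perm`` of 1000):

n_perm = 1000  # assumed default number of permutations
print('minimum attainable p-value: {:.6f}'.format(1 / (n_perm + 1)))

###############################################################################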
# The benefit of generating our resampling array independent of a statistical
# test is that we can re-use it for any number of applications. If we wanted to
# run a Spearman correlation instead of a Pearson correlation, we could simply
# rank the data first, since the Spearman correlation is just the Pearson
# correlation of ranked data:

from scipy.stats import rankdata

rho, prho = nnstats.permtest_pearsonr(rankdata(rsq), rankdata(grad),
                                      resamples=spins, seed=1234)
print('rho = {:.2f}, p = {:.4g}'.format(rho, prho))
Example #3
# of this relationship via permutation tests.
#
# First, we'll generate two correlated variables:

import numpy as np

from scipy import stats
from netneurotools import datasets, stats as nnstats

x, y = datasets.make_correlated_xy(corr=0.2, size=100)

###############################################################################
# We can compute the Pearson correlation with its standard parametric p-value:

print(stats.pearsonr(x, y))

###############################################################################
# Or use permutation testing to derive the p-value:

print(nnstats.permtest_pearsonr(x, y))

###############################################################################
# All the same arguments as with :func:`~.permtest_1samp` and
# :func:`~.permtest_rel` apply here, so you can provide same-sized 2D arrays
# and correlations will be computed only between paired columns:

a, b = datasets.make_correlated_xy(corr=0.9, size=100)
arr1, arr2 = np.column_stack([x, a]), np.column_stack([y, b])
print(nnstats.permtest_pearsonr(arr1, arr2))

###############################################################################
# Or you can change the number of permutations and set a seed for
# reproducibility:

print(nnstats.permtest_pearsonr(arr1, arr2, n_perm=500, seed=2222))
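
###############################################################################
# Because the seed fixes the random permutations, re-running the test with the
# same seed returns identical estimates (a quick, hypothetical check):

r1, p1 = nnstats.permtest_pearsonr(arr1, arr2, n_perm=500, seed=2222)
r2, p2 = nnstats.permtest_pearsonr(arr1, arr2, n_perm=500, seed=2222)
print(np.allclose(r1, r2), np.allclose(p1, p2))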