Пример #1
0
def simulate_two_group_example(delta=0.8, nl=90, sl=30, N=25):
    """Run a single two-group model simulation and display the resulting
    map of t-statistics.

    Input arguments:
    ================
    delta : float
        Effect size (Cohen's d).
    nl, sl : int
        The sizes of the signal and noise regions.
    N : int
        Sample size in each of the two groups.
    """
    # Only the t-statistics are visualized; the p-values are discarded.
    _, tstats = square_grid_model(nl, sl, N, delta, equal_var=True)[0:2]

    sns.set_style('white')
    figure = plt.figure(figsize=(5, 5))
    axes = figure.add_subplot(111)
    image = axes.imshow(tstats,
                        origin='lower',
                        interpolation='none',
                        aspect='auto',
                        cmap='gray')
    cbar = figure.colorbar(image)
    cbar.set_label('T-statistic')
    figure.tight_layout()
    plt.show()
Пример #2
0
def two_group_reproducibility(effect_sizes,
                              emphasis_primary,
                              nl=90,
                              sl=30,
                              alpha=0.05,
                              N=25,
                              n_iter=10,
                              method=tst):
    """Compute reproducibility in the two-group model under various effect
    sizes and amounts of emphasis on the primary study.

    Input arguments:
    ================
    effect_sizes : ndarray [n_effect_sizes, ]
        The tested effect sizes.

    emphasis_primary : ndarray [n_emphasis_values, ]
        The tested amounts of emphasis on the primary study.

    nl, sl : int
        The sizes of the noise and signal regions respectively.

    alpha : float
        The desired critical level.

    N : int
        Sample size in each of the two groups.

    n_iter : int
        Number of repetitions of the simulation at each
        (effect size, emphasis) pair.

    method : function
        The correction procedure applied within the FWER replicability
        method.

    Output arguments
    ================
    reproducibility : ndarray [n_effect_sizes, n_emphasis_values]
        The observed reproducibility at the tested effect sizes and amounts
        of emphasis on the primary study.
    """
    n_effect_sizes, n_emphasis = len(effect_sizes), len(emphasis_primary)

    # Reproducibility rate for each effect size and primary study emphasis,
    # for several iterations.
    reproducible = np.zeros([n_effect_sizes, n_emphasis, n_iter])

    for ind in np.ndindex(n_effect_sizes, n_emphasis, n_iter):
        # Simulate new primary and follow-up experiments.
        delta, emphasis = effect_sizes[ind[0]], emphasis_primary[ind[1]]
        X_pri = square_grid_model(nl, sl, N, delta, equal_var=True)[0]
        X_fol = square_grid_model(nl, sl, N, delta, equal_var=True)[0]
        X_pri, X_fol = X_pri.flatten(), X_fol.flatten()

        # Apply the correction and compute reproducibility as the fraction
        # of true effects that were found reproducible.
        R = fwer_replicability(X_pri, X_fol, emphasis, method, alpha)
        R = np.reshape(R, [nl, nl])
        tp, _, _, fn = grid_model_counts(R, nl, sl)
        reproducible[ind] = tp / float(tp + fn)

    # Average over the iterations.
    reproducible = np.mean(reproducible, axis=2)
    return reproducible
Пример #3
0
def two_group_model_power(nl=90,
                          sl=30,
                          deltas=np.linspace(0.2, 2.4, 12),
                          alpha=0.05,
                          N=25,
                          n_iter=10,
                          verbose=True):
    """Generate data under the two-group model and visualize power as a
    function of effect size.

    Input arguments:
    ================
    nl : int
        The length of a side of the simulated grid. There will be a total
        of nl squared tests.
    sl : int
        The length of a side of the signal region. In the simulation, there
        will be a total of sl squared tests where the alternative
        hypothesis is true.
    deltas : ndarray
        The tested effect sizes.
    alpha : float
        The desired critical level.
    N : int
        Sample size in each of the two groups.
    n_iter : int
        Number of iterations used for estimating the power.
    verbose : bool
        Flag for deciding whether to print progress reports to the console.
    """
    # One power estimate per (effect size, iteration) pair.
    n_deltas = len(deltas)
    pwr = np.zeros([n_deltas, n_iter])

    # Simulate data at each effect size and compute the empirical power.
    for i, delta in enumerate(deltas):
        if verbose:
            print('Effect size: %1.3f' % delta)
        for j in range(n_iter):
            pvals = square_grid_model(nl, sl, N, delta, equal_var=True)[0]
            decisions = tst(pvals.flatten(), alpha).reshape(nl, nl)
            tp, _, _, fn = grid_model_counts(decisions, nl, sl)
            pwr[i, j] = empirical_power(tp, tp + fn)

    # Average the power estimates over the iterations.
    return np.mean(pwr, axis=1)
Пример #4
0
def permutation_test_fwer_replicability(effect_sizes,
                                        emphasis_primary,
                                        nl=90,
                                        sl=30,
                                        alpha=0.05,
                                        N=25,
                                        n_iter=20,
                                        t_threshold=1.0):
    """Estimate reproducibility in the two-group model using the
    Maris-Oostenveld permutation test with the Phipson-Smyth p-value
    correction.

    Input arguments:
    ================
    effect_sizes : ndarray
        Tested effect sizes (Cohen's d's).

    emphasis_primary : ndarray
        Amount of emphasis placed on the primary study.

    nl, sl : int
        The sizes of the noise and signal regions respectively.

    alpha : float
        The desired critical level.

    N : int
        Sample size in each of the two groups.

    n_iter : int
        Number of repetitions of the simulation at each distinct
        effect size.

    t_threshold : float
        The t-threshold used in the permutation test. NOTE(review): not
        used in the current RFT-based code path; kept for backward
        compatibility with the permutation-test variant.

    Output arguments:
    =================
    reproducibility : ndarray [n_effect_sizes, n_emphasis]
        The observed reproducibility, averaged over iterations. The
        results are also shown as a plot.
    """
    n_effect_sizes = len(effect_sizes)
    n_emphasis = len(emphasis_primary)
    reproducibility = np.zeros([n_effect_sizes, n_emphasis, n_iter])

    # Estimate reproducibility at each effect size.
    for ind in np.ndindex(n_effect_sizes, n_emphasis, n_iter):
        # Generate new t-statistic maps for the primary and follow-up
        # studies.
        delta, emphasis = effect_sizes[ind[0]], emphasis_primary[ind[1]]
        T_primary = square_grid_model(nl, sl, N, delta)[1]
        T_followup = square_grid_model(nl, sl, N, delta)[1]

        # Apply the RFT-based replicability method and compute the fraction
        # of true effects found reproducible.
        R = fwer_rftrep(T_primary, T_followup, rft_2d, emphasis, alpha)
        tp, _, _, fn = grid_model_counts(R, nl, sl)
        reproducibility[ind] = tp / float(tp + fn)

    reproducibility = np.mean(reproducibility, axis=2)

    # Visualize the results: data points plus connecting lines.
    sns.set_style('white')
    fig = plt.figure(figsize=(8, 5))
    ax = fig.add_subplot(111)
    ax.plot(effect_sizes, reproducibility, '.')
    ax.plot(effect_sizes, reproducibility, '-')
    fig.tight_layout()
    plt.show()

    # Return the computed result so callers do not have to re-run the
    # simulation to obtain the numbers behind the plot.
    return reproducibility
Пример #5
0
def direct_replication_fwer_partial_conjunction():
    """Compare the partial conjunction and FWER replicability methods
    using the two-group model.

    Takes no arguments; simulation parameters are hard-coded below. The
    comparison is shown as a plot and nothing is returned.
    """

    # Sample size per group, grid side length, signal-region side length.
    N, nl, sl = 25, 90, 30
    effect_sizes = np.linspace(0.6, 2.4, 12)
    n_effect_sizes = len(effect_sizes)
    # Correction procedure used within both replicability methods.
    method = lsu  # hochberg #bonferroni
    # Tested amounts of emphasis placed on the primary study.
    emphasis = np.asarray(
        [0.02, 0.05, 0.10, 0.30, 0.50, 0.70, 0.90, 0.95, 0.98])
    n_emphasis = len(emphasis)
    """Generate the test data."""
    print('Simulating primary and follow-up experiments ..')

    # Allocate memory: uncorrected p-values of the primary and follow-up
    # (secondary) experiments, one nl-by-nl grid per effect size.
    pvals_pri = np.zeros([n_effect_sizes, nl, nl])
    pvals_sec = np.zeros(np.shape(pvals_pri))

    # Obtain the uncorrected p-values.
    for i, delta in enumerate(effect_sizes):
        pvals_pri[i] = square_grid_model(nl, sl, N, delta)[0]
        pvals_sec[i] = square_grid_model(nl, sl, N, delta)[0]
    """Find reproducible effects using the FWER replicability
    method."""
    print('Estimating reproducibility: FWER replicability ..')

    repr_fwer = np.zeros([n_effect_sizes, n_emphasis])

    # NOTE: np.ndindex yields index tuples, so i[0] indexes the effect-size
    # axis and i[1] the emphasis axis.
    for i in np.ndindex(n_effect_sizes, n_emphasis):
        # Find reproducible effects and rearrange the data.
        result = fwer_replicability(pvals_pri[i[0]].flatten(),
                                    pvals_sec[i[0]].flatten(), emphasis[i[1]],
                                    method)
        result = np.reshape(result, [nl, nl])

        # Compute the fraction of true effects found reproducible;
        # sl**2 is the total number of true effects in the grid.
        repr_fwer[i] = (grid_model_counts(result, nl, sl)[0] / float(sl**2))
    """Find reproducible effects using the partial conjuction
    method."""
    print('Estimating reproducibility: Partial conjuction ..')

    repr_part = np.zeros([n_effect_sizes])

    # Here each i is a one-element tuple, indexing the effect-size axis.
    for i in np.ndindex(n_effect_sizes):
        result = partial_conjuction(pvals_pri[i].flatten(),
                                    pvals_sec[i].flatten(), method)
        result = np.reshape(result, [nl, nl])
        repr_part[i] = (grid_model_counts(result, nl, sl)[0] / float(sl**2))
    """Visualize the data."""
    sns.set_style('white')
    fig = plt.figure(figsize=(8, 5))
    ax = fig.add_subplot(111)

    # FWER results split at emphasis 0.5: low emphasis in black, high
    # emphasis in green; partial conjunction in blue.
    plot_logistic(effect_sizes,
                  repr_fwer[:, emphasis <= 0.5],
                  ax=ax,
                  color='k')
    plot_logistic(effect_sizes, repr_fwer[:, emphasis > 0.5], ax=ax, color='g')
    plot_logistic(effect_sizes, repr_part, ax=ax, color='b')

    ax.set_xlabel('Effect size')
    ax.set_ylabel('Reproducibility rate')

    fig.tight_layout()
    plt.show()
Пример #6
0
def rvalue_test(effect_sizes=np.linspace(0.2, 2.4, 12),
                emphasis=np.asarray([0.02, 0.5, 0.98]),
                method=tst,
                nl=90,
                sl=30,
                N=25,
                alpha=0.05,
                n_iter=10):
    """Simulate primary and follow-up experiments using the two-group model
    and test which effects are reproducible using the FDR r-value method.

    Input arguments:
    ================
    effect_sizes : ndarray [n_effect_sizes, ]
        The tested effect sizes.

    emphasis : ndarray [n_emphasis, ]
        The tested amounts of emphasis placed on the primary study.

    method : function
        The applied correction procedure.

    nl, sl : int
        The sizes of the noise and signal regions respectively.

    N : int
        The sample size in both groups.

    alpha : float
        The critical level. Default value is 0.05.

    n_iter : int
        The number of repetitions of each simulation.

    Output arguments:
    =================
    reproducibility : ndarray [n_effect_sizes, n_emphasis]
        The observed reproducibility, averaged over iterations.
    """
    n_emphasis = len(emphasis)
    n_effect_sizes = len(effect_sizes)
    reproducibility = np.zeros([n_iter, n_effect_sizes, n_emphasis])

    for ind in np.ndindex(n_iter, n_effect_sizes, n_emphasis):
        # Simulate primary and follow-up experiments.
        delta, emph = effect_sizes[ind[1]], emphasis[ind[2]]
        p1 = square_grid_model(delta=delta, nl=nl, sl=sl, N=N)[0]
        p2 = square_grid_model(delta=delta, nl=nl, sl=sl, N=N)[0]

        # Test which hypotheses are significant in the primary study. This
        # is done for selecting hypotheses for the follow-up study. The
        # q-value method returns a tuple whose first element holds the
        # decisions.
        if (method.__name__ == 'qvalue'):
            significant_primary = method(p1.flatten(), alpha)[0]
        else:
            significant_primary = method(p1.flatten(), alpha)
        significant_primary = np.reshape(significant_primary, [nl, nl])

        # If there were significant hypotheses in the primary study, apply
        # the r-value method to test which ones can be replicated in the
        # follow-up study. Otherwise the reproducibility stays zero.
        if (np.sum(significant_primary) > 0):
            rvals = fdr_rvalue(p1=p1[significant_primary],
                               p2=p2[significant_primary],
                               m=nl**2,
                               c2=emph)
            R = np.ones(np.shape(p1))
            R[significant_primary] = rvals
            tp, _, _, fn = grid_model_counts(R < alpha, nl, sl)
            reproducibility[ind] = tp / float(tp + fn)

    # Average over the iterations.
    reproducibility = np.mean(reproducibility, axis=0)
    return reproducibility
Пример #7
0
# Simulation parameters. NOTE(review): nl and sl (grid and signal-region
# side lengths) are assumed to be defined earlier in the file — confirm.
N = 25  # sample size in each of the two groups
alpha = 0.05  # critical level
d = (nl - sl) // 2  # margin between the grid edge and the signal region
n_iterations = 20
deltas = np.linspace(0.5, 1.5, 20)  # tested effect sizes (Cohen's d)
n_deltas = len(deltas)
"""Estimate reproducibility and true positive rate in simulated test-retest
experiments using the two-group model."""
n_reproducible = np.zeros([n_iterations, n_deltas])
n_tp = np.zeros([2, n_iterations, n_deltas])  # test and retest conditions

for i, delta in enumerate(deltas):
    print('Estimating reproducibility at effect size d=%1.3f' % delta)
    for j in np.arange(0, n_iterations):
        """Generate test and retest data."""
        X_test = square_grid_model(nl, sl, N, delta, equal_var=True)[0]
        X_retest = square_grid_model(nl, sl, N, delta, equal_var=True)[0]
        """Correct the statistics for multiple testing."""
        Y_test = lsu(X_test.flatten(), alpha)
        Y_test = Y_test.reshape(nl, nl)

        Y_retest = lsu(X_retest.flatten(), alpha)
        Y_retest = Y_retest.reshape(nl, nl)
        """Count how many discoveries were reproducible."""
        # The slice d:(nl - d) selects the signal region; a discovery is
        # counted as reproducible when it is significant in both the test
        # and retest datasets.
        n_reproducible[j, i] = np.sum(Y_retest[d:(nl - d),
                                               d:(nl - d)][Y_test[d:(nl - d),
                                                                  d:(nl - d)]])
        """Count the number of true positives in test and retest sets."""
        n_tp[0, j, i] = np.sum(Y_test[d:(nl - d), d:(nl - d)])
        n_tp[1, j, i] = np.sum(Y_retest[d:(nl - d), d:(nl - d)])
"""Visualize the results."""
Пример #8
0
    Neuroscience 4(4):417-422.
"""

import numpy as np

from fdr import lsu, qvalue, tst
from fwer import sidak, holm_bonferroni, hochberg
from data import square_grid_model
from permutation import tfr_permutation_test
from rft import rft_2d
from util import grid_model_counts, roc
from viz import plot_grid_model
"""Generate the test data."""
nl, sl = 90, 30
N, delta = 25, 0.7
X, X_tstats, X_raw, Y_raw = square_grid_model(nl, sl, N, delta, equal_var=True)
alpha = 0.05
"""Apply each correction technique to the generated dataset."""
Y_sidak = sidak(X.flatten(), alpha=alpha)
Y_sidak = Y_sidak.reshape(nl, nl)

Y_fdr = lsu(X.flatten(), q=alpha)
Y_fdr = Y_fdr.reshape(nl, nl)

Y_qvalue, _ = qvalue(X.flatten(), threshold=alpha)
Y_qvalue = Y_qvalue.reshape(nl, nl)

Y_rft, Y_smooth, _ = rft_2d(X_tstats, fwhm=30, alpha=alpha, verbose=True)
# No reshape needed since already in correct form.

Y_tst = tst(X.flatten(), q=alpha)
Пример #9
0
def two_group_model_power(deltas,
                          method,
                          nl=90,
                          sl=30,
                          alpha=0.05,
                          N=25,
                          n_iter=20,
                          verbose=True):
    """Generate data under the two-group model at various effect sizes and
    compute the corresponding empirical power and false positive rate.

    Input arguments:
    ================
    deltas : ndarray
        The tested effect sizes.

    method : function
        The applied correction procedure. Must accept (pvals, alpha) and
        return a flat array of decisions.

    nl : int
        The length of a side of the simulated grid. There will be a total
        of nl squared tests.

    sl : int
        The length of a side of the signal region. In the simulation, there
        will be a total of sl squared tests where the alternative
        hypothesis is true.

    alpha : float
        The desired critical level.

    N : int
        Sample size in each of the two groups.

    n_iter : int
        Number of iterations used for estimating the power.

    verbose : bool
        Flag for deciding whether to print progress reports to the console.

    Output arguments:
    =================
    pwr : ndarray [n_deltas, ]
        The empirical power at each tested effect size, averaged over
        the iterations.

    fpr : ndarray [n_deltas, ]
        The corresponding false positive rates, averaged over the
        iterations.
    """
    # Allocate memory for the per-iteration results.
    n_deltas = len(deltas)
    pwr, fpr = np.zeros([n_deltas, n_iter]), np.zeros([n_deltas, n_iter])

    # Simulate data at each effect size and compute empirical power.
    for i, delta in enumerate(deltas):
        if (verbose):
            print('Effect size: %1.3f' % delta)
        for j in np.arange(0, n_iter):
            X = square_grid_model(nl, sl, N, delta, equal_var=True)[0]

            # NOTE: the q-value method returns a tuple with the first
            # element containing the decisions.
            Y = method(X.flatten(), alpha)
            Y = Y.reshape(nl, nl)
            tp, fp, _, fn = grid_model_counts(Y, nl, sl)
            pwr[i, j] = empirical_power(tp, tp + fn)
            # nl**2 - sl**2 is the number of true-null tests in the grid.
            fpr[i, j] = float(fp) / float(nl**2 - sl**2)

    return np.mean(pwr, axis=1), np.mean(fpr, axis=1)