Example 1
# module-level imports assumed by these test snippets (excerpts from the
# statsmodels test suite)
import warnings

import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal,
                           assert_raises, assert_warns)

import statsmodels.stats.power as smp


def test_normal_power_explicit():
    # a few initial test cases for NormalIndPower
    sigma = 1
    d = 0.3
    nobs = 80
    alpha = 0.05
    res1 = smp.normal_power(d, nobs/2., 0.05)
    res2 = smp.NormalIndPower().power(d, nobs, 0.05)
    res3 = smp.NormalIndPower().solve_power(effect_size=0.3, nobs1=80, alpha=0.05, power=None)
    res_R = 0.475100870572638
    assert_almost_equal(res1, res_R, decimal=13)
    assert_almost_equal(res2, res_R, decimal=13)
    assert_almost_equal(res3, res_R, decimal=13)


    norm_pow = smp.normal_power(-0.01, nobs/2., 0.05)
    norm_pow_R = 0.05045832927039234
    #value from R: >pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="two.sided")
    assert_almost_equal(norm_pow, norm_pow_R, decimal=11)

    norm_pow = smp.NormalIndPower().power(0.01, nobs, 0.05,
                                          alternative="larger")
    norm_pow_R = 0.056869534873146124
    #value from R: >pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="greater")
    assert_almost_equal(norm_pow, norm_pow_R, decimal=11)

    # Note: negative effect size is same as switching one-sided alternative
    # TODO: should I switch to larger/smaller instead of "one-sided" options
    norm_pow = smp.NormalIndPower().power(-0.01, nobs, 0.05,
                                          alternative="larger")
    norm_pow_R = 0.0438089705093578
    #value from R: >pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="less")
    assert_almost_equal(norm_pow, norm_pow_R, decimal=11)
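
For orientation, the two-sided value checked against R above comes straight from the normal distribution: with effect size d and an effective sample size of nobs/2, the power of the two-sided z test is the rejection probability in the upper tail plus the (tiny) mass in the lower tail. A minimal sketch, assuming only numpy and scipy.stats, should reproduce it:

# sanity check of the closed-form two-sided power used by smp.normal_power
import numpy as np
from scipy import stats

d, n, alpha = 0.3, 80 / 2, 0.05      # 80 observations, effective n of 40
crit = stats.norm.isf(alpha / 2)     # two-sided critical value
power = (stats.norm.sf(crit - d * np.sqrt(n))
         + stats.norm.cdf(-crit - d * np.sqrt(n)))
print(power)                         # ~0.4751, the R reference value above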
Example 2
def test_power_solver_warn():
    # messing up the solver to trigger warning
    # I wrote this with scipy 0.9,
    # convergence behavior of scipy 0.11 is different,
    # fails at a different case, but is successful where it failed before

    pow_ = 0.69219411243824214 # from previous function
    nip = smp.NormalIndPower()
    # using nobs, has one backup (fsolve)
    nip.start_bqexp['nobs1'] = {'upp': 50, 'low': -20}
    val = nip.solve_power(0.1, nobs1=None, alpha=0.01, power=pow_, ratio=1,
                          alternative='larger')

    assert_almost_equal(val, 1600, decimal=4)
    assert_equal(nip.cache_fit_res[0], 1)
    assert_equal(len(nip.cache_fit_res), 3)

    # case that has convergence failure, and should warn
    nip.start_ttp['nobs1'] = np.nan

    from statsmodels.tools.sm_exceptions import ConvergenceWarning
    assert_warns(ConvergenceWarning, nip.solve_power, 0.1, nobs1=None,
                  alpha=0.01, power=pow_, ratio=1, alternative='larger')
    # this converges with scipy 0.11  ???
    # nip.solve_power(0.1, nobs1=None, alpha=0.01, power=pow_, ratio=1, alternative='larger')

    with warnings.catch_warnings():  # python >= 2.6
        warnings.simplefilter("ignore")
        val = nip.solve_power(0.1, nobs1=None, alpha=0.01, power=pow_, ratio=1,
                              alternative='larger')
        assert_equal(nip.cache_fit_res[0], 0)
        assert_equal(len(nip.cache_fit_res), 3)
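
Outside the test helpers, the same convergence failure can be inspected with the standard warnings machinery. A minimal sketch, reusing the nip instance and pow_ value from the snippet above (hypothetical usage, not part of the test):

# hypothetical: capture the ConvergenceWarning instead of asserting on it
import warnings
from statsmodels.tools.sm_exceptions import ConvergenceWarning

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    nip.solve_power(0.1, nobs1=None, alpha=0.01, power=pow_, ratio=1,
                    alternative='larger')
convergence = [w for w in caught if issubclass(w.category, ConvergenceWarning)]
print(len(convergence))   # >= 1 when the start values have been broken as above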
Example 3
def test_power_solver():
    # messing up the solver to trigger backup

    nip = smp.NormalIndPower()

    # check result
    es0 = 0.1
    pow_ = nip.solve_power(es0,
                           nobs1=1600,
                           alpha=0.01,
                           power=None,
                           ratio=1,
                           alternative='larger')
    # value is regression test
    assert_almost_equal(pow_, 0.69219411243824214, decimal=5)
    es = nip.solve_power(None,
                         nobs1=1600,
                         alpha=0.01,
                         power=pow_,
                         ratio=1,
                         alternative='larger')
    assert_almost_equal(es, es0, decimal=4)
    assert_equal(nip.cache_fit_res[0], 1)
    assert_equal(len(nip.cache_fit_res), 2)

    # cause first optimizer to fail
    nip.start_bqexp['effect_size'] = {'upp': -10, 'low': -20}
    nip.start_ttp['effect_size'] = 0.14
    es = nip.solve_power(None,
                         nobs1=1600,
                         alpha=0.01,
                         power=pow_,
                         ratio=1,
                         alternative='larger')
    assert_almost_equal(es, es0, decimal=4)
    assert_equal(nip.cache_fit_res[0], 1)
    assert_equal(len(nip.cache_fit_res), 3, err_msg=repr(nip.cache_fit_res))

    nip.start_ttp['effect_size'] = np.nan
    es = nip.solve_power(None,
                         nobs1=1600,
                         alpha=0.01,
                         power=pow_,
                         ratio=1,
                         alternative='larger')
    assert_almost_equal(es, es0, decimal=4)
    assert_equal(nip.cache_fit_res[0], 1)
    assert_equal(len(nip.cache_fit_res), 4)

    # I let this case fail, could be fixed for some statistical tests
    # (we shouldn't get here in the first place)
    # effect size is negative, but last stage brentq uses [1e-8, 1-1e-8]
    assert_raises(ValueError,
                  nip.solve_power,
                  None,
                  nobs1=1600,
                  alpha=0.01,
                  power=0.005,
                  ratio=1,
                  alternative='larger')
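
The two regression values above also tie together: with effect size 0.1, alpha 0.01 and a one-sided alternative, a power of about 0.692 corresponds to roughly 1600 observations per group, which is the sample size solved for in the warning test earlier. A minimal round-trip sketch with a fresh solver instance:

# round trip: power at nobs1=1600, then recover nobs1 from that power
import statsmodels.stats.power as smp

nip = smp.NormalIndPower()
pow_ = nip.power(0.1, nobs1=1600, alpha=0.01, ratio=1, alternative='larger')
print(pow_)                  # ~0.69219, the regression value used above
nobs1 = nip.solve_power(0.1, nobs1=None, alpha=0.01, power=pow_,
                        ratio=1, alternative='larger')
print(nobs1)                 # ~1600, recovered by the root finder
print(nip.cache_fit_res[0])  # 1 -> convergence flag for the last solve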
Example 4
# imports assumed by this snippet
import numpy as np
from scipy.stats import norm
import statsmodels.stats.power as smp


def F_transf(r, n, confval=0.95, power=0.8):
    '''
    Fisher transformation and the significance of a correlation r

    :param iterable r: vector with all the correlations to test
    :param int n: sample size
    :param float confval: confidence level (1 - alpha)
    :param float power: target power (1 - beta, with beta the type II
        error probability)
    '''
    alpha = 1 - confval
    fisher = np.arctanh(r)
    # critical value on the Fisher-z scale; the transformed statistic has
    # standard error 1 / sqrt(n - 3)
    z_val = norm.isf(alpha / 2) / np.sqrt(n - 3)
    # sample size needed to detect each correlation with the requested power;
    # ratio=0 gives the one-sample z test, and "+ 3" undoes the n - 3
    # degrees of freedom lost to the Fisher transformation
    ns = [smp.NormalIndPower().solve_power(z, alpha=alpha, ratio=0,
                                           power=power,
                                           alternative='two-sided') + 3
          for z in np.atleast_1d(fisher)]
    return fisher, z_val, ns
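
A short usage sketch for the corrected function above; the returned triple (Fisher-z values, critical value, required sample sizes) follows the fix made here rather than any published API:

# hypothetical usage of F_transf as corrected above
r = [0.2, 0.3, 0.5]          # correlations to test
fisher, z_crit, ns = F_transf(r, n=50, confval=0.95, power=0.8)
print(fisher)                # arctanh-transformed correlations
print(z_crit)                # two-sided critical value on the Fisher-z scale
print(ns)                    # sample size needed to detect each r at 80% power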
Example 5
Author: Josef Perktold
"""

from __future__ import print_function
import numpy as np

import statsmodels.stats.power as smp
import statsmodels.stats.proportion as smpr

sigma = 1
d = 0.3
nobs = 80
alpha = 0.05
print(smp.normal_power(d, nobs / 2, 0.05))
print(smp.NormalIndPower().power(d, nobs, 0.05))
print(smp.NormalIndPower().solve_power(effect_size=0.3,
                                       nobs1=80,
                                       alpha=0.05,
                                       power=None))
print(0.475100870572638, 'R')

norm_pow = smp.normal_power(-0.01, nobs / 2, 0.05)
norm_pow_R = 0.05045832927039234
#value from R: >pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="two.sided")
print('norm_pow', norm_pow, norm_pow - norm_pow_R)

norm_pow = smp.NormalIndPower().power(0.01, nobs, 0.05, alternative="larger")
norm_pow_R = 0.056869534873146124
#value from R: >pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="greater")
print('norm_pow', norm_pow, norm_pow - norm_pow_R)