def compute(cls, observation, prediction):
    """Run a paired Student's t-test of *prediction* against *observation*.

    Both arguments are equal-length samples. Returns a
    StudentsPairedTestScore carrying the t statistic, with the degrees of
    freedom, p-value, achieved power and mean difference as related data.
    """
    n = len(observation)
    t_stat, pvalue = st.ttest_rel(prediction, observation)
    # Cohen's d_z for a paired design is t / sqrt(n); use it as the effect
    # size when estimating the achieved power at alpha = 0.05.
    achieved_power = pw.TTestPower().power(
        effect_size=t_stat / n**0.5,
        nobs=n,
        alpha=0.05,
    )
    mean_difference = numpy.mean(prediction) - numpy.mean(observation)
    related = {
        "dof": n - 1,
        "p_value": pvalue,
        "power": achieved_power,
        "diffmean": mean_difference,
    }
    return StudentsPairedTestScore(t_stat, related_data=related)
# Spot-checks of statsmodels power routines against reference values
# computed in R (pwr package). Positional args to GofChisquarePower().power
# are (effect_size, nobs, n_bins, alpha).
# NOTE(review): the first print compares a `chi2_pow` assigned earlier in
# the file (not visible here); `smp` and `nobs` are also defined above —
# presumably smp is statsmodels.stats.power. Confirm against the full file.
chi2_pow_R = 0.675077657003721
print('chi2_pow', chi2_pow, chi2_pow - chi2_pow_R)

# Tiny effect size: power should be barely above alpha.
chi2_pow = smp.GofChisquarePower().power(0.01, 100, 4, 0.05)
chi2_pow_R = 0.0505845519208533
print('chi2_pow', chi2_pow, chi2_pow - chi2_pow_R)

# Huge effect size: power saturates at 1.
chi2_pow = smp.GofChisquarePower().power(2, 100, 4, 0.05)
chi2_pow_R = 1
print('chi2_pow', chi2_pow, chi2_pow - chi2_pow_R)

chi2_pow = smp.GofChisquarePower().power(0.9, 100, 4, 0.05)
chi2_pow_R = 0.999999999919477
print('chi2_pow', chi2_pow, chi2_pow - chi2_pow_R, 'lower precision ?')

chi2_pow = smp.GofChisquarePower().power(0.8, 100, 4, 0.05)
chi2_pow_R = 0.999999968205591
print('chi2_pow', chi2_pow, chi2_pow - chi2_pow_R)


def cohen_es(*args, **kwds):
    # Placeholder: effect-size conventions are domain-specific, so this
    # intentionally only warns instead of computing anything.
    print(
        "You better check what's a meaningful effect size for your question.")


#BUG: after fixing 2.sided option, 2 rejection areas
# One-sample t-test power check against R's pwr.t.test; `nobs` is defined
# earlier in the file (the R reference value below was computed with n=80).
tt_pow = smp.TTestPower().power(effect_size=0.01, nobs=nobs, alpha=0.05)
tt_pow_R = 0.05089485285965
# value from> pwr.t.test(d=0.01,n=80,sig.level=0.05,type="one.sample",alternative="two.sided")
print('tt_pow', tt_pow, tt_pow - tt_pow_R)
def calculate_paired_ttest_php(es, n_per_group):
    """Print the two-sided power of a paired t-test for effect size *es*.

    Uses alpha = 0.05 and *n_per_group* observations; the result is rounded
    to three decimals and printed rather than returned.
    """
    solver = smp.TTestPower()
    # NOTE(review): df = 2*n - 1 is unusual for a paired design (n - 1 pairs
    # is conventional) — presumably this mirrors the PHP calculator the
    # function is named after; confirm against that reference.
    achieved = solver.power(
        effect_size=es,
        alpha=0.05,
        nobs=n_per_group,
        df=2 * n_per_group - 1,
        alternative='two-sided',
    )
    print("Power = {}".format(round(achieved, 3)))
def compute(cls, observation, prediction):
    """Compute a t statistic and a p_value from an observation and a prediction.

    *prediction* is a dict with keys 'mean', 'std' and 'n'. *observation*
    is either a dict of the same shape (two-sample test) or a bare number
    treated as a known mean (one-sample test). Returns a StudentsTestScore
    whose value is the t statistic, with dof, p-value, power and the mean
    difference attached as related data.
    """
    p_mean = prediction['mean']
    p_std = prediction['std']
    p_n = prediction['n']
    p_var = p_std**2
    #2 samples t-test
    if isinstance(observation, dict):
        o_mean = observation['mean']
        o_std = observation['std']
        o_n = observation['n']
        o_var = o_std**2
        #If the 2 variances are too different (ratio > 2, a common rule of
        #thumb), perform a Welch t-test
        if p_var / o_var > 2 or o_var / p_var > 2:
            value, p_val = st.ttest_ind_from_stats(p_mean, p_std, p_n,
                                                   o_mean, o_std, o_n,
                                                   equal_var=False)
            vnp = p_var / p_n
            vno = o_var / o_n
            #Welch-Satterthwaite equation to compute the degrees of freedom
            dof = (vnp + vno)**2 / (vnp**2 / (p_n - 1) + vno**2 / (o_n - 1))
        #If the 2 variances are similar, perform a 2 sample independant
        #Student t-test (pooled variance, dof = n1 + n2 - 2)
        else:
            value, p_val = st.ttest_ind_from_stats(p_mean, p_std, p_n,
                                                   o_mean, o_std, o_n,
                                                   equal_var=True)
            dof = o_n + p_n - 2
        #Compute the statistical power of the test, using Cohen's d as the
        #effect size and alpha = 0.05
        power = pw.TTestIndPower().power(effect_size=CohenDScore.compute(
            observation, prediction).score,
                                         nobs1=p_n,
                                         ratio=float(o_n) / p_n,
                                         alpha=0.05)
    #1 sample t-test: a degenerate second "sample" with std2=0, nobs2=2
    #makes scipy's Welch statistic reduce to the one-sample t statistic
    #(NOTE(review): the Welch dof then also collapses to p_n - 1, matching
    #the dof reported below — confirm against scipy's formula)
    else:
        value, p_val = st.ttest_ind_from_stats(p_mean,
                                               p_std,
                                               p_n,
                                               observation,
                                               std2=0,
                                               nobs2=2,
                                               equal_var=False)
        #Compute the statistical power of the test against the fixed mean
        power = pw.TTestPower().power(effect_size=CohenDScore.compute(
            {
                "mean": observation,
                "std": 0
            }, prediction).score,
                                      nobs=p_n,
                                      alpha=0.05)
        o_mean = observation
        dof = p_n - 1
    return StudentsTestScore(value,
                             related_data={
                                 "dof": dof,
                                 "p_value": p_val,
                                 "power": power,
                                 "diffmean": p_mean - o_mean
                             })