def test_find_test_statistic(df, tails):
    """
    Check that find_test_statistic inverts find_p_value: converting a known
    test statistic to a p-value and back should recover the original value.
    """
    test_statistic_s = 1.96
    p_value = find_p_value(test_statistic_s, df, tails=tails)
    test_statistic = find_test_statistic(p_value, df, tails=tails)
    # round trip should agree to within floating-point tolerance
    assert np.abs(test_statistic - test_statistic_s) <= 1e-10
def test_two_sample_z_test_validated(test1, test2, sample1, sample2, pooled):
    """
    Check that the test outputs match validated results within an acceptable
    margin of error.
    """
    test_statistic, standard_error = test1(sample1=sample1, sample2=sample2, null_h=0.0, pooled=pooled)
    p_value = find_p_value(test_statistic=test_statistic, df=np.inf, tails=True)
    test_statistic_s, p_value_s = test2(x1=sample1, x2=sample2, value=0.0, usevar="pooled", alternative="two-sided")
    if pooled:
        # pooled variant should match the validated implementation almost exactly
        assert np.abs(test_statistic - test_statistic_s) <= 1e-10
        assert np.abs(p_value - p_value_s) <= 1e-10
    else:
        # reference result is pooled, so only expect agreement to ~2 decimals
        assert np.round(np.abs(p_value - p_value_s), 2) <= 1e-2
        assert p_value < p_value_s  # unpooled p-values consistently skew lower
def test_one_sample_z_test_validated(test1, test2, sample):
    """
    Check that the test outputs match validated results within an acceptable
    margin of error.
    """
    test_statistic, standard_error = test1(sample=sample, null_h=0.0)
    p_value = find_p_value(test_statistic=test_statistic, df=np.inf, tails=True)
    test_statistic_s, p_value_s = test2(x1=sample, value=0.0)
    # one-sample z-test should match the validated implementation almost exactly
    assert np.abs(test_statistic - test_statistic_s) <= 1e-10
    assert np.abs(p_value - p_value_s) <= 1e-10
def test_two_sample_t_test_validated(test1, test2, sample1, sample2, pooled):
    """
    Check that the test outputs match validated results within an acceptable
    margin of error.
    """
    test_statistic, standard_error, degrees_freedom = test1(sample1=sample1, sample2=sample2, null_h=0.0, pooled=pooled)
    p_value = find_p_value(test_statistic=test_statistic, df=degrees_freedom, tails=True)
    # reference test handles pooling via equal_var, so both variants are comparable
    test_statistic_s, p_value_s = test2(a=sample1, b=sample2, equal_var=pooled)
    assert np.abs(test_statistic - test_statistic_s) <= 1e-10
    assert np.abs(p_value - p_value_s) <= 1e-10
def test_one_sample_z_prop_test_validated(test1, test2, sample):
    """
    Check that the test outputs match validated results within an acceptable
    margin of error.
    """
    test_statistic, standard_error = test1(sample=sample, null_h=0.0)
    p_value = find_p_value(test_statistic=test_statistic, df=np.inf, tails=True)
    test_statistic_s, p_value_s = test2(count=sample.sum(), nobs=sample.shape[0], value=0.0, alternative="two-sided", prop_var=False)
    # proportion tests use different variance estimates, so only expect ~2-decimal agreement
    assert np.abs(test_statistic - test_statistic_s) <= 1e-2
    assert np.abs(p_value - p_value_s) <= 1e-2
def test_md5shuffle(sample, random_flg):
    """
    Check that the runs test classifies a sample as random exactly when the
    random_flg fixture says it should be (p-value above the 0.05 threshold).
    """
    z_statistic = runs_test(sample)
    p_value = find_p_value(test_statistic=z_statistic, df=np.inf, tails=True)
    # fail to reject randomness (p > 0.05) iff the sample is flagged as random
    assert (p_value > 0.05) == random_flg