def test_search_config():
    """The default config template should resolve each experiment field."""
    df = generate_fake_observations(n_observations=1)
    # (search key, expected entry) pairs against the default config template.
    expectations = (
        ('treatment', 'treatment'),
        ('measures', 'metric'),
        ('attributes', 'attr_0'),
    )
    for section, expected in expectations:
        assert expected in config.search_config(df, 'experiment', section)
def proportions_data_large():
    """Return a large Bernoulli dataset: 3 treatments, 4 attributes, 10k rows."""
    return generate_fake_observations(
        n_observations=10000,
        n_treatments=3,
        n_attributes=4,
        distribution='bernoulli',
    )
#!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import pytest

from abra import (
    Experiment,
    HypothesisTest,
    HypothesisTestSuite,
    MultipleComparisonCorrection,
    CustomMetric,
)
from abra.utils import generate_fake_observations

# Shared synthetic datasets used by the tests in this module.
proportions_data_large = generate_fake_observations(
    distribution='bernoulli',
    n_treatments=3,
    n_attributes=4,
    n_observations=10000,
)
proportions_data_small = generate_fake_observations(
    distribution='bernoulli',
    n_treatments=6,
    n_observations=6 * 50,
)
means_data = generate_fake_observations(
    distribution='gaussian',
    n_treatments=6,
    n_observations=6 * 50,
)
counts_data = generate_fake_observations(
    distribution='poisson',
    n_treatments=3,
    n_observations=3 * 100,
)


def test_multiple_comparison():
    """Bonferroni ('b') correction must tighten the significance threshold."""
    pvals = np.arange(.001, .1, .01)
    correction = MultipleComparisonCorrection(pvals, method='b')
    assert correction.alpha_corrected < correction.alpha_orig
def counts_data():
    """Return Poisson-distributed observations: 3 treatments, 100 rows each."""
    return generate_fake_observations(
        n_observations=3 * 100,
        n_treatments=3,
        distribution='poisson',
    )
def means_data():
    """Return Gaussian-distributed observations: 6 treatments, 50 rows each."""
    return generate_fake_observations(
        n_observations=6 * 50,
        n_treatments=6,
        distribution='gaussian',
    )
def proportions_data_small():
    """Return a small Bernoulli dataset: 6 treatments, 50 rows each."""
    return generate_fake_observations(
        n_observations=6 * 50,
        n_treatments=6,
        distribution='bernoulli',
    )