def _test_multinomial_goodness_of_fit(dim):
    seed_all(0)
    sample_count = int(1e5)
    probs = numpy.random.dirichlet([1] * dim)

    # Counts sampled from the true distribution should pass the test.
    counts = numpy.random.multinomial(sample_count, probs)
    p_good = multinomial_goodness_of_fit(probs, counts, sample_count)
    assert_greater(p_good, TEST_FAILURE_RATE)

    # Counts sampled from a uniform distribution should fail the test.
    unif_counts = numpy.random.multinomial(sample_count, [1. / dim] * dim)
    p_bad = multinomial_goodness_of_fit(probs, unif_counts, sample_count)
    assert_less(p_bad, TEST_FAILURE_RATE)
def test_scipy_stats():
    # Test generator: yield one sub-test per scipy.stats distribution
    # that exposes an rvs sampler.
    seed_all(0)
    for name in dir(scipy.stats):
        if hasattr(getattr(scipy.stats, name), 'rvs'):
            yield _test_scipy_stats, name
]


def split_example(i):
    example = split_examples[i]
    discrete, continuous = split_discrete_continuous(example['mixed'])
    assert_equal(discrete, example['discrete'])
    assert_almost_equal(continuous, example['continuous'])


def test_split_continuous_discrete():
    for i in xrange(len(split_examples)):
        yield split_example, i


seed_all(0)
default_params = {
    'bernoulli': [(0.2,)],
    'binom': [(40, 0.4)],
    'dirichlet': [
        (1.0 + rand(2),),
        (1.0 + rand(3),),
        (1.0 + rand(4),),
    ],
    'erlang': [(7,)],
    'dlaplace': [(0.8,)],
    'frechet': [tuple(2 * rand(1)) + (0,) + tuple(2 * rand(2))],
    'geom': [(0.1,)],
    'hypergeom': [(40, 14, 24)],
    'logser': [(0.9,)],
    'multivariate_normal': [