def test_multiple_samplers(self, caplog):
    with Model():
        prob = Beta("prob", alpha=5.0, beta=3.0)
        Binomial("outcome", n=1, p=prob)
        caplog.clear()
        sample(3, tune=2, discard_tuned_samples=False, n_init=None, chains=1)
        messages = [msg.msg for msg in caplog.records]
        assert all("boolean index did not" not in msg for msg in messages)
def test_multiple_samplers(self):
    with Model():
        prob = Beta('prob', alpha=5., beta=3.)
        Binomial('outcome', n=1, p=prob)
        with pytest.warns(None) as warns:
            sample(3, tune=2, discard_tuned_samples=False, n_init=None)
        messages = [warn.message.args[0] for warn in warns]
        assert any("contains only 3" in msg for msg in messages)
        assert all('boolean index did not' not in msg for msg in messages)
def test_checks_population_size(self):
    """Test that population samplers check the population size."""
    with Model() as model:
        n = Normal("n", mu=0, sigma=1)
        for stepper in TestPopulationSamplers.steppers:
            step = stepper()
            with pytest.raises(ValueError):
                trace = sample(draws=100, chains=1, step=step)
            trace = sample(draws=100, chains=4, step=step)
    pass
def test_float32(self):
    with Model() as model:
        x = Normal('x', testval=np.array(1., dtype='float32'))
        obs = Normal('obs', mu=x, sigma=1.,
                     observed=np.random.randn(5).astype('float32'))

    assert x.dtype == 'float32'
    assert obs.dtype == 'float32'

    for sampler in self.samplers:
        with model:
            sample(10, sampler())
def test_multiple_samplers(self):
    with Model():
        prob = Beta('prob', alpha=5., beta=3.)
        Binomial('outcome', n=1, p=prob)
        # Catching warnings through multiprocessing doesn't work,
        # so we have to use single threaded sampling.
        with pytest.warns(None) as warns:
            sample(3, tune=2, discard_tuned_samples=False,
                   n_init=None, chains=1)
        messages = [warn.message.args[0] for warn in warns]
        assert any("contains only 3" in msg for msg in messages)
        assert all('boolean index did not' not in msg for msg in messages)
def test_checks_population_size(self):
    """Test that population samplers check the population size."""
    steppers = [
        DEMetropolis
    ]
    with Model() as model:
        n = Normal('n', mu=0, sd=1)
        for stepper in steppers:
            step = stepper()
            with pytest.raises(ValueError):
                trace = sample(draws=100, chains=1, step=step)
            trace = sample(draws=100, chains=4, step=step)
    pass
def check_trace(self, step_method):
    """Tests whether the trace for step methods is exactly the same as on master.

    Code changes that affect how random numbers are drawn may change this, and require
    `master_samples` to be updated, but such changes should be noted and justified in the
    commit.

    This method may also be used to benchmark step methods across commits, by running, for
    example

    ```
    BENCHMARK=100000 ./scripts/test.sh -s pymc3/tests/test_step.py:TestStepMethods
    ```

    on multiple commits.
    """
    n_steps = 100
    with Model() as model:
        x = Normal('x', mu=0, sd=1)
        if step_method.__name__ == 'SMC':
            trace = smc.sample_smc(n_steps=n_steps, n_chains=2,
                                   start=[{'x': 1.}, {'x': -1.}],
                                   random_seed=1, n_jobs=1, progressbar=False,
                                   homepath=self.temp_dir)
        elif step_method.__name__ == 'NUTS':
            step = step_method(scaling=model.test_point)
            trace = sample(0, tune=n_steps, discard_tuned_samples=False,
                           step=step, random_seed=1, chains=1)
        else:
            trace = sample(0, tune=n_steps, discard_tuned_samples=False,
                           step=step_method(), random_seed=1, chains=1)

    assert_array_almost_equal(
        trace.get_values('x'),
        self.master_samples[step_method],
        decimal=select_by_precision(float64=6, float32=4))
def test_float32(self):
    with Model() as model:
        x = Normal("x", testval=np.array(1.0, dtype="float32"))
        obs = Normal("obs", mu=x, sigma=1.0, observed=np.random.randn(5).astype("float32"))

    assert x.dtype == "float32"
    assert obs.dtype == "float32"

    for sampler in self.samplers:
        with model:
            sample(10, sampler())
def test_float32(self):
    theano.config.floatX = 'float32'
    theano.config.warn_float64 = 'warn'
    with Model() as model:
        x = Normal('x', testval=np.array(1., dtype='float32'))
        obs = Normal('obs', mu=x, sd=1.,
                     observed=np.random.randn(5).astype('float32'))

    assert x.dtype == 'float32'
    assert obs.dtype == 'float32'

    for sampler in self.samplers:
        with model:
            sample(10, sampler())
def test_float64(self):
    theano.config.floatX = 'float64'
    theano.config.warn_float64 = 'ignore'
    with Model() as model:
        x = Normal('x', testval=np.array(1., dtype='float64'))
        obs = Normal('obs', mu=x, sd=1., observed=np.random.randn(5))

    assert x.dtype == 'float64'
    assert obs.dtype == 'float64'

    for sampler in self.samplers:
        with model:
            sample(10, sampler())
def check_trace(self, step_method):
    """Tests whether the trace for step methods is exactly the same as on master.

    Code changes that affect how random numbers are drawn may change this, and require
    `master_samples` to be updated, but such changes should be noted and justified in the
    commit.

    This method may also be used to benchmark step methods across commits, by running, for
    example

    ```
    BENCHMARK=100000 ./scripts/test.sh -s pymc3/tests/test_step.py:TestStepMethods
    ```

    on multiple commits.
    """
    n_steps = 100
    with Model() as model:
        x = Normal("x", mu=0, sd=1)
        y = Normal("y", mu=x, sd=1, observed=1)
        if step_method.__name__ == "SMC":
            trace = sample(draws=200, random_seed=1, progressbar=False, step=step_method())
        elif step_method.__name__ == "NUTS":
            step = step_method(scaling=model.test_point)
            trace = sample(
                0,
                tune=n_steps,
                discard_tuned_samples=False,
                step=step,
                random_seed=1,
                chains=1,
            )
        else:
            trace = sample(
                0,
                tune=n_steps,
                discard_tuned_samples=False,
                step=step_method(),
                random_seed=1,
                chains=1,
            )

    assert_array_almost_equal(
        trace["x"],
        self.master_samples[step_method],
        decimal=select_by_precision(float64=6, float32=4),
    )
def test_demcmc_warning_on_small_populations(self):
    """Test that a warning is raised when n_chains <= n_dims"""
    with Model() as model:
        Normal("n", mu=0, sigma=1, shape=(2, 3))
        with pytest.warns(UserWarning) as record:
            sample(
                draws=5,
                tune=5,
                chains=6,
                step=DEMetropolis(),
                # make tests faster by not parallelizing; disable convergence warning
                cores=1,
                compute_convergence_checks=False,
            )
    pass
def check_trace(self, step_method):
    """Tests whether the trace for step methods is exactly the same as on master.

    Code changes that affect how random numbers are drawn may change this, and require
    `master_samples` to be updated, but such changes should be noted and justified in the
    commit.

    This method may also be used to benchmark step methods across commits, by running, for
    example

    ```
    BENCHMARK=100000 ./scripts/test.sh -s pymc3/tests/test_step.py:TestStepMethods
    ```

    on multiple commits.
    """
    n_steps = 100
    with Model() as model:
        x = Normal("x", mu=0, sigma=1)
        y = Normal("y", mu=x, sigma=1, observed=1)
        if step_method.__name__ == "SMC":
            trace = sample(
                draws=200, random_seed=1, progressbar=False, step=step_method(parallel=False)
            )
        elif step_method.__name__ == "NUTS":
            step = step_method(scaling=model.test_point)
            trace = sample(
                0,
                tune=n_steps,
                discard_tuned_samples=False,
                step=step,
                random_seed=1,
                chains=1,
            )
        else:
            trace = sample(
                0,
                tune=n_steps,
                discard_tuned_samples=False,
                step=step_method(),
                random_seed=1,
                chains=1,
            )

    assert_array_almost_equal(
        trace["x"],
        self.master_samples[step_method],
        decimal=select_by_precision(float64=6, float32=4),
    )
def test_linalg(self):
    with Model():
        a = Normal('a', shape=2)
        a = tt.switch(a > 0, np.inf, a)
        b = tt.slinalg.solve(floatX(np.eye(2)), a)
        Normal('c', mu=b, shape=2)
        with pytest.warns(None) as warns:
            trace = sample(20, init=None, tune=5)
        assert np.any(trace['diverging'])
        assert any('diverging samples after tuning' in str(warn.message)
                   for warn in warns)
        assert any('contains only' in str(warn.message)
                   for warn in warns)

        with pytest.raises(SamplingError):
            sample(20, init=None, nuts_kwargs={'on_error': 'raise'})
def test_linalg(self):
    with Model():
        a = Normal('a', shape=2)
        a = tt.switch(a > 0, np.inf, a)
        b = tt.slinalg.solve(floatX(np.eye(2)), a)
        Normal('c', mu=b, shape=2)
        with warnings.catch_warnings(record=True) as warns:
            trace = sample(20, init=None, tune=5)
        assert np.any(trace['diverging'])
        assert any('diverging samples after tuning' in str(warn.message)
                   for warn in warns)
        assert any('contains only' in str(warn.message)
                   for warn in warns)

        with pytest.raises(SamplingError):
            sample(20, init=None, nuts_kwargs={'on_error': 'raise'})
def test_sampler_stats(self):
    with Model() as model:
        x = Normal("x", mu=0, sigma=1)
        trace = sample(draws=10, tune=1, chains=1)

    # Assert stats exist and have the correct shape.
    expected_stat_names = {
        "depth",
        "diverging",
        "energy",
        "energy_error",
        "model_logp",
        "max_energy_error",
        "mean_tree_accept",
        "step_size",
        "step_size_bar",
        "tree_size",
        "tune",
    }
    assert trace.stat_names == expected_stat_names
    for varname in trace.stat_names:
        assert trace.get_sampler_stats(varname).shape == (10,)

    # Assert model logp is computed correctly: computing post-sampling
    # and tracking while sampling should give same results.
    model_logp_ = np.array(
        [
            model.logp(trace.point(i, chain=c))
            for c in trace.chains
            for i in range(len(trace))
        ]
    )
    assert (trace.model_logp == model_logp_).all()
def test_step_continuous():
    start, model, (mu, C) = mv_simple()
    with model:
        mh = Metropolis()
        slicer = Slice()
        hmc = HamiltonianMC(scaling=C, is_cov=True, blocked=False)
        nuts = NUTS(scaling=C, is_cov=True, blocked=False)
        mh_blocked = Metropolis(S=C,
                                proposal_dist=MultivariateNormalProposal,
                                blocked=True)
        slicer_blocked = Slice(blocked=True)
        hmc_blocked = HamiltonianMC(scaling=C, is_cov=True)
        nuts_blocked = NUTS(scaling=C, is_cov=True)
        compound = CompoundStep([hmc_blocked, mh_blocked])

    steps = [slicer, hmc, nuts, mh_blocked, hmc_blocked,
             slicer_blocked, nuts_blocked, compound]

    unc = np.diag(C) ** .5
    check = [('x', np.mean, mu, unc / 10.),
             ('x', np.std, unc, unc / 10.)]

    for st in steps:
        h = sample(8000, st, start, model=model, random_seed=1)
        for (var, stat, val, bound) in check:
            yield check_stat, repr(st), h, var, stat, val, bound
def test_parallel_start():
    model, _, _, _ = simple_init()
    with model:
        tr = sample(5, njobs=2,
                    start=[{'x': [10, 10]}, {'x': [-10, -10]}],
                    random_seed=RSEED)
    assert tr.get_values('x', chains=0)[0][0] > 0
    assert tr.get_values('x', chains=1)[0][0] < 0
def test_linear():
    lam = -0.78
    sig2 = 5e-3
    N = 300
    dt = 1e-1
    sde = lambda x, lam: (lam * x, sig2)
    x = floatX(_gen_sde_path(sde, (lam,), dt, N, 5.0))
    z = x + np.random.randn(x.size) * sig2
    # build model
    with Model() as model:
        lamh = Flat("lamh")
        xh = EulerMaruyama("xh", dt, sde, (lamh,), shape=N + 1, testval=x)
        Normal("zh", mu=xh, sigma=sig2, observed=z)
    # invert
    with model:
        trace = sample(init="advi+adapt_diag", chains=1)

    ppc = sample_posterior_predictive(trace, model=model)
    ppcf = fast_sample_posterior_predictive(trace, model=model)
    # test
    p95 = [2.5, 97.5]
    lo, hi = np.percentile(trace[lamh], p95, axis=0)
    assert (lo < lam) and (lam < hi)
    lo, hi = np.percentile(ppc["zh"], p95, axis=0)
    assert ((lo < z) * (z < hi)).mean() > 0.95
    lo, hi = np.percentile(ppcf["zh"], p95, axis=0)
    assert ((lo < z) * (z < hi)).mean() > 0.95
def check_trace(self, step_method):
    """Tests whether the trace for step methods is exactly the same as on master.

    Code changes that affect how random numbers are drawn may change this, and require
    `master_samples` to be updated, but such changes should be noted and justified in the
    commit.

    This method may also be used to benchmark step methods across commits, by running, for
    example

    ```
    BENCHMARK=100000 ./scripts/test.sh -s pymc3/tests/test_step.py:TestStepMethods
    ```

    on multiple commits.
    """
    n_steps = 100
    with Model():
        x = Normal('x', mu=0, sd=1)
        if step_method.__name__ == 'SMC':
            Deterministic('like', - 0.5 * tt.log(2 * np.pi) - 0.5 * x.T.dot(x))
            trace = smc.ATMIP_sample(n_steps=n_steps,
                                     step=step_method(random_seed=1),
                                     n_jobs=1, progressbar=False,
                                     homepath=self.temp_dir)
        else:
            trace = sample(0, tune=n_steps, discard_tuned_samples=False,
                           step=step_method(), random_seed=1)

    assert_array_almost_equal(
        trace.get_values('x'),
        self.master_samples[step_method],
        decimal=select_by_precision(float64=6, float32=4))
def test_sampler_stats(self):
    with Model() as model:
        x = Normal("x", mu=0, sigma=1)
        trace = sample(draws=10, tune=1, chains=1)

    # Assert stats exist and have the correct shape.
    expected_stat_names = {
        "depth",
        "diverging",
        "energy",
        "energy_error",
        "model_logp",
        "max_energy_error",
        "mean_tree_accept",
        "step_size",
        "step_size_bar",
        "tree_size",
        "tune",
    }
    assert trace.stat_names == expected_stat_names
    for varname in trace.stat_names:
        assert trace.get_sampler_stats(varname).shape == (10, )

    # Assert model logp is computed correctly: computing post-sampling
    # and tracking while sampling should give same results.
    model_logp_ = np.array([
        model.logp(trace.point(i, chain=c))
        for c in trace.chains
        for i in range(len(trace))
    ])
    assert (trace.model_logp == model_logp_).all()
def test_step_continuous(self):
    start, model, (mu, C) = mv_simple()
    unc = np.diag(C) ** .5
    check = (('x', np.mean, mu, unc / 10.),
             ('x', np.std, unc, unc / 10.))
    with model:
        steps = (
            Slice(),
            HamiltonianMC(scaling=C, is_cov=True, blocked=False),
            NUTS(scaling=C, is_cov=True, blocked=False),
            Metropolis(S=C, proposal_dist=MultivariateNormalProposal, blocked=True),
            Slice(blocked=True),
            HamiltonianMC(scaling=C, is_cov=True),
            NUTS(scaling=C, is_cov=True),
            CompoundStep([
                HamiltonianMC(scaling=C, is_cov=True),
                HamiltonianMC(scaling=C, is_cov=True, blocked=False)
            ]),
        )
    for step in steps:
        trace = sample(8000, step=step, start=start, model=model, random_seed=1)
        yield self.check_stat, check, trace, step.__class__.__name__
def test_acceptance_rate_against_coarseness(self):
    """Test that the acceptance rate increases when the coarse model is closer
    to the fine model."""
    with Model() as coarse_model_0:
        Normal("x", 5.0, 1.0)

    with Model() as coarse_model_1:
        Normal("x", 6.0, 2.0)

    with Model() as coarse_model_2:
        Normal("x", 20.0, 5.0)

    possible_coarse_models = [
        coarse_model_0,
        coarse_model_1,
        coarse_model_2
    ]
    acc = []

    with Model():
        Normal("x", 5.0, 1.0)
        for coarse_model in possible_coarse_models:
            step = MLDA(coarse_models=[coarse_model], subsampling_rates=3, tune=True)
            trace = sample(chains=1, draws=500, tune=100, step=step)
            acc.append(trace.get_sampler_stats("accepted").mean())
        assert acc[0] > acc[1] > acc[2], (
            "Acceptance rate is not strictly increasing when the "
            "coarse model is closer to the fine model. "
            "Acceptance rates were: {}".format(acc)
        )
def test_step_continuous():
    start, model, (mu, C) = mv_simple()
    with model:
        mh = Metropolis()
        slicer = Slice()
        hmc = HamiltonianMC(scaling=C, is_cov=True, blocked=False)
        nuts = NUTS(scaling=C, is_cov=True, blocked=False)
        mh_blocked = Metropolis(S=C,
                                proposal_dist=MultivariateNormalProposal,
                                blocked=True)
        slicer_blocked = Slice(blocked=True)
        hmc_blocked = HamiltonianMC(scaling=C, is_cov=True)
        nuts_blocked = NUTS(scaling=C, is_cov=True)
        compound = CompoundStep([hmc_blocked, mh_blocked])

    steps = [
        slicer, hmc, nuts, mh_blocked, hmc_blocked,
        slicer_blocked, nuts_blocked, compound
    ]

    unc = np.diag(C) ** .5
    check = [('x', np.mean, mu, unc / 10.),
             ('x', np.std, unc, unc / 10.)]

    for st in steps:
        h = sample(8000, st, start, model=model, random_seed=1)
        for (var, stat, val, bound) in check:
            yield check_stat, repr(st), h, var, stat, val, bound
def test_constant_step():
    with Model() as model:
        x = Normal('x', 0, 1)
        start = {'x': -1}
        tr = sample(10, step=Constant([x]), start=start)
    assert_almost_equal(tr['x'], start['x'], decimal=10)
def test_step_continuous(self):
    start, model, (mu, C) = mv_simple()
    unc = np.diag(C) ** 0.5
    check = (("x", np.mean, mu, unc / 10.0), ("x", np.std, unc, unc / 10.0))
    with model:
        steps = (
            Slice(),
            HamiltonianMC(scaling=C, is_cov=True, blocked=False),
            NUTS(scaling=C, is_cov=True, blocked=False),
            Metropolis(S=C, proposal_dist=MultivariateNormalProposal, blocked=True),
            Slice(blocked=True),
            HamiltonianMC(scaling=C, is_cov=True),
            NUTS(scaling=C, is_cov=True),
            CompoundStep(
                [
                    HamiltonianMC(scaling=C, is_cov=True),
                    HamiltonianMC(scaling=C, is_cov=True, blocked=False),
                ]
            ),
        )
    for step in steps:
        trace = sample(
            0,
            tune=8000,
            chains=1,
            discard_tuned_samples=False,
            step=step,
            start=start,
            model=model,
            random_seed=1,
        )
        self.check_stat(check, trace, step.__class__.__name__)
def check_trace(self, step_method):
    """Tests whether the trace for step methods is exactly the same as on master.

    Code changes that affect how random numbers are drawn may change this, and require
    `master_samples` to be updated, but such changes should be noted and justified in the
    commit.

    This method may also be used to benchmark step methods across commits, by running, for
    example

    ```
    BENCHMARK=100000 ./scripts/test.sh -s pymc3/tests/test_step.py:TestStepMethods
    ```

    on multiple commits.
    """
    test_steps = 100
    n_steps = int(os.getenv('BENCHMARK', 100))
    benchmarking = (n_steps != test_steps)
    if benchmarking:
        tqdm.write('Benchmarking {} with {:,d} samples'.format(step_method.__name__, n_steps))
    else:
        tqdm.write('Checking {} has same trace as on master'.format(step_method.__name__))
    with Model() as model:
        Normal('x', mu=0, sd=1)
        trace = sample(n_steps, step=step_method(), random_seed=1)

    if not benchmarking:
        assert_array_almost_equal(trace.get_values('x'), self.master_samples[step_method])
def test_constant_step():
    with Model() as model:
        x = Normal('x', 0, 1)
        start = {'x': -1}
        tr = sample(10, step=Constant([x]), start=start)
    assert_almost_equal(tr['x'], start['x'], decimal=10)
def test_tuning_and_scaling_on(self):
    """Test that tune and base_scaling change as expected when tuning is on."""
    np.random.seed(1234)
    ts = 100
    _, model = simple_2model_continuous()
    _, model_coarse = simple_2model_continuous()
    with model:
        trace = sample(
            tune=ts,
            draws=20,
            step=MLDA(
                coarse_models=[model_coarse],
                base_tune_interval=50,
                base_scaling=100.0,
            ),
            chains=1,
            discard_tuned_samples=False,
            random_seed=1234,
        )

    assert trace.get_sampler_stats("tune", chains=0)[0]
    assert trace.get_sampler_stats("tune", chains=0)[ts - 1]
    assert not trace.get_sampler_stats("tune", chains=0)[ts]
    assert not trace.get_sampler_stats("tune", chains=0)[-1]
    assert trace.get_sampler_stats("base_scaling", chains=0)[0][0] == 100.0
    assert trace.get_sampler_stats("base_scaling", chains=0)[0][1] == 100.0
    assert trace.get_sampler_stats("base_scaling", chains=0)[-1][0] < 100.0
    assert trace.get_sampler_stats("base_scaling", chains=0)[-1][1] < 100.0
def test_float32_MLDA(self):
    data = np.random.randn(5).astype('float32')

    with Model() as coarse_model:
        x = Normal('x', testval=np.array(1., dtype='float32'))
        obs = Normal('obs', mu=x, sigma=1., observed=data + 0.5)

    with Model() as model:
        x = Normal('x', testval=np.array(1., dtype='float32'))
        obs = Normal('obs', mu=x, sigma=1., observed=data)

    assert x.dtype == 'float32'
    assert obs.dtype == 'float32'

    with model:
        sample(10, MLDA(coarse_models=[coarse_model]))
def test_float32_MLDA(self):
    data = np.random.randn(5).astype("float32")

    with Model() as coarse_model:
        x = Normal("x", testval=np.array(1.0, dtype="float32"))
        obs = Normal("obs", mu=x, sigma=1.0, observed=data + 0.5)

    with Model() as model:
        x = Normal("x", testval=np.array(1.0, dtype="float32"))
        obs = Normal("obs", mu=x, sigma=1.0, observed=data)

    assert x.dtype == "float32"
    assert obs.dtype == "float32"

    with model:
        sample(10, MLDA(coarse_models=[coarse_model]))
def check_trace(self, step_method):
    """Tests whether the trace for step methods is exactly the same as on master.

    Code changes that affect how random numbers are drawn may change this, and require
    `master_samples` to be updated, but such changes should be noted and justified in the
    commit.

    This method may also be used to benchmark step methods across commits, by running, for
    example

    ```
    BENCHMARK=100000 ./scripts/test.sh -s pymc3/tests/test_step.py:TestStepMethods
    ```

    on multiple commits.
    """
    test_steps = 100
    n_steps = int(os.getenv('BENCHMARK', 100))
    benchmarking = (n_steps != test_steps)
    if benchmarking:
        tqdm.write('Benchmarking {} with {:,d} samples'.format(step_method.__name__, n_steps))
    else:
        tqdm.write('Checking {} has same trace as on master'.format(step_method.__name__))
    with Model():
        Normal('x', mu=0, sd=1)
        trace = sample(n_steps, step=step_method(), random_seed=1)

    if not benchmarking:
        assert_array_almost_equal(trace.get_values('x'), self.master_samples[step_method])
def check_trace(self, step_method):
    """Tests whether the trace for step methods is exactly the same as on master.

    Code changes that affect how random numbers are drawn may change this, and require
    `master_samples` to be updated, but such changes should be noted and justified in the
    commit.

    This method may also be used to benchmark step methods across commits, by running, for
    example

    ```
    BENCHMARK=100000 ./scripts/test.sh -s pymc3/tests/test_step.py:TestStepMethods
    ```

    on multiple commits.
    """
    n_steps = 100
    with Model():
        x = Normal('x', mu=0, sd=1)
        if step_method.__name__ == 'SMC':
            Deterministic('like', - 0.5 * tt.log(2 * np.pi) - 0.5 * x.T.dot(x))
            trace = smc.ATMIP_sample(n_steps=n_steps,
                                     step=step_method(random_seed=1),
                                     n_jobs=1, progressbar=False,
                                     stage='0', homepath=self.temp_dir)
        else:
            trace = sample(n_steps, step=step_method(), random_seed=1)

    print(repr(trace.get_values('x')))
    assert_array_almost_equal(
        trace.get_values('x'),
        self.master_samples[step_method],
        decimal=select_by_precision(float64=6, float32=4))
def test_step_continuous(self):
    start, model, (mu, C) = mv_simple()
    unc = np.diag(C) ** 0.5
    check = (("x", np.mean, mu, unc / 10.0),
             ("x", np.std, unc, unc / 10.0))
    with model:
        steps = (
            Slice(),
            HamiltonianMC(scaling=C, is_cov=True, blocked=False),
            NUTS(scaling=C, is_cov=True, blocked=False),
            Metropolis(S=C, proposal_dist=MultivariateNormalProposal, blocked=True),
            Slice(blocked=True),
            HamiltonianMC(scaling=C, is_cov=True),
            NUTS(scaling=C, is_cov=True),
            CompoundStep([
                HamiltonianMC(scaling=C, is_cov=True),
                HamiltonianMC(scaling=C, is_cov=True, blocked=False),
            ]),
        )
    for step in steps:
        trace = sample(
            0,
            tune=8000,
            chains=1,
            discard_tuned_samples=False,
            step=step,
            start=start,
            model=model,
            # random_seed=1,
            cores=1)
        self.check_stat(check, trace, step.__class__.__name__)
def test_linalg(self):
    with Model():
        a = Normal('a', shape=2)
        a = tt.switch(a > 0, np.inf, a)
        b = tt.slinalg.solve(floatX(np.eye(2)), a)
        Normal('c', mu=b, shape=2)
        with pytest.warns(None) as warns:
            trace = sample(20, init=None, tune=5)
        warns = [str(warn.message) for warn in warns]
        assert np.any(trace['diverging'])
        assert any('diverging samples after tuning' in warn for warn in warns)
        # FIXME This test fails sporadically on py27.
        # It seems that capturing warnings doesn't work as expected.
        # assert any('contains only' in warn for warn in warns)

        with pytest.raises(SamplingError):
            sample(20, init=None, nuts_kwargs={'on_error': 'raise'})
def test_linalg(self):
    with Model():
        a = Normal('a', shape=2)
        a = tt.switch(a > 0, np.inf, a)
        b = tt.slinalg.solve(floatX(np.eye(2)), a)
        Normal('c', mu=b, shape=2)
        with pytest.warns(None) as warns:
            trace = sample(20, init=None, tune=5)
        warns = [str(warn.message) for warn in warns]
        print(warns)
        assert np.any(trace['diverging'])
        assert any('diverging samples after tuning' in warn for warn in warns)
        # FIXME This test fails sporadically on py27.
        # It seems that capturing warnings doesn't work as expected.
        # assert any('contains only' in warn for warn in warns)

        with pytest.raises(SamplingError):
            sample(20, init=None, nuts_kwargs={'on_error': 'raise'})
def test_step_elliptical_slice(self):
    start, model, (K, L, mu, std, noise) = mv_prior_simple()
    unc = noise ** 0.5
    check = (("x", np.mean, mu, unc / 10.0), ("x", np.std, std, unc / 10.0))
    with model:
        steps = (EllipticalSlice(prior_cov=K), EllipticalSlice(prior_chol=L))
    for step in steps:
        trace = sample(
            5000, tune=0, step=step, start=start, model=model, random_seed=1, chains=1
        )
        self.check_stat(check, trace, step.__class__.__name__)
def test_custom_proposal_dist(self):
    with Model() as pmodel:
        D = 3
        Normal('n', 0, 2, shape=(D, ))
        trace = sample(
            tune=100,
            draws=50,
            step=DEMetropolisZ(proposal_dist=NormalProposal),
            cores=1,
            chains=3,
            discard_tuned_samples=False,
        )
    pass
def test_step_discrete(self):
    start, model, (mu, C) = mv_simple_discrete()
    unc = np.diag(C) ** .5
    check = (('x', np.mean, mu, unc / 10.),
             ('x', np.std, unc, unc / 10.))
    with model:
        steps = (
            Metropolis(S=C, proposal_dist=MultivariateNormalProposal),
        )
    for step in steps:
        trace = sample(20000, step=step, start=start, model=model, random_seed=1)
        yield self.check_stat, check, trace, step.__class__.__name__
def test_step_categorical(self):
    start, model, (mu, C) = simple_categorical()
    unc = C ** 0.5
    check = (("x", np.mean, mu, unc / 10.0), ("x", np.std, unc, unc / 10.0))
    with model:
        steps = (
            CategoricalGibbsMetropolis(model.x, proposal="uniform"),
            CategoricalGibbsMetropolis(model.x, proposal="proportional"),
        )
    for step in steps:
        trace = sample(8000, tune=0, step=step, start=start, model=model, random_seed=1)
        self.check_stat(check, trace, step.__class__.__name__)
def test_step_discrete(self):
    start, model, (mu, C) = mv_simple_discrete()
    unc = np.diag(C) ** .5
    check = (('x', np.mean, mu, unc / 10.),
             ('x', np.std, unc, unc / 10.))
    with model:
        steps = (
            Metropolis(S=C, proposal_dist=MultivariateNormalProposal),
        )
    for step in steps:
        trace = sample(20000, step=step, start=start, model=model, random_seed=1)
        self.check_stat(check, trace)
def test_parallelized_chains_are_random(self):
    with Model() as model:
        x = Normal("x", 0, 1)
        for stepper in TestPopulationSamplers.steppers:
            step = stepper()
            trace = sample(chains=4, cores=4, draws=20, tune=0, step=step)
            samples = np.array(trace.get_values("x", combine=False))[:, 5]
            assert len(set(samples)) == 4, "Parallelized {} chains are identical.".format(
                stepper
            )
    pass
def test_step_elliptical_slice(self):
    start, model, (K, mu, noise) = mv_prior_simple()
    unc = noise ** 0.5
    check = (('x', np.mean, mu, unc / 10.),
             ('x', np.std, unc, unc / 10.))
    with model:
        steps = (
            EllipticalSlice(prior_cov=K),
        )
    for step in steps:
        trace = sample(8000, step=step, start=start, model=model, random_seed=1)
        yield self.check_stat, check, trace, step.__class__.__name__
def check_trace(self, step_method):
    """Tests whether the trace for step methods is exactly the same as on master.

    Code changes that affect how random numbers are drawn may change this, and require
    `master_samples` to be updated, but such changes should be noted and justified in the
    commit.

    This method may also be used to benchmark step methods across commits, by running, for
    example

    ```
    BENCHMARK=100000 ./scripts/test.sh -s pymc3/tests/test_step.py:TestStepMethods
    ```

    on multiple commits.
    """
    n_steps = 100
    with Model() as model:
        x = Normal('x', mu=0, sd=1)
        if step_method.__name__ == 'SMC':
            trace = smc.sample_smc(n_steps=n_steps, n_chains=2,
                                   start=[{'x': 1.}, {'x': -1.}],
                                   random_seed=1, n_jobs=1, progressbar=False,
                                   homepath=self.temp_dir)
        elif step_method.__name__ == 'NUTS':
            step = step_method(scaling=model.test_point)
            trace = sample(0, tune=n_steps, discard_tuned_samples=False,
                           step=step, random_seed=1, chains=1)
        else:
            trace = sample(0, tune=n_steps, discard_tuned_samples=False,
                           step=step_method(), random_seed=1, chains=1)

    assert_array_almost_equal(
        trace.get_values('x'),
        self.master_samples[step_method],
        decimal=select_by_precision(float64=6, float32=4))
def test_interval_missing_observations():
    with Model() as model:
        obs1 = ma.masked_values([1, 2, -1, 4, -1], value=-1)
        obs2 = ma.masked_values([-1, -1, 6, -1, 8], value=-1)

        rng = aesara.shared(np.random.RandomState(2323), borrow=True)

        with pytest.warns(ImputationWarning):
            theta1 = Uniform("theta1", 0, 5, observed=obs1, rng=rng)
        with pytest.warns(ImputationWarning):
            theta2 = Normal("theta2", mu=theta1, observed=obs2, rng=rng)

        assert "theta1_observed_interval__" in model.named_vars
        assert "theta1_missing_interval__" in model.named_vars
        assert isinstance(
            model.rvs_to_values[model.named_vars["theta1_observed"]].tag.transform, Interval
        )

        prior_trace = sample_prior_predictive()

        # Make sure the observed + missing combined deterministics have the
        # same shape as the original observations vectors
        assert prior_trace["theta1"].shape[-1] == obs1.shape[0]
        assert prior_trace["theta2"].shape[-1] == obs2.shape[0]

        # Make sure that the observed values are newly generated samples
        assert np.all(np.var(prior_trace["theta1_observed"], 0) > 0.0)
        assert np.all(np.var(prior_trace["theta2_observed"], 0) > 0.0)

        # Make sure the missing parts of the combined deterministic match the
        # sampled missing and observed variable values
        assert np.mean(prior_trace["theta1"][:, obs1.mask] - prior_trace["theta1_missing"]) == 0.0
        assert np.mean(prior_trace["theta1"][:, ~obs1.mask] - prior_trace["theta1_observed"]) == 0.0
        assert np.mean(prior_trace["theta2"][:, obs2.mask] - prior_trace["theta2_missing"]) == 0.0
        assert np.mean(prior_trace["theta2"][:, ~obs2.mask] - prior_trace["theta2_observed"]) == 0.0

        assert {"theta1", "theta2"} <= set(prior_trace.keys())

        trace = sample(chains=1, draws=50, compute_convergence_checks=False)

        assert np.all(0 < trace["theta1_missing"].mean(0))
        assert np.all(0 < trace["theta2_missing"].mean(0))
        assert "theta1" not in trace.varnames
        assert "theta2" not in trace.varnames

        # Make sure that the observed values are newly generated samples and that
        # the observed and deterministic values match
        pp_trace = sample_posterior_predictive(trace)
        assert np.all(np.var(pp_trace["theta1"], 0) > 0.0)
        assert np.all(np.var(pp_trace["theta2"], 0) > 0.0)
        assert np.mean(pp_trace["theta1"][:, ~obs1.mask] - pp_trace["theta1_observed"]) == 0.0
        assert np.mean(pp_trace["theta2"][:, ~obs2.mask] - pp_trace["theta2_observed"]) == 0.0
def test_step_categorical(self):
    start, model, (mu, C) = simple_categorical()
    unc = C ** .5
    check = (('x', np.mean, mu, unc / 10.),
             ('x', np.std, unc, unc / 10.))
    with model:
        steps = (
            CategoricalGibbsMetropolis(model.x, proposal='uniform'),
            CategoricalGibbsMetropolis(model.x, proposal='proportional'),
        )
    for step in steps:
        trace = sample(8000, step=step, start=start, model=model, random_seed=1)
        yield self.check_stat, check, trace, step.__class__.__name__
def test_parallelized_chains_are_random(self):
    with Model() as model:
        x = Normal('x', 0, 1)
        for stepper in TestPopulationSamplers.steppers:
            step = stepper()
            trace = sample(chains=4, draws=20, tune=0,
                           step=step, parallelize=True)
            samples = np.array(trace.get_values('x', combine=False))[:, 5]
            assert len(set(samples)) == 4, \
                'Parallelized {} chains are identical.'.format(stepper)
    pass
def test_step_elliptical_slice(self):
    start, model, (K, L, mu, std, noise) = mv_prior_simple()
    unc = noise ** 0.5
    check = (('x', np.mean, mu, unc / 10.),
             ('x', np.std, std, unc / 10.))
    with model:
        steps = (
            EllipticalSlice(prior_cov=K),
            EllipticalSlice(prior_chol=L),
        )
    for step in steps:
        trace = sample(5000, step=step, start=start, model=model, random_seed=1)
        yield self.check_stat, check, trace, step.__class__.__name__
def test_step_categorical(self):
    start, model, (mu, C) = simple_categorical()
    unc = C ** 0.5
    check = (("x", np.mean, mu, unc / 10.0), ("x", np.std, unc, unc / 10.0))
    with model:
        steps = (
            CategoricalGibbsMetropolis(model.x, proposal="uniform"),
            CategoricalGibbsMetropolis(model.x, proposal="proportional"),
        )
    for step in steps:
        trace = sample(
            8000, tune=0, step=step, start=start, model=model, random_seed=1
        )
        self.check_stat(check, trace, step.__class__.__name__)
def test_step_discrete(self):
    if theano.config.floatX == "float32":
        return  # Cannot use @skip because it only skips one iteration of the yield
    start, model, (mu, C) = mv_simple_discrete()
    unc = np.diag(C) ** .5
    check = (('x', np.mean, mu, unc / 10.),
             ('x', np.std, unc, unc / 10.))
    with model:
        steps = (
            Metropolis(S=C, proposal_dist=MultivariateNormalProposal),
        )
    for step in steps:
        trace = sample(20000, step=step, start=start, model=model, random_seed=1)
        yield self.check_stat, check, trace, step.__class__.__name__
def test_step_discrete():
    start, model, (mu, C) = mv_simple_discrete()
    with model:
        mh = Metropolis(S=C, proposal_dist=MultivariateNormalProposal)
        slicer = Slice()

    steps = [mh]

    unc = np.diag(C) ** .5
    check = [('x', np.mean, mu, unc / 10.),
             ('x', np.std, unc, unc / 10.)]

    for st in steps:
        h = sample(20000, st, start, model=model, random_seed=1)
        for (var, stat, val, bound) in check:
            yield check_stat, repr(st), h, var, stat, val, bound
def test_linalg(self, caplog):
    with Model():
        a = Normal("a", shape=2)
        a = tt.switch(a > 0, np.inf, a)
        b = tt.slinalg.solve(floatX(np.eye(2)), a)
        Normal("c", mu=b, shape=2)
        caplog.clear()
        trace = sample(20, init=None, tune=5, chains=2)
        warns = [msg.msg for msg in caplog.records]
        assert np.any(trace["diverging"])
        assert (
            any("divergence after tuning" in warn for warn in warns)
            or any("divergences after tuning" in warn for warn in warns)
            or any("only diverging samples" in warn for warn in warns)
        )

        with pytest.raises(ValueError) as error:
            trace.report.raise_ok()
        error.match("issues during sampling")

        assert not trace.report.ok
def test_step_continuous(self):
    start, model, (mu, C) = mv_simple()
    unc = np.diag(C) ** .5
    check = (('x', np.mean, mu, unc / 10.),
             ('x', np.std, unc, unc / 10.))
    with model:
        steps = (
            Slice(),
            HamiltonianMC(scaling=C, is_cov=True, blocked=False),
            NUTS(scaling=C, is_cov=True, blocked=False),
            Metropolis(S=C, proposal_dist=MultivariateNormalProposal, blocked=True),
            Slice(blocked=True),
            HamiltonianMC(scaling=C, is_cov=True),
            NUTS(scaling=C, is_cov=True),
            CompoundStep([
                HamiltonianMC(scaling=C, is_cov=True),
                HamiltonianMC(scaling=C, is_cov=True, blocked=False)]),
        )
    for step in steps:
        trace = sample(8000, step=step, start=start, model=model, random_seed=1)
        yield self.check_stat, check, trace
def test_posterior_estimate(self):
    alpha_true, sigma_true = 1., 0.5
    beta_true = 1.

    size = 1000

    X = np.random.randn(size)
    Y = alpha_true + beta_true * X + np.random.randn(size) * sigma_true

    decimal = 1
    with Model() as model:
        alpha = Normal('alpha', mu=0, sd=100, testval=alpha_true)
        beta = Normal('beta', mu=0, sd=100, testval=beta_true)
        sigma = InverseGamma('sigma', 10., testval=sigma_true)
        mu = alpha + beta * X
        Y_obs = Normal('Y_obs', mu=mu, sd=sigma, observed=Y)

        for step_method in (NUTS, Slice, Metropolis):
            trace = sample(100000, step=step_method(), progressbar=False)
            trace_ = trace[-300::5]

            # We do the same for beta - using more burnin.
            np.testing.assert_almost_equal(np.mean(trace_.alpha),
                                           alpha_true, decimal=decimal)
            np.testing.assert_almost_equal(np.mean(trace_.beta),
                                           beta_true, decimal=decimal)
            np.testing.assert_almost_equal(np.mean(trace_.sigma),
                                           sigma_true, decimal=decimal)

            # Make sure posteriors are normal
            _, p_alpha = stats.normaltest(trace_.alpha)
            _, p_beta = stats.normaltest(trace_.beta)

            # p-values should be > .05 to indicate normality
            np.testing.assert_array_less(0.05, p_alpha, verbose=True)
            np.testing.assert_array_less(0.05, p_beta, verbose=True)
def test_bad_init(self):
    with Model():
        HalfNormal('a', sd=1, testval=-1, transform=None)
        with pytest.raises(ValueError) as error:
            sample(init=None)
        error.match('Bad initial')
def test_bad_init_parallel(self):
    with Model():
        HalfNormal("a", sigma=1, testval=-1, transform=None)
        with pytest.raises(ParallelSamplingError) as error:
            sample(init=None, cores=2, random_seed=1)
        error.match("Bad initial")