def setUpClass(cls):
    """Prepare a two-chain MultiTrace whose recorded values follow a known pattern.

    Chain 0 stores ``arange`` values reshaped to (draws, *var_shape); chain 1
    stores the same values scaled by 100, so later tests can check retrieval
    against ``cls.expected``.
    """
    cls.test_point, cls.model, _ = models.non_normal(cls.shape)
    with cls.model:
        chain_a = cls.backend(cls.name)
        chain_b = cls.backend(cls.name)

    cls.draws = 5
    chain_a.setup(cls.draws, chain=0)
    chain_b.setup(cls.draws, chain=1)

    varnames = list(cls.test_point.keys())
    shapes = {name: val.shape for name, val in cls.test_point.items()}

    # Deterministic expected values: chain 1 is chain 0 scaled by 100.
    cls.expected = {0: {}, 1: {}}
    for name in varnames:
        full_shape = (cls.draws,) + shapes[name]
        base_values = np.arange(cls.draws * np.prod(shapes[name]))
        cls.expected[0][name] = base_values.reshape(full_shape)
        cls.expected[1][name] = base_values.reshape(full_shape) * 100

    # Record one draw at a time into each chain's trace.
    for idx in range(cls.draws):
        draw_a = {name: cls.expected[0][name][idx, ...] for name in varnames}
        draw_b = {name: cls.expected[1][name][idx, ...] for name in varnames}
        chain_a.record(point=draw_a)
        chain_b.record(point=draw_b)

    chain_a.close()
    chain_b.close()
    cls.mtrace = base.MultiTrace([chain_a, chain_b])
def test_leapfrog_reversible():
    """Leapfrog integration run forward then backward must return to the start state.

    For several step sizes and step counts, integrate n steps with +epsilon and
    then n steps with -epsilon; position ``q`` and momentum ``p`` should match
    the initial state to within a small relative tolerance.
    """
    dim = 3
    np.random.seed(42)
    start, model, _ = models.non_normal(dim)

    total_size = sum(var_value.size for var_value in (start[v.name] for v in model.value_vars))
    scaling = floatX(np.random.rand(total_size))

    # Minimal HMC subclass: the abstract Hamiltonian step is a no-op because
    # only the integrator itself is under test here.
    class _IntegratorOnlyHMC(BaseHMC):
        def _hamiltonian_step(self, *args, **kwargs):
            pass

    step = _IntegratorOnlyHMC(vars=model.value_vars, model=model, scaling=scaling)
    step.integrator._logp_dlogp_func.set_extra_values({})

    astart = DictToArrayBijection.map(start)
    momentum = RaveledVars(floatX(step.potential.random()), astart.point_map_info)
    position = RaveledVars(floatX(np.random.randn(total_size)), astart.point_map_info)
    start = step.integrator.compute_state(momentum, position)

    for epsilon in [0.01, 0.1]:
        for n_steps in [1, 2, 3, 4, 20]:
            state = start
            # Forward trajectory ...
            for _ in range(n_steps):
                state = step.integrator.step(epsilon, state)
            # ... then retrace it with the sign of epsilon flipped.
            for _ in range(n_steps):
                state = step.integrator.step(-epsilon, state)
            npt.assert_allclose(state.q.data, start.q.data, rtol=1e-5)
            npt.assert_allclose(state.p.data, start.p.data, rtol=1e-5)
def setUpClass(cls):
    """Build a two-chain MultiTrace filled with deterministic values.

    Chain 0 records ``arange`` data reshaped to (draws, *var_shape); chain 1
    records the same data multiplied by 100. The per-chain expectations are
    kept in ``cls.expected`` for later assertions.
    """
    cls.test_point, cls.model, _ = models.non_normal(cls.shape)
    with cls.model:
        trace0 = cls.backend(cls.name)
        trace1 = cls.backend(cls.name)

    cls.draws = 5
    trace0.setup(cls.draws, chain=0)
    trace1.setup(cls.draws, chain=1)

    varnames = list(cls.test_point.keys())
    shapes = {varname: value.shape for varname, value in cls.test_point.items()}

    # expected[chain][varname] -> the exact array each chain should hold.
    cls.expected = {0: {}, 1: {}}
    for varname in varnames:
        mcmc_shape = (cls.draws,) + shapes[varname]
        values = np.arange(cls.draws * np.prod(shapes[varname]))
        cls.expected[0][varname] = values.reshape(mcmc_shape)
        cls.expected[1][varname] = values.reshape(mcmc_shape) * 100

    # Record the expected values draw by draw into both traces.
    for idx in range(cls.draws):
        point0 = {varname: cls.expected[0][varname][idx, ...] for varname in varnames}
        point1 = {varname: cls.expected[1][varname][idx, ...] for varname in varnames}
        trace0.record(point=point0)
        trace1.record(point=point1)

    trace0.close()
    trace1.close()
    cls.mtrace = base.MultiTrace([trace0, trace1])
def test_accuracy_non_normal():
    """find_MAP on a non-normal model should land on the known mode ``mu``."""
    _, model, (mu, _) = non_normal(4)
    with model:
        initial_point = Point(x=[0.5, 0.01, 0.95, 0.99])
        newstart = find_MAP(initial_point)
        tolerance = select_by_precision(float64=1e-5, float32=1e-4)
        close_to(newstart["x"], mu, tolerance)
def setUp(self):
    """Create a fresh model plus a single backend trace, ready to record 3 draws."""
    self.test_point, self.model, _ = models.non_normal(self.shape)
    with self.model:
        self.trace = self.backend(self.name)
    self.draws = 3
    self.chain = 0
    self.trace.setup(self.draws, self.chain)
def test_guess_scaling():
    """Guessed scaling values must all be strictly positive and finite-ish (< 1e200)."""
    start, model, _ = models.non_normal(n=5)
    guessed = scaling.guess_scaling(start, model=model)
    within_bounds = (guessed > 0) & (guessed < 1e200)
    assert all(within_bounds)