def test_density_scaling_with_generator(self):
    # We have generators of different sizes
    def true_dens():
        g = gen1()
        for i, point in enumerate(g):
            yield stats.norm.logpdf(point).sum() * 10

    t = true_dens()

    # We have models of the same size
    with pm.Model() as model1:
        Normal("n", observed=gen1(), total_size=100)
        p1 = aesara.function([], model1.logp())

    with pm.Model() as model2:
        gen_var = generator(gen2())
        Normal("n", observed=gen_var, total_size=100)
        p2 = aesara.function([], model2.logp())

    for i in range(10):
        _1, _2, _t = p1(), p2(), next(t)
        decimals = select_by_precision(float64=7, float32=1)
        # Values are O(-50,000), so compare at reduced precision
        np.testing.assert_almost_equal(_1, _t, decimal=decimals)
        np.testing.assert_almost_equal(_1, _2)
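
# A minimal sketch (hypothetical helper, not part of the test suite) of the
# rescaling that ``total_size`` applies above: the log-density of a minibatch
# is multiplied by ``total_size / batch_size`` so it approximates the
# full-data logp. This is presumably why ``true_dens`` scales each per-batch
# logp by 10, given ``total_size=100`` and batches of 10 rows.
def _scaled_minibatch_logp(batch, total_size):
    batch_logp = stats.norm.logpdf(batch).sum()
    return batch_logp * (total_size / batch.shape[0])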
def test_joint_logp_subtensor():
    """Make sure we can compute a log-likelihood for ``Y[I]`` where ``Y`` and ``I`` are random variables."""
    size = 5

    mu_base = floatX(np.power(10, np.arange(np.prod(size)))).reshape(size)
    mu = np.stack([mu_base, -mu_base])
    sigma = 0.001
    rng = aesara.shared(np.random.RandomState(232), borrow=True)

    A_rv = Normal.dist(mu, sigma, rng=rng)
    A_rv.name = "A"

    p = 0.5

    I_rv = Bernoulli.dist(p, size=size, rng=rng)
    I_rv.name = "I"

    # For each column j, select the row given by I[j], i.e. A[I[j], j]
    A_idx = A_rv[I_rv, at.ogrid[A_rv.shape[-1] :]]

    assert isinstance(A_idx.owner.op, (Subtensor, AdvancedSubtensor, AdvancedSubtensor1))

    A_idx_value_var = A_idx.type()
    A_idx_value_var.name = "A_idx_value"

    I_value_var = I_rv.type()
    I_value_var.name = "I_value"

    A_idx_logps = joint_logp(A_idx, {A_idx: A_idx_value_var, I_rv: I_value_var}, sum=False)
    A_idx_logp = at.add(*A_idx_logps)

    logp_vals_fn = aesara.function([A_idx_value_var, I_value_var], A_idx_logp)

    # The compiled graph should not contain any `RandomVariables`
    assert_no_rvs(logp_vals_fn.maker.fgraph.outputs[0])

    decimals = select_by_precision(float64=6, float32=4)

    for i in range(10):
        bern_sp = sp.bernoulli(p)
        I_value = bern_sp.rvs(size=size).astype(I_rv.dtype)

        norm_sp = sp.norm(mu[I_value, np.ogrid[mu.shape[1] :]], sigma)
        A_idx_value = norm_sp.rvs().astype(A_idx.dtype)

        # The expected log-likelihood is the normal logpdf of the selected
        # entries plus the Bernoulli logpmf of the index draws
        exp_obs_logps = norm_sp.logpdf(A_idx_value)
        exp_obs_logps += bern_sp.logpmf(I_value)

        logp_vals = logp_vals_fn(A_idx_value, I_value)

        np.testing.assert_almost_equal(logp_vals, exp_obs_logps, decimal=decimals)
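
# A plain-NumPy illustration of the advanced indexing checked above: for each
# column ``j``, ``A_idx`` selects the row given by ``I[j]``, i.e. ``A[I[j], j]``.
# The helper below is ad hoc (not part of the test suite).
def _subtensor_indexing_sketch():
    mu = np.stack([np.power(10.0, np.arange(5)), -np.power(10.0, np.arange(5))])
    I = np.array([0, 1, 0, 1, 1])
    # Rows [0, 1, 0, 1, 1] paired with columns [0, 1, 2, 3, 4] give
    # [1., -10., 100., -1000., -10000.]: a per-element mixture of the two rows,
    # which is what ``joint_logp`` must account for via ``I_rv``
    return mu[I, np.arange(5)]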
def test_GARCH11():
    # Test data ~ N(0, 1)
    data = np.array(
        [
            -1.35078362, -0.81254164, 0.28918551, -2.87043544, -0.94353337,
            0.83660719, -0.23336562, -0.58586298, -1.36856736, -1.60832975,
            -1.31403141, 0.05446936, -0.97213128, -0.18928725, 1.62011258,
            -0.95978616, -2.06536047, 0.6556103, -0.27816645, -1.26413397,
        ]
    )
    omega = 0.6
    alpha_1 = 0.4
    beta_1 = 0.5
    initial_vol = np.float64(0.9)

    # Unroll the GARCH(1,1) volatility recursion by hand
    vol = np.empty_like(data)
    vol[0] = initial_vol
    for i in range(len(data) - 1):
        vol[i + 1] = np.sqrt(omega + beta_1 * vol[i] ** 2 + alpha_1 * data[i] ** 2)

    with Model() as t:
        y = GARCH11(
            "y",
            omega=omega,
            alpha_1=alpha_1,
            beta_1=beta_1,
            initial_vol=initial_vol,
            shape=data.shape,
        )
        z = Normal("z", mu=0, sigma=vol, shape=data.shape)

    # The GARCH11 logp should match a plain Normal with the unrolled volatility
    garch_like = t["y"].logp({"z": data, "y": data})
    reg_like = t["z"].logp({"z": data, "y": data})
    decimal = select_by_precision(float64=7, float32=4)
    np.testing.assert_allclose(garch_like, reg_like, 10 ** (-decimal))
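
# For reference, the volatility loop above is the standard GARCH(1,1) update,
#   sigma_t**2 = omega + alpha_1 * y_{t-1}**2 + beta_1 * sigma_{t-1}**2.
# A standalone sketch of the same recursion (hypothetical helper, mirroring
# the ``vol`` loop in the test):
def _garch11_vol(data, omega, alpha_1, beta_1, initial_vol):
    vol = np.empty_like(data)
    vol[0] = initial_vol
    for i in range(len(data) - 1):
        vol[i + 1] = np.sqrt(omega + alpha_1 * data[i] ** 2 + beta_1 * vol[i] ** 2)
    return vol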
def test_accuracy_non_normal():
    _, model, (mu, _) = non_normal(4)
    with model:
        newstart = find_MAP(Point(x=[0.5, 0.01, 0.95, 0.99]))
        close_to(newstart["x"], mu, select_by_precision(float64=1e-5, float32=1e-4))
def test_accuracy_normal():
    _, model, (mu, _) = simple_model()
    with model:
        newstart = find_MAP(Point(x=[-10.5, 100.5]))
        close_to(newstart["x"], [mu, mu], select_by_precision(float64=1e-5, float32=1e-4))
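
# The helpers used in these MAP tests are test-suite utilities; based on how
# they are called here, they behave roughly like the sketches below (an
# assumption, not the actual implementations):
def _select_by_precision(float64, float32):
    # Pick the value matching aesara's active float precision
    return float64 if aesara.config.floatX == "float64" else float32


def _close_to(value, target, tol):
    np.testing.assert_allclose(value, target, atol=tol)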