import aesara
import numpy as np
import numpy.ma as ma
import pytest

from aesara.tensor.random.op import RandomVariable

# NOTE: the pymc import paths below are assumed for the aesara-era v4 codebase
# these tests target; adjust them if the local module layout differs.
from pymc import Model, Normal, Uniform
from pymc.aesaraf import walk_model
from pymc.distributions.logprob import logpt
from pymc.distributions.transforms import interval
from pymc.exceptions import ImputationWarning
from pymc.model import _get_scaling
from pymc.sampling import sample, sample_posterior_predictive, sample_prior_predictive


def test_logpt_basic():
    """Make sure we can compute a log-likelihood for a hierarchical model with transforms."""
    with Model() as m:
        a = Uniform("a", 0.0, 1.0)
        c = Normal("c")
        b_l = c * a + 2.0
        b = Uniform("b", b_l, b_l + 1.0)

    a_value_var = m.rvs_to_values[a]
    assert a_value_var.tag.transform

    b_value_var = m.rvs_to_values[b]
    assert b_value_var.tag.transform

    c_value_var = m.rvs_to_values[c]

    b_logp = logpt(b, b_value_var, sum=False)

    res_ancestors = list(walk_model((b_logp,), walk_past_rvs=True))
    res_rv_ancestors = [
        v for v in res_ancestors if v.owner and isinstance(v.owner.op, RandomVariable)
    ]

    # There shouldn't be any `RandomVariable`s in the resulting graph
    assert len(res_rv_ancestors) == 0
    assert b_value_var in res_ancestors
    assert c_value_var in res_ancestors
    assert a_value_var in res_ancestors
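

# Side note (standalone illustration, not part of the test above): when a value
# variable carries an interval transform, `logpt` evaluates the density on the
# unconstrained space plus the log-Jacobian of the backward map. For Uniform(0, 1)
# the backward map is the sigmoid, so the transformed logp at a point x reduces to
# the Jacobian term alone, since the Uniform(0, 1) logp is 0 on its support.
import math


def _interval_logp_sketch(x):
    # Hypothetical helper, for illustration only.
    sig = 1.0 / (1.0 + math.exp(-x))  # backward map: real line -> (0, 1)
    return math.log(sig) + math.log(1 - sig)  # 0 (Uniform logp) + log|d sigmoid/dx|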


def test_model_unchanged_logprob_access():
    # Issue #5007
    with Model() as model:
        a = Normal("a")
        c = Uniform("c", lower=a - 1, upper=1)

    original_inputs = set(aesara.graph.graph_inputs([c]))
    # Merely accessing model.logpt should not mutate the model's graph
    model.logpt
    new_inputs = set(aesara.graph.graph_inputs([c]))
    assert original_inputs == new_inputs
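

# Quick illustration (standalone, hypothetical tensors) of the helper used above:
# `aesara.graph.graph_inputs` yields the ownerless root variables of a graph, so
# comparing the input sets before and after reading `model.logpt` detects any
# accidental graph mutation.
def _graph_inputs_demo():
    import aesara.tensor as at

    x = at.scalar("x")
    y = at.exp(x)
    assert list(aesara.graph.graph_inputs([y])) == [x]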


def test_interval_missing_observations():
    with Model() as model:
        obs1 = ma.masked_values([1, 2, -1, 4, -1], value=-1)
        obs2 = ma.masked_values([-1, -1, 6, -1, 8], value=-1)

        rng = aesara.shared(np.random.RandomState(2323), borrow=True)

        with pytest.warns(ImputationWarning):
            theta1 = Uniform("theta1", 0, 5, observed=obs1, rng=rng)
        with pytest.warns(ImputationWarning):
            theta2 = Normal("theta2", mu=theta1, observed=obs2, rng=rng)

        assert "theta1_observed_interval__" in model.named_vars
        assert "theta1_missing_interval__" in model.named_vars
        assert isinstance(
            model.rvs_to_values[model.named_vars["theta1_observed"]].tag.transform, interval
        )

        prior_trace = sample_prior_predictive(return_inferencedata=False)

        # Make sure the observed + missing combined deterministics have the
        # same shape as the original observations vectors
        assert prior_trace["theta1"].shape[-1] == obs1.shape[0]
        assert prior_trace["theta2"].shape[-1] == obs2.shape[0]

        # Make sure that the observed values are newly generated samples
        assert np.all(np.var(prior_trace["theta1_observed"], 0) > 0.0)
        assert np.all(np.var(prior_trace["theta2_observed"], 0) > 0.0)

        # Make sure the missing and observed parts of the combined deterministics
        # match the sampled missing and observed variable values
        assert np.mean(prior_trace["theta1"][:, obs1.mask] - prior_trace["theta1_missing"]) == 0.0
        assert np.mean(prior_trace["theta1"][:, ~obs1.mask] - prior_trace["theta1_observed"]) == 0.0
        assert np.mean(prior_trace["theta2"][:, obs2.mask] - prior_trace["theta2_missing"]) == 0.0
        assert np.mean(prior_trace["theta2"][:, ~obs2.mask] - prior_trace["theta2_observed"]) == 0.0

        assert {"theta1", "theta2"} <= set(prior_trace.keys())

        trace = sample(
            chains=1, draws=50, compute_convergence_checks=False, return_inferencedata=False
        )

        assert np.all(0 < trace["theta1_missing"].mean(0))
        assert np.all(0 < trace["theta2_missing"].mean(0))
        assert "theta1" not in trace.varnames
        assert "theta2" not in trace.varnames

        # Make sure that the observed values are newly generated samples and that
        # the observed values and the combined deterministics match
        pp_trace = sample_posterior_predictive(trace, return_inferencedata=False)
        assert np.all(np.var(pp_trace["theta1"], 0) > 0.0)
        assert np.all(np.var(pp_trace["theta2"], 0) > 0.0)
        assert np.mean(pp_trace["theta1"][:, ~obs1.mask] - pp_trace["theta1_observed"]) == 0.0
        assert np.mean(pp_trace["theta2"][:, ~obs2.mask] - pp_trace["theta2_observed"]) == 0.0
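

# Illustration (plain numpy.ma, no pymc): how a masked observation vector splits
# into the "_observed" and "_missing" pieces compared above. pymc treats the
# unmasked entries as data and creates free RVs to impute the masked ones.
def _masked_split_demo():
    demo_obs = ma.masked_values([1, 2, -1, 4, -1], value=-1)
    assert demo_obs.compressed().tolist() == [1, 2, 4]  # observed part
    assert int(demo_obs.mask.sum()) == 2  # number of missing entries to impute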


def test_get_scaling():
    assert _get_scaling(None, (2, 3), 2).eval() == 1
    # int total_size: ndim >= 1 vs. ndim < 1
    assert _get_scaling(45, (2, 3), 1).eval() == 22.5
    assert _get_scaling(45, (2, 3), 0).eval() == 45
    # list or tuple tests
    # total_size contains other than Ellipsis, None and Int
    with pytest.raises(TypeError, match="Unrecognized `total_size` type"):
        _get_scaling([2, 4, 5, 9, 11.5], (2, 3), 2)
    # check with Ellipsis
    with pytest.raises(ValueError, match="Double Ellipsis in `total_size` is restricted"):
        _get_scaling([1, 2, 5, Ellipsis, Ellipsis], (2, 3), 2)
    with pytest.raises(
        ValueError,
        # the "bigger that ndim" wording mirrors the message raised by _get_scaling
        match="Length of `total_size` is too big, number of scalings is bigger that ndim",
    ):
        _get_scaling([1, 2, 5, Ellipsis], (2, 3), 2)

    assert _get_scaling([Ellipsis], (2, 3), 2).eval() == 1

    assert _get_scaling([4, 5, 9, Ellipsis, 32, 12], (2, 3, 2), 5).eval() == 960
    assert _get_scaling([4, 5, 9, Ellipsis], (2, 3, 2), 5).eval() == 15
    # total_size with no Ellipsis (end = [])
    with pytest.raises(
        ValueError,
        match="Length of `total_size` is too big, number of scalings is bigger that ndim",
    ):
        _get_scaling([1, 2, 5], (2, 3), 2)

    assert _get_scaling([], (2, 3), 2).eval() == 1
    assert _get_scaling((), (2, 3), 2).eval() == 1
    # total_size invalid type
    with pytest.raises(
        TypeError,
        match="Unrecognized `total_size` type, expected int or list of ints, got {1, 2, 5}",
    ):
        _get_scaling({1, 2, 5}, (2, 3), 2)

    # test with rvar from model graph
    with Model() as m2:
        rv_var = Uniform("a", 0.0, 1.0)
        total_size = []
    assert _get_scaling(total_size, shape=rv_var.shape, ndim=rv_var.ndim).eval() == 1.0
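

# A minimal pure-Python sketch (illustration only, not pymc's implementation) of
# the minibatch-scaling rule `_get_scaling` applies: each `total_size` entry is
# divided by the matching axis length, with Ellipsis splitting the entries into a
# leading group (matched left-to-right) and a trailing group (matched
# right-to-left). The `ndim` handling and validation of the real helper are
# omitted here.
def _scaling_sketch(total_size, shape):
    if total_size is None:
        return 1.0
    if isinstance(total_size, int):
        return total_size / shape[0]
    if Ellipsis in total_size:
        sep = total_size.index(Ellipsis)
        begin, end = list(total_size[:sep]), list(total_size[sep + 1 :])
    else:
        begin, end = list(total_size), []
    coef = 1.0
    for t, s in zip(begin, shape):
        if t is not None:
            coef *= t / s
    for t, s in zip(reversed(end), reversed(shape)):
        if t is not None:
            coef *= t / s
    return coef


# Mirrors a few of the values asserted against `_get_scaling` above
# (math.isclose guards against float rounding in the 960 case).
assert _scaling_sketch(45, (2, 3)) == 22.5
assert math.isclose(_scaling_sketch([4, 5, 9, Ellipsis, 32, 12], (2, 3, 2)), 960)
assert _scaling_sketch([Ellipsis], (2, 3)) == 1.0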