def test_rvs_to_value_vars():
    """Check that random variables in a graph are replaced by their value variables."""
    with pm.Model() as m:
        a = pm.Uniform("a", 0.0, 1.0)
        b = pm.Uniform("b", 0, a + 1.0)
        c = pm.Normal("c")
        d = at.log(c + b) + 2.0

    a_value = m.rvs_to_values[a]
    # `a` is bounded on (0, 1), so its value variable carries a transform
    assert a_value.tag.transform
    b_value = m.rvs_to_values[b]
    c_value = m.rvs_to_values[c]

    (res,), replaced = rvs_to_value_vars((d,))

    # Outermost node is the `+ 2.0`; underneath sits `log(c + b)`
    assert res.owner.op == at.add
    log_node = res.owner.inputs[0]
    assert log_node.owner.op == at.log
    inner_add = res.owner.inputs[0].owner.inputs[0]
    assert inner_add.owner.op == at.add

    # The random variables must have been swapped for their value variables
    assert inner_add.owner.inputs[0] == c_value
    assert inner_add.owner.inputs[1] == b_value

    res_ancestors = list(walk_model((res,), walk_past_rvs=True))
    res_rv_ancestors = [
        v for v in res_ancestors if v.owner and isinstance(v.owner.op, RandomVariable)
    ]

    # No `RandomVariable`s may remain anywhere in the replaced graph
    assert len(res_rv_ancestors) == 0
    assert b_value in res_ancestors
    assert c_value in res_ancestors
    # Without transforms, `a`'s value variable never enters the graph
    assert a_value not in res_ancestors

    (res,), replaced = rvs_to_value_vars((d,), apply_transforms=True)

    res_ancestors = list(walk_model((res,), walk_past_rvs=True))
    res_rv_ancestors = [
        v for v in res_ancestors if v.owner and isinstance(v.owner.op, RandomVariable)
    ]

    assert len(res_rv_ancestors) == 0
    # Applying transforms pulls in `a`'s value variable through `b`'s
    # transformed bound (`b` depends on `a`)
    assert a_value in res_ancestors
    assert b_value in res_ancestors
    assert c_value in res_ancestors
def __init__(self, vars, proposal="uniform", order="random", model=None):
    """Set up a categorical Gibbs-Metropolis sampler over ``vars``.

    Parameters
    ----------
    vars
        Random variables (or their value variables) to sample. Every
        variable must be categorical or Bernoulli-distributed.
    proposal
        ``"uniform"`` for uniform proposals, or ``"proportional"`` for the
        Metropolized Gibbs sampler described in Liu (1996).
    order
        ``"random"`` to shuffle the flattened dimensions each step, or an
        explicit permutation of ``range(len(dimcats))``.
    model
        Model to sample from; taken from the model context when ``None``.

    Raises
    ------
    ValueError
        If a variable is neither categorical nor binary, if ``order`` is
        not a valid permutation, or if ``proposal`` is unrecognized.
    """
    model = pm.modelcontext(model)

    # Callers may pass RVs; map them to their value variables first
    vars = [model.rvs_to_values.get(var, var) for var in vars]
    vars = pm.inputvars(vars)

    initial_point = model.initial_point()

    dimcats = []
    # dimcats is a list of pairs (aggregate dimension, number of
    # categories). For example, if vars = [x, y] with x being a 2-D
    # variable with M categories and y being a 3-D variable with N
    # categories, we will have
    # dimcats = [(0, M), (1, M), (2, N), (3, N), (4, N)].
    for v in vars:
        v_init_val = initial_point[v.name]

        rv_var = model.values_to_rvs[v]
        distr = getattr(rv_var.owner, "op", None)

        if isinstance(distr, CategoricalRV):
            # The category count is the trailing dimension of the
            # probability-vector parameter; it may itself be a symbolic
            # graph, so replace RVs with values and evaluate it at the
            # initial point.
            k_graph = rv_var.owner.inputs[3].shape[-1]
            (k_graph,), _ = rvs_to_value_vars((k_graph,), apply_transforms=True)
            k = model.compile_fn(
                k_graph, inputs=model.value_vars, on_unused_input="ignore"
            )(initial_point)
        elif isinstance(distr, BernoulliRV):
            k = 2
        else:
            # BUGFIX: the two literals previously concatenated without a
            # separating space ("binaryfor CategoricalGibbsMetropolis")
            raise ValueError(
                "All variables must be categorical or binary "
                "for CategoricalGibbsMetropolis"
            )
        start = len(dimcats)
        dimcats += [(dim, k) for dim in range(start, start + v_init_val.size)]

    if order == "random":
        self.shuffle_dims = True
        self.dimcats = dimcats
    else:
        if sorted(order) != list(range(len(dimcats))):
            raise ValueError("Argument 'order' has to be a permutation")
        self.shuffle_dims = False
        self.dimcats = [dimcats[j] for j in order]

    if proposal == "uniform":
        self.astep = self.astep_unif
    elif proposal == "proportional":
        # Use the optimized "Metropolized Gibbs Sampler" described in Liu96.
        self.astep = self.astep_prop
    else:
        raise ValueError(
            "Argument 'proposal' should either be 'uniform' or 'proportional'"
        )

    super().__init__(vars, [model.compile_logp()])
def test_rvs_to_value_vars_nested():
    """Regression test for #5172: ``rvs_to_value_vars`` must not mutate the
    model's free RVs in place when nested transformations are involved."""
    with pm.Model() as m:
        one = pm.LogNormal("one", mu=0)
        two = pm.LogNormal("two", mu=at.log(one))

        # Register potentials deliberately out of topological order
        pm.Potential("two_pot", two)
        pm.Potential("one_pot", one)

        rvs_before = aesara.clone_replace(m.free_RVs)

        # Prior to the fix for #5172 this call rewrote the free RVs in place
        _, _ = rvs_to_value_vars(m.potentials, apply_transforms=True)

        rvs_after = aesara.clone_replace(m.free_RVs)

        assert equal_computations(rvs_before, rvs_after)
def logcdfpt(
    var: TensorVariable,
    rv_values: Optional[Union[TensorVariable, Dict[TensorVariable, TensorVariable]]] = None,
    *,
    scaling: bool = True,
    sum: bool = True,
    **kwargs,
) -> TensorVariable:
    """Create a measure-space (i.e. log-cdf) graph for a random variable
    at a given point.

    Parameters
    ==========
    var
        The `RandomVariable` output that determines the log-cdf graph.
    rv_values
        A variable, or ``dict`` of variables, that represents the value of
        `var` in its log-cdf. If no `rv_values` is provided,
        ``var.tag.value_var`` will be checked and, when available, used.
    scaling
        Whether to apply the model's ``total_size`` scaling to the
        generated graph.
    sum
        Sum the log-cdf over all dimensions.
    **kwargs
        Forwarded to the underlying ``_logcdf`` dispatch.
    """
    if not isinstance(rv_values, Mapping):
        rv_values = {var: rv_values} if rv_values is not None else {}

    rv_var, rv_value_var = extract_rv_and_value_vars(var)

    rv_value = rv_values.get(rv_var, rv_value_var)

    if rv_var is not None and rv_value is None:
        raise ValueError(f"No value variable specified or associated with {rv_var}")

    if rv_value is not None:
        rv_value = at.as_tensor(rv_value)

        if rv_var is not None:
            # Make sure that the value is compatible with the random variable
            rv_value = rv_var.type.filter_variable(rv_value.astype(rv_var.dtype))

        if rv_value_var is None:
            rv_value_var = rv_value

    rv_node = rv_var.owner

    rng, size, dtype, *dist_params = rv_node.inputs

    # Here, we plug the actual random variable into the log-likelihood graph,
    # because we want a log-likelihood graph that only contains
    # random variables. This is important, because a random variable's
    # parameters can contain random variables themselves.
    # Ultimately, with a graph containing only random variables and
    # "deterministics", we can simply replace all the random variables with
    # their value variables and be done.
    tmp_rv_values = rv_values.copy()
    tmp_rv_values[rv_var] = rv_var

    logp_var = _logcdf(rv_node.op, rv_var, tmp_rv_values, *dist_params, **kwargs)

    # NOTE: a previous revision computed the value variable's transform here
    # but never used it; the dead assignment has been removed.

    # Replace random variables with their value variables
    replacements = rv_values.copy()
    replacements.update({rv_var: rv_value, rv_value_var: rv_value})

    (logp_var,), _ = rvs_to_value_vars(
        (logp_var,),
        apply_transforms=False,
        initial_replacements=replacements,
    )

    if sum:
        logp_var = at.sum(logp_var)

    if scaling:
        # Apply minibatch/total_size scaling when present on the RV
        logp_var *= _get_scaling(
            getattr(rv_var.tag, "total_size", None), rv_value.shape, rv_value.ndim
        )

    # Recompute test values for the changes introduced by the replacements
    # above.
    if config.compute_test_value != "off":
        for node in io_toposort(graph_inputs((logp_var,)), (logp_var,)):
            compute_test_value(node)

    if rv_var.name is not None:
        logp_var.name = f"__logp_{rv_var.name}"

    return logp_var