def test_random_seed(self):
    seedx = aesara.shared(np.random.default_rng(1))
    seedy = aesara.shared(np.random.default_rng(1))
    x = at.random.normal(rng=seedx)
    y = at.random.normal(rng=seedy)

    # Shared variables are the same, so outputs will be identical
    f0 = aesara.function([], [x, y])
    x0_eval, y0_eval = f0()
    assert x0_eval == y0_eval

    # The variables will be reseeded with new seeds by default
    f1 = compile_pymc([], [x, y])
    x1_eval, y1_eval = f1()
    assert x1_eval != y1_eval

    # Check that seeding works
    f2 = compile_pymc([], [x, y], random_seed=1)
    x2_eval, y2_eval = f2()
    assert x2_eval != x1_eval
    assert y2_eval != y1_eval

    f3 = compile_pymc([], [x, y], random_seed=1)
    x3_eval, y3_eval = f3()
    assert x3_eval == x2_eval
    assert y3_eval == y2_eval
def _logp_forw(point, out_vars, in_vars, shared):
    """Compile Aesara function of the model and the input and output variables.

    Parameters
    ----------
    out_vars: List
        containing :class:`pymc.Distribution` for the output variables
    in_vars: List
        containing :class:`pymc.Distribution` for the input variables
    shared: List
        containing :class:`aesara.tensor.Tensor` for depended shared data
    """
    # Replace integer inputs with rounded float inputs
    if any(var.dtype in discrete_types for var in in_vars):
        replace_int_input = {}
        new_in_vars = []
        for in_var in in_vars:
            if in_var.dtype in discrete_types:
                float_var = at.TensorType("floatX", in_var.broadcastable)(in_var.name)
                new_in_vars.append(float_var)
                replace_int_input[in_var] = at.round(float_var)
            else:
                new_in_vars.append(in_var)

        out_vars = clone_replace(out_vars, replace_int_input, strict=False)
        in_vars = new_in_vars

    out_list, inarray0 = join_nonshared_inputs(point, out_vars, in_vars, shared)
    f = compile_pymc([inarray0], out_list[0])
    f.trust_input = True
    return f
def test_layers(self):
    with pm.Model(rng_seeder=232093) as model:
        a = pm.Uniform("a", lower=0, upper=1, size=10)
        b = pm.Binomial("b", n=1, p=a, size=10)

    b_sampler = compile_pymc([], b, mode="FAST_RUN")
    avg = np.stack([b_sampler() for i in range(10000)]).mean(0)
    npt.assert_array_almost_equal(avg, 0.5 * np.ones((10,)), decimal=2)
def test_check_bounds_flag(self):
    """Test that CheckParameterValue Ops are replaced or removed when using compile_pymc"""
    logp = at.ones(3)
    cond = np.array([1, 0, 1])
    bound = check_parameters(logp, cond)

    with pm.Model() as m:
        pass

    with pytest.raises(ParameterValueError):
        aesara.function([], bound)()

    m.check_bounds = False
    with m:
        assert np.all(compile_pymc([], bound)() == 1)

    m.check_bounds = True
    with m:
        assert np.all(compile_pymc([], bound)() == -np.inf)
def delta_logp(point, logp, vars, shared):
    [logp0], inarray0 = pm.join_nonshared_inputs(point, [logp], vars, shared)

    tensor_type = inarray0.type
    inarray1 = tensor_type("inarray1")

    logp1 = pm.CallableTensor(logp0)(inarray1)

    f = compile_pymc([inarray1, inarray0], logp1 - logp0)
    f.trust_input = True
    return f
def test_compile_pymc_custom_update_op(self):
    """Test that custom MeasurableVariable Op updates are used by compile_pymc"""

    class UnmeasurableOp(OpFromGraph):
        def update(self, node):
            return {node.inputs[0]: node.inputs[0] + 1}

    dummy_inputs = [at.scalar(), at.scalar()]
    dummy_outputs = [at.add(*dummy_inputs)]
    dummy_x = UnmeasurableOp(dummy_inputs, dummy_outputs)(aesara.shared(1.0), 1.0)

    # Check that there are no updates at first
    fn = compile_pymc(inputs=[], outputs=dummy_x)
    assert fn() == fn() == 2.0

    # And they are enabled once the Op is registered as Measurable
    MeasurableVariable.register(UnmeasurableOp)
    fn = compile_pymc(inputs=[], outputs=dummy_x)
    assert fn() == 2.0
    assert fn() == 3.0
def test_compile_pymc_sets_rng_updates(self):
    rng = aesara.shared(np.random.default_rng(0))
    x = pm.Normal.dist(rng=rng)
    assert x.owner.inputs[0] is rng
    f = compile_pymc([], x)
    assert not np.isclose(f(), f())

    # Check that update was not done inplace
    assert not hasattr(rng, "default_update")
    f = aesara.function([], x)
    assert f() == f()
def test_compile_pymc_missing_default_explicit_updates(self):
    rng = aesara.shared(np.random.default_rng(0))
    x = pm.Normal.dist(rng=rng)

    # By default, compile_pymc should update the rng of x
    f = compile_pymc([], x)
    assert f() != f()

    # An explicit update should override the default_update, like aesara.function does
    # For testing purposes, we use an update that leaves the rng unchanged
    f = compile_pymc([], x, updates={rng: rng})
    assert f() == f()

    # If we specify a custom default_update directly it should use that instead.
    rng.default_update = rng
    f = compile_pymc([], x)
    assert f() == f()

    # And again, it should be overridden by an explicit update
    f = compile_pymc([], x, updates={rng: x.owner.outputs[0]})
    assert f() != f()
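# For reference: the update that compile_pymc installs automatically for each RandomVariable is
# effectively the same as feeding the RV's next-rng output (x.owner.outputs[0]) back into its
# shared rng input by hand. A minimal sketch of that manual wiring with plain aesara.function:
import aesara
import aesara.tensor as at
import numpy as np

rng = aesara.shared(np.random.default_rng(0))
x = at.random.normal(rng=rng)
f = aesara.function([], x, updates={rng: x.owner.outputs[0]})
assert f() != f()  # each call advances the generator, so successive draws differ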
def replace_with_values(vars_needed, replacements=None, model=None):
    R"""Replace random variable nodes in the graph with values given by the replacements dict.

    Uses untransformed versions of the inputs, performs some basic input validation.

    Parameters
    ----------
    vars_needed: list of TensorVariables
        A list of variable outputs
    replacements: dict with string keys, numeric values
        The variable name and values to be replaced in the model graph.
    model: Model
        A PyMC model object
    """
    model = modelcontext(model)

    inputs, input_names = [], []
    for rv in walk_model(vars_needed, walk_past_rvs=True):
        if rv in model.named_vars.values() and not isinstance(rv, SharedVariable):
            inputs.append(rv)
            input_names.append(rv.name)

    # Then it's deterministic, no inputs are required, can eval and return
    if len(inputs) == 0:
        return tuple(v.eval() for v in vars_needed)

    fn = compile_pymc(
        inputs,
        vars_needed,
        allow_input_downcast=True,
        accept_inplace=True,
        on_unused_input="ignore",
    )

    # Remove unneeded inputs
    replacements = {name: val for name, val in replacements.items() if name in input_names}
    missing = set(input_names) - set(replacements.keys())

    # Error if more inputs are needed
    if len(missing) > 0:
        missing_str = ", ".join(missing)
        raise ValueError(f"Values for {missing_str} must be included in `replacements`.")

    return fn(**replacements)
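# A hypothetical usage sketch (the model, expression, and values below are invented for
# illustration): given an expression that depends on a named model variable "a",
# replace_with_values substitutes the supplied numeric value and evaluates the expression.
import pymc as pm

with pm.Model() as model:
    a = pm.Normal("a")

expr = 2 * model["a"] + 1.0
print(replace_with_values([expr], replacements={"a": 0.5}, model=model))  # roughly [array(2.)]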
def test_compile_pymc_updates_inputs(self):
    """Test that compile_pymc does not include rngs updates of variables that are inputs
    or ancestors to inputs
    """
    x = at.random.normal()
    y = at.random.normal(x)
    z = at.random.normal(y)

    for inputs, rvs_in_graph in (
        ([], 3),
        ([x], 2),
        ([y], 1),
        ([z], 0),
        ([x, y], 1),
        ([x, y, z], 0),
    ):
        fn = compile_pymc(inputs, z, on_unused_input="ignore")
        fn_fgraph = fn.maker.fgraph
        # Each RV adds a shared input for its rng
        assert len(fn_fgraph.inputs) == len(inputs) + rvs_in_graph
        # If the output is an input, the graph has a DeepCopyOp
        assert len(fn_fgraph.apply_nodes) == max(rvs_in_graph, 1)
        # Each RV adds a shared output for its rng
        assert len(fn_fgraph.outputs) == 1 + rvs_in_graph
def make_initial_point_fn(
    *,
    model,
    overrides: Optional[StartDict] = None,
    jitter_rvs: Optional[Set[TensorVariable]] = None,
    default_strategy: str = "moment",
    return_transformed: bool = True,
) -> Callable:
    """Create seeded function that computes initial values for all free model variables.

    Parameters
    ----------
    jitter_rvs : set
        The set (or list or tuple) of random variables for which a U(-1, +1) jitter should be
        added to the initial value. Only available for variables that have a transform or
        real-valued support.
    default_strategy : str
        Which of { "moment", "prior" } to prefer if the initval setting for an RV is None.
    overrides : dict
        Initial value (strategies) to use instead of what's specified in `Model.initial_values`.
    return_transformed : bool
        If `True` the returned variables will correspond to transformed initial values.
    """

    def find_rng_nodes(variables):
        return [
            node
            for node in graph_inputs(variables)
            if isinstance(
                node,
                (
                    at.random.var.RandomStateSharedVariable,
                    at.random.var.RandomGeneratorSharedVariable,
                ),
            )
        ]

    overrides = convert_str_to_rv_dict(model, overrides or {})

    initial_values = make_initial_point_expression(
        free_rvs=model.free_RVs,
        rvs_to_values=model.rvs_to_values,
        initval_strategies={**model.initial_values, **(overrides or {})},
        jitter_rvs=jitter_rvs,
        default_strategy=default_strategy,
        return_transformed=return_transformed,
    )

    # Replace original rng shared variables so that we don't mess with them
    # when calling the final seeded function
    graph = FunctionGraph(outputs=initial_values, clone=False)
    rng_nodes = find_rng_nodes(graph.outputs)
    new_rng_nodes = []
    for rng_node in rng_nodes:
        if isinstance(rng_node, at.random.var.RandomStateSharedVariable):
            new_rng = np.random.RandomState(np.random.PCG64())
        else:
            new_rng = np.random.Generator(np.random.PCG64())
        new_rng_nodes.append(aesara.shared(new_rng))
    graph.replace_all(zip(rng_nodes, new_rng_nodes), import_missing=True)
    func = compile_pymc(inputs=[], outputs=graph.outputs, mode=aesara.compile.mode.FAST_COMPILE)

    varnames = []
    for var in model.free_RVs:
        transform = getattr(model.rvs_to_values[var].tag, "transform", None)
        if transform is not None and return_transformed:
            name = get_transformed_name(var.name, transform)
        else:
            name = var.name
        varnames.append(name)

    def make_seeded_function(func):
        rngs = find_rng_nodes(func.maker.fgraph.outputs)

        @functools.wraps(func)
        def inner(seed, *args, **kwargs):
            seeds = [
                np.random.PCG64(sub_seed)
                for sub_seed in np.random.SeedSequence(seed).spawn(len(rngs))
            ]
            for rng, seed in zip(rngs, seeds):
                if isinstance(rng, at.random.var.RandomStateSharedVariable):
                    new_rng = np.random.RandomState(seed)
                else:
                    new_rng = np.random.Generator(seed)
                rng.set_value(new_rng, True)
            values = func(*args, **kwargs)
            return dict(zip(varnames, values))

        return inner

    return make_seeded_function(func)
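# A hypothetical usage sketch (the model below is invented for illustration): the function
# returned by make_initial_point_fn takes a seed as its first argument and returns a dict
# mapping (possibly transformed) free-variable names to their initial values.
import pymc as pm

with pm.Model() as model:
    mu = pm.Normal("mu", 0.0, 1.0)
    sigma = pm.HalfNormal("sigma", 1.0)

ipfn = make_initial_point_fn(model=model, jitter_rvs=set(model.free_RVs))
print(ipfn(123))  # e.g. {"mu": array(...), "sigma_log__": array(...)}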
def test_compile_pymc_with_updates(self):
    x = aesara.shared(0)
    f = compile_pymc([], x, updates={x: x + 1})
    assert f() == 0
    assert f() == 1