Example #1
def create_test_hmm():
    srng = at.random.RandomStream()

    N_tt = at.iscalar("N")
    N_tt.tag.test_value = 10
    M_tt = at.iscalar("M")
    M_tt.tag.test_value = 2

    mus_tt = at.matrix("mus")
    mus_tt.tag.test_value = np.stack(
        [np.arange(0.0, 10), np.arange(0.0, -10, -1)],
        axis=-1).astype(aesara.config.floatX)

    sigmas_tt = at.ones((N_tt, ))
    sigmas_tt.name = "sigmas"

    pi_0_rv = srng.dirichlet(at.ones((M_tt, )), name="pi_0")
    Gamma_rv = srng.dirichlet(at.ones((M_tt, M_tt)), name="Gamma")

    S_0_rv = srng.categorical(pi_0_rv, name="S_0")

    def scan_fn(mus_t, sigma_t, S_tm1, Gamma_t):
        S_t = srng.categorical(Gamma_t[S_tm1], name="S_t")
        Y_t = srng.normal(mus_t[S_t], sigma_t, name="Y_t")
        return S_t, Y_t

    (S_rv, Y_rv), scan_updates = aesara.scan(
        fn=scan_fn,
        sequences=[mus_tt, sigmas_tt],
        non_sequences=[Gamma_rv],
        outputs_info=[{
            "initial": S_0_rv,
            "taps": [-1]
        }, {}],
        strict=True,
        name="scan_rv",
    )
    Y_rv.name = "Y_rv"

    scan_op = Y_rv.owner.op
    scan_args = ScanArgs.from_node(Y_rv.owner)

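    # `ScanArgs` organizes the `Scan` op's flat inner/outer inputs and
    # outputs into named fields such as `inner_in_seqs` and
    # `inner_out_sit_sot`, which we unpack below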
    Gamma_in = scan_args.inner_in_non_seqs[0]
    Y_t = scan_args.inner_out_nit_sot[0]
    mus_t = scan_args.inner_in_seqs[0]
    sigmas_t = scan_args.inner_in_seqs[1]
    S_t = scan_args.inner_out_sit_sot[0]
    rng_in = scan_args.inner_out_shared[0]

    mus_in = Y_rv.owner.inputs[1]
    mus_in.name = "mus_in"
    sigmas_in = Y_rv.owner.inputs[2]
    sigmas_in.name = "sigmas_in"

    # The `S_rv` outer-output is really `S_rv[1:]` (a `Subtensor` of the
    # actual `Scan` output), so we extract the underlying output here
    S_in = S_rv.owner.inputs[0]
    S_in.name = "S_in"

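    # Return the entire local environment so that tests can pull out any of
    # the graph pieces constructed above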
    return locals()
Example #2
def test_Subtensor_lift_restrictions():
    rng = shared(np.random.RandomState(1233532), borrow=False)

    std = vector("std")
    std.tag.test_value = np.array([1e-5, 2e-5, 3e-5], dtype=config.floatX)
    x = normal(aet.arange(2), aet.ones(2), rng=rng)
    y = x[1]
    # The non-`Subtensor` client depends on the RNG state, so we can't perform
    # the lift
    z = x - y

    fg = FunctionGraph([rng], [z], clone=False)
    _ = EquilibriumOptimizer([local_subtensor_rv_lift],
                             max_use_ratio=100).apply(fg)

    subtensor_node = fg.outputs[0].owner.inputs[1].owner.inputs[0].owner
    assert subtensor_node == y.owner
    assert isinstance(subtensor_node.op, Subtensor)
    assert subtensor_node.inputs[0].owner.op == normal

    # The non-`Subtensor` client doesn't depend on the RNG state, so we can
    # perform the lift
    z = aet.ones(x.shape) - x[1]

    fg = FunctionGraph([rng], [z], clone=False)
    EquilibriumOptimizer([local_subtensor_rv_lift],
                         max_use_ratio=100).apply(fg)

    rv_node = fg.outputs[0].owner.inputs[1].owner.inputs[0].owner
    assert rv_node.op == normal
    assert isinstance(rv_node.inputs[-1].owner.op, Subtensor)
    assert isinstance(rv_node.inputs[-2].owner.op, Subtensor)
Example #3
def test_normal_infer_shape():
    M_aet = iscalar("M")
    M_aet.tag.test_value = 3
    sd_aet = scalar("sd")
    sd_aet.tag.test_value = np.array(1.0, dtype=config.floatX)

    test_params = [
        ([aet.as_tensor_variable(np.array(1.0, dtype=config.floatX)),
          sd_aet], None),
        (
            [
                aet.as_tensor_variable(np.array(1.0, dtype=config.floatX)),
                sd_aet
            ],
            (M_aet, ),
        ),
        (
            [
                aet.as_tensor_variable(np.array(1.0, dtype=config.floatX)),
                sd_aet
            ],
            (2, M_aet),
        ),
        ([aet.zeros((M_aet, )), sd_aet], None),
        ([aet.zeros((M_aet, )), sd_aet], (M_aet, )),
        ([aet.zeros((M_aet, )), sd_aet], (2, M_aet)),
        ([aet.zeros((M_aet, )), aet.ones((M_aet, ))], None),
        ([aet.zeros((M_aet, )), aet.ones((M_aet, ))], (2, M_aet)),
        (
            [
                np.array([[-1, 20], [300, -4000]], dtype=config.floatX),
                np.array([[1e-6, 2e-6]], dtype=config.floatX),
            ],
            (3, 2, 2),
        ),
        (
            [
                np.array([1], dtype=config.floatX),
                np.array([10], dtype=config.floatX)
            ],
            (1, 2),
        ),
    ]
    for args, size in test_params:
        rv = normal(*args, size=size)
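        # `RandomVariable._infer_shape` should agree with the shape of an
        # actual draw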
        rv_shape = tuple(normal._infer_shape(size or (), args, None))
        assert tuple(get_test_value(rv_shape)) == tuple(
            get_test_value(rv).shape)
Example #4
def test_flow_formula(formula, length, order):
    spec = flows.Formula(formula)
    flows_list = spec.flows
    assert len(flows_list) == length
    if order is not None:
        assert flows_list == order
    spec(dim=2, jitter=1)(at.ones((3, 2))).eval()  # should work
Example #5
def test_density_dist_default_moment_univariate(get_moment, size, expected):
    if get_moment == "custom_moment":
        get_moment = lambda rv, size, *rv_inputs: 5 * at.ones(size,
                                                              dtype=rv.dtype)
    with Model() as model:
        DensityDist("x", get_moment=get_moment, size=size)
    assert_moment_is_expected(model, expected)
Example #6
def test_flows_collect_chain():
    initial = at.ones((3, 2))
    flow1 = flows.PlanarFlow(dim=2, z0=initial)
    flow2 = flows.PlanarFlow(dim=2, z0=flow1)
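    # `params` holds only this flow's own parameters (u, w, b for a planar
    # flow), while `all_params` also collects those of the chained `flow1`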
    assert len(flow2.params) == 3
    assert len(flow2.all_params) == 6
    np.testing.assert_allclose(flow1.logdet.eval() + flow2.logdet.eval(),
                               flow2.sum_logdets.eval())
Example #7
def multidimensional_model():
    mu = -2.1
    tau = 1.3
    with Model() as model:
        Normal("x", mu, tau=tau, shape=(3, 2), testval=0.1 * at.ones((3, 2)))

    return model.test_point, model, (mu, tau**-0.5)
Example #8
def simple_model():
    mu = -2.1
    tau = 1.3
    with Model() as model:
        Normal("x", mu, tau=tau, shape=2, testval=at.ones(2) * 0.1)

    return model.test_point, model, (mu, tau**-0.5)
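
A minimal sketch of how such a fixture is typically consumed (assuming
`pm.sample` and the fixture above are in scope; not part of the original
suite):

def sample_simple_model():
    start, model, (mu, sigma) = simple_model()
    with model:
        trace = pm.sample(10, tune=0, chains=1, start=start)
    return trace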
Example #9
def test_Dimshuffle_lift_restrictions():
    rng = shared(np.random.RandomState(1233532), borrow=False)

    x = normal(aet.arange(2).reshape((2, )), 100, size=(2, 2, 2), rng=rng)
    y = x.dimshuffle(1, 0, 2)
    # The non-`Dimshuffle` client depends on the RNG state, so we can't
    # perform the lift
    z = x - y

    fg = FunctionGraph([rng], [z], clone=False)
    _ = EquilibriumOptimizer([local_dimshuffle_rv_lift],
                             max_use_ratio=100).apply(fg)

    dimshuffle_node = fg.outputs[0].owner.inputs[1].owner
    assert dimshuffle_node == y.owner
    assert isinstance(dimshuffle_node.op, DimShuffle)
    assert dimshuffle_node.inputs[0].owner.op == normal

    # The non-`Dimshuffle` client doesn't depend on the RNG state, so we can
    # perform the lift
    z = aet.ones(x.shape) - y

    fg = FunctionGraph([rng], [z], clone=False)
    EquilibriumOptimizer([local_dimshuffle_rv_lift],
                         max_use_ratio=100).apply(fg)

    rv_node = fg.outputs[0].owner.inputs[1].owner
    assert rv_node.op == normal
    assert isinstance(rv_node.inputs[-1].owner.op, DimShuffle)
    assert isinstance(rv_node.inputs[-2].owner.op, DimShuffle)
Example #10
def test_constant_folding():
    m = aet.ones((1, ), dtype="int8")
    l = aesara.typed_list.make_list([m, m])
    f = aesara.function([], l)
    topo = f.maker.fgraph.toposort()
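    # After constant folding, only a `DeepCopyOp` of the already-computed
    # list should remain in the compiled graph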
    assert len(topo)
    assert isinstance(topo[0].op, aesara.compile.ops.DeepCopyOp)
    assert f() == [1, 1]
Example #11
    def test_bounded_dist(self):
        with pm.Model() as model:
            BoundedNormal = pm.Bound(pm.Normal, lower=0.0)
            x = BoundedNormal("x", mu=aet.zeros((3, 1)), sd=1 * aet.ones((3, 1)), shape=(3, 1))

        with model:
            prior_trace = pm.sample_prior_predictive(5)
            assert prior_trace["x"].shape == (5, 3, 1)
Example #12
def test_ScanArgs():
    with pytest.raises(TypeError):
        ScanArgs.from_node(at.ones(2).owner)

    hmm_model_env = create_test_hmm()
    scan_args = hmm_model_env["scan_args"]
    scan_op = hmm_model_env["scan_op"]

    # Make sure we can get alternate variables
    test_v = scan_args.outer_out_sit_sot[0]
    alt_test_v = scan_args.get_alt_field(test_v, "inner_out")
    assert alt_test_v == scan_args.inner_out_sit_sot[0]

    alt_test_v = scan_args.get_alt_field(test_v, "outer_in")
    assert alt_test_v == scan_args.outer_in_sit_sot[0]

    # Check the `__repr__` and `__str__`
    scan_args_repr = repr(scan_args)
    # Just make sure it doesn't err-out
    assert scan_args_repr.startswith("ScanArgs")

    # Check the properties that allow us to use
    # `Scan.get_oinp_iinp_iout_oout_mappings` as-is to implement
    # `ScanArgs.var_mappings`
    assert scan_args.n_nit_sot == scan_op.info.n_nit_sot
    assert scan_args.n_mit_mot == scan_op.info.n_mit_mot
    # `ScanArgs` may clone the inner-graph; here we make sure this one
    # didn't (and that all the inner inputs are identical)
    assert scan_args.inputs == scan_op.inner_inputs
    assert scan_args.info == scan_op.info

    # Check that `ScanArgs.find_among_fields` works
    test_v = scan_op.inner_seqs(scan_op.inner_inputs)[1]
    field_info = scan_args.find_among_fields(test_v)
    assert field_info.name == "inner_in_seqs"
    assert field_info.index == 1
    assert field_info.inner_index is None
    assert scan_args.inner_inputs[field_info.agg_index] == test_v

    test_l = scan_op.inner_non_seqs(scan_op.inner_inputs)
    # We didn't index this argument, so it's a `list` (i.e. bad input)
    field_info = scan_args.find_among_fields(test_l)
    assert field_info is None

    test_v = test_l[0]
    field_info = scan_args.find_among_fields(test_v)
    assert field_info.name == "inner_in_non_seqs"
    assert field_info.index == 0
    assert field_info.inner_index is None
    assert scan_args.inner_inputs[field_info.agg_index] == test_v

    scan_args_copy = copy(scan_args)
    assert scan_args_copy is not scan_args
    assert scan_args_copy == scan_args

    assert scan_args_copy != test_v
    scan_args_copy.outer_in_seqs.pop()
    assert scan_args_copy != scan_args
Example #13
def compute_steady_state(P):
    """Compute the steady state of a transition probability matrix.

    Parameters
    ----------
    P: TensorVariable
        A transition probability matrix for `M` states with shape `(1, M, M)`.

    Returns
    -------
    A tensor representing the steady state probabilities.
    """

    P = P[0]
    N_states = P.shape[-1]
    Lam = (at.eye(N_states) - P + at.ones((N_states, N_states))).T
    u = at.slinalg.solve(Lam, at.ones((N_states, )))
    return u
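
A quick NumPy sanity check of the same construction (an illustrative
sketch with a made-up 2-state transition matrix, not part of the original
code):

def check_steady_state_formula():
    P_np = np.array([[0.9, 0.1], [0.2, 0.8]])
    Lam = (np.eye(2) - P_np + np.ones((2, 2))).T
    u = np.linalg.solve(Lam, np.ones(2))
    assert np.allclose(u @ P_np, u)  # stationarity: u P = u
    assert np.isclose(u.sum(), 1.0)  # probabilities sum to one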
Example #14
    def test_leftadd_matrixt(self):
        X = np.linspace(0, 1, 10)[:, None]
        M = 2 * at.ones((10, 10))
        with pm.Model() as model:
            cov = M + pm.gp.cov.ExpQuad(1, 0.1)
        K = cov(X).eval()
        npt.assert_allclose(K[0, 1], 2.53940, atol=1e-3)
        # check diagonal
        Kd = cov(X, diag=True).eval()
        npt.assert_allclose(np.diag(K), Kd, atol=1e-5)
Example #15
def test_discrete_not_allowed():
    mu_true = np.array([-2, 0, 2])
    z_true = np.random.randint(len(mu_true), size=100)
    y = np.random.normal(mu_true[z_true], np.ones_like(z_true))

    with pm.Model():
        mu = pm.Normal("mu", mu=0, sigma=10, size=3)
        z = pm.Categorical("z", p=at.ones(3) / 3, size=len(y))
        pm.Normal("y_obs", mu=mu[z], sigma=1.0, observed=y)
        with pytest.raises(opvi.ParametrizationError):
            pm.fit(n=1)  # fails
Example #16
def test_Subtensor_lift_restrictions():
    rng = shared(np.random.default_rng(1233532), borrow=False)

    std = vector("std")
    std.tag.test_value = np.array([1e-5, 2e-5, 3e-5], dtype=config.floatX)
    x = normal(at.arange(2), at.ones(2), rng=rng)
    y = x[1]
    # The non-`Subtensor` client depends on the RNG state, so we can't perform
    # the lift
    z = x - y

    fg = FunctionGraph([rng], [z], clone=False)
    _ = EquilibriumOptimizer([local_subtensor_rv_lift],
                             max_use_ratio=100).apply(fg)

    subtensor_node = fg.outputs[0].owner.inputs[1].owner.inputs[0].owner
    assert subtensor_node == y.owner
    assert isinstance(subtensor_node.op, Subtensor)
    assert subtensor_node.inputs[0].owner.op == normal

    z = at.ones(x.shape) - x[1]

    # We add `x` as an output to make sure that `is_rv_used_in_graph` handles
    # `"output"` "nodes" correctly.
    fg = FunctionGraph([rng], [z, x], clone=False)
    EquilibriumOptimizer([local_subtensor_rv_lift],
                         max_use_ratio=100).apply(fg)

    assert fg.outputs[0] == z
    assert fg.outputs[1] == x

    # The non-`Subtensor` client doesn't depend on the RNG state, so we can
    # perform the lift
    fg = FunctionGraph([rng], [z], clone=False)
    EquilibriumOptimizer([local_subtensor_rv_lift],
                         max_use_ratio=100).apply(fg)

    rv_node = fg.outputs[0].owner.inputs[1].owner.inputs[0].owner
    assert rv_node.op == normal
    assert isinstance(rv_node.inputs[-1].owner.op, Subtensor)
    assert isinstance(rv_node.inputs[-2].owner.op, Subtensor)
Example #17
def test_alltrue_scalar():
    assert alltrue_scalar([]).eval()
    assert alltrue_scalar([True]).eval()
    assert alltrue_scalar([at.ones(10)]).eval()
    assert alltrue_scalar([at.ones(10), 5 * at.ones(101)]).eval()
    assert alltrue_scalar([np.ones(10), 5 * at.ones(101)]).eval()
    assert alltrue_scalar([np.ones(10), True, 5 * at.ones(101)]).eval()
    assert alltrue_scalar([np.array([1, 2, 3]), True, 5 * at.ones(101)]).eval()

    assert not alltrue_scalar([False]).eval()
    assert not alltrue_scalar([at.zeros(10)]).eval()
    assert not alltrue_scalar([True, False]).eval()
    assert not alltrue_scalar([np.array([0, -1]), at.ones(60)]).eval()
    assert not alltrue_scalar([np.ones(10), False, 5 * at.ones(101)]).eval()
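
Semantically, `alltrue_scalar` reduces each entry with `all` and then
conjoins the results; a NumPy sketch of that behavior (an illustration,
not the library implementation):

def alltrue_like(vals):
    return all(bool(np.all(v)) for v in vals)

assert alltrue_like([np.ones(10), True, 5 * np.ones(101)])
assert not alltrue_like([np.array([0, -1]), np.ones(60)])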
Example #18
    def test_savemem_does_not_duplicate_number_of_scan_nodes(self):
        var = at.ones(())
        values, _ = scan(
            lambda x: ([x], (), until(x)),
            outputs_info=[var],
            n_steps=2,
        )

        tmp_fn = function([var], values, mode=self.mode)
        scan_nodes = [
            x for x in tmp_fn.maker.fgraph.toposort() if isinstance(x.op, Scan)
        ]
        assert len(scan_nodes) == 1
Example #19
    def __init__(
        self,
        x,
        y,
        intercept=True,
        labels=None,
        priors=None,
        vars=None,
        name="",
        model=None,
        offset=0.0,
    ):
        super().__init__(name, model)
        if len(y.shape) > 1:
            err_msg = ("Only one-dimensional observed variable objects (i.e."
                       " of shape `(n, )`) are supported")
            raise TypeError(err_msg)
        if priors is None:
            priors = {}
        if vars is None:
            vars = {}
        x, labels = any_to_tensor_and_labels(x, labels)
        # now we have x, shape and labels
        if intercept:
            x = at.concatenate([at.ones((x.shape[0], 1), x.dtype), x], axis=1)
            labels = ["Intercept"] + labels
        coeffs = list()
        for name in labels:
            if name == "Intercept":
                if name in vars:
                    v = Deterministic(name, vars[name])
                else:
                    v = self.Var(name=name,
                                 dist=priors.get(name,
                                                 self.default_intercept_prior))
                coeffs.append(v)
            else:
                if name in vars:
                    v = Deterministic(name, vars[name])
                else:
                    v = self.Var(
                        name=name,
                        dist=priors.get(
                            name,
                            priors.get("Regressor",
                                       self.default_regressor_prior)),
                    )
                coeffs.append(v)
        self.coeffs = at.stack(coeffs, axis=0)
        self.y_est = x.dot(self.coeffs) + offset
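
A sketch of how such a linear component might be used (this `__init__`
appears to belong to a pymc3-style `LinearComponent`; the class name and
the data below are assumptions for illustration):

import numpy as np
X = np.random.normal(size=(100, 2))
y = X @ np.array([1.0, -2.0]) + 0.5
with Model() as model:
    lm = LinearComponent(X, y)  # hypothetical name for the class above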
Example #20
    def test_simultaneous_size_and_dims(self, with_dims_ellipsis):
        with pm.Model() as pmodel:
            x = pm.ConstantData("x", [1, 2, 3], dims="ddata")
            assert "ddata" in pmodel.dim_lengths

            # Size does not include support dims, so this test must use a dist with support dims.
            kwargs = dict(name="y", size=2, mu=at.ones((3, 4)), cov=at.eye(4))
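            # With an ellipsis, the remaining dims are left unnamed (`None`)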
            if with_dims_ellipsis:
                y = pm.MvNormal(**kwargs, dims=("dsize", ...))
                assert pmodel.RV_dims["y"] == ("dsize", None, None)
            else:
                y = pm.MvNormal(**kwargs, dims=("dsize", "ddata", "dsupport"))
                assert pmodel.RV_dims["y"] == ("dsize", "ddata", "dsupport")

            assert "dsize" in pmodel.dim_lengths
            assert y.eval().shape == (2, 3, 4)
Example #21
def test_normal_ShapeFeature():
    M_aet = iscalar("M")
    M_aet.tag.test_value = 3
    sd_aet = scalar("sd")
    sd_aet.tag.test_value = np.array(1.0, dtype=config.floatX)

    d_rv = normal(aet.ones((M_aet, )), sd_aet, size=(2, M_aet))
    # Make sure a test value can be computed for the random variable
    d_rv.tag.test_value

    fg = FunctionGraph(
        [i for i in graph_inputs([d_rv]) if not isinstance(i, Constant)],
        [d_rv],
        clone=False,
        features=[ShapeFeature()],
    )
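    # `ShapeFeature` records a symbolic shape for every variable in the
    # graph, keyed by variable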
    s1, s2 = fg.shape_feature.shape_of[d_rv]

    assert get_test_value(s1) == get_test_value(d_rv).shape[0]
    assert get_test_value(s2) == get_test_value(d_rv).shape[1]
Example #22
    def test_check_bounds_flag(self):
        """Test that CheckParameterValue Ops are replaced or removed when using compile_pymc"""
        logp = at.ones(3)
        cond = np.array([1, 0, 1])
        bound = check_parameters(logp, cond)

        with pm.Model() as m:
            pass

        with pytest.raises(ParameterValueError):
            aesara.function([], bound)()

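        # With `check_bounds` disabled, the check is stripped and `logp`
        # passes through unchanged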
        m.check_bounds = False
        with m:
            assert np.all(compile_pymc([], bound)() == 1)

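        # With `check_bounds` enabled, a failed condition is converted to
        # `-inf` rather than raising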
        m.check_bounds = True
        with m:
            assert np.all(compile_pymc([], bound)() == -np.inf)
Example #23
def test_dirichlet_ShapeFeature():
    """Make sure `RandomVariable.infer_shape` works with `ShapeFeature`."""
    M_at = iscalar("M")
    M_at.tag.test_value = 2
    N_at = iscalar("N")
    N_at.tag.test_value = 3

    d_rv = dirichlet(at.ones((M_at, N_at)), name="Gamma")

    fg = FunctionGraph(
        outputs=[d_rv],
        clone=False,
        features=[ShapeFeature()],
    )

    s1, s2 = fg.shape_feature.shape_of[d_rv]

    assert M_at in graph_inputs([s1])
    assert N_at in graph_inputs([s2])
Example #24
def test_mvnormal_ShapeFeature():
    M_aet = iscalar("M")
    M_aet.tag.test_value = 2

    d_rv = multivariate_normal(aet.ones((M_aet, )), aet.eye(M_aet), size=2)

    fg = FunctionGraph(
        [i for i in graph_inputs([d_rv]) if not isinstance(i, Constant)],
        [d_rv],
        clone=False,
        features=[ShapeFeature()],
    )

    s1, s2 = fg.shape_feature.shape_of[d_rv]

    assert get_test_value(s1) == 2
    assert M_aet in graph_inputs([s2])

    # Test broadcasted shapes
    mean = tensor(config.floatX, [True, False])
    mean.tag.test_value = np.array([[0, 1, 2]], dtype=config.floatX)

    test_covar = np.diag(np.array([1, 10, 100], dtype=config.floatX))
    test_covar = np.stack([test_covar, test_covar * 10.0])
    cov = aet.as_tensor(test_covar).type()
    cov.tag.test_value = test_covar

    d_rv = multivariate_normal(mean, cov, size=[2, 3])

    fg = FunctionGraph(
        [i for i in graph_inputs([d_rv]) if not isinstance(i, Constant)],
        [d_rv],
        clone=False,
        features=[ShapeFeature()],
    )

    s1, s2, s3, s4 = fg.shape_feature.shape_of[d_rv]

    assert s1.get_test_value() == 2
    assert s2.get_test_value() == 3
    assert s3.get_test_value() == 2
    assert s4.get_test_value() == 3
Example #25
def test_dirichlet_ShapeFeature():
    """Make sure `RandomVariable.infer_shape` works with `ShapeFeature`."""
    M_tt = iscalar("M")
    M_tt.tag.test_value = 2
    N_tt = iscalar("N")
    N_tt.tag.test_value = 3

    d_rv = dirichlet(aet.ones((M_tt, N_tt)), name="Gamma")

    fg = FunctionGraph(
        [i for i in graph_inputs([d_rv]) if not isinstance(i, Constant)],
        [d_rv],
        clone=False,
        features=[ShapeFeature()],
    )

    s1, s2 = fg.shape_feature.shape_of[d_rv]

    assert M_tt in graph_inputs([s1])
    assert N_tt in graph_inputs([s2])
Example #26
def test_Dimshuffle_lift_restrictions():
    rng = shared(np.random.default_rng(1233532), borrow=False)

    x = normal(at.arange(2).reshape((2, )), 100, size=(2, 2, 2), rng=rng)
    y = x.dimshuffle(1, 0, 2)
    # The non-`Dimshuffle` client depends on the RNG state, so we can't
    # perform the lift
    z = x - y

    fg = FunctionGraph([rng], [z, y], clone=False)
    _ = EquilibriumOptimizer([local_dimshuffle_rv_lift],
                             max_use_ratio=100).apply(fg)

    dimshuffle_node = fg.outputs[0].owner.inputs[1].owner
    assert dimshuffle_node == y.owner
    assert isinstance(dimshuffle_node.op, DimShuffle)
    assert dimshuffle_node.inputs[0].owner.op == normal

    z = at.ones(x.shape) - y

    # We add `x` as an output to make sure that `is_rv_used_in_graph` handles
    # `"output"` "nodes" correctly.
    fg = FunctionGraph([rng], [z, x], clone=False)
    EquilibriumOptimizer([local_dimshuffle_rv_lift],
                         max_use_ratio=100).apply(fg)

    assert fg.outputs[0] == z
    assert fg.outputs[1] == x

    # The non-`Dimshuffle` client doesn't depend on the RNG state, so we can
    # perform the lift
    fg = FunctionGraph([rng], [z], clone=False)
    EquilibriumOptimizer([local_dimshuffle_rv_lift],
                         max_use_ratio=100).apply(fg)

    rv_node = fg.outputs[0].owner.inputs[1].owner
    assert rv_node.op == normal
    assert isinstance(rv_node.inputs[-1].owner.op, DimShuffle)
    assert isinstance(rv_node.inputs[-2].owner.op, DimShuffle)
Example #27
def test_dirichlet_infer_shape():
    M_aet = iscalar("M")
    M_aet.tag.test_value = 3

    test_params = [
        ([aet.ones((M_aet, ))], None),
        ([aet.ones((M_aet, ))], (M_aet + 1, )),
        ([aet.ones((M_aet, ))], (2, M_aet)),
        ([aet.ones((M_aet, M_aet + 1))], None),
        ([aet.ones((M_aet, M_aet + 1))], (M_aet + 2, )),
        ([aet.ones((M_aet, M_aet + 1))], (2, M_aet + 2, M_aet + 3)),
    ]
    for args, size in test_params:
        rv = dirichlet(*args, size=size)
        rv_shape = tuple(dirichlet._infer_shape(size or (), args, None))
        assert tuple(get_test_value(rv_shape)) == tuple(
            get_test_value(rv).shape)
Example #28
def test_bound():
    logp = at.ones((10, 10))
    cond = at.ones((10, 10))
    assert np.all(bound(logp, cond).eval() == logp.eval())

    logp = at.ones((10, 10))
    cond = at.zeros((10, 10))
    assert np.all(bound(logp, cond).eval() == (-np.inf * logp).eval())

    logp = at.ones((10, 10))
    cond = True
    assert np.all(bound(logp, cond).eval() == logp.eval())

    logp = at.ones(3)
    cond = np.array([1, 0, 1])
    assert not np.all(bound(logp, cond).eval() == 1)
    assert np.prod(bound(logp, cond).eval()) == -np.inf

    logp = at.ones((2, 3))
    cond = np.array([[1, 1, 1], [1, 0, 1]])
    assert not np.all(bound(logp, cond).eval() == 1)
    assert np.prod(bound(logp, cond).eval()) == -np.inf
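
The behavior exercised above matches an elementwise switch; a NumPy
sketch (an illustration, not pymc's implementation):

def bound_like(logp, *conditions):
    ok = np.logical_and.reduce([np.asarray(c, dtype=bool)
                                for c in conditions])
    return np.where(ok, logp, -np.inf)

np.testing.assert_array_equal(bound_like(np.ones(3), np.array([1, 0, 1])),
                              np.array([1.0, -np.inf, 1.0]))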
Example #29
def test_ScanArgs_basics_mit_sot():

    srng = at.random.RandomStream()

    N_tt = at.iscalar("N")
    N_tt.tag.test_value = 10
    M_tt = at.iscalar("M")
    M_tt.tag.test_value = 2

    mus_tt = at.matrix("mus")
    mus_tt.tag.test_value = np.stack(
        [np.arange(0.0, 10), np.arange(0.0, -10, -1)],
        axis=-1).astype(aesara.config.floatX)

    sigmas_tt = at.ones((N_tt, ))
    sigmas_tt.name = "sigmas"

    pi_0_rv = srng.dirichlet(at.ones((M_tt, )), name="pi_0")
    Gamma_rv = srng.dirichlet(at.ones((M_tt, M_tt)), name="Gamma")

    S_0_rv = srng.categorical(pi_0_rv, name="S_0")

    def scan_fn(mus_t, sigma_t, S_tm2, S_tm1, Gamma_t):
        S_t = srng.categorical(Gamma_t[S_tm2], name="S_t")
        Y_t = srng.normal(mus_t[S_tm1], sigma_t, name="Y_t")
        return S_t, Y_t

    (S_rv, Y_rv), scan_updates = aesara.scan(
        fn=scan_fn,
        sequences=[mus_tt, sigmas_tt],
        non_sequences=[Gamma_rv],
        outputs_info=[{
            "initial": at.stack([S_0_rv, S_0_rv]),
            "taps": [-2, -1]
        }, {}],
        strict=True,
        name="scan_rv",
    )
    # Adding names should make output easier to read
    Y_rv.name = "Y_rv"
    # This `S_rv` outer-output is actually a `Subtensor` of the "real" output
    S_rv = S_rv.owner.inputs[0]
    S_rv.name = "S_rv"
    mus_in = Y_rv.owner.inputs[1]
    mus_in.name = "mus_in"
    sigmas_in = Y_rv.owner.inputs[2]
    sigmas_in.name = "sigmas_in"

    scan_args = ScanArgs.from_node(Y_rv.owner)

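    # `inner_in_mit_sot[0]` holds the tap inputs `[S_tm2, S_tm1]` for the
    # `[-2, -1]` taps; pick the `-1` tap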
    test_v = scan_args.inner_in_mit_sot[0][1]
    field_info = scan_args.find_among_fields(test_v)

    assert field_info.name == "inner_in_mit_sot"
    assert field_info.index == 0
    assert field_info.inner_index == 1
    assert field_info.agg_index == 3

    rm_info = scan_args._remove_from_fields(at.ones(2))
    assert rm_info is None

    rm_info = scan_args._remove_from_fields(test_v)

    assert rm_info.name == "inner_in_mit_sot"
    assert rm_info.index == 0
    assert rm_info.inner_index == 1
    assert rm_info.agg_index == 3
Example #30
def test_check_parameters_shape():
    conditions = [True, at.ones(10), at.ones(5)]
    assert check_parameters(1, *conditions).eval().shape == ()
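
Conceptually, `check_parameters` returns its first argument unchanged when
every condition holds, so the result keeps that value's (here scalar)
shape; a NumPy sketch (an illustration, not pymc's implementation):

def check_parameters_like(value, *conditions):
    if not all(np.all(c) for c in conditions):
        raise ValueError("parameter value out of bounds")
    return np.asarray(value)

assert check_parameters_like(1, True, np.ones(10), np.ones(5)).shape == ()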