Example #1
    def test_allow_input_downcast_int(self):
        a = tensor.wvector("a")  # int16
        b = tensor.bvector("b")  # int8
        c = tensor.bscalar("c")  # int8

        f = pfunc([a, b, c], (a + b + c), allow_input_downcast=True)
        # Overflow in a, b, or c is silently ignored: the value wraps
        # around modulo the range of the target dtype.
        assert f([2**20], [1], 0) == 1  # 2**20 wraps to 0 in int16
        assert f([3], [312], 0) == 59  # 312 wraps to 56 in int8
        assert f([3], [1], 806) == 42  # 806 wraps to 38 in int8

        g = pfunc([a, b, c], (a + b + c), allow_input_downcast=False)
        # All values are in range. Since they're not ndarrays (but lists
        # or scalars), they will be converted, and their value checked.
        assert np.all(g([3], [6], 0) == 9)

        # Values are in range, but a dtype too large has explicitly been given
        # For performance reasons, no check of the data is explicitly performed
        # (It might be OK to change this in the future.)
        with pytest.raises(TypeError):
            g([3], np.array([6], dtype="int16"), 0)

        # Value too big for b, raises TypeError
        with pytest.raises(TypeError):
            g([3], [312], 0)

        h = pfunc([a, b, c], (a + b + c))  # Default: allow_input_downcast=None
        # With the default (None), behaviour matches allow_input_downcast=False
        assert np.all(h([3], [6], 0) == 9)
        with pytest.raises(TypeError):
            h([3], np.array([6], dtype="int16"), 0)
        with pytest.raises(TypeError):
            h([3], [312], 0)
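These snippets assume the test module's usual imports (numpy as np, pytest, the tensor module, pfunc, and In); the exact import paths differ between Theano and Aesara versions. The same function-level switch is also available on the public theano.function API; a minimal standalone sketch (Theano-style imports assumed, Aesara's aesara.function accepts the same keyword):

import numpy as np
import theano
import theano.tensor as tt

x = tt.fvector("x")  # float32 vector
f = theano.function([x], x * 2, allow_input_downcast=True)

# float64 data is silently downcast to float32
print(f(np.array([0.5, 1.5], dtype="float64")))  # -> [1. 3.]

g = theano.function([x], x * 2, allow_input_downcast=False)
# the same float64 call on g raises TypeError: the downcast could lose precision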
Example #2
    def test_param_allow_downcast_int(self):
        a = tensor.wvector("a")  # int16
        b = tensor.bvector("b")  # int8
        c = tensor.bscalar("c")  # int8
        f = pfunc(
            [
                In(a, allow_downcast=True),
                In(b, allow_downcast=False),
                In(c, allow_downcast=None),
            ],
            (a + b + c),
        )

        # Both values are in range. Since they're not ndarrays (but lists),
        # they will be converted, and their value checked.
        assert np.all(f([3], [6], 1) == 10)

        # Values are in range, but a dtype too large has explicitly been given
        # For performance reasons, no check of the data is explicitly performed
        # (It might be OK to change this in the future.)
        with pytest.raises(TypeError):
            f([3], np.array([6], dtype="int16"), 1)

        # a overflows (2**20 wraps to 0 in int16), silently ignored
        assert np.all(f([2**20], np.ones(1, dtype="int8"), 1) == 2)

        # Value too big for b, raises TypeError
        with pytest.raises(TypeError):
            f([3], [312], 1)

        # Value too big for c, raises TypeError
        with pytest.raises(TypeError):
            f([3], [6], 806)
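Example #2 uses the per-input form of the same switch: wrapping a variable in In(..., allow_downcast=...) overrides the function-level default for that input only. A minimal standalone sketch of the same idea (Theano-style imports assumed):

import numpy as np
import theano
import theano.tensor as tt
from theano import In

x = tt.fvector("x")  # float32
y = tt.fvector("y")  # float32

# x may be downcast, y must match its dtype exactly
f = theano.function(
    [In(x, allow_downcast=True), In(y, allow_downcast=False)],
    x + y,
)

x64 = np.array([1.0], dtype="float64")
y32 = np.array([2.0], dtype="float32")
print(f(x64, y32))  # x is silently downcast to float32 -> [3.]
# passing float64 for y instead would raise TypeError, like b and c above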
Example #3
def test_replacements(binomial_model_inference):
    d = aet.bscalar()
    d.tag.test_value = 1
    approx = binomial_model_inference.approx
    p = approx.model.p
    p_t = p**3
    p_s = approx.sample_node(p_t)
    if aesara.config.compute_test_value != "off":
        assert p_s.tag.test_value.shape == p_t.tag.test_value.shape
    sampled = [p_s.eval() for _ in range(100)]
    assert any(map(operator.ne, sampled[1:], sampled[:-1]))  # stochastic

    p_d = approx.sample_node(p_t, deterministic=True)
    sampled = [p_d.eval() for _ in range(100)]
    assert all(map(operator.eq, sampled[1:], sampled[:-1]))  # deterministic

    p_r = approx.sample_node(p_t, deterministic=d)
    sampled = [p_r.eval({d: 1}) for _ in range(100)]
    assert all(map(operator.eq, sampled[1:], sampled[:-1]))  # deterministic
    sampled = [p_r.eval({d: 0}) for _ in range(100)]
    assert any(map(operator.ne, sampled[1:], sampled[:-1]))  # stochastic
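The binomial_model_inference fixture is not shown in this snippet; in PyMC's test suite it provides a variational inference object built on a simple binomial model, and the test body additionally relies on import operator, import aesara, and import aesara.tensor as aet (later spelled at). A rough, illustrative stand-in for that fixture (pymc3-era API assumed; names and data are made up, not the actual fixture):

import numpy as np
import pymc3 as pm
import pytest

@pytest.fixture
def binomial_model_inference():
    # Illustrative only: a Beta prior on p observed through a Binomial,
    # wrapped in an ADVI inference object whose .approx is used above.
    data = np.random.binomial(n=10, p=0.2, size=100)
    with pm.Model():
        p = pm.Beta("p", alpha=1.0, beta=1.0)
        pm.Binomial("obs", p=p, n=10, observed=data)
        inference = pm.ADVI()
        inference.fit(100)
    return inference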
Example #4
def test_replacements(binomial_model_inference):
    d = at.bscalar()
    d.tag.test_value = 1
    approx = binomial_model_inference.approx
    p = approx.model.p
    p_t = p**3
    p_s = approx.sample_node(p_t)
    assert not any(
        isinstance(n.owner.op, aesara.tensor.random.basic.BetaRV)
        for n in aesara.graph.ancestors([p_s])
        if n.owner
    ), "p should be replaced"
    if aesara.config.compute_test_value != "off":
        assert p_s.tag.test_value.shape == p_t.tag.test_value.shape
    sampled = [p_s.eval() for _ in range(100)]
    assert any(map(operator.ne, sampled[1:], sampled[:-1]))  # stochastic
    p_z = approx.sample_node(p_t, deterministic=False, size=10)
    assert p_z.shape.eval() == (10,)
    try:
        p_z = approx.sample_node(p_t, deterministic=True, size=10)
        assert p_z.shape.eval() == (10,)
    except NotImplementedInference:
        # not every approximation supports deterministic sampling
        pass

    try:
        p_d = approx.sample_node(p_t, deterministic=True)
        sampled = [p_d.eval() for _ in range(100)]
        assert all(map(operator.eq, sampled[1:], sampled[:-1]))  # deterministic
    except NotImplementedInference:
        # skipped for approximations without deterministic sampling
        pass

    p_r = approx.sample_node(p_t, deterministic=d)
    sampled = [p_r.eval({d: 1}) for _ in range(100)]
    assert all(map(operator.eq, sampled[1:], sampled[:-1]))  # deterministic
    sampled = [p_r.eval({d: 0}) for _ in range(100)]
    assert any(map(operator.ne, sampled[1:], sampled[:-1]))  # stochastic
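Both versions decide stochastic vs. deterministic by comparing consecutive draws pairwise; a tiny illustration of that pattern with plain NumPy (no PyMC required):

import operator
import numpy as np

deterministic = [np.array(0.3)] * 5
stochastic = [np.array(v) for v in (0.1, 0.3, 0.2, 0.3, 0.4)]

# every consecutive pair equal -> treated as deterministic
assert all(map(operator.eq, deterministic[1:], deterministic[:-1]))
# at least one consecutive pair differs -> treated as stochastic
assert any(map(operator.ne, stochastic[1:], stochastic[:-1]))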