Example #1
def test_Subtensor_lift_restrictions():
    rng = shared(np.random.RandomState(1233532), borrow=False)

    std = vector("std")
    std.tag.test_value = np.array([1e-5, 2e-5, 3e-5], dtype=config.floatX)
    x = normal(aet.arange(2), aet.ones(2), rng=rng)
    y = x[1]
    # The non-`Subtensor` client depends on the RNG state, so we can't perform
    # the lift
    z = x - y

    fg = FunctionGraph([rng], [z], clone=False)
    _ = EquilibriumOptimizer([local_subtensor_rv_lift],
                             max_use_ratio=100).apply(fg)

    subtensor_node = fg.outputs[0].owner.inputs[1].owner.inputs[0].owner
    assert subtensor_node == y.owner
    assert isinstance(subtensor_node.op, Subtensor)
    assert subtensor_node.inputs[0].owner.op == normal

    # The non-`Subtensor` client doesn't depend on the RNG state, so we can
    # perform the lift
    z = aet.ones(x.shape) - x[1]

    fg = FunctionGraph([rng], [z], clone=False)
    EquilibriumOptimizer([local_subtensor_rv_lift],
                         max_use_ratio=100).apply(fg)

    rv_node = fg.outputs[0].owner.inputs[1].owner.inputs[0].owner
    assert rv_node.op == normal
    assert isinstance(rv_node.inputs[-1].owner.op, Subtensor)
    assert isinstance(rv_node.inputs[-2].owner.op, Subtensor)
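
In isolation, the rewrite that `local_subtensor_rv_lift` performs looks like the following minimal sketch (core Aesara API only; the rewrite's module path varies between Aesara versions): indexing into a draw is replaced by a draw over the indexed parameters.

import aesara.tensor as at
from aesara.tensor.random.basic import normal

mu = at.arange(2)
sigma = at.ones(2)

before = normal(mu, sigma)[1]    # draw a length-2 vector, then index it
after = normal(mu[1], sigma[1])  # draw the single element directly

# `before` and `after` agree in distribution, but they advance a shared RNG
# differently, which is why the lift is blocked whenever another client
# (like `z = x - y` above) still reads the full, un-indexed draw.
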
Example #2
def test_Dimshuffle_lift_restrictions():
    rng = shared(np.random.RandomState(1233532), borrow=False)

    x = normal(aet.arange(2).reshape((2, )), 100, size=(2, 2, 2), rng=rng)
    y = x.dimshuffle(1, 0, 2)
    # The non-`Dimshuffle` client depends on the RNG state, so we can't
    # perform the lift
    z = x - y

    fg = FunctionGraph([rng], [z], clone=False)
    _ = EquilibriumOptimizer([local_dimshuffle_rv_lift],
                             max_use_ratio=100).apply(fg)

    dimshuffle_node = fg.outputs[0].owner.inputs[1].owner
    assert dimshuffle_node == y.owner
    assert isinstance(dimshuffle_node.op, DimShuffle)
    assert dimshuffle_node.inputs[0].owner.op == normal

    # The non-`Dimshuffle` client doesn't depend on the RNG state, so we can
    # perform the lift
    z = aet.ones(x.shape) - y

    fg = FunctionGraph([rng], [z], clone=False)
    EquilibriumOptimizer([local_dimshuffle_rv_lift],
                         max_use_ratio=100).apply(fg)

    rv_node = fg.outputs[0].owner.inputs[1].owner
    assert rv_node.op == normal
    assert isinstance(rv_node.inputs[-1].owner.op, DimShuffle)
    assert isinstance(rv_node.inputs[-2].owner.op, DimShuffle)
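
The analogous sketch for the `DimShuffle` case (again core Aesara API, with hypothetical shapes): transposing a draw becomes a draw over transposed parameters.

import aesara.tensor as at
from aesara.tensor.random.basic import normal

mu = at.arange(6).reshape((2, 3))

before = normal(mu, 1.0).dimshuffle(1, 0)  # draw (2, 3), then transpose
after = normal(mu.dimshuffle(1, 0), 1.0)   # draw (3, 2) directly

# As with the `Subtensor` lift, the two graphs agree only in distribution,
# so the rewrite must not fire while another client uses the original draw.
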
Example #3
def test_change_rv_size():
    loc = at.as_tensor_variable([1, 2])
    rv = normal(loc=loc)
    assert rv.ndim == 1
    assert tuple(rv.shape.eval()) == (2, )

    with pytest.raises(ShapeError, match="must be ≤1-dimensional"):
        change_rv_size(rv, new_size=[[2, 3]])
    with pytest.raises(ShapeError, match="must be ≤1-dimensional"):
        change_rv_size(rv, new_size=at.as_tensor_variable([[2, 3], [4, 5]]))

    rv_new = change_rv_size(rv, new_size=(3, ), expand=True)
    assert rv_new.ndim == 2
    assert tuple(rv_new.shape.eval()) == (3, 2)

    # Make sure that the shape used to determine the expanded size doesn't
    # depend on the old `RandomVariable`.
    rv_new_ancestors = set(ancestors((rv_new, )))
    assert loc in rv_new_ancestors
    assert rv not in rv_new_ancestors

    rv_newer = change_rv_size(rv_new, new_size=(4, ), expand=True)
    assert rv_newer.ndim == 3
    assert tuple(rv_newer.shape.eval()) == (4, 3, 2)

    # Make sure we avoid introducing a `Cast` by converting the new size before
    # constructing the new `RandomVariable`
    rv = normal(0, 1)
    new_size = np.array([4, 3], dtype="int32")
    rv_newer = change_rv_size(rv, new_size=new_size, expand=False)
    assert rv_newer.ndim == 2
    assert isinstance(rv_newer.owner.inputs[1], Constant)
    assert tuple(rv_newer.shape.eval()) == (4, 3)

    rv = normal(0, 1)
    new_size = at.as_tensor(np.array([4, 3], dtype="int32"))
    rv_newer = change_rv_size(rv, new_size=new_size, expand=True)
    assert rv_newer.ndim == 2
    assert tuple(rv_newer.shape.eval()) == (4, 3)

    rv = normal(0, 1)
    new_size = at.as_tensor(2, dtype="int32")
    rv_newer = change_rv_size(rv, new_size=new_size, expand=True)
    assert rv_newer.ndim == 1
    assert tuple(rv_newer.shape.eval()) == (2, )
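
For reference, the semantics exercised above as a minimal usage sketch. Note that `change_rv_size` is PyMC's helper, not part of Aesara; the import below assumes the `pymc.aesaraf` location it had around the time of these tests.

import aesara.tensor as at
from aesara.tensor.random.basic import normal
from pymc.aesaraf import change_rv_size

rv = normal(at.as_tensor_variable([1.0, 2.0]))                # shape (2,)
expanded = change_rv_size(rv, new_size=(3,), expand=True)     # prepend: (3, 2)
replaced = change_rv_size(rv, new_size=(3, 2), expand=False)  # replace size

assert tuple(expanded.shape.eval()) == (3, 2)
assert tuple(replaced.shape.eval()) == (3, 2)
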
Example #4
File: test_jax.py Project: mgorny/aesara
def test_random_stats(at_dist, dist_params, rng, size):
    # The RNG states are not 1:1, so the best we can do is check some summary
    # statistics of the samples
    out = normal(*dist_params, rng=rng, size=size)
    fgraph = FunctionGraph([out.owner.inputs[0]], [out], clone=False)

    def assert_fn(x, y):
        (x,) = x
        (y,) = y
        assert x.dtype.kind == y.dtype.kind

        d = 2 if config.floatX == "float64" else 1
        np.testing.assert_array_almost_equal(np.abs(x.mean()), np.abs(y.mean()), d)

    compare_jax_and_py(fgraph, [], assert_fn=assert_fn)
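
A rough sketch of the idea behind `compare_jax_and_py` for random graphs, assuming an installed `jax` and Aesara's registered "JAX" compilation mode: the NumPy and JAX RNG streams are not bitwise comparable, so only summary statistics of the draws are checked.

import aesara
import numpy as np
from aesara.tensor.random.utils import RandomStream

srng = RandomStream(seed=123)
out = srng.normal(0.0, 1.0, size=(50000,))

py_draws = aesara.function([], out)()
jax_draws = aesara.function([], out, mode="JAX")()

# Same distribution, different streams: compare moments, not samples.
np.testing.assert_allclose(py_draws.mean(), jax_draws.mean(), atol=0.05)
np.testing.assert_allclose(py_draws.std(), jax_draws.std(), atol=0.05)
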
Example #5
def test_normal_infer_shape():
    M_aet = iscalar("M")
    M_aet.tag.test_value = 3
    sd_aet = scalar("sd")
    sd_aet.tag.test_value = np.array(1.0, dtype=config.floatX)

    test_params = [
        ([aet.as_tensor_variable(np.array(1.0, dtype=config.floatX)),
          sd_aet], None),
        (
            [
                aet.as_tensor_variable(np.array(1.0, dtype=config.floatX)),
                sd_aet
            ],
            (M_aet, ),
        ),
        (
            [
                aet.as_tensor_variable(np.array(1.0, dtype=config.floatX)),
                sd_aet
            ],
            (2, M_aet),
        ),
        ([aet.zeros((M_aet, )), sd_aet], None),
        ([aet.zeros((M_aet, )), sd_aet], (M_aet, )),
        ([aet.zeros((M_aet, )), sd_aet], (2, M_aet)),
        ([aet.zeros((M_aet, )), aet.ones((M_aet, ))], None),
        ([aet.zeros((M_aet, )), aet.ones((M_aet, ))], (2, M_aet)),
        (
            [
                np.array([[-1, 20], [300, -4000]], dtype=config.floatX),
                np.array([[1e-6, 2e-6]], dtype=config.floatX),
            ],
            (3, 2, 2),
        ),
        (
            [
                np.array([1], dtype=config.floatX),
                np.array([10], dtype=config.floatX)
            ],
            (1, 2),
        ),
    ]
    for args, size in test_params:
        rv = normal(*args, size=size)
        rv_shape = tuple(normal._infer_shape(size or (), args, None))
        assert tuple(get_test_value(rv_shape)) == tuple(
            get_test_value(rv).shape)
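
The shape rule these cases exercise, as a standalone sketch: an explicit `size` fixes the output shape, and otherwise the output shape is the broadcast of the parameter shapes.

import numpy as np
from aesara.tensor.random.basic import normal

mu = np.array([[-1.0, 20.0], [300.0, -4000.0]])  # shape (2, 2)
sigma = np.array([[1e-6, 2e-6]])                 # shape (1, 2)

assert tuple(normal(mu, sigma).shape.eval()) == (2, 2)  # broadcast of params
assert tuple(normal(mu, sigma, size=(3, 2, 2)).shape.eval()) == (3, 2, 2)
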
Example #6
def test_normal_ShapeFeature():
    M_aet = iscalar("M")
    M_aet.tag.test_value = 3
    sd_aet = scalar("sd")
    sd_aet.tag.test_value = np.array(1.0, dtype=config.floatX)

    d_rv = normal(aet.ones((M_aet, )), sd_aet, size=(2, M_aet))
    d_rv.tag.test_value  # verify that a test value was computed for `d_rv`

    fg = FunctionGraph(
        [i for i in graph_inputs([d_rv]) if not isinstance(i, Constant)],
        [d_rv],
        clone=False,
        features=[ShapeFeature()],
    )
    s1, s2 = fg.shape_feature.shape_of[d_rv]

    assert get_test_value(s1) == get_test_value(d_rv).shape[0]
    assert get_test_value(s2) == get_test_value(d_rv).shape[1]
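
The same information is reachable through the public API without constructing a `FunctionGraph` by hand: a minimal sketch in which the shape graph of a `RandomVariable` is compiled on its own, and the default rewrites can usually derive it from `size` and the parameter shapes alone.

import aesara
import aesara.tensor as at
from aesara.tensor.random.basic import normal

M = at.iscalar("M")
d_rv = normal(at.ones((M,)), 1.0, size=(2, M))

shape_fn = aesara.function([M], d_rv.shape)
assert tuple(shape_fn(3)) == (2, 3)
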
Example #7
def test_normal_infer_shape(M, sd, size):
    rv = normal(M, sd, size=size)
    rv_shape = list(normal._infer_shape(size or (), [M, sd], None))

    all_args = (M, sd) + size
    fn_inputs = [
        i
        for i in graph_inputs([a for a in all_args if isinstance(a, Variable)])
        if not isinstance(i, (Constant, SharedVariable))
    ]
    aesara_fn = function(fn_inputs, [at.as_tensor(o) for o in rv_shape + [rv]],
                         mode=py_mode)

    *rv_shape_val, rv_val = aesara_fn(*[
        i.tag.test_value for i in fn_inputs
        if not isinstance(i, (SharedVariable, Constant))
    ])

    assert tuple(rv_shape_val) == tuple(rv_val.shape)
Example #8
def test_inplace_optimization():

    out = normal(0, 1)

    assert out.owner.op.inplace is False

    f = function(
        [],
        out,
        mode=inplace_mode,
    )

    (new_out, ) = f.maker.fgraph.outputs
    assert new_out.type == out.type
    assert isinstance(new_out.owner.op, type(out.owner.op))
    assert new_out.owner.op.inplace is True
    assert all(
        np.array_equal(a.data, b.data)
        for a, b in zip(new_out.owner.inputs[1:], out.owner.inputs[1:]))
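
The index arithmetic in these assertions follows from the fixed layout of a `RandomVariable` node's inputs, shown in this minimal sketch: `inputs[0]` is the RNG, `inputs[1]` the size, `inputs[2]` the dtype, and the rest are the distribution parameters, so `inputs[-2]` and `inputs[-1]` are `mu` and `sigma` for `normal`.

import aesara.tensor as at
from aesara.tensor.random.basic import normal

x = normal(at.zeros(2), at.ones(2))
rng, size, dtype, mu, sigma = x.owner.inputs

# Comparing `inputs[1:]` above therefore checks that size, dtype, and the
# parameters all survive the in-place rewrite; only the RNG input changes.
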
Example #9
def test_change_rv_size_default_update():
    rng = aesara.shared(np.random.default_rng(0))
    x = normal(rng=rng)

    # Test that "traditional" default_update is updated
    rng.default_update = x.owner.outputs[0]
    new_x = change_rv_size(x, new_size=(2, ))
    assert rng.default_update is not x.owner.outputs[0]
    assert rng.default_update is new_x.owner.outputs[0]

    # Test that "non-traditional" default_update is left unchanged
    next_rng = aesara.shared(np.random.default_rng(1))
    rng.default_update = next_rng
    new_x = change_rv_size(x, new_size=(2, ))
    assert rng.default_update is next_rng

    # Test that default_update is not set if there was none before
    del rng.default_update
    new_x = change_rv_size(x, new_size=(2, ))
    assert not hasattr(rng, "default_update")
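
For context, a minimal sketch of the `default_update` mechanism itself (core Aesara API): a shared variable with `default_update` set is updated implicitly by every compiled function that uses it, which is what makes successive draws differ.

import aesara
import numpy as np
from aesara.tensor.random.basic import normal

rng = aesara.shared(np.random.default_rng(0))
x = normal(rng=rng)
rng.default_update = x.owner.outputs[0]  # outputs[0] is the updated RNG state

f = aesara.function([], x)
assert f() != f()  # the RNG state advances between calls
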
Example #10
File: test_opt.py Project: mgorny/aesara
def test_Subtensor_lift_restrictions():
    rng = shared(np.random.default_rng(1233532), borrow=False)

    std = vector("std")
    std.tag.test_value = np.array([1e-5, 2e-5, 3e-5], dtype=config.floatX)
    x = normal(at.arange(2), at.ones(2), rng=rng)
    y = x[1]
    # The non-`Subtensor` client depends on the RNG state, so we can't perform
    # the lift
    z = x - y

    fg = FunctionGraph([rng], [z], clone=False)
    _ = EquilibriumOptimizer([local_subtensor_rv_lift],
                             max_use_ratio=100).apply(fg)

    subtensor_node = fg.outputs[0].owner.inputs[1].owner.inputs[0].owner
    assert subtensor_node == y.owner
    assert isinstance(subtensor_node.op, Subtensor)
    assert subtensor_node.inputs[0].owner.op == normal

    z = at.ones(x.shape) - x[1]

    # We add `x` as an output to make sure that `is_rv_used_in_graph` handles
    # `"output"` "nodes" correctly.
    fg = FunctionGraph([rng], [z, x], clone=False)
    EquilibriumOptimizer([local_subtensor_rv_lift],
                         max_use_ratio=100).apply(fg)

    assert fg.outputs[0] == z
    assert fg.outputs[1] == x

    # The non-`Subtensor` client doesn't depend on the RNG state, so we can
    # perform the lift
    fg = FunctionGraph([rng], [z], clone=False)
    EquilibriumOptimizer([local_subtensor_rv_lift],
                         max_use_ratio=100).apply(fg)

    rv_node = fg.outputs[0].owner.inputs[1].owner.inputs[0].owner
    assert rv_node.op == normal
    assert isinstance(rv_node.inputs[-1].owner.op, Subtensor)
    assert isinstance(rv_node.inputs[-2].owner.op, Subtensor)
Example #11
File: test_opt.py Project: mgorny/aesara
def test_inplace_optimization():

    out = normal(0, 1)
    out.owner.inputs[0].default_update = out.owner.outputs[0]

    assert out.owner.op.inplace is False

    f = function(
        [],
        out,
        mode="FAST_RUN",
    )

    (new_out, new_rng) = f.maker.fgraph.outputs
    assert new_out.type == out.type
    assert isinstance(new_out.owner.op, type(out.owner.op))
    assert new_out.owner.op.inplace is True
    assert all(
        np.array_equal(a.data, b.data)
        for a, b in zip(new_out.owner.inputs[2:], out.owner.inputs[2:]))
    assert np.array_equal(new_out.owner.inputs[1].data, [])
Example #12
def test_walk_model():
    d = at.vector("d")
    b = at.vector("b")
    c = uniform(0.0, d)
    c.name = "c"
    e = at.log(c)
    a = normal(e, b)
    a.name = "a"

    test_graph = at.exp(a + 1)
    res = list(walk_model((test_graph,)))
    assert a in res
    assert c not in res

    res = list(walk_model((test_graph,), walk_past_rvs=True))
    assert a in res
    assert c in res

    res = list(walk_model((test_graph,), walk_past_rvs=True, stop_at_vars={e}))
    assert a in res
    assert c not in res
Example #13
File: test_opt.py Project: mgorny/aesara
def test_Dimshuffle_lift_restrictions():
    rng = shared(np.random.default_rng(1233532), borrow=False)

    x = normal(at.arange(2).reshape((2, )), 100, size=(2, 2, 2), rng=rng)
    y = x.dimshuffle(1, 0, 2)
    # The non-`Dimshuffle` client depends on the RNG state, so we can't
    # perform the lift
    z = x - y

    fg = FunctionGraph([rng], [z, y], clone=False)
    _ = EquilibriumOptimizer([local_dimshuffle_rv_lift],
                             max_use_ratio=100).apply(fg)

    dimshuffle_node = fg.outputs[0].owner.inputs[1].owner
    assert dimshuffle_node == y.owner
    assert isinstance(dimshuffle_node.op, DimShuffle)
    assert dimshuffle_node.inputs[0].owner.op == normal

    z = at.ones(x.shape) - y

    # We add `x` as an output to make sure that `is_rv_used_in_graph` handles
    # `"output"` "nodes" correctly.
    fg = FunctionGraph([rng], [z, x], clone=False)
    EquilibriumOptimizer([local_dimshuffle_rv_lift],
                         max_use_ratio=100).apply(fg)

    assert fg.outputs[0] == z
    assert fg.outputs[1] == x

    # The non-`Dimshuffle` client doesn't depend on the RNG state, so we can
    # perform the lift
    fg = FunctionGraph([rng], [z], clone=False)
    EquilibriumOptimizer([local_dimshuffle_rv_lift],
                         max_use_ratio=100).apply(fg)

    rv_node = fg.outputs[0].owner.inputs[1].owner
    assert rv_node.op == normal
    assert isinstance(rv_node.inputs[-1].owner.op, DimShuffle)
    assert isinstance(rv_node.inputs[-2].owner.op, DimShuffle)
Example #14
def test_observed():
    rv_var = normal(0, 1, size=3)
    obs_var = observed(rv_var, np.array([0.2, 0.1, -2.4], dtype=config.floatX))

    assert obs_var.owner.inputs[0] is rv_var

    with raises(TypeError):
        observed(rv_var, np.array([1, 2], dtype=int))

    with raises(TypeError):
        observed(rv_var, np.array([[1.0, 2.0]], dtype=rv_var.dtype))

    obs_rv = observed(None, np.array([0.2, 0.1, -2.4], dtype=config.floatX))

    assert isinstance(obs_rv.owner.inputs[0].type, NoneTypeT)

    rv_val = vector()
    rv_val.tag.test_value = np.array([0.2, 0.1, -2.4], dtype=config.floatX)

    obs_var = observed(rv_var, rv_val)

    with raises(NullTypeGradError):
        grad(obs_var.sum(), [rv_val])
Example #15
def test_random():
    rng = shared(np.random.RandomState(123))
    out = normal(rng=rng)
    fgraph = FunctionGraph([out.owner.inputs[0]], [out], clone=False)
    compare_jax_and_py(fgraph, [])