Example #1
def test_assoccomm():
    x, a, b, c = tt.dvectors("xabc")
    test_expr = x + 1
    q = var()

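    # `applyo(op, args, term)` holds when `term` is `op` applied to `args`;
    # any of the three positions can be a logic variable.  The queries below
    # check a fully ground application, then solve for the operator, then
    # for the arguments.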
    res = run(1, q, applyo(tt.add, etuple(*test_expr.owner.inputs), test_expr))
    assert q == res[0]

    res = run(1, q, applyo(q, etuple(*test_expr.owner.inputs), test_expr))
    assert tt.add == res[0].reify()

    res = run(1, q, applyo(tt.add, q, test_expr))
    assert mt(tuple(test_expr.owner.inputs)) == res[0]

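    # `eq_comm` and `eq_assoc` unify terms up to commutativity and
    # associativity of operators registered as such (addition and
    # multiplication here).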
    x = var()
    res = run(0, x, eq_comm(mt.mul(a, b), mt.mul(b, x)))
    assert (mt(a), ) == res

    res = run(0, x, eq_comm(mt.add(a, b), mt.add(b, x)))
    assert (mt(a), ) == res

    (res, ) = run(0, x, eq_assoc(mt.add(a, b, c), mt.add(a, x)))
    assert res == mt(b + c)

    (res, ) = run(0, x, eq_assoc(mt.mul(a, b, c), mt.mul(a, x)))
    assert res == mt(b * c)
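
The same relations work on plain tuple terms once the operator is registered with kanren's fact store (the meta operators above are presumably registered by symbolic_pymc itself). A minimal, self-contained sketch; the toy 'add' operator and the expected outputs in the comments are illustrative, not taken from the test suite:

from kanren import fact, run, var
from kanren.assoccomm import associative, commutative, eq_assoc, eq_comm

# Register a plain operator symbol as commutative and associative.
fact(commutative, 'add')
fact(associative, 'add')

x = var()
run(1, x, eq_comm(('add', 1, 2), ('add', 2, x)))      # -> (1,)
run(1, x, eq_assoc(('add', 1, 2, 3), ('add', 1, x)))  # -> (('add', 2, 3),)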
Example #2
def test_assoccomm():
    from symbolic_pymc.relations import buildo

    x, a, b, c = tt.dvectors('xabc')
    test_expr = x + 1
    q = var('q')

    res = run(1, q, buildo(tt.add, test_expr.owner.inputs, test_expr))
    assert q == res[0]

    res = run(1, q, buildo(q, test_expr.owner.inputs, test_expr))
    assert tt.add == res[0].reify()

    res = run(1, q, buildo(tt.add, q, test_expr))
    assert mt(tuple(test_expr.owner.inputs)) == res[0]

    res = run(0, var('x'), eq_comm(mt.mul(a, b), mt.mul(b, var('x'))))
    assert (mt(a), ) == res

    res = run(0, var('x'), eq_comm(mt.add(a, b), mt.add(b, var('x'))))
    assert (mt(a), ) == res

    res = run(0, var('x'), (eq_assoc, mt.add(a, b, c), mt.add(a, var('x'))))

    # TODO: `res[0]` should return `etuple`s.  Since `eq_assoc` effectively
    # picks apart the results of `arguments(...)`, I don't know if we can
    # keep the `etuple`s around.  We might be able to convert the results
    # to `etuple`s automatically by wrapping `eq_assoc`, though.
    res_obj = etuple(*res[0]).eval_obj
    assert res_obj == mt(b + c)

    res = run(0, var('x'), (eq_assoc, mt.mul(a, b, c), mt.mul(a, var('x'))))
    res_obj = etuple(*res[0]).eval_obj
    assert res_obj == mt(b * c)
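
For reference, a minimal sketch of the `etuple` evaluation step used above, assuming the `etuples` package API (older symbolic_pymc bundled its own `etuple`, and newer `etuples` releases rename `eval_obj` to `evaled_obj`):

from operator import add

from etuples import etuple  # assumed import path; may differ by version

# An ExpressionTuple pairs a callable with its arguments; evaluating it
# applies the callable to those arguments.
et = etuple(add, 1, 2)
et.eval_obj  # -> 3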
Example #3
    def mul_trans(in_expr, out_expr):
        """Equate `2 * x` with `5 * x` in a Theano `scan`.

        I.e. from left-to-right, replace `2 * x[t-1]` with `5 * x[t-1]`.
        """
        arg_lv = var()
        inputs_lv, info_lv = var(), var()
        in_scan_lv = mt.Scan(inputs_lv, [mt.mul(2, arg_lv)], info_lv)
        out_scan_lv = mt.Scan(inputs_lv, [mt.mul(5, arg_lv)], info_lv)

        return lall(eq(in_expr, in_scan_lv), eq(out_expr, out_scan_lv))
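
The same goal-constructor pattern, sketched with core kanren on plain tuples instead of `mt.Scan` graphs; the toy ('mul', coeff, arg) terms are purely illustrative:

from kanren import eq, lall, run, var

def mul_trans_demo(in_expr, out_expr):
    """Relate ('mul', 2, arg) on the input side to ('mul', 5, arg) on the output side."""
    arg_lv = var()
    return lall(eq(in_expr, ('mul', 2, arg_lv)),
                eq(out_expr, ('mul', 5, arg_lv)))

q = var()
run(1, q, mul_trans_demo(('mul', 2, 'x[t-1]'), q))  # -> (('mul', 5, 'x[t-1]'),)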
Example #4
def test_kanren():
    # x, a, b = tt.dvectors('xab')
    #
    # with variables(x, a, b):
    #     assert b == run(1, x, eq(a + b, a + x))[0]
    #     assert b == run(1, x, eq(a * b, a * x))[0]

    a, b = mt.dvectors('ab')
    assert b == run(1, var('x'), eq(mt.add(a, b), mt.add(a, var('x'))))[0]
    assert b == run(1, var('x'), eq(mt.mul(a, b), mt.mul(a, var('x'))))[0]

    a_tt = tt.vector('a')
    R_tt = tt.matrix('R')
    F_t_tt = tt.matrix('F')
    V_tt = tt.matrix('V')
    beta_rv = MvNormalRV(a_tt, R_tt, name='\\beta')
    E_y_rv = F_t_tt.dot(beta_rv)
    Y_rv = MvNormalRV(E_y_rv, V_tt, name='y')

    beta_name_lv = var('beta_name')
    beta_size_lv = var('beta_size')
    beta_rng_lv = var('beta_rng')
    a_lv = var('a')
    R_lv = var('R')
    beta_prior_mt = mt.MvNormalRV(a_lv,
                                  R_lv,
                                  beta_size_lv,
                                  beta_rng_lv,
                                  name=beta_name_lv)

    y_name_lv = var('y_name')
    y_size_lv = var('y_size')
    y_rng_lv = var('y_rng')
    F_t_lv = var('f')
    V_lv = var('V')
    E_y_mt = mt.dot(F_t_lv, beta_prior_mt)

    Y_mt = mt.MvNormalRV(E_y_mt, V_lv, y_size_lv, y_rng_lv, name=y_name_lv)

    with variables(Y_mt):
        res, = run(0, Y_mt, (eq, Y_rv, Y_mt))
    assert res.reify() == Y_rv
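
The template-matching idea behind `Y_mt` above, sketched with core kanren on plain tuples; the 'NormalRV' tag and field layout are made up for illustration:

from kanren import eq, run, var

mu_lv, sd_lv = var('mu'), var('sd')
template = ('NormalRV', mu_lv, sd_lv)
concrete = ('NormalRV', 0.0, 1.0)

run(1, (mu_lv, sd_lv), eq(template, concrete))  # -> ((0.0, 1.0),)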
Example #5
def test_unification():
    x, y, a, b = tt.dvectors("xyab")
    x_s = tt.scalar("x_s")
    y_s = tt.scalar("y_s")
    c_tt = tt.constant(1, "c")
    d_tt = tt.constant(2, "d")

    x_l = var("x_l")
    y_l = var("y_l")

    assert a == reify(x_l, {x_l: a}).reify()
    test_expr = mt.add(1, mt.mul(2, x_l))
    test_reify_res = reify(test_expr, {x_l: a})
    assert graph_equal(test_reify_res.reify(), 1 + 2 * a)

    z = tt.add(b, a)
    assert {x_l: z} == unify(x_l, z)
    assert b == unify(mt.add(x_l, a), mt.add(b, a))[x_l].reify()

    res = unify(mt.inv(mt.add(x_l, a)), mt.inv(mt.add(b, y_l)))
    assert res[x_l].reify() == b
    assert res[y_l].reify() == a

    mt_expr_add = mt.add(x_l, y_l)

    # The parameters are vectors
    tt_expr_add_1 = tt.add(x, y)
    assert graph_equal(
        tt_expr_add_1,
        reify(mt_expr_add, unify(mt_expr_add, tt_expr_add_1)).reify())

    # The parameters are scalars
    tt_expr_add_2 = tt.add(x_s, y_s)
    assert graph_equal(
        tt_expr_add_2,
        reify(mt_expr_add, unify(mt_expr_add, tt_expr_add_2)).reify())

    # The parameters are constants
    tt_expr_add_3 = tt.add(c_tt, d_tt)
    assert graph_equal(
        tt_expr_add_3,
        reify(mt_expr_add, unify(mt_expr_add, tt_expr_add_3)).reify())
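
The bare unify/reify cycle that these assertions build on, sketched on plain tuples with only the `unification` package (no Theano or meta objects involved):

from unification import reify, unify, var

x_lv = var('x')
s = unify((1, x_lv, 3), (1, 2, 3), {})  # a substitution map: {~x: 2}
assert s[x_lv] == 2
assert reify((1, x_lv, 3), s) == (1, 2, 3)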
Example #6
def test_kanren_algebra():
    a, b = mt.dvectors("ab")
    assert b == run(1, var("x"), eq(mt.add(a, b), mt.add(a, var("x"))))[0]
    assert b == run(1, var("x"), eq(mt.mul(a, b), mt.mul(a, var("x"))))[0]
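
A reminder of `run`'s calling convention, which these one-liners rely on: the first argument caps the number of answers (0 means all of them) and the result is always a tuple of reified values, hence the trailing `[0]`. A minimal core-kanren sketch:

from kanren import eq, membero, run, var

x = var()
assert run(1, x, eq(x, 2)) == (2,)
assert set(run(0, x, membero(x, (1, 2, 3)))) == {1, 2, 3}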
Example #7
def test_pymc_normal_model():
    """Conduct a more in-depth test of PyMC3/Theano conversions for a specific model."""
    tt.config.compute_test_value = 'ignore'

    mu_X = tt.dscalar('mu_X')
    sd_X = tt.dscalar('sd_X')
    mu_Y = tt.dscalar('mu_Y')
    mu_X.tag.test_value = np.array(0., dtype=tt.config.floatX)
    sd_X.tag.test_value = np.array(1., dtype=tt.config.floatX)
    mu_Y.tag.test_value = np.array(1., dtype=tt.config.floatX)

    # We need something that uses transforms...
    with pm.Model() as model:
        X_rv = pm.Normal('X_rv', mu_X, sd=sd_X)
        S_rv = pm.HalfCauchy('S_rv',
                             beta=np.array(0.5, dtype=tt.config.floatX))
        Y_rv = pm.Normal('Y_rv', X_rv * S_rv, sd=S_rv)
        Z_rv = pm.Normal('Z_rv', X_rv + Y_rv, sd=sd_X, observed=10.)

    fgraph = model_graph(model, output_vars=[Z_rv])

    Z_rv_tt = canonicalize(fgraph, return_graph=False)

    # This will break comparison if we don't reuse it
    rng = Z_rv_tt.owner.inputs[1].owner.inputs[-1]

    mu_X_ = mt.dscalar('mu_X')
    sd_X_ = mt.dscalar('sd_X')
    tt.config.compute_test_value = 'ignore'
    X_rv_ = mt.NormalRV(mu_X_, sd_X_, None, rng, name='X_rv')
    S_rv_ = mt.HalfCauchyRV(np.array(0., dtype=tt.config.floatX),
                            np.array(0.5, dtype=tt.config.floatX),
                            None,
                            rng,
                            name='S_rv')
    Y_rv_ = mt.NormalRV(mt.mul(X_rv_, S_rv_), S_rv_, None, rng, name='Y_rv')
    Z_rv_ = mt.NormalRV(mt.add(X_rv_, Y_rv_), sd_X, None, rng, name='Z_rv')
    obs_ = mt(Z_rv.observations)
    Z_rv_obs_ = mt.observed(obs_, Z_rv_)

    Z_rv_meta = mt(canonicalize(Z_rv_obs_.reify(), return_graph=False))

    assert mt(Z_rv_tt) == Z_rv_meta

    # Now, let's try that with multiple outputs.
    fgraph.disown()
    fgraph = model_graph(model, output_vars=[Y_rv, Z_rv])

    assert len(fgraph.variables) == 25

    Y_new_rv = walk(Y_rv, fgraph.memo)
    S_new_rv = walk(S_rv, fgraph.memo)
    X_new_rv = walk(X_rv, fgraph.memo)
    Z_new_rv = walk(Z_rv, fgraph.memo)

    # Make sure our new vars are actually in the graph and where
    # they should be.
    assert Y_new_rv == fgraph.outputs[0]
    assert Z_new_rv == fgraph.outputs[1]
    assert X_new_rv in fgraph.variables
    assert S_new_rv in fgraph.variables
    assert isinstance(Z_new_rv.owner.op, Observed)

    # Let's only look at the variables involved in the `Z_rv` subgraph.
    Z_vars = theano.gof.graph.variables(theano.gof.graph.inputs([Z_new_rv]),
                                        [Z_new_rv])

    # Let's filter for only the `RandomVariables` with names.
    Z_vars_count = Counter([
        n.name for n in Z_vars
        if n.name and n.owner and isinstance(n.owner.op, RandomVariable)
    ])

    # Each new RV should be present and only occur once.
    assert Y_new_rv.name in Z_vars_count.keys()
    assert X_new_rv.name in Z_vars_count.keys()
    assert Z_new_rv.owner.inputs[1].name in Z_vars_count.keys()
    assert all(v == 1 for v in Z_vars_count.values())
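
A purely illustrative sketch of what the `walk(..., fgraph.memo)` calls above do: follow an object through an association map until no further mapping exists. The test uses the `walk` provided by its unification library; the standalone helper below is only a stand-in for illustration:

def walk_demo(key, assoc_map):
    """Follow `key` through `assoc_map` until it no longer maps to anything."""
    while key in assoc_map:
        key = assoc_map[key]
    return key

assert walk_demo('a', {'a': 'b', 'b': 3}) == 3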
Example #8
def test_unification():
    x, y, a, b = tt.dvectors('xyab')
    x_s = tt.scalar('x_s')
    y_s = tt.scalar('y_s')
    c_tt = tt.constant(1, 'c')
    d_tt = tt.constant(2, 'd')

    x_l = var('x_l')
    y_l = var('y_l')

    assert a == reify(x_l, {x_l: a}).reify()
    test_expr = mt.add(1, mt.mul(2, x_l))
    test_reify_res = reify(test_expr, {x_l: a})
    assert graph_equal(test_reify_res.reify(), 1 + 2 * a)

    z = tt.add(b, a)
    assert {x_l: z} == unify(x_l, z)
    assert b == unify(mt.add(x_l, a), mt.add(b, a))[x_l].reify()

    res = unify(mt.inv(mt.add(x_l, a)), mt.inv(mt.add(b, y_l)))
    assert res[x_l].reify() == b
    assert res[y_l].reify() == a

    # TODO: This produces a `DimShuffle` so that the scalar constant `1`
    # will match the dimensions of the vector `b`.  That `DimShuffle` isn't
    # handled by the logic variable form.
    # assert unify(mt.add(x_l, 1), mt.add(b_l, 1))[x] == b

    with variables(x):
        assert unify(x + 1, b + 1)[x].reify() == b

    assert unify(mt.add(x_l, a), mt.add(b, a))[x_l].reify() == b
    with variables(x):
        assert unify(x, b)[x] == b
        assert unify([x], [b])[x] == b
        assert unify((x, ), (b, ))[x] == b
        assert unify(x + 1, b + 1)[x].reify() == b
        assert unify(x + a, b + a)[x].reify() == b

    with variables(x):
        assert unify(a + b, a + x)[x].reify() == b

    mt_expr_add = mt.add(x_l, y_l)

    # The parameters are vectors
    tt_expr_add_1 = tt.add(x, y)
    assert graph_equal(
        tt_expr_add_1,
        reify(mt_expr_add, unify(mt_expr_add, tt_expr_add_1)).reify())

    # The parameters are scalars
    tt_expr_add_2 = tt.add(x_s, y_s)
    assert graph_equal(
        tt_expr_add_2,
        reify(mt_expr_add, unify(mt_expr_add, tt_expr_add_2)).reify())

    # The parameters are constants
    tt_expr_add_3 = tt.add(c_tt, d_tt)
    assert graph_equal(
        tt_expr_add_3,
        reify(mt_expr_add, unify(mt_expr_add, tt_expr_add_3)).reify())
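
The `variables(...)` context manager used above comes from the unification side: inside the block, the listed objects unify as if they were logic variables, which is what lets the raw Theano variable `x` be solved for. A minimal sketch on a plain string token, assuming the `unification` package API (the 'add' tag and values are arbitrary):

from unification import unify, variables

with variables('x'):
    s = unify(('add', 'x', 3), ('add', 2, 3), {})

# Inside the context, the string 'x' behaves like a logic variable.
assert s == {'x': 2}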