Example #1
def test_convert_rv_to_dist_shape():

    # Make sure we use the `ShapeFeature` to get the shape info
    X_rv = NormalRV(np.r_[1, 2], 2.0, name="X_rv")
    fgraph = FunctionGraph(tt_inputs([X_rv]), [X_rv],
                           features=[tt.opt.ShapeFeature()])

    with pm.Model():
        res = convert_rv_to_dist(fgraph.outputs[0].owner, None)

    assert isinstance(res.distribution, pm.Normal)
    assert np.array_equal(res.distribution.shape, np.r_[2])
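For context, a `ShapeFeature` can be attached to any Theano `FunctionGraph` to track symbolic shape information. A minimal, self-contained sketch in plain Theano (no symbolic-pymc involved):

import theano.tensor as tt
from theano.gof import FunctionGraph
from theano.gof.graph import inputs as tt_inputs

x = tt.matrix("x")
y = x.sum(axis=0)

# Attaching the feature populates `shape_of`, which maps each variable
# in the graph to a tuple of symbolic shape scalars.
fg = FunctionGraph(tt_inputs([y]), [y], features=[tt.opt.ShapeFeature()])

# `y` is a vector, so its shape tuple has exactly one entry.
(s0,) = fg.shape_feature.shape_of[fg.outputs[0]]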
Example #2
def test_mvnormalrv_ShapeFeature():
    M_tt = tt.iscalar("M")
    M_tt.tag.test_value = 2

    d_rv = MvNormalRV(tt.ones((M_tt, )), tt.eye(M_tt), size=2)

    fg = FunctionGraph(
        [i for i in tt_inputs([d_rv]) if not isinstance(i, tt.Constant)],
        [d_rv],
        clone=True,
        features=[tt.opt.ShapeFeature()],
    )

    s1, s2 = fg.shape_feature.shape_of[fg.memo[d_rv]]

    assert s1.eval() == 2
    assert fg.memo[M_tt] in tt_inputs([s2])
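The `fg.memo` lookups above rely on the graph having been cloned (`clone=True`); this `FunctionGraph` (presumably symbolic-pymc's subclass) exposes the original-to-clone mapping that Theano's `clone_get_equiv` produces. A minimal sketch of that mapping in plain Theano:

import theano.tensor as tt
from theano.gof.graph import clone_get_equiv, inputs as tt_inputs

a = tt.vector("a")
b = a * 2

# `clone_get_equiv` returns a dict mapping each original variable (and
# apply node) to its clone; cloned variables keep their names.
memo = clone_get_equiv(tt_inputs([b]), [b])
assert memo[a] is not a
assert memo[a].name == "a"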
Example #3
def test_kanren_opt():
    """Make sure we can run miniKanren "optimizations" over a graph until a fixed-point/normal-form is reached.
    """
    tt.config.cxx = ""
    tt.config.compute_test_value = "ignore"

    x_tt = tt.vector("x")
    c_tt = tt.vector("c")
    d_tt = tt.vector("d")
    A_tt = tt.matrix("A")
    B_tt = tt.matrix("B")

    Z_tt = A_tt.dot(x_tt + B_tt.dot(c_tt + d_tt))

    fgraph = FunctionGraph(tt_inputs([Z_tt]), [Z_tt], clone=True)

    assert isinstance(fgraph.outputs[0].owner.op, tt.Dot)

    def distributes(in_lv, out_lv):
        return lall(
            # lhs == A * (x + b)
            eq(etuple(mt.dot, var("A"), etuple(mt.add, var("x"), var("b"))),
               etuplize(in_lv)),
            # rhs == A * x + A * b
            eq(
                etuple(mt.add, etuple(mt.dot, var("A"), var("x")),
                       etuple(mt.dot, var("A"), var("b"))),
                out_lv,
            ),
        )

    distribute_opt = EquilibriumOptimizer([KanrenRelationSub(distributes)],
                                          max_use_ratio=10)

    fgraph_opt = optimize_graph(fgraph, distribute_opt, return_graph=False)

    assert fgraph_opt.owner.op == tt.add
    assert isinstance(fgraph_opt.owner.inputs[0].owner.op, tt.Dot)
    # TODO: Something wrong with `etuple` caching?
    # assert fgraph_opt.owner.inputs[0].owner.inputs[0] == A_tt
    assert fgraph_opt.owner.inputs[0].owner.inputs[0].name == "A"
    assert fgraph_opt.owner.inputs[1].owner.op == tt.add
    assert isinstance(fgraph_opt.owner.inputs[1].owner.inputs[0].owner.op,
                      tt.Dot)
    assert isinstance(fgraph_opt.owner.inputs[1].owner.inputs[1].owner.op,
                      tt.Dot)
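The `distributes` function above is an ordinary miniKanren goal constructor: it unifies the input term against a "dot of a sum" pattern and unifies the output with the distributed form. A minimal, self-contained sketch of the same relational pattern in pure `kanren` (the `swaps` relation is purely illustrative):

from kanren import run, eq, lall, var

def swaps(in_pair, out_pair):
    # Relate any pair (a, b) to the reversed pair (b, a).
    a, b = var(), var()
    return lall(eq(in_pair, (a, b)), eq(out_pair, (b, a)))

q = var()
assert run(1, q, swaps((1, 2), q)) == ((2, 1),)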
Example #4
def test_Normal_ShapeFeature():
    M_tt = tt.iscalar("M")
    M_tt.tag.test_value = 3
    sd_tt = tt.scalar("sd")
    sd_tt.tag.test_value = 1.0

    d_rv = NormalRV(tt.ones((M_tt, )), sd_tt, size=(2, M_tt))
    d_rv.tag.test_value  # implicitly checks that a test value was computed

    fg = FunctionGraph(
        [i for i in tt_inputs([d_rv]) if not isinstance(i, tt.Constant)],
        [d_rv],
        clone=True,
        features=[tt.opt.ShapeFeature()],
    )
    s1, s2 = fg.shape_feature.shape_of[fg.memo[d_rv]]

    assert get_test_value(s1) == get_test_value(d_rv).shape[0]
    assert get_test_value(s2) == get_test_value(d_rv).shape[1]
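The two shape assertions mirror NumPy's broadcasting rules, which `NormalRV` presumably follows: a length-M mean, a scalar standard deviation, and `size=(2, M)` yield draws of shape `(2, M)`. A plain-NumPy sketch:

import numpy as np

M = 3
# The length-M mean broadcasts against the requested (2, M) size.
draw = np.random.normal(np.ones(M), 1.0, size=(2, M))
assert draw.shape == (2, M)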
Example #5
def test_dirichlet_ShapeFeature():
    """Make sure `RandomVariable.infer_shape` works with `ShapeFeature`."""
    M_tt = tt.iscalar("M")
    M_tt.tag.test_value = 2
    N_tt = tt.iscalar("N")
    N_tt.tag.test_value = 3

    d_rv = DirichletRV(tt.ones((M_tt, N_tt)), name="Gamma")

    fg = FunctionGraph(
        [i for i in tt_inputs([d_rv]) if not isinstance(i, tt.Constant)],
        [d_rv],
        clone=True,
        features=[tt.opt.ShapeFeature()],
    )

    s1, s2 = fg.shape_feature.shape_of[fg.memo[d_rv]]

    assert fg.memo[M_tt] in tt_inputs([s1])
    assert fg.memo[N_tt] in tt_inputs([s2])
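The inferred shape matches NumPy's Dirichlet semantics, which `DirichletRV` presumably mirrors: an (M, N) concentration matrix corresponds to a batch of M draws over N categories. A plain-NumPy sketch:

import numpy as np

M, N = 2, 3
# Drawing M samples from an N-dimensional Dirichlet gives shape (M, N),
# the same shape the test recovers symbolically via `ShapeFeature`.
draws = np.random.dirichlet(np.ones(N), size=M)
assert draws.shape == (M, N)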
Example #6
def test_normals_to_model():
    """Test conversion to a PyMC3 model."""
    tt.config.compute_test_value = 'ignore'

    a_tt = tt.vector('a')
    R_tt = tt.matrix('R')
    F_t_tt = tt.matrix('F')
    V_tt = tt.matrix('V')

    a_tt.tag.test_value = np.r_[1., 0.]
    R_tt.tag.test_value = np.diag([10., 10.])
    F_t_tt.tag.test_value = np.c_[-2., 1.]
    V_tt.tag.test_value = np.diag([0.5])

    beta_rv = MvNormalRV(a_tt, R_tt, name='\\beta')

    E_y_rv = F_t_tt.dot(beta_rv)
    Y_rv = MvNormalRV(E_y_rv, V_tt, name='Y')

    y_val = np.r_[-3.]

    def _check_model(model):
        assert len(model.observed_RVs) == 1
        assert model.observed_RVs[0].name == 'Y'
        Y_pm = model.observed_RVs[0].distribution
        assert isinstance(Y_pm, pm.MvNormal)
        np.testing.assert_array_equal(model.observed_RVs[0].observations.data,
                                      y_val)
        assert Y_pm.mu.owner.op == tt.basic._dot
        assert Y_pm.cov.name == 'V'
        assert len(model.unobserved_RVs) == 1
        assert model.unobserved_RVs[0].name == '\\beta'
        beta_pm = model.unobserved_RVs[0].distribution
        assert isinstance(beta_pm, pm.MvNormal)

    y_tt = theano.shared(y_val, name='y')
    Y_obs = observed(y_tt, Y_rv)

    fgraph = FunctionGraph(tt_inputs([beta_rv, Y_obs]), [beta_rv, Y_obs],
                           clone=True)

    model = graph_model(fgraph)

    _check_model(model)

    # Now, let `graph_model` create the `FunctionGraph`
    model = graph_model(Y_obs)

    _check_model(model)

    # Use a different type of observation value
    y_tt = tt.as_tensor_variable(y_val, name='y')
    Y_obs = observed(y_tt, Y_rv)

    model = graph_model(Y_obs)

    _check_model(model)

    # Use an invalid type of observation value
    tt.config.compute_test_value = 'ignore'
    y_tt = tt.vector('y')
    Y_obs = observed(y_tt, Y_rv)

    with pytest.raises(TypeError):
        model = graph_model(Y_obs)
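For reference, a hand-written PyMC3 model satisfying `_check_model` would look roughly like the sketch below; this is only an illustration of the model `graph_model` is expected to build, not symbolic-pymc's actual output:

import numpy as np
import pymc3 as pm
import theano.tensor as tt

with pm.Model() as expected_model:
    # Latent regression coefficients.
    beta = pm.MvNormal('\\beta', mu=np.r_[1., 0.],
                       cov=np.diag([10., 10.]), shape=2)
    # Observed response, with a mean built from a `dot`.
    Y = pm.MvNormal('Y', mu=tt.dot(np.c_[-2., 1.], beta),
                    cov=np.diag([0.5]), shape=1,
                    observed=np.r_[-3.])

assert len(expected_model.observed_RVs) == 1
assert expected_model.observed_RVs[0].name == 'Y'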