def test_mvnormal_conjugate():
    """Test that we can produce the closed-form distribution for the conjugate
    multivariate normal-regression with normal-prior model."""
    # Run graphs in pure Python and don't force test values during construction.
    tt.config.cxx = ""
    tt.config.compute_test_value = "ignore"

    prior_mean = tt.vector("a")
    prior_cov = tt.matrix("R")
    design = tt.matrix("F")
    noise_cov = tt.matrix("V")

    prior_mean.tag.test_value = np.r_[1.0, 0.0]
    prior_cov.tag.test_value = np.diag([10.0, 10.0])
    design.tag.test_value = np.c_[-2.0, 1.0]
    noise_cov.tag.test_value = np.diag([0.5])

    beta_rv = MvNormalRV(prior_mean, prior_cov, name="\\beta")
    Y_rv = MvNormalRV(design.dot(beta_rv), noise_cov, name="Y")

    y_obs_tt = tt.as_tensor_variable(np.r_[-3.0])
    y_obs_tt.name = "y"
    obs_graph = observed(y_obs_tt, Y_rv)

    # Ask the relational "conjugate" rewrite for one posterior graph.
    q_lv = var()
    (posterior_expr,) = run(1, q_lv, walko(conjugate, obs_graph, q_lv))
    posterior_tt = posterior_expr.eval_obj.reify()

    # Check that the SSE has decreased from prior to posterior.
    # TODO: Use a better test.
    prior_mean_val = prior_mean.tag.test_value
    design_val = design.tag.test_value
    post_mean_val = posterior_tt.owner.inputs[0].tag.test_value

    prior_sse = np.square(y_obs_tt.data - design_val.dot(prior_mean_val)).sum()
    post_sse = np.square(y_obs_tt.data - design_val.dot(post_mean_val)).sum()

    # First, make sure the prior and posterior means are simply not equal.
    with pytest.raises(AssertionError):
        np.testing.assert_array_equal(prior_sse, post_sse)

    # Now, make sure there's a decrease (relative to the observed point).
    np.testing.assert_array_less(post_sse, prior_sse)
def test_mvnormal_mvnormal():
    """Test that we can produce the closed-form distribution for the conjugate
    multivariate normal-regression with normal-prior model."""
    tt.config.cxx = ""
    tt.config.compute_test_value = "ignore"

    a_tt = tt.vector("a")
    R_tt = tt.matrix("R")
    F_t_tt = tt.matrix("F")
    V_tt = tt.matrix("V")

    a_tt.tag.test_value = np.r_[1.0, 0.0]
    R_tt.tag.test_value = np.diag([10.0, 10.0])
    F_t_tt.tag.test_value = np.c_[-2.0, 1.0]
    V_tt.tag.test_value = np.diag([0.5])

    beta_rv = MvNormalRV(a_tt, R_tt, name="\\beta")
    E_y_rv = F_t_tt.dot(beta_rv)
    Y_rv = MvNormalRV(E_y_rv, V_tt, name="Y")

    y_tt = tt.as_tensor_variable(np.r_[-3.0])
    y_tt.name = "y"
    Y_obs = observed(y_tt, Y_rv)

    q_lv = var()
    (expr_graph,) = run(1, q_lv, (tt_graph_applyo, conjugate, Y_obs, q_lv))

    fgraph_opt = expr_graph.eval_obj
    fgraph_opt_tt = fgraph_opt.reify()

    # Check that the SSE has decreased from prior to posterior.
    # TODO: Use a better test.
    beta_prior_mean_val = a_tt.tag.test_value
    F_val = F_t_tt.tag.test_value
    beta_post_mean_val = fgraph_opt_tt.owner.inputs[0].tag.test_value

    priorp_err = np.square(y_tt.data - F_val.dot(beta_prior_mean_val)).sum()
    postp_err = np.square(y_tt.data - F_val.dot(beta_post_mean_val)).sum()

    # First, make sure the prior and posterior means are simply not equal.
    # Use `pytest.raises` (as the other tests in this file do) instead of the
    # legacy nose-era `np.testing.assert_raises` wrapper.
    with pytest.raises(AssertionError):
        np.testing.assert_array_equal(priorp_err, postp_err)

    # Now, make sure there's a decrease (relative to the observed point).
    np.testing.assert_array_less(postp_err, priorp_err)
def test_unify_rvs():
    """Unify a concrete MvNormal regression graph against a logic-variable
    pattern and check the resulting bindings."""
    mean_in = tt.vector("a")
    cov_in = tt.matrix("R")
    design = tt.matrix("F")
    noise = tt.matrix("V")

    beta_rv = MvNormalRV(mean_in, cov_in, name="\\beta")
    E_y_rv = design.dot(beta_rv)
    Y_rv = MvNormalRV(E_y_rv, noise, name="y")

    # A meta pattern with logic variables in the mean, covariance and name slots.
    mean_lv, cov_lv, name_lv = var(), var(), var()
    pattern = mt.MvNormalRV(mean_lv, cov_lv, size=var(), rng=var(), name=name_lv)

    bindings = unify(pattern, Y_rv)

    assert bindings[mean_lv].reify() == E_y_rv
    assert bindings[cov_lv].reify() == noise
    assert bindings[name_lv] == "y"
def test_kanren():
    """Exercise miniKanren unification over meta graphs: first solve for a
    term inside elementwise ops, then match a full MvNormal regression graph."""
    a, b = mt.dvectors("ab")

    # `x` should unify with `b` inside both `a + b` and `a * b`.
    x_lv = var("x")
    assert b == run(1, x_lv, eq(mt.add(a, b), mt.add(a, x_lv)))[0]
    assert b == run(1, x_lv, eq(mt.mul(a, b), mt.mul(a, x_lv)))[0]

    a_tt = tt.vector("a")
    R_tt = tt.matrix("R")
    F_t_tt = tt.matrix("F")
    V_tt = tt.matrix("V")

    beta_rv = MvNormalRV(a_tt, R_tt, name="\\beta")
    E_y_rv = F_t_tt.dot(beta_rv)
    Y_rv = MvNormalRV(E_y_rv, V_tt, name="y")

    # Build a meta-graph "pattern" mirroring `Y_rv`, with logic variables in
    # every parameter slot.
    beta_prior_mt = mt.MvNormalRV(
        var("a"),
        var("R"),
        var("beta_size"),
        var("beta_rng"),
        name=var("beta_name"),
    )
    Y_mt = mt.MvNormalRV(
        mt.dot(var("f"), beta_prior_mt),
        var("V"),
        var("y_size"),
        var("y_rng"),
        name=var("y_name"),
    )

    with variables(Y_mt):
        (res,) = run(0, Y_mt, (eq, Y_rv, Y_mt))
        assert res.reify() == Y_rv
def test_mvnormalrv_ShapeFeature():
    """Check that Theano's `ShapeFeature` infers the shape of an
    `MvNormalRV` output from its inputs."""
    size_var = tt.iscalar("M")
    size_var.tag.test_value = 2

    dist = MvNormalRV(tt.ones((size_var,)), tt.eye(size_var), size=2)

    nonconst_inputs = [
        inp for inp in tt_inputs([dist]) if not isinstance(inp, tt.Constant)
    ]
    fgraph = FunctionGraph(
        nonconst_inputs,
        [dist],
        clone=True,
        features=[tt.opt.ShapeFeature()],
    )

    dim0, dim1 = fgraph.shape_feature.shape_of[fgraph.memo[dist]]
    # The leading dimension is the fixed `size=2`...
    assert dim0.eval() == 2
    # ...while the trailing dimension depends symbolically on `M`.
    assert fgraph.memo[size_var] in tt_inputs([dim1])
def test_normals_to_model():
    """Test conversion to a PyMC3 model."""
    tt.config.compute_test_value = "ignore"

    a_tt = tt.vector("a")
    R_tt = tt.matrix("R")
    F_t_tt = tt.matrix("F")
    V_tt = tt.matrix("V")

    a_tt.tag.test_value = np.r_[1.0, 0.0]
    R_tt.tag.test_value = np.diag([10.0, 10.0])
    F_t_tt.tag.test_value = np.c_[-2.0, 1.0]
    V_tt.tag.test_value = np.diag([0.5])

    beta_rv = MvNormalRV(a_tt, R_tt, name="\\beta")
    E_y_rv = F_t_tt.dot(beta_rv)
    Y_rv = MvNormalRV(E_y_rv, V_tt, name="Y")

    y_val = np.r_[-3.0]

    def _check_model(model):
        # Validate the converted model's observed/unobserved RVs against the
        # source graph.
        assert len(model.observed_RVs) == 1
        assert model.observed_RVs[0].name == "Y"
        Y_pm = model.observed_RVs[0].distribution
        assert isinstance(Y_pm, pm.MvNormal)
        np.testing.assert_array_equal(
            model.observed_RVs[0].observations.data, y_val
        )
        assert Y_pm.mu.owner.op == tt.basic._dot
        assert Y_pm.cov.name == "V"
        assert len(model.unobserved_RVs) == 1
        assert model.unobserved_RVs[0].name == "\\beta"
        beta_pm = model.unobserved_RVs[0].distribution
        assert isinstance(beta_pm, pm.MvNormal)

    y_tt = theano.shared(y_val, name="y")
    Y_obs = observed(y_tt, Y_rv)

    fgraph = FunctionGraph(
        tt_inputs([beta_rv, Y_obs]), [beta_rv, Y_obs], clone=True
    )
    model = graph_model(fgraph)
    _check_model(model)

    # Now, let `graph_model` create the `FunctionGraph`
    model = graph_model(Y_obs)
    _check_model(model)

    # Use a different type of observation value
    y_tt = tt.as_tensor_variable(y_val, name="y")
    Y_obs = observed(y_tt, Y_rv)
    model = graph_model(Y_obs)
    _check_model(model)

    # Use an invalid type of observation value: a purely symbolic vector with
    # no data attached should be rejected.
    y_tt = tt.vector("y")
    Y_obs = observed(y_tt, Y_rv)
    with pytest.raises(TypeError):
        graph_model(Y_obs)