def test_assoccomm():
    """Test associative/commutative unification over Theano meta graphs.

    Uses the (deprecated) `buildo` relation to take apart and reconstruct
    an expression, then `eq_comm`/`eq_assoc` to solve for missing
    arguments of commutative/associative `Op`s.
    """
    from symbolic_pymc.relations import buildo

    x, a, b, c = tt.dvectors('xabc')
    test_expr = x + 1
    q = var('q')

    # All terms ground: the goal succeeds and the query reifies to itself.
    res = run(1, q, buildo(tt.add, test_expr.owner.inputs, test_expr))
    assert q == res[0]

    # Solve for the operator of `x + 1`.
    res = run(1, q, buildo(q, test_expr.owner.inputs, test_expr))
    assert tt.add == res[0].reify()

    # Solve for the arguments of `x + 1`.
    res = run(1, q, buildo(tt.add, q, test_expr))
    assert mt(tuple(test_expr.owner.inputs)) == res[0]

    # Commutative unification: find the missing argument on either side.
    res = run(0, var('x'), eq_comm(mt.mul(a, b), mt.mul(b, var('x'))))
    assert (mt(a), ) == res

    res = run(0, var('x'), eq_comm(mt.add(a, b), mt.add(b, var('x'))))
    assert (mt(a), ) == res

    # Associative unification: match `a + b + c` against `a + <x>`.
    res = run(0, var('x'), (eq_assoc, mt.add(a, b, c), mt.add(a, var('x'))))

    # TODO: `res[0]` should return `etuple`s.  Since `eq_assoc` effectively
    # picks apart the results of `arguments(...)`, I don't know if we can
    # keep the `etuple`s around.  We might be able to convert the results
    # to `etuple`s automatically by wrapping `eq_assoc`, though.
    res_obj = etuple(*res[0]).eval_obj
    assert res_obj == mt(b + c)

    res = run(0, var('x'), (eq_assoc, mt.mul(a, b, c), mt.mul(a, var('x'))))
    res_obj = etuple(*res[0]).eval_obj
    assert res_obj == mt(b * c)
def test_assoccomm():
    """Exercise `applyo`, `eq_comm`, and `eq_assoc` on Theano meta graphs."""
    x, a, b, c = tt.dvectors("xabc")
    test_expr = x + 1
    out_lv = var()

    # With every term ground, the goal succeeds and the query variable
    # reifies to itself.
    soln = run(1, out_lv, applyo(tt.add, etuple(*test_expr.owner.inputs), test_expr))
    assert out_lv == soln[0]

    # Solve for the operator of `x + 1`.
    soln = run(1, out_lv, applyo(out_lv, etuple(*test_expr.owner.inputs), test_expr))
    assert tt.add == soln[0].reify()

    # Solve for the arguments of `x + 1`.
    soln = run(1, out_lv, applyo(tt.add, out_lv, test_expr))
    assert mt(tuple(test_expr.owner.inputs)) == soln[0]

    arg_lv = var()

    # Commutative unification of `mul` arguments...
    soln = run(0, arg_lv, eq_comm(mt.mul(a, b), mt.mul(b, arg_lv)))
    assert (mt(a), ) == soln

    # ...and of `add` arguments.
    soln = run(0, arg_lv, eq_comm(mt.add(a, b), mt.add(b, arg_lv)))
    assert (mt(a), ) == soln

    # Associative unification: `a + b + c` against `a + <x>`.
    (assoc_res, ) = run(0, arg_lv, eq_assoc(mt.add(a, b, c), mt.add(a, arg_lv)))
    assert assoc_res == mt(b + c)

    (assoc_res, ) = run(0, arg_lv, eq_assoc(mt.mul(a, b, c), mt.mul(a, arg_lv)))
    assert assoc_res == mt(b * c)
def test_pymc_broadcastable():
    """Test PyMC3 to Theano conversion amid array broadcasting."""
    tt.config.compute_test_value = 'ignore'

    # Length-one vector parameters, so broadcasting is exercised.
    mu_X = tt.vector('mu_X')
    sd_X = tt.vector('sd_X')
    mu_Y = tt.vector('mu_Y')
    sd_Y = tt.vector('sd_Y')
    mu_X.tag.test_value = np.array([0.], dtype=tt.config.floatX)
    sd_X.tag.test_value = np.array([1.], dtype=tt.config.floatX)
    mu_Y.tag.test_value = np.array([1.], dtype=tt.config.floatX)
    sd_Y.tag.test_value = np.array([0.5], dtype=tt.config.floatX)

    with pm.Model() as model:
        X_rv = pm.Normal('X_rv', mu_X, sd=sd_X, shape=(1, ))
        Y_rv = pm.Normal('Y_rv', mu_Y, sd=sd_Y, shape=(1, ))
        Z_rv = pm.Normal('Z_rv',
                         X_rv + Y_rv,
                         sd=sd_X + sd_Y,
                         shape=(1, ),
                         observed=[10.])

    # NOTE(review): `model_graph` is expected to emit a `UserWarning`
    # during this conversion; the exact reason isn't visible here — confirm.
    with pytest.warns(UserWarning):
        fgraph = model_graph(model)

    Z_rv_tt = canonicalize(fgraph, return_graph=False)

    # This will break comparison if we don't reuse it
    rng = Z_rv_tt.owner.inputs[1].owner.inputs[-1]

    # Hand-build the meta graph we expect `canonicalize` to produce.
    mu_X_ = mt.vector('mu_X')
    sd_X_ = mt.vector('sd_X')
    mu_Y_ = mt.vector('mu_Y')
    sd_Y_ = mt.vector('sd_Y')

    tt.config.compute_test_value = 'ignore'

    X_rv_ = mt.NormalRV(mu_X_, sd_X_, (1, ), rng, name='X_rv')
    X_rv_ = mt.addbroadcast(X_rv_, 0)
    Y_rv_ = mt.NormalRV(mu_Y_, sd_Y_, (1, ), rng, name='Y_rv')
    Y_rv_ = mt.addbroadcast(Y_rv_, 0)
    Z_rv_ = mt.NormalRV(mt.add(X_rv_, Y_rv_),
                        mt.add(sd_X_, sd_Y_), (1, ),
                        rng,
                        name='Z_rv')
    obs_ = mt(Z_rv.observations)
    Z_rv_obs_ = mt.observed(obs_, Z_rv_)
    Z_rv_meta = canonicalize(Z_rv_obs_.reify(), return_graph=False)

    # The converted model graph should match the hand-built one exactly.
    assert mt(Z_rv_tt) == mt(Z_rv_meta)
def test_kanren():
    """Unify a hand-built meta graph against a Theano random-variable graph."""
    # (An earlier version of this test used `with variables(...)` over base
    # Theano vectors directly.)
    a, b = mt.dvectors('ab')

    # Solve for a missing argument of meta `add`/`mul`.
    assert b == run(1, var('x'), eq(mt.add(a, b), mt.add(a, var('x'))))[0]
    assert b == run(1, var('x'), eq(mt.mul(a, b), mt.mul(a, var('x'))))[0]

    # A multivariate-normal regression graph in base Theano.
    a_tt = tt.vector('a')
    R_tt = tt.matrix('R')
    F_t_tt = tt.matrix('F')
    V_tt = tt.matrix('V')
    beta_rv = MvNormalRV(a_tt, R_tt, name='\\beta')
    E_y_rv = F_t_tt.dot(beta_rv)
    Y_rv = MvNormalRV(E_y_rv, V_tt, name='y')

    # The same graph as a meta pattern full of logic variables.
    beta_name_lv = var('beta_name')
    beta_size_lv = var('beta_size')
    beta_rng_lv = var('beta_rng')
    a_lv = var('a')
    R_lv = var('R')
    beta_prior_mt = mt.MvNormalRV(a_lv, R_lv, beta_size_lv, beta_rng_lv,
                                  name=beta_name_lv)

    y_name_lv = var('y_name')
    y_size_lv = var('y_size')
    y_rng_lv = var('y_rng')
    F_t_lv = var('f')
    V_lv = var('V')
    E_y_mt = mt.dot(F_t_lv, beta_prior_mt)
    Y_mt = mt.MvNormalRV(E_y_mt, V_lv, y_size_lv, y_rng_lv, name=y_name_lv)

    # The pattern should unify with the concrete graph and reify back to it.
    with variables(Y_mt):
        (result, ) = run(0, Y_mt, (eq, Y_rv, Y_mt))

    assert result.reify() == Y_rv
def test_etuple_term(): """Test `etuplize` and `etuple` interaction with `term` """ # Take apart an already constructed/evaluated meta # object. e2 = mt.add(mt.vector(), mt.vector()) e2_et = etuplize(e2) assert isinstance(e2_et, ExpressionTuple) # e2_et_expect = etuple( # mt.add, # etuple(mt.TensorVariable, # etuple(mt.TensorType, # 'float64', (False,), None), # None, None, None), # etuple(mt.TensorVariable, # etuple(mt.TensorType, # 'float64', (False,), None), # None, None, None), # ) e2_et_expect = etuple(mt.add, e2.base_arguments[0], e2.base_arguments[1]) assert e2_et == e2_et_expect assert e2_et.eval_obj is e2 # Make sure expression expansion works from Theano objects, too. # First, do it manually. tt_expr = tt.vector() + tt.vector() mt_expr = mt(tt_expr) assert mt_expr.obj is tt_expr assert mt_expr.reify() is tt_expr e3 = etuplize(mt_expr) assert e3 == e2_et assert e3.eval_obj is mt_expr assert e3.eval_obj.reify() is tt_expr # Now, through `etuplize` e2_et_2 = etuplize(tt_expr) assert e2_et_2 == e3 == e2_et assert isinstance(e2_et_2, ExpressionTuple) assert e2_et_2.eval_obj == tt_expr
def test_unification():
    """Unify and reify Theano graphs against meta graphs with logic variables."""
    x, y, a, b = tt.dvectors("xyab")
    x_s = tt.scalar("x_s")
    y_s = tt.scalar("y_s")
    c_tt = tt.constant(1, "c")
    d_tt = tt.constant(2, "d")

    x_l = var("x_l")
    y_l = var("y_l")

    # Reifying a lone logic variable simply applies the substitution.
    assert a == reify(x_l, {x_l: a}).reify()

    # Reification descends into a composite meta expression.
    composite = mt.add(1, mt.mul(2, x_l))
    assert graph_equal(reify(composite, {x_l: a}).reify(), 1 + 2 * a)

    # Unifying a logic variable with a graph binds it to that graph.
    target = tt.add(b, a)
    assert {x_l: target} == unify(x_l, target)

    # Solve for one argument of an `add`.
    assert b == unify(mt.add(x_l, a), mt.add(b, a))[x_l].reify()

    # Solve for two variables nested under an `inv`.
    s = unify(mt.inv(mt.add(x_l, a)), mt.inv(mt.add(b, y_l)))
    assert s[x_l].reify() == b
    assert s[y_l].reify() == a

    # `x_l + y_l` should unify with additions of vectors, scalars, and
    # constants alike, and reify back to the original graph each time.
    lv_add = mt.add(x_l, y_l)
    for tt_add in (tt.add(x, y), tt.add(x_s, y_s), tt.add(c_tt, d_tt)):
        assert graph_equal(tt_add,
                           reify(lv_add, unify(lv_add, tt_add)).reify())
def test_kanren_algebra():
    """miniKanren `eq` should solve for a missing argument of a meta add/mul."""
    a, b = mt.dvectors("ab")

    # `var("x")` is token-identical on each call, so bind it once.
    x_lv = var("x")

    assert b == run(1, x_lv, eq(mt.add(a, b), mt.add(a, x_lv)))[0]
    assert b == run(1, x_lv, eq(mt.mul(a, b), mt.mul(a, x_lv)))[0]
def test_meta_classes():
    """Test construction, mutation, and reification of Theano meta objects."""
    vec_tt = tt.vector('vec')
    vec_m = metatize(vec_tt)
    assert vec_m.obj == vec_tt
    assert type(vec_m) == TheanoMetaTensorVariable

    # This should invalidate the underlying base object.
    vec_m.index = 0
    assert vec_m.obj is None
    # Reification rebuilds a base object with the same type and name.
    assert vec_m.reify().type == vec_tt.type
    assert vec_m.reify().name == vec_tt.name

    # The meta type mirrors the base type's attributes.
    vec_type_m = vec_m.type
    assert type(vec_type_m) == TheanoMetaTensorType
    assert vec_type_m.dtype == vec_tt.dtype
    assert vec_type_m.broadcastable == vec_tt.type.broadcastable
    assert vec_type_m.name == vec_tt.type.name

    assert graph_equal(tt.add(1, 2), mt.add(1, 2).reify())

    # A meta graph containing a logic variable stays meta after `reify`.
    meta_var = mt.add(1, var()).reify()
    assert isinstance(meta_var, TheanoMetaTensorVariable)
    assert isinstance(meta_var.owner.op.obj, theano.Op)
    assert isinstance(meta_var.owner.inputs[0].obj, tt.TensorConstant)

    test_vals = [1, 2.4]
    meta_vars = metatize(test_vals)
    assert meta_vars == [metatize(x) for x in test_vals]
    # TODO: Do we really want meta variables to be equal to their
    # reified base objects?
    # assert meta_vars == [tt.as_tensor_variable(x) for x in test_vals]

    # A logic variable can stand in for a variable's name.
    name_mt = var()
    add_tt = tt.add(0, 1)
    add_mt = mt.add(0, 1, name=name_mt)

    assert add_mt.name is name_mt
    assert add_tt.type == add_mt.type.reify()
    assert mt(add_tt.owner) == add_mt.owner
    # assert isvar(add_mt._obj)

    # Let's confirm that we can dynamically create a new meta `Op` type
    test_mat = np.c_[[2, 3], [4, 5]]
    svd_tt = tt.nlinalg.SVD()(test_mat)
    # First, can we create one from a new base `Op` instance?
    svd_op_mt = mt(tt.nlinalg.SVD())
    svd_mt = svd_op_mt(test_mat)

    assert svd_mt[0].owner.nin == 1
    assert svd_mt[0].owner.nout == 3

    svd_outputs = svd_mt[0].owner.outputs
    assert svd_outputs[0] == svd_mt[0]
    assert svd_outputs[1] == svd_mt[1]
    assert svd_outputs[2] == svd_mt[2]

    assert mt(svd_tt) == svd_mt

    # Next, can we create one from a base `Op` type/class?
    svd_op_type_mt = mt.nlinalg.SVD
    assert isinstance(svd_op_type_mt, type)
    assert issubclass(svd_op_type_mt, TheanoMetaOp)

    # svd_op_inst_mt = svd_op_type_mt(tt.nlinalg.SVD())
    # svd_op_inst_mt(test_mat) == svd_mt

    # Apply node with logic variable as outputs
    svd_apply_mt = TheanoMetaApply(svd_op_mt, [test_mat], outputs=var('out'))
    assert isinstance(svd_apply_mt.inputs, tuple)
    assert isinstance(svd_apply_mt.inputs[0], MetaSymbol)
    assert isvar(svd_apply_mt.outputs)
    assert svd_apply_mt.nin == 1
    # `nout` is unknown when the outputs are a logic variable.
    assert svd_apply_mt.nout is None

    # Apply node with logic variable as inputs
    svd_apply_mt = TheanoMetaApply(svd_op_mt, var('in'), outputs=var('out'))
    assert svd_apply_mt.nin is None

    # A meta variable with None index
    var_mt = TheanoMetaVariable(svd_mt[0].type, svd_mt[0].owner, None, None)
    assert var_mt.index is None
    reified_var_mt = var_mt.reify()

    assert isinstance(reified_var_mt, TheanoMetaTensorVariable)
    # Reification resolves the missing index from the owner's outputs.
    assert reified_var_mt.index == 0
    # The resolved index is written back onto the meta variable.
    assert var_mt.index == 0
    assert reified_var_mt == svd_mt[0]

    # A meta variable with logic variable index
    var_mt = TheanoMetaVariable(svd_mt[0].type, svd_mt[0].owner, var('index'), None)
    assert isvar(var_mt.index)
    reified_var_mt = var_mt.reify()
    # A logic-variable index is preserved on the meta object...
    assert isvar(var_mt.index)
    # ...but the reified result still gets the concrete index.
    assert reified_var_mt.index == 0

    const_mt = mt(1)
    assert isinstance(const_mt, TheanoMetaTensorConstant)
    assert const_mt != mt(2)
def test_pymc_normal_model():
    """Conduct a more in-depth test of PyMC3/Theano conversions for a specific model."""
    tt.config.compute_test_value = 'ignore'

    mu_X = tt.dscalar('mu_X')
    sd_X = tt.dscalar('sd_X')
    mu_Y = tt.dscalar('mu_Y')
    mu_X.tag.test_value = np.array(0., dtype=tt.config.floatX)
    sd_X.tag.test_value = np.array(1., dtype=tt.config.floatX)
    mu_Y.tag.test_value = np.array(1., dtype=tt.config.floatX)

    # We need something that uses transforms...
    with pm.Model() as model:
        X_rv = pm.Normal('X_rv', mu_X, sd=sd_X)
        S_rv = pm.HalfCauchy('S_rv', beta=np.array(0.5, dtype=tt.config.floatX))
        Y_rv = pm.Normal('Y_rv', X_rv * S_rv, sd=S_rv)
        Z_rv = pm.Normal('Z_rv', X_rv + Y_rv, sd=sd_X, observed=10.)

    fgraph = model_graph(model, output_vars=[Z_rv])
    Z_rv_tt = canonicalize(fgraph, return_graph=False)

    # This will break comparison if we don't reuse it
    rng = Z_rv_tt.owner.inputs[1].owner.inputs[-1]

    # Hand-build the meta graph we expect the conversion to produce.
    mu_X_ = mt.dscalar('mu_X')
    sd_X_ = mt.dscalar('sd_X')
    tt.config.compute_test_value = 'ignore'
    X_rv_ = mt.NormalRV(mu_X_, sd_X_, None, rng, name='X_rv')
    S_rv_ = mt.HalfCauchyRV(np.array(0., dtype=tt.config.floatX),
                            np.array(0.5, dtype=tt.config.floatX),
                            None, rng, name='S_rv')
    Y_rv_ = mt.NormalRV(mt.mul(X_rv_, S_rv_), S_rv_, None, rng, name='Y_rv')
    # NOTE(review): this passes the base variable `sd_X`, not the meta
    # `sd_X_` built above — looks unintentional; confirm whether `mt(...)`
    # conversion makes the two compare equal here.
    Z_rv_ = mt.NormalRV(mt.add(X_rv_, Y_rv_), sd_X, None, rng, name='Z_rv')
    obs_ = mt(Z_rv.observations)
    Z_rv_obs_ = mt.observed(obs_, Z_rv_)
    Z_rv_meta = mt(canonicalize(Z_rv_obs_.reify(), return_graph=False))

    assert mt(Z_rv_tt) == Z_rv_meta

    # Now, let's try that with multiple outputs.
    fgraph.disown()
    fgraph = model_graph(model, output_vars=[Y_rv, Z_rv])
    # Expected variable count of the converted two-output graph.
    assert len(fgraph.variables) == 25

    # `fgraph.memo` maps the original PyMC3 variables to their clones.
    Y_new_rv = walk(Y_rv, fgraph.memo)
    S_new_rv = walk(S_rv, fgraph.memo)
    X_new_rv = walk(X_rv, fgraph.memo)
    Z_new_rv = walk(Z_rv, fgraph.memo)

    # Make sure our new vars are actually in the graph and where
    # they should be.
    assert Y_new_rv == fgraph.outputs[0]
    assert Z_new_rv == fgraph.outputs[1]
    assert X_new_rv in fgraph.variables
    assert S_new_rv in fgraph.variables
    assert isinstance(Z_new_rv.owner.op, Observed)

    # Let's only look at the variables involved in the `Z_rv` subgraph.
    Z_vars = theano.gof.graph.variables(theano.gof.graph.inputs([Z_new_rv]),
                                        [Z_new_rv])

    # Let's filter for only the `RandomVariables` with names.
    Z_vars_count = Counter([
        n.name for n in Z_vars
        if n.name and n.owner and isinstance(n.owner.op, RandomVariable)
    ])

    # Each new RV should be present and only occur once.
    assert Y_new_rv.name in Z_vars_count.keys()
    assert X_new_rv.name in Z_vars_count.keys()
    assert Z_new_rv.owner.inputs[1].name in Z_vars_count.keys()
    assert all(v == 1 for v in Z_vars_count.values())
def test_unification():
    """Test unification/reification of Theano graphs, including base
    variables promoted to unifiable terms via `variables(...)`.
    """
    x, y, a, b = tt.dvectors('xyab')
    x_s = tt.scalar('x_s')
    y_s = tt.scalar('y_s')
    c_tt = tt.constant(1, 'c')
    d_tt = tt.constant(2, 'd')

    x_l = var('x_l')
    y_l = var('y_l')

    # Reifying a lone logic variable applies the substitution directly.
    assert a == reify(x_l, {x_l: a}).reify()

    # Reification descends into composite meta expressions.
    test_expr = mt.add(1, mt.mul(2, x_l))
    test_reify_res = reify(test_expr, {x_l: a})
    assert graph_equal(test_reify_res.reify(), 1 + 2 * a)

    # Unifying a logic variable against a graph binds it to that graph.
    z = tt.add(b, a)
    assert {x_l: z} == unify(x_l, z)

    # Solve for one argument of an `add`.
    assert b == unify(mt.add(x_l, a), mt.add(b, a))[x_l].reify()

    # Solve for two variables nested under an `inv`.
    res = unify(mt.inv(mt.add(x_l, a)), mt.inv(mt.add(b, y_l)))
    assert res[x_l].reify() == b
    assert res[y_l].reify() == a

    # TODO: This produces a `DimShuffle` so that the scalar constant `1`
    # will match the dimensions of the vector `b`.  That `DimShuffle` isn't
    # handled by the logic variable form.
    # assert unify(mt.add(x_l, 1), mt.add(b_l, 1))[x] == b
    with variables(x):
        assert unify(x + 1, b + 1)[x].reify() == b

    assert unify(mt.add(x_l, a), mt.add(b, a))[x_l].reify() == b

    # Treat the base Theano variable `x` itself as unifiable.
    with variables(x):
        assert unify(x, b)[x] == b
        assert unify([x], [b])[x] == b
        assert unify((x, ), (b, ))[x] == b
        assert unify(x + 1, b + 1)[x].reify() == b
        assert unify(x + a, b + a)[x].reify() == b

    with variables(x):
        assert unify(a + b, a + x)[x].reify() == b

    mt_expr_add = mt.add(x_l, y_l)

    # The parameters are vectors
    tt_expr_add_1 = tt.add(x, y)
    assert graph_equal(tt_expr_add_1,
                       reify(mt_expr_add,
                             unify(mt_expr_add, tt_expr_add_1)).reify())

    # The parameters are scalars
    tt_expr_add_2 = tt.add(x_s, y_s)
    assert graph_equal(tt_expr_add_2,
                       reify(mt_expr_add,
                             unify(mt_expr_add, tt_expr_add_2)).reify())

    # The parameters are constants
    tt_expr_add_3 = tt.add(c_tt, d_tt)
    assert graph_equal(tt_expr_add_3,
                       reify(mt_expr_add,
                             unify(mt_expr_add, tt_expr_add_3)).reify())
def test_etuple_term(): """Test `etuplize` and `etuple` interaction with `term`.""" # Take apart an already constructed/evaluated meta # object. e2 = mt.add(mt.vector(), mt.vector()) e2_et = etuplize(e2) assert isinstance(e2_et, ExpressionTuple) # e2_et_expect = etuple( # mt.add, # etuple(mt.TensorVariable, # etuple(mt.TensorType, # 'float64', (False,), None), # None, None, None), # etuple(mt.TensorVariable, # etuple(mt.TensorType, # 'float64', (False,), None), # None, None, None), # ) e2_et_expect = etuple(mt.add, e2.base_arguments[0], e2.base_arguments[1]) assert e2_et == e2_et_expect assert e2_et.eval_obj is e2 # Make sure expression expansion works from Theano objects, too. # First, do it manually. tt_expr = tt.vector() + tt.vector() mt_expr = mt(tt_expr) assert mt_expr.obj is tt_expr assert mt_expr.reify() is tt_expr e3 = etuplize(mt_expr) assert e3 == e2_et assert e3.eval_obj is mt_expr assert e3.eval_obj.reify() is tt_expr # Now, through `etuplize` e2_et_2 = etuplize(tt_expr) assert e2_et_2 == e3 == e2_et assert isinstance(e2_et_2, ExpressionTuple) assert e2_et_2.eval_obj == tt_expr test_expr = mt(tt.vector("z") * 7) assert rator(test_expr) == mt.mul assert rands(test_expr)[0] == mt(tt.vector("z")) dim_shuffle_op = rator(rands(test_expr)[1]) assert isinstance(dim_shuffle_op, mt.DimShuffle) assert rands(rands(test_expr)[1]) == etuple(mt(7)) with pytest.raises(ConsError): rator(dim_shuffle_op) # assert rator(dim_shuffle_op) == mt.DimShuffle # assert rands(dim_shuffle_op) == etuple((), ("x",), True) const_tensor = rands(rands(test_expr)[1])[0] with pytest.raises(ConsError): rator(const_tensor) with pytest.raises(ConsError): rands(const_tensor) et_expr = etuplize(test_expr) exp_res = etuple(mt.mul, mt(tt.vector("z")), etuple(mt.DimShuffle((), ("x", ), True), mt(7)) # etuple(etuple(mt.DimShuffle, (), ("x",), True), mt(7)) ) assert et_expr == exp_res assert exp_res.eval_obj == test_expr