def test_unify_Op():
    # These `Op`s expand into `ExpressionTuple`s
    op1 = CustomOp(1)
    op2 = CustomOp(1)

    # `Op`, `Op`
    s = unify(op1, op2)
    assert s == {}

    # `ExpressionTuple`, `Op`
    s = unify(etuplize(op1), op2)
    assert s == {}

    # These `Op`s don't expand into `ExpressionTuple`s
    op1_np = CustomOpNoProps(1)
    op2_np = CustomOpNoProps(1)

    s = unify(op1_np, op2_np)
    assert s == {}

    # Same, but this one also doesn't implement `__eq__`
    op1_np_neq = CustomOpNoPropsNoEq(1)
    s = unify(op1_np_neq, etuplize(op1))
    assert s is False
def _unify_Variable_Variable(u, v, s):
    # Avoid converting to `etuple`s, when possible
    if u == v:
        yield s
        return

    if not u.owner and not v.owner:
        yield False
        return

    yield _unify(
        etuplize(u, shallow=True) if u.owner else u,
        etuplize(v, shallow=True) if v.owner else v,
        s,
    )
def _unify_Variable_ExpressionTuple(u, v, s):
    # `Constant`s are "atomic"
    if not u.owner:
        yield False
        return

    yield _unify(etuplize(u, shallow=True), v, s)
def _unify_etuplize_first_arg(u, v, s):
    try:
        u_et = etuplize(u, shallow=True)
        yield _unify(u_et, v, s)
    except TypeError:
        yield False
        return
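# A minimal sketch of what the dispatchers above enable: unifying an Aesara
# graph directly against an `etuple` pattern containing logic variables.
# This assumes the dispatchers are registered with `unification`'s `_unify`
# for Aesara's `Variable` types (as in `aesara.graph.unify`); it is
# illustrative, not a verbatim excerpt.

import aesara.tensor as at
from etuples import etuple
from unification import unify, var

x_at = at.vector("x")
y_lv = var()

# `exp(x)` matches the pattern `(exp, y_lv)`, binding `y_lv` to `x`
s = unify(at.exp(x_at), etuple(at.exp, y_lv))
assert s[y_lv] is x_at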
def test_etuples():
    x_at = at.vector("x")
    y_at = at.vector("y")

    z_at = etuple(x_at, y_at)

    res = apply(at.add, z_at)
    assert res.owner.op == at.add
    assert res.owner.inputs == [x_at, y_at]

    w_at = etuple(at.add, x_at, y_at)

    res = w_at.evaled_obj
    assert res.owner.op == at.add
    assert res.owner.inputs == [x_at, y_at]

    # This `Op` doesn't expand into an `etuple` (i.e. it's "atomic")
    op1_np = CustomOpNoProps(1)

    res = apply(op1_np, z_at)
    assert res.owner.op == op1_np

    q_at = op1_np(x_at, y_at)
    res = etuplize(q_at)
    assert res[0] == op1_np

    with pytest.raises(TypeError):
        etuplize(op1_np)

    class MyMultiOutOp(Op):
        def make_node(self, *inputs):
            outputs = [MyType()(), MyType()()]
            return Apply(self, list(inputs), outputs)

        def perform(self, node, inputs, outputs):
            outputs[0] = np.array(inputs[0])
            outputs[1] = np.array(inputs[0])

    x_at = at.vector("x")
    op1_np = MyMultiOutOp()

    res = apply(op1_np, etuple(x_at))
    assert len(res) == 2
    assert res[0].owner.op == op1_np
    assert res[1].owner.op == op1_np
def test_sexp_unify_reify():
    """Make sure we can unify and reify etuples/S-exps."""
    # Unify `A . (x + y)`, for `x`, `y` logic variables
    A = tf.compat.v1.placeholder(
        tf.float64, name="A", shape=tf.TensorShape([None, None])
    )
    x = tf.compat.v1.placeholder(tf.float64, name="x", shape=tf.TensorShape([None, 1]))
    y = tf.compat.v1.placeholder(tf.float64, name="y", shape=tf.TensorShape([None, 1]))

    z = tf.matmul(A, tf.add(x, y))

    z_sexp = etuplize(z, shallow=False)

    # Let's just be sure that the original TF objects are preserved
    assert z_sexp[1].reify() == A
    assert z_sexp[2][1].reify() == x
    assert z_sexp[2][2].reify() == y

    A_lv, x_lv, y_lv = var(), var(), var()
    dis_pat = etuple(
        TFlowMetaOperator(mt.matmul.op_def, var()),
        A_lv,
        etuple(TFlowMetaOperator(mt.add.op_def, var()), x_lv, y_lv),
    )

    s = unify(dis_pat, z_sexp, {})

    assert s[A_lv] == mt(A)
    assert s[x_lv] == mt(x)
    assert s[y_lv] == mt(y)

    # Now, we construct a graph that reflects the distributive property and
    # reify with the substitutions from the un-distributed form
    out_pat = etuple(
        mt.add,
        etuple(mt.matmul, A_lv, x_lv),
        etuple(mt.matmul, A_lv, y_lv),
    )
    z_dist = reify(out_pat, s)

    # Evaluate the tuple-expression and get a meta object/graph
    z_dist_mt = z_dist.eval_obj

    # If all the logic variables were reified, we should be able to
    # further reify the meta graph and get a concrete TF graph
    z_dist_tf = z_dist_mt.reify()

    assert isinstance(z_dist_tf, tf.Tensor)

    # Check the first part of `A . x + A . y` (i.e. `A . x`)
    assert z_dist_tf.op.inputs[0].op.inputs[0] == A
    assert z_dist_tf.op.inputs[0].op.inputs[1] == x
    # Now, the second, `A . y`
    assert z_dist_tf.op.inputs[1].op.inputs[0] == A
    assert z_dist_tf.op.inputs[1].op.inputs[1] == y
def distributes(in_lv, out_lv):
    return lall(
        # lhs == A * (x + b)
        eq(
            etuple(mt.dot, var("A"), etuple(mt.add, var("x"), var("b"))),
            etuplize(in_lv),
        ),
        # rhs == A * x + A * b
        eq(
            etuple(
                mt.add,
                etuple(mt.dot, var("A"), var("x")),
                etuple(mt.dot, var("A"), var("b")),
            ),
            out_lv,
        ),
    )
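# A hedged usage sketch for `distributes`: ask miniKanren for the distributed
# form of `A . (x + b)`.  It assumes `tt` (Theano tensor) and the `mt`
# meta-object namespace are in scope as elsewhere in this code.

from kanren import run, var

A_tt = tt.matrix("A")
x_tt = tt.vector("x")
b_tt = tt.vector("b")

out_lv = var()
# One result: an etuple expression for `A . x + A . b`
(dist_et,) = run(1, out_lv, distributes(A_tt.dot(x_tt + b_tt), out_lv))
# The concrete (meta) graph can then be recovered via `dist_et.eval_obj`.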
_unify.add(
    (TFlowMetaSymbol, tf_class_abstractions, Mapping),
    lambda u, v, s: unify_MetaSymbol(u, metatize(v), s),
)
_unify.add(
    (tf_class_abstractions, TFlowMetaSymbol, Mapping),
    lambda u, v, s: unify_MetaSymbol(metatize(u), v, s),
)
_unify.add(
    (tf_class_abstractions, tf_class_abstractions, Mapping),
    lambda u, v, s: unify_MetaSymbol(metatize(u), metatize(v), s),
)


def _reify_TFlowClasses(o, s):
    meta_obj = metatize(o)
    return reify(meta_obj, s)


_reify.add((tf_class_abstractions, Mapping), _reify_TFlowClasses)

_car.add((tf.Tensor,), lambda x: operator(metatize(x)))
operator.add((tf.Tensor,), lambda x: operator(metatize(x)))

_cdr.add((tf.Tensor,), lambda x: arguments(metatize(x)))
arguments.add((tf.Tensor,), lambda x: arguments(metatize(x)))

etuplize.add(tf_class_abstractions, lambda x, shallow=False: etuplize(metatize(x), shallow))

__all__ = []
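# A minimal sketch of what these registrations enable, mirroring the
# `test_sexp_unify_reify` test above (all names are assumed to be in scope
# as in that test; `unify`/`var` come from the `unification` package):

x_tf = tf.compat.v1.placeholder(tf.float64, name="x")
y_tf = tf.compat.v1.placeholder(tf.float64, name="y")
y_lv = var()

# Leave the operator's `node_def` as a logic variable so that only the
# `OpDef` (i.e. "Add") needs to match
add_pat = etuple(TFlowMetaOperator(mt.add.op_def, var()), var(), y_lv)

s = unify(add_pat, etuplize(tf.add(x_tf, y_tf)), {})
assert s[y_lv] == mt(y_tf)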
def test_normal_normal_regression():
    tt.config.compute_test_value = "ignore"
    theano.config.cxx = ""
    np.random.seed(9283)

    N = 10
    M = 3
    a_tt = tt.vector("a")
    R_tt = tt.vector("R")
    X_tt = tt.matrix("X")
    V_tt = tt.vector("V")

    a_tt.tag.test_value = np.random.normal(size=M)
    R_tt.tag.test_value = np.abs(np.random.normal(size=M))
    X = np.random.normal(10, 1, size=N)
    X = np.c_[np.ones(10), X, X * X]
    X_tt.tag.test_value = X
    V_tt.tag.test_value = np.ones(N)

    beta_rv = NormalRV(a_tt, R_tt, name="\\beta")

    E_y_rv = X_tt.dot(beta_rv)
    E_y_rv.name = "E_y"
    Y_rv = NormalRV(E_y_rv, V_tt, name="Y")

    y_tt = tt.as_tensor_variable(Y_rv.tag.test_value)
    y_tt.name = "y"
    y_obs_rv = observed(y_tt, Y_rv)
    y_obs_rv.name = "y_obs"

    #
    # Use the relation to identify/match `Y`, `X`, and `beta`.
    #
    y_args_tail_lv, b_args_tail_lv = var(), var()
    beta_lv = var()

    y_args_lv, y_lv, Y_lv, X_lv = var(), var(), var(), var()
    (res,) = run(
        1,
        (beta_lv, y_args_tail_lv, b_args_tail_lv),
        applyo(mt.observed, y_args_lv, y_obs_rv),
        eq(y_args_lv, (y_lv, Y_lv)),
        normal_normal_regression(Y_lv, X_lv, beta_lv, y_args_tail_lv, b_args_tail_lv),
    )

    # TODO FIXME: This would work if non-op parameters (e.g. names) were
    # covered by `operator`/`car`.  See `TheanoMetaOperator`.
    assert res[0].eval_obj.obj == beta_rv
    assert res[0] == etuplize(beta_rv)
    assert res[1] == etuplize(Y_rv)[2:]
    assert res[2] == etuplize(beta_rv)[1:]

    #
    # Use the relation to produce `Y` from given `X` and `beta`.
    #
    X_new_mt = mt(tt.eye(N, M))
    beta_new_mt = mt(NormalRV(0, 1, size=M))
    Y_args_cdr_mt = etuplize(Y_rv)[2:]
    Y_lv = var()
    (res,) = run(1, Y_lv, normal_normal_regression(Y_lv, X_new_mt, beta_new_mt, Y_args_cdr_mt))

    Y_out_mt = res.eval_obj

    Y_new_mt = etuple(mt.NormalRV, mt.dot(X_new_mt, beta_new_mt)) + Y_args_cdr_mt
    Y_new_mt = Y_new_mt.eval_obj

    assert Y_out_mt == Y_new_mt
_unify.add(
    (TheanoMetaSymbol, tt_class_abstractions, Mapping),
    lambda u, v, s: unify_MetaSymbol(u, metatize(v), s),
)
_unify.add(
    (tt_class_abstractions, TheanoMetaSymbol, Mapping),
    lambda u, v, s: unify_MetaSymbol(metatize(u), v, s),
)
_unify.add(
    (tt_class_abstractions, tt_class_abstractions, Mapping),
    lambda u, v, s: unify_MetaSymbol(metatize(u), metatize(v), s),
)


def _reify_TheanoClasses(o, s):
    meta_obj = metatize(o)
    return reify(meta_obj, s)


_reify.add((tt_class_abstractions, Mapping), _reify_TheanoClasses)

operator.add((tt.Variable,), lambda x: operator(metatize(x)))
_car.add((tt.Variable,), lambda x: operator(metatize(x)))

arguments.add((tt.Variable,), lambda x: arguments(metatize(x)))
_cdr.add((tt.Variable,), lambda x: arguments(metatize(x)))

term.add((tt.Op, ExpressionTuple), lambda op, args: term(metatize(op), args))

etuplize.add(tt_class_abstractions, lambda x, shallow=False: etuplize(metatize(x), shallow))

__all__ = []
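# With the registrations above, Theano objects can be unified against
# `etuple` patterns.  A minimal sketch, grounded in the `test_etuple_term`
# behavior below (`tt`, `mt`, `etuple`, `etuplize`, `unify`, and `var` are
# assumed to be in scope as elsewhere in this code):

x_tt = tt.vector("x")
y_tt = tt.vector("y")
a_lv, b_lv = var(), var()

s = unify(etuplize(x_tt + y_tt), etuple(mt.add, a_lv, b_lv), {})
assert s[a_lv] == mt(x_tt)
assert s[b_lv] == mt(y_tt)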
def normal_qr_transform(in_expr, out_expr):
    """Produce a relation for normal-normal regression and its QR-reduced form.

    TODO XXX: This isn't entirely correct (e.g. it needs to also transform
    the variance terms), but it demonstrates all the requisite functionality
    for this kind of model reformulation.

    """
    y_lv, Y_lv, X_lv, beta_lv = var(), var(), var(), var()
    Y_args_lv, beta_args_lv = var(), var()
    QR_lv, Q_lv, R_lv = var(), var(), var()
    beta_til_lv, beta_new_lv = var(), var()
    beta_mean_lv, beta_sd_lv = var(), var()
    beta_size_lv, beta_rng_lv = var(), var()
    Y_new_lv = var()
    X_op_lv = var()

    in_expr = etuplize(in_expr)

    res = lall(
        # Only applies to regression models on observed RVs
        eq(in_expr, etuple(mt.observed, y_lv, Y_lv)),
        # Relate the model components
        normal_normal_regression(Y_lv, X_lv, beta_lv, Y_args_lv, beta_args_lv),
        # Let's not do all this to an already QR-reduced graph;
        # otherwise, we'll loop forever!
        applyo(X_op_lv, var(), X_lv),
        # XXX: This type of dis-equality goal isn't the best,
        # but it will definitely work for now.
        neq(mt.nlinalg.qr_full, X_op_lv),
        # Relate terms for the QR decomposition
        eq(QR_lv, etuple(mt.nlinalg.qr_full, X_lv)),
        eq(Q_lv, etuple(itemgetter(0), QR_lv)),
        eq(R_lv, etuple(itemgetter(1), QR_lv)),
        # The new `beta_tilde`
        eq(beta_args_lv, (beta_mean_lv, beta_sd_lv, beta_size_lv, beta_rng_lv)),
        eq(
            beta_til_lv,
            etuple(
                mt.NormalRV,
                # Use these `tt.[ones|zeros]_like` functions to preserve the
                # correct shape (and a valid `tt.dot`).
                etuple(mt.zeros_like, beta_mean_lv),
                etuple(mt.ones_like, beta_sd_lv),
                beta_size_lv,
                beta_rng_lv,
            ),
        ),
        # Relate the new and old coefficients
        eq(beta_new_lv, etuple(mt.dot, etuple(mt.nlinalg.matrix_inverse, R_lv), beta_til_lv)),
        # Use the relation the other way to produce the new/transformed
        # observation distribution
        normal_normal_regression(Y_new_lv, Q_lv, beta_til_lv, Y_args_lv),
        eq(
            out_expr,
            [
                (
                    in_expr,
                    etuple(mt.observed, y_lv, etuple(update_name_suffix, Y_new_lv, Y_lv, "")),
                ),
                (beta_lv, beta_new_lv),
            ],
        ),
    )

    return res
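# A hedged usage sketch for `normal_qr_transform`, following the
# `normal_normal_regression` test above; `y_obs_rv` is assumed to be an
# observed normal-normal regression graph like the one built in that test.

from kanren import run, var

out_lv = var()
(transforms,) = run(1, out_lv, normal_qr_transform(y_obs_rv, out_lv))

# `transforms` is a list of `(old, new)` expression pairs: the observed RV
# paired with its QR-reduced replacement, and `beta` paired with its
# back-transformed (i.e. `R^-1 . beta_tilde`) coefficients.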
def test_etuple_term():
    assert etuplize("blah", return_bad_args=True) == "blah"

    a = tf.compat.v1.placeholder(tf.float64, name="a")
    b = tf.compat.v1.placeholder(tf.float64, name="b")

    a_mt = mt(a)
    a_mt._obj = None
    a_reified = a_mt.reify()
    assert isinstance(a_reified, tf.Tensor)
    assert a_reified.shape.dims is None

    with pytest.raises(TypeError):
        etuplize(a_mt.op.op_def)

    with pytest.raises(TypeError):
        etuplize(a_mt.op.node_def, shallow=False)

    with pytest.raises(TypeError):
        etuplize(a_mt, shallow=False)

    # Now, consider a meta graph with operator arguments
    add_mt = mt.AddV2(a, b)

    add_et = etuplize(add_mt, shallow=True)
    assert isinstance(add_et, ExpressionTuple)
    assert add_et[0].op_def == mt.AddV2.op_def

    # Check `kanren`'s term framework
    assert isinstance(operator(add_mt), TFlowMetaOperator)
    assert arguments(add_mt) == add_mt.op.inputs

    assert operator(add_mt)(*arguments(add_mt)) == add_mt
    assert isinstance(add_et[0], TFlowMetaOperator)
    assert add_et[1:] == add_mt.op.inputs

    assert operator(add_mt)(*arguments(add_mt)) == add_mt
    assert term(operator(add_mt), arguments(add_mt)) == add_mt

    add_mt = mt.AddV2(a, add_mt)
    add_et = etuplize(add_mt, shallow=False)
    assert isinstance(add_et, ExpressionTuple)
    assert len(add_et) == 3
    assert add_et[0].op_def == mt.AddV2.op_def
    assert len(add_et[2]) == 3
    assert add_et[2][0].op_def == mt.AddV2.op_def
    assert add_et.eval_obj is add_mt

    add_et._eval_obj = ExpressionTuple.null
    with tf.Graph().as_default():
        assert add_et.eval_obj == add_mt

    # Make sure things work with logic variables
    add_lvar_mt = TFlowMetaTensor(var(), var(), [1, 2])

    with pytest.raises(ConsError):
        assert operator(add_lvar_mt) is None

    with pytest.raises(ConsError):
        assert arguments(add_lvar_mt) is None
def test_etuple_term():
    """Test `etuplize` and `etuple` interaction with `term`."""
    # Take apart an already constructed/evaluated meta object.
    e2 = mt.add(mt.vector(), mt.vector())

    e2_et = etuplize(e2)
    assert isinstance(e2_et, ExpressionTuple)

    # e2_et_expect = etuple(
    #     mt.add,
    #     etuple(mt.TensorVariable,
    #            etuple(mt.TensorType,
    #                   'float64', (False,), None),
    #            None, None, None),
    #     etuple(mt.TensorVariable,
    #            etuple(mt.TensorType,
    #                   'float64', (False,), None),
    #            None, None, None),
    # )
    e2_et_expect = etuple(mt.add, e2.base_arguments[0], e2.base_arguments[1])
    assert e2_et == e2_et_expect
    assert e2_et.eval_obj is e2

    # Make sure expression expansion works from Theano objects, too.
    # First, do it manually...
    tt_expr = tt.vector() + tt.vector()
    mt_expr = mt(tt_expr)
    assert mt_expr.obj is tt_expr
    assert mt_expr.reify() is tt_expr

    e3 = etuplize(mt_expr)
    assert e3 == e2_et
    assert e3.eval_obj is mt_expr
    assert e3.eval_obj.reify() is tt_expr

    # ...then, through `etuplize`.
    e2_et_2 = etuplize(tt_expr)
    assert e2_et_2 == e3 == e2_et
    assert isinstance(e2_et_2, ExpressionTuple)
    assert e2_et_2.eval_obj == tt_expr

    test_expr = mt(tt.vector("z") * 7)
    assert rator(test_expr) == mt.mul
    assert rands(test_expr)[0] == mt(tt.vector("z"))

    dim_shuffle_op = rator(rands(test_expr)[1])

    assert isinstance(dim_shuffle_op, mt.DimShuffle)
    assert rands(rands(test_expr)[1]) == etuple(mt(7))

    with pytest.raises(ConsError):
        rator(dim_shuffle_op)
    # assert rator(dim_shuffle_op) == mt.DimShuffle
    # assert rands(dim_shuffle_op) == etuple((), ("x",), True)

    const_tensor = rands(rands(test_expr)[1])[0]
    with pytest.raises(ConsError):
        rator(const_tensor)
    with pytest.raises(ConsError):
        rands(const_tensor)

    et_expr = etuplize(test_expr)
    exp_res = etuple(
        mt.mul,
        mt(tt.vector("z")),
        etuple(mt.DimShuffle((), ("x",), True), mt(7)),
        # etuple(etuple(mt.DimShuffle, (), ("x",), True), mt(7)),
    )

    assert et_expr == exp_res
    assert exp_res.eval_obj == test_expr