def test_goal_ordering():
    # Regression test for https://github.com/logpy/logpy/issues/58

    def lefto(q, p, lst):
        return membero((q, p), zip(lst, lst[1:]))

    vals = var()

    # Verify the solution can be computed when we specify the execution
    # ordering.
    rules_greedy = (
        lallgreedy,
        (eq, (var(), var()), vals),
        (lefto, 'green', 'white', vals),
    )

    solution, = run(1, vals, rules_greedy)
    assert solution == ('green', 'white')

    # Verify that attempting to compute the "safe" order does not itself cause
    # the evaluation to fail.
    rules_safe = (
        lall,
        (eq, (var(), var()), vals),
        (lefto, 'green', 'white', vals),
    )

    solution, = run(1, vals, rules_safe)
    assert solution == ('green', 'white')
def test_reify_object():
    obj = reify_object(Foo(1, var(3)), {var(3): 4})
    assert obj.a == 1
    assert obj.b == 4

    f = Foo(1, 2)
    assert reify_object(f, {}) is f
def test_unify_slice():
    x = var('x')
    y = var('y')

    assert unify(slice(1), slice(1), {}) == {}
    assert unify(slice(1, 2, 3), x, {}) == {x: slice(1, 2, 3)}
    assert unify(slice(1, 2, None), slice(x, y), {}) == {x: 1, y: 2}
def test_goaleval():
    x, y = var('x'), var('y')
    g = eq(x, 2)
    assert goaleval(g) == g
    assert callable(goaleval((eq, x, 2)))
    with raises(EarlyGoalError):
        goaleval((membero, x, y))
    assert callable(goaleval((lallgreedy, (eq, x, 2))))
def test_reify_object_attrs():
    x, y = var('x'), var('y')
    f, g = Foo(1, 2), Foo(x, y)
    s = {x: 1, y: 2}

    assert reify_object_attrs(g, s, ['a', 'b']) == f
    assert reify_object_attrs(g, s, ['a']) == Foo(1, y)
    assert reify_object_attrs(g, s, ['b']) == Foo(x, 2)
    assert reify_object_attrs(g, s, []) is g
def test_reify():
    x, y, z = var(), var(), var()
    s = {x: 1, y: 2, z: (x, y)}

    assert reify(x, s) == 1
    assert reify(10, s) == 10
    assert reify((1, y), s) == (1, 2)
    assert reify((1, (x, (y, 2))), s) == (1, (1, (2, 2)))
    assert reify(z, s) == (1, 2)
def test_lall(lall_impl):
    x, y = var('x'), var('y')
    assert results(lall_impl((eq, x, 2))) == ({x: 2}, )
    assert results(lall_impl((eq, x, 2), (eq, x, 3))) == ()
    assert results(lall_impl()) == ({}, )

    assert run(0, x, lall_impl((eq, y, (1, 2)), (membero, x, y)))
    assert run(0, x, lall_impl()) == (x, )

    with pytest.raises(EarlyGoalError):
        run(0, x, lall_impl(membero(x, y)))
def test_unify_isinstance_list():
    class Foo2(Foo):
        pass

    x = var('x')
    y = var('y')
    f, g = Foo2(1, 2), Foo2(x, y)

    _unify.add((Foo, Foo, dict), unify_object)
    _reify.add((Foo, dict), reify_object)

    assert unify(f, g, {})
    assert reify(g, {x: 1, y: 2}) == f
def test_unify_tuple():
    # Tests that adding facts can be unified with unpacked versions of those
    # facts.
    valido = Relation()
    fact(valido, (0, 1))
    fact(valido, (1, 0))
    fact(valido, (1, 1))
    x = var()
    y = var()
    assert set(run(0, x, valido((x, y)))) == set([0, 1])
    assert set(run(0, (x, y), valido((x, y)))) == set([(0, 1), (1, 0), (1, 1)])
    assert run(0, x, valido((x, x))) == (1, )
def test_expr():
    add = 'add'
    mul = 'mul'

    fact(commutative, add)
    fact(associative, add)
    fact(commutative, mul)
    fact(associative, mul)

    x, y = var('x'), var('y')

    pattern = (mul, (add, 1, x), y)  # (1 + x) * y
    expr = (mul, 2, (add, 3, 1))     # 2 * (3 + 1)
    assert run(0, (x, y), eq_assoccomm(pattern, expr)) == ((3, 2), )
def itero(l):
    """A relation asserting that a term is an iterable type.

    This is a generic version of the standard `listo` that accounts for
    different iterable types supported by `cons` in Python.

    See `nullo`
    """
    c, d = var(), var()
    return (conde,
            [(nullo, l), success],
            [(conso, c, d, l), (itero, d)])
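# Hedged usage sketch (not part of the original module): `itero` is typically
# combined with `conso` so that a partially ground "cons" term reifies to a
# proper, finite iterable.  Assumes `run`, `var`, and `conso` from this package
# are in scope; the asserted value mirrors the behavior exercised in the tests.
def _example_itero_usage():
    x, y = var(), var()
    # Constrain `y` (the cons of 1 and `x`) to be an iterable; the first
    # result reifies to a concrete one-element list.
    assert run(1, y, conso(1, x, y), itero(y))[0] == [1]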
def decode_prod(decode_fst, decode_snd):
    x = unification.var('x')
    y = unification.var('y')
    pair_pattern = lib.pair(x, y)

    def decode(code):
        match = unification.unify(pair_pattern, code)
        if not match:
            raise TypeError(code)
        x_value = decode_fst(match[x])
        y_value = decode_snd(match[y])
        return (x_value, y_value)

    return decode
def test_nullo_itero():
    assert isvar(run(0, y, nullo([]))[0])
    assert isvar(run(0, y, nullo(None))[0])
    assert run(0, y, nullo(y))[0] is None

    assert run(0, y, (conso, var(), y, [1]), nullo(y))[0] == []
    assert run(0, y, (conso, var(), y, (1,)), nullo(y))[0] == ()

    assert run(1, y, conso(1, x, y), itero(y))[0] == [1]
    assert run(1, y, conso(1, x, y), conso(2, z, x), itero(y))[0] == [1, 2]

    # Make sure that the remaining results end in logic variables
    res_2 = run(2, y, conso(1, x, y), conso(2, z, x), itero(y))[1]
    assert res_2[:2] == [1, 2]
    assert isvar(res_2[-1])
def test_unify_variable_with_itself_should_not_unify():
    # Regression test for https://github.com/logpy/logpy/issues/33
    valido = Relation()
    fact(valido, "a", "b")
    fact(valido, "b", "a")
    x = var()
    assert run(0, x, valido(x, x)) == ()
def decode_list(decode_item):
    head = unification.var('head')
    tail = unification.var('tail')
    cons_pattern = lib.cons(head, tail)

    def decode(code):
        result = []
        while code is not lib.nil:
            match = unification.unify(cons_pattern, code)
            if not match:
                raise TypeError(code)
            result.append(decode_item(match[head]))
            code = match[tail]
        return result

    return decode
def test_unify_variable_with_itself_should_unify():
    valido = Relation()
    fact(valido, 0, 1)
    fact(valido, 1, 0)
    fact(valido, 1, 1)
    x = var()
    assert run(0, x, valido(x, x)) == (1, )
def heado(head, coll):
    """ head is the head of coll

    See also:
        tailo
        conso
    """
    return (eq, cons(head, var()), coll)
def test_membero():
    x = var('x')
    assert set(run(5, x, membero(x, (1, 2, 3)),
                         membero(x, (2, 3, 4)))) == {2, 3}

    assert run(5, x, membero(2, (1, x, 3))) == (2, )
    assert run(0, x, (membero, 1, (1, 2, 3))) == (x, )
    assert run(0, x, (membero, 1, (2, 3))) == ()
def test_eq_assoccomm():
    x, y = var(), var()
    eqac = eq_assoccomm
    ac = 'commassoc_op'

    fact(commutative, ac)
    fact(associative, ac)

    assert results(eqac(1, 1))
    assert results(eqac((1, ), (1, )))
    assert results(eqac(x, (1, )))
    assert results(eqac((1, ), x))
    assert results(eqac((ac, (ac, 1, x), y), (ac, 2, (ac, 3, 1))))
    assert results((eqac, 1, 1))
    assert results(eqac((a, (a, 1, 2), 3), (a, 1, 2, 3)))
    assert results(eqac((ac, (ac, 1, 2), 3), (ac, 1, 2, 3)))
    assert results(eqac((ac, 3, (ac, 1, 2)), (ac, 1, 2, 3)))
    assert not results(eqac((ac, 1, 1), ('other_op', 1, 1)))
    assert run(0, x, eqac((ac, 3, (ac, 1, 2)), (ac, 1, x, 3))) == (2, )
def test_buildo():
    x = var('x')
    assert results(buildo('add', (1, 2, 3), x), {}) == \
        ({x: ('add', 1, 2, 3)}, )
    assert results(buildo(x, (1, 2, 3), ('add', 1, 2, 3)), {}) == \
        ({x: 'add'}, )
    assert results(buildo('add', x, ('add', 1, 2, 3)), {}) == \
        ({x: (1, 2, 3)}, )
def test_condeseq():
    x = var('x')
    assert set(run(0, x, condeseq(([eq(x, 2)], [eq(x, 3)])))) == {2, 3}
    assert set(run(0, x, condeseq([[eq(x, 2), eq(x, 3)]]))) == set()

    goals = ([eq(x, i)] for i in count())  # infinite number of goals
    assert run(1, x, condeseq(goals)) == (0, )
    assert run(1, x, condeseq(goals)) == (1, )
def test_buildo_object():
    x = var('x')
    assert results(buildo(Add, (1, 2, 3), x), {}) == \
        ({x: add(1, 2, 3)}, )
    assert results(buildo(x, (1, 2, 3), add(1, 2, 3)), {}) == \
        ({x: Add}, )
    assert results(buildo(Add, x, add(1, 2, 3)), {}) == \
        ({x: (1, 2, 3)}, )
def tailo(tail, coll):
    """ tail is the tail of coll

    See also:
        heado
        conso
    """
    return (eq, cons(var(), tail), coll)
def test_unifiable_with_term():
    add = Op('add')
    t = MyTerm(add, (1, 2))

    assert arguments(t) == (1, 2)
    assert operator(t) == add
    assert term(operator(t), arguments(t)) == t

    x = var('x')
    assert unify(MyTerm(add, (1, x)), MyTerm(add, (1, 2)), {}) == {x: 2}
def test_lanyseq():
    x = var('x')
    g = lanyseq(((eq, x, i) for i in range(3)))
    assert list(goaleval(g)({})) == [{x: 0}, {x: 1}, {x: 2}]
    assert list(goaleval(g)({})) == [{x: 0}, {x: 1}, {x: 2}]

    # Test lanyseq with an infinite number of goals.
    assert set(run(3, x, lanyseq(((eq, x, i) for i in count())))) == {0, 1, 2}
    assert set(run(3, x, (lanyseq, ((eq, x, i) for i in count())))) == \
        {0, 1, 2}
def conso(h, t, l):
    """ Logical cons -- l[0], l[1:] == h, t """
    if isinstance(l, (tuple, list)):
        if len(l) == 0:
            return fail
        else:
            return (conde, [(eq, h, l[0]), (eq, t, l[1:])])
    elif isinstance(t, (tuple, list)):
        return eq((h,) + tuple(t), l)
    else:
        return (
            lall,
            # The definition of conso. This means that l can be unified with
            # an LCons object (head + tail).
            (eq, LCons(h, t), l),
            # A "type declaration" for the tail. This means that the first goal
            # found will simplify to a list with no extra unbound variables.
            (lany, (eq, t, ()), (eq, t, LCons(var(), var()))),
        )
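# Hedged usage sketch (illustrative, not from the original module), assuming
# `run` and `var` from this package are in scope.  With ground tuples the goal
# simply decomposes or reconstructs the collection:
def _example_conso_usage():
    x, y = var(), var()
    assert run(0, x, conso(1, x, (1, 2, 3))) == ((2, 3),)   # solve for the tail
    assert run(0, x, conso(x, (2, 3), (1, 2, 3))) == (1,)   # solve for the head
    assert run(0, y, conso(1, (2, 3), y)) == ((1, 2, 3),)   # construct the collection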
def test_eq_comm_object():
    x = var('x')

    fact(commutative, Add)
    fact(associative, Add)

    assert run(0, x, eq_comm(add(1, 2, 3), add(3, 1, x))) == (2, )
    assert set(run(0, x, eq_comm(add(1, 2), x))) == set((add(1, 2), add(2, 1)))
    assert set(run(0, x, eq_assoccomm(add(1, 2, 3), add(1, x)))) == \
        set((add(2, 3), add(3, 2)))
def test_reify_slots():
    class SlotsObject(object):
        __slots__ = ['myattr']

        def __init__(self, myattr):
            self.myattr = myattr

        # Value-based equality so the reified copies below can actually be
        # compared (the original `assert x, y` form passed trivially, because
        # the second expression was treated as the assertion message).
        def __eq__(self, other):
            return type(self) == type(other) and self.myattr == other.myattr

    x = var()
    s = {x: 1}
    e = SlotsObject(x)
    assert reify_object(e, s) == SlotsObject(1)
    assert reify_object(SlotsObject(1), s) == SlotsObject(1)
def heado(head, coll):
    """ head is the head of coll

    See also:
        tailo
        conso
    """
    if isinstance(coll, (tuple, list)):
        return (fail if len(coll) == 0 else (eq, head, coll[0]))
    else:
        tail = var()
        return (eq, LCons(head, tail), coll)
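# Hedged usage sketch (illustrative, not from the original module), assuming
# `run` and `var` from this package are in scope.
def _example_heado_usage():
    x = var()
    assert run(0, x, heado(x, (1, 2, 3))) == (1,)   # solve for the head
    assert run(0, x, heado(1, (x, 2, 3))) == (1,)   # solve within the collection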
def tailo(tail, coll):
    """ tail is the tail of coll

    See also:
        heado
        conso
    """
    if isinstance(coll, (tuple, list)):
        return (fail if len(coll) == 0 else (eq, tail, coll[1:]))
    else:
        head = var()
        return (eq, LCons(head, tail), coll)
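# Hedged usage sketch (illustrative, not from the original module), assuming
# `run` and `var` from this package are in scope.
def _example_tailo_usage():
    x = var()
    assert run(0, x, tailo(x, (1, 2, 3))) == ((2, 3),)   # solve for the tail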
def reduceo_goal(s):

    nonlocal in_term, out_term

    in_term_rf, out_term_rf = reify((in_term, out_term), s)

    # The result of reducing the input graph once
    term_rdcd = var()

    # Are we working "backward" and (potentially) "expanding" a graph
    # (e.g. when the relation is a reduction rule)?
    is_expanding = isvar(in_term_rf)

    # One application of the relation assigned to `term_rdcd`
    single_apply_g = (relation, in_term, term_rdcd)

    # Assign/equate (unify, really) the result of a single application to
    # the "output" term.
    single_res_g = eq(term_rdcd, out_term)

    # Recurse into applications of the relation (well, produce a goal that
    # will do that)
    another_apply_g = reduceo(relation, term_rdcd, out_term)

    # We want the fixed-point value to show up in the stream output
    # *first*, but that requires some checks.
    if is_expanding:
        # When an un-reduced term is a logic variable (e.g. we're
        # "expanding"), we can't go depth first.
        # We need to draw the association between (i.e. unify) the reduced
        # and expanded terms ASAP, in order to produce finite
        # expanded graphs first and yield results.
        #
        # In other words, there's no fixed-point to produce in this
        # situation.  Instead, for example, we have to produce an infinite
        # stream of terms that have `out_term` as a fixed point.
        # g = conde([single_res_g, single_apply_g],
        #           [another_apply_g, single_apply_g])
        g = lall(conde([single_res_g], [another_apply_g]), single_apply_g)
    else:
        # Run the recursion step first, so that we get the fixed-point as
        # the first result
        g = lall(single_apply_g, conde([another_apply_g], [single_res_g]))

    g = goaleval(g)
    yield from g(s)
def single_math_reduceo(expanded_term, reduced_term):
    """Construct a goal for some simple math reductions."""
    x_lv = var()
    return lall(
        isinstanceo(x_lv, Real),
        isinstanceo(x_lv, ExpressionTuple),
        conde(
            [
                eq(expanded_term, etuple(add, x_lv, x_lv)),
                eq(reduced_term, etuple(mul, 2, x_lv)),
            ],
            [
                eq(expanded_term, etuple(log, etuple(exp, x_lv))),
                eq(reduced_term, x_lv)
            ],
        ),
    )
def test_nodedef():
    X = np.random.normal(0, 1, (10, 10))
    S = tf.matmul(X, X, transpose_a=True)
    d, U, V = tf.linalg.svd(S)

    node_def_mt = mt(d.op.node_def)
    assert 'compute_uv' in node_def_mt.attr
    assert 'full_matrices' in node_def_mt.attr

    # Some outputs use nodedef information; let's test those.
    norm_rv = mt.RandomStandardNormal(mean=0, stddev=1, shape=(1000, ),
                                      dtype=tf.float32, name=var())
    assert isinstance(norm_rv, TFlowMetaTensor)
    assert norm_rv.dtype == tf.float32

    # We shouldn't be metatizing all parsed `node_def.attr` values; otherwise,
    # we won't be able to reconstruct corresponding meta Ops using their meta
    # OpDefs and inputs.
    x_test = tf.constant([1.8, 2.2], dtype=tf.float32)

    with tf.Graph().as_default():
        y_test = tf.dtypes.cast(x_test, tf.int32, name="y")
        y_test_mt = mt(y_test)

    # `y_test_mt.inputs` should have two `.attr` values that are Python
    # primitives (i.e. int and bool); these shouldn't get metatized and break
    # our ability to reconstruct the object from its rator + rands.
    y_test_new_mt = TFlowMetaOperator(
        y_test_mt.op.op_def, y_test_mt.op.node_def)(*y_test_mt.base_arguments)

    # We're changing this so we can use ==
    assert y_test_new_mt.op.node_def.name.startswith('y')
    y_test_new_mt.op.node_def.name = 'y'

    assert y_test_mt == y_test_new_mt

    with tf.Graph().as_default():
        z_test_mt = mt.cast(x_test, tf.int32, name="y")

    assert z_test_mt.op.node_def.name.startswith('y')
    z_test_mt.op.node_def.name = 'y'

    assert z_test_mt == y_test_mt
def test_commutativity():
    with enable_lvar_defaults("names"):
        add_1_mt = mt(1) + mt(2)
        add_2_mt = mt(2) + mt(1)

    q = var()
    res = run(0, q, commutative(add_1_mt.base_operator))
    assert res is not False

    res = run(0, q, eq_comm(add_1_mt, add_2_mt))
    assert res is not False

    with enable_lvar_defaults("names"):
        add_pattern_mt = mt(2) + q

    res = run(0, q, eq_comm(add_1_mt, add_pattern_mt))
    assert res[0] == add_1_mt.base_arguments[0]
def test_map_anyo_types():
    """Make sure that `map_anyo` preserves the types between its arguments."""
    q_lv = var()

    res = run(1, q_lv, map_anyo(lambda x, y: eq(x, y), [1], q_lv))
    assert res[0] == [1]
    res = run(1, q_lv, map_anyo(lambda x, y: eq(x, y), (1, ), q_lv))
    assert res[0] == (1, )
    res = run(1, q_lv, map_anyo(lambda x, y: eq(x, y), q_lv, (1, )))
    assert res[0] == (1, )
    res = run(1, q_lv, map_anyo(lambda x, y: eq(x, y), q_lv, [1]))
    assert res[0] == [1]

    res = run(1, q_lv, map_anyo(lambda x, y: eq(x, y), [1, 2], [1, 2]))
    assert len(res) == 1
    res = run(1, q_lv, map_anyo(lambda x, y: eq(x, y), [1, 2], [1, 3]))
    assert len(res) == 0
    res = run(1, q_lv, map_anyo(lambda x, y: eq(x, y), [1, 2], (1, 2)))
    assert len(res) == 0
def math_reduceo(in_expr, out_expr):
    """Create a relation for a couple math-based identities."""
    x_lv = var()
    x_lv.token = f'x{x_lv.token}'

    return (lall,
            conde([eq(in_expr, etuple(add, x_lv, x_lv)),
                   eq(out_expr, etuple(mul, 2, x_lv))],
                  [eq(in_expr, etuple(log, etuple(exp, x_lv))),
                   eq(out_expr, x_lv)]),
            conde([(isinstanceo, [in_expr, (float, int, ExpressionTuple)], True)],
                  [(isinstanceo, [out_expr, (float, int, ExpressionTuple)], True)]))
def test_operator():
    s = unify(TFlowMetaOperator(var('a'), var('b')), mt.add)

    assert s[var('a')] == mt.add.op_def
    assert s[var('b')] == mt.add.node_def

    add_mt = reify(TFlowMetaOperator(var('a'), var('b')), s)

    assert add_mt == mt.add

    assert unify(mt.mul, mt.matmul) is False
    assert unify(mt.mul.op_def, mt.matmul.op_def) is False
def test_assoc_flatten():
    add = "add"
    mul = "mul"

    fact(commutative, add)
    fact(associative, add)
    fact(commutative, mul)
    fact(associative, mul)

    assert (
        run(
            0,
            True,
            assoc_flatten(
                (mul, 1, (add, 2, 3), (mul, 4, 5)), (mul, 1, (add, 2, 3), 4, 5)
            ),
        )
        == (True,)
    )

    x = var()
    assert (
        run(
            0,
            x,
            assoc_flatten((mul, 1, (add, 2, 3), (mul, 4, 5)), x),
        )
        == ((mul, 1, (add, 2, 3), 4, 5),)
    )

    assert (
        run(
            0,
            True,
            assoc_flatten(
                ("op", 1, (add, 2, 3), (mul, 4, 5)),
                ("op", 1, (add, 2, 3), (mul, 4, 5)),
            ),
        )
        == (True,)
    )

    assert run(0, x, assoc_flatten(("op", 1, (add, 2, 3), (mul, 4, 5)), x)) == (
        ("op", 1, (add, 2, 3), (mul, 4, 5)),
    )
def test_graph_applyo(test_input, test_output):
    """Test `graph_applyo` with fully ground terms (i.e. no logic variables)."""
    q_lv = var()

    test_res = run(len(test_output), q_lv,
                   fixedp_graph_applyo(full_math_reduceo, test_input, q_lv))

    assert len(test_res) == len(test_output)

    test_res = sorted(test_res)
    test_output = sorted(test_output)

    # Make sure the first result matches.
    if len(test_output) > 0:
        assert test_res[0] == test_output[0]

    # Make sure all the results match.
    assert set(test_res) == set(test_output)
def appendo(l, s, ls, base_type=tuple):
    """ Goal that ls = l + s.

    See Byrd thesis pg. 247
    https://scholarworks.iu.edu/dspace/bitstream/handle/2022/8777/Byrd_indiana_0093A_10344.pdf

    Parameters
    ==========
    base_type: type
        The empty collection type to use when all terms are logic variables.
    """
    if all(map(isvar, (l, s, ls))):
        raise EarlyGoalError()

    a, d, res = [var() for i in range(3)]

    return (lany,
            (lallgreedy, (eq, l, base_type()), (eq, s, ls)),
            (lall, (conso, a, d, l), (conso, a, res, ls), (appendo, d, s, res)))
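# Hedged usage sketch (illustrative, not from the original module), assuming
# `run` and `var` from this package are in scope.  With fully ground tuples the
# goal simply succeeds, so the otherwise-unconstrained query variable is
# returned; an all-variable call raises `EarlyGoalError` as documented above.
def _example_appendo_usage():
    x = var()
    assert run(1, x, appendo((1, 2), (3, 4), (1, 2, 3, 4))) == (x,)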
def distributes(in_lv, out_lv):
    return lall(
        # lhs == A * (x + b)
        eq(
            etuple(_dot, var("A"), etuple(at.add, var("x"), var("b"))),
            in_lv,
        ),
        # rhs == A * x + A * b
        eq(
            etuple(
                at.add,
                etuple(_dot, var("A"), var("x")),
                etuple(_dot, var("A"), var("b")),
            ),
            out_lv,
        ),
    )
def test_assoc():
    d = {"a": 1, 2: 2}
    assert assoc(d, "c", 3) is not d
    assert assoc(d, "c", 3) == {"a": 1, 2: 2, "c": 3}
    assert assoc(d, 2, 3) == {"a": 1, 2: 3}
    assert assoc(d, "a", 0) == {"a": 0, 2: 2}
    assert d == {"a": 1, 2: 2}

    def assoc_OrderedDict(s, u, v):
        s[u] = v
        return s

    assoc.add((OrderedDict, object, object), assoc_OrderedDict)

    x = var()
    d2 = OrderedDict(d)
    assert assoc(d2, x, 3) is d2
    assert assoc(d2, x, 3) == {"a": 1, 2: 2, x: 3}
    assert assoc(d, x, 3) is not d
def test_metatize():
    vec_tt = tt.vector('vec')
    vec_m = metatize(vec_tt)
    assert vec_m.base == type(vec_tt)

    test_list = [1, 2, 3]
    metatize_test_list = metatize(test_list)
    assert isinstance(metatize_test_list, list)
    assert all(isinstance(m, MetaSymbol) for m in metatize_test_list)

    test_iter = iter([1, 2, 3])
    metatize_test_iter = metatize(test_iter)
    assert isinstance(metatize_test_iter, Iterator)
    assert all(isinstance(m, MetaSymbol) for m in metatize_test_iter)

    test_out = metatize(var())
    assert isvar(test_out)

    with variables(vec_tt):
        test_out = metatize(vec_tt)
        assert test_out == vec_tt
        assert isvar(test_out)

    test_out = metatize(np.r_[1, 2, 3])
    assert isinstance(test_out, MetaSymbol)

    class TestClass(object):
        pass

    with pytest.raises(Exception):
        metatize(TestClass())

    class TestOp(tt.gof.Op):
        pass

    test_out = metatize(TestOp)
    assert issubclass(test_out, MetaOp)

    test_op_tt = TestOp()
    test_obj = test_out(obj=test_op_tt)
    assert isinstance(test_obj, MetaSymbol)
    assert test_obj.obj == test_op_tt
    assert test_obj.base == TestOp
def transform(self, node):
    if not isinstance(node, tt.Apply):
        return False

    if self.node_filter(node):
        return False

    try:
        input_expr = node.default_output()
    except AttributeError:
        input_expr = node.outputs

    with variables(*self.relation_lvars):
        q = var()
        kanren_results = run(None, q, self.kanren_relation(input_expr, q))

    chosen_res = self.results_filter(kanren_results)

    if chosen_res:
        if isinstance(chosen_res, ExpressionTuple):
            chosen_res = eval_and_reify_meta(chosen_res)

        if isinstance(chosen_res, dict):
            chosen_res = list(chosen_res.items())

        if isinstance(chosen_res, list):
            # We got a dictionary of replacements
            new_node = {eval_and_reify_meta(k): eval_and_reify_meta(v)
                        for k, v in chosen_res}

            assert all(k in node.fgraph.variables for k in new_node.keys())
        elif isinstance(chosen_res, tt.Variable):
            # Attempt to automatically format the output for multi-output
            # `Apply` nodes.
            new_node = self.adjust_outputs(node, eval_and_reify_meta(chosen_res))
        else:
            raise ValueError(
                f"Unsupported FunctionGraph replacement variable type: {chosen_res}"
            )

        return new_node
    else:
        return False
def test_objects():
    fact(commutative, Add)
    fact(associative, Add)

    assert tuple(goaleval(eq_assoccomm(add(1, 2, 3), add(3, 1, 2)))({}))
    assert tuple(goaleval(eq_assoccomm(add(1, 2, 3), add(3, 1, 2)))({}))

    x = var('x')
    assert reify(x, tuple(goaleval(eq_assoccomm(add(1, 2, 3),
                                                add(1, 2, x)))({}))[0]) == 3

    assert reify(x, next(goaleval(eq_assoccomm(add(1, 2, 3),
                                               add(x, 2, 1)))({}))) == 3

    v = add(1, 2, 3)
    with variables(v):
        x = add(5, 6)
        assert reify(v, next(goaleval(eq_assoccomm(v, x))({}))) == x
def replace_ast_name_with_lvar(obj: ast.AST, replace_var: str) -> ast.AST:
    """ Replace a name being loaded in the AST with a logic variable """
    if isinstance(obj, ast.Name) and obj.id == replace_var:
        return var(replace_var)

    new_obj = deepcopy(obj)
    for k, v in vars(obj).items():
        if isinstance(v, Iterable) and not isinstance(v, str):
            new_v = type(v)([
                replace_ast_name_with_lvar(c, replace_var)
                for c in v if isinstance(c, ast.AST)
            ])
        elif isinstance(v, ast.AST):
            new_v = replace_ast_name_with_lvar(v, replace_var)
        else:
            new_v = v
        setattr(new_obj, k, new_v)
    return new_obj
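# Hedged usage sketch (hypothetical, not from the original module): replace the
# loaded name `a` in the expression `a + 1` with a logic variable.  Assumes
# `isvar` from the same logic package that provides `var` is in scope.
def _example_replace_ast_name_with_lvar():
    tree = ast.parse("a + 1", mode="eval").body
    lvar_tree = replace_ast_name_with_lvar(tree, "a")
    assert isvar(lvar_tree.left)                  # the `Name` node is now a logic variable
    assert isinstance(lvar_tree.right, ast.AST)   # other nodes are copied unchanged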
def test_seq_apply_anyo(test_input, test_output):
    """Test `seq_apply_anyo` with fully ground terms (i.e. no logic variables)."""
    q_lv = var()

    test_res = run(0, q_lv,
                   (seq_apply_anyo, full_math_reduceo, test_input, q_lv))

    assert len(test_res) == len(test_output)

    test_res = sorted(test_res)
    test_output = sorted(test_output)

    # Make sure the first result matches.
    # TODO: This is fairly implementation-specific (i.e. dependent on the order
    # in which `condeseq` returns results).
    if len(test_output) > 0:
        assert test_res[0] == test_output[0]

    # Make sure all the results match.
    # TODO: If we want to avoid fixing the output order, convert the lists to
    # tuples and add everything to a set, then compare.
    assert test_res == test_output
def test_basic_scan_transform():
    def f_pow2(x_tm1):
        return 2 * x_tm1

    state = theano.tensor.scalar("state")
    n_steps = theano.tensor.iscalar("nsteps")
    output, updates = theano.scan(f_pow2,
                                  [],
                                  state,
                                  [],
                                  n_steps=n_steps,
                                  truncate_gradient=-1,
                                  go_backwards=False)

    assert np.array_equal(output.eval({state: 1.0, n_steps: 4}),
                          np.r_[2.0, 4.0, 8.0, 16.0])

    def mul_trans(in_expr, out_expr):
        """Equate `2 * x` with `5 * x` in a Theano `scan`.

        I.e. from left-to-right, replace `2 * x[t-1]` with `5 * x[t-1]`.
        """
        arg_lv = var()
        inputs_lv, info_lv = var(), var()

        in_scan_lv = mt.Scan(inputs_lv, [mt.mul(2, arg_lv)], info_lv)
        out_scan_lv = mt.Scan(inputs_lv, [mt.mul(5, arg_lv)], info_lv)

        return lall(eq(in_expr, in_scan_lv), eq(out_expr, out_scan_lv))

    q_lv = var()
    (output_mt, ) = run(1, q_lv,
                        walko(partial(reduceo, mul_trans), output, q_lv))

    output_new = output_mt.eval_obj.reify()

    assert output_new != output
    assert np.array_equal(output_new.eval({state: 1.0, n_steps: 4}),
                          np.r_[5.0, 25.0, 125.0, 625.0])
def test_reduceo():
    q_lv = var()

    # Reduce/forward
    res = run(0, q_lv,
              full_math_reduceo(etuple(log, etuple(exp, etuple(log, 1))), q_lv))
    assert len(res) == 1
    assert res[0] == etuple(log, 1)

    res = run(0, q_lv,
              full_math_reduceo(
                  etuple(log, etuple(exp, etuple(log, etuple(exp, 1)))), q_lv))
    assert res[0] == 1
    assert res[1] == etuple(log, etuple(exp, 1))

    # Expand/backward
    res = run(2, q_lv, full_math_reduceo(q_lv, 1))
    assert res[0] == etuple(log, etuple(exp, 1))
    assert res[1] == etuple(log, etuple(exp, etuple(log, etuple(exp, 1))))
def test_relation():
    parent = Relation()
    fact(parent, "Homer", "Bart")
    fact(parent, "Homer", "Lisa")
    fact(parent, "Marge", "Bart")
    fact(parent, "Marge", "Lisa")
    fact(parent, "Abe", "Homer")
    fact(parent, "Jackie", "Marge")

    x = var('x')
    assert set(run(5, x, parent("Homer", x))) == set(("Bart", "Lisa"))
    assert set(run(5, x, parent(x, "Bart"))) == set(("Homer", "Marge"))

    def grandparent(x, z):
        y = var()
        return conde((parent(x, y), parent(y, z)))

    assert set(run(5, x, grandparent(x, "Bart"))) == set(("Abe", "Jackie"))

    foo = Relation('foo')
    assert 'foo' in str(foo)
def test_eq_lvar():
    a = SomeOtherMetaSymbol(1, [2, SomeOtherMetaSymbol(3, 4)])
    b = SomeOtherMetaSymbol(1, [2, SomeOtherMetaSymbol(3, 4)])
    assert eq_lvar(a, b) is True

    a = SomeOtherMetaSymbol(1, [2, SomeOtherMetaSymbol(3, 4)])
    b = SomeOtherMetaSymbol(1, [2, var()])
    assert eq_lvar(a, b) is False

    a = SomeOtherMetaSymbol(1, [2, var()])
    b = SomeOtherMetaSymbol(1, [2, var()])
    assert eq_lvar(a, b) is True

    a = SomeOtherMetaSymbol(1, [2, {"a": var()}])
    b = SomeOtherMetaSymbol(1, [2, {"a": var()}])
    assert eq_lvar(a, b) is True

    a = SomeOtherMetaSymbol(1, [3, var()])
    b = SomeOtherMetaSymbol(1, [2, var()])
    assert eq_lvar(a, b) is False
def __init__(self, op, name, attr, obj=None):
    super().__init__(obj=obj)
    self.op = metatize(op)
    assert name is not None
    self.name = name if isvar(name) else str(name)

    if not isvar(attr):
        opdef_sig, _ = op_def_lib.get_op_info(self.op)
        _attr = dict()

        for k, v in attr.items():
            if isinstance(v, Message):
                try:
                    v = self._protobuf_convert(k, v)
                except TypeError:
                    v = var()

            _attr[k] = v

        self.attr = _attr
    else:
        self.attr = attr
def outputs(self):
    """Compute outputs for this meta `Operation`."""
    if getattr(self, "_outputs", None) is not None:
        return self._outputs

    if isvar(self.op_def):
        self._outputs = var()
    else:
        if isvar(self.node_def) or not isinstance(
                getattr(self.node_def, "attr", None), dict):
            node_attr = {}
        else:
            node_attr = self.node_def.attr

        operator = TFlowMetaOperator(self.op_def, self.node_def)

        if isvar(self.inputs):
            inputs = (None, ) * len(operator._apply_func_sig.parameters)
            apply_defaults = False
        else:
            inputs = self.inputs
            apply_defaults = True

        apply_arguments = operator.input_args(
            *inputs, apply_defaults=apply_defaults, **node_attr)

        # TODO: The above could probably be simplified into a
        # NodeDef-from-input-args function.
        out_types_mt = operator.output_meta_types(inputs=apply_arguments)

        mt_outs = tuple(
            o_type(self, i, o_dtype)
            for i, (o_type, o_dtype) in enumerate(out_types_mt))

        self._outputs = mt_outs

    return self._outputs
def test_walko(test_input, test_output):
    """Test `walko` with fully ground terms (i.e. no logic variables)."""
    q_lv = var()
    term_walko_fp = partial(reduceo, partial(term_walko, single_math_reduceo))
    test_res = run(
        len(test_output),
        q_lv,
        term_walko_fp(test_input, q_lv),
        results_filter=toolz.unique,
    )

    assert len(test_res) == len(test_output)

    test_res = sorted(test_res)
    test_output = sorted(test_output)

    # Make sure the first result matches.
    if len(test_output) > 0:
        assert test_res[0] == test_output[0]

    # Make sure all the results match.
    assert set(test_res) == set(test_output)
def transform(self, fgraph, node):
    if self.node_filter(node) is False:
        return False

    try:
        input_expr = node.default_output()
    except ValueError:
        input_expr = node.outputs

    q = var()
    kanren_results = run(None, q, self.kanren_relation(input_expr, q))

    chosen_res = self.results_filter(kanren_results)

    if chosen_res:
        if isinstance(chosen_res, list):
            new_outputs = [eval_if_etuple(v) for v in chosen_res]
        else:
            new_outputs = [eval_if_etuple(chosen_res)]

        return new_outputs
    else:
        return False
def test_map_anyo_reverse():
    """Test `map_anyo` in "reverse" (i.e. specify the reduced form and generate the un-reduced form)."""
    # Unbounded reverse
    q_lv = var()
    rev_input = [etuple(mul, 2, 1)]
    test_res = run(4, q_lv, map_anyo(math_reduceo, q_lv, rev_input))
    assert test_res == (
        [etuple(add, 1, 1)],
        [etuple(log, etuple(exp, etuple(add, 1, 1)))],
        # [etuple(log, etuple(exp, etuple(mul, 2, 1)))],
        [
            etuple(log,
                   etuple(exp, etuple(log, etuple(exp, etuple(add, 1, 1)))))
        ],
        # [etuple(log, etuple(exp, etuple(log, etuple(exp, etuple(mul, 2, 1)))))],
        [
            etuple(
                log,
                etuple(
                    exp,
                    etuple(
                        log,
                        etuple(exp, etuple(log, etuple(exp, etuple(add, 1, 1))))),
                ),
            )
        ],
    )

    # Guided reverse
    test_res = run(
        4,
        q_lv,
        map_anyo(math_reduceo, [etuple(add, q_lv, 1)], [etuple(mul, 2, 1)]),
    )

    assert test_res == (1, )
def test_typeo_correctly_parses_int(self):
    x = var()
    goals = typeo(1, x)
    ret = run(1, x, goals)
    self.assertEqual(ret[0], int)
def test_typeo_correctly_parses_list(self):
    x = var()
    goals = typeo([1, 2, 3], x)
    ret = run(1, x, goals)
    self.assertEqual(ret[0], list)
def test_typeo_correctly_parses_tuple(self):
    x = var()
    goals = typeo((1, 2, 3), x)
    ret = run(1, x, goals)
    self.assertEqual(ret[0], tuple)
def test_typeo_correctly_parses_str(self):
    x = var()
    goals = typeo("bla", x)
    ret = run(1, x, goals)
    self.assertEqual(ret[0], str)