def post_unify_check(self, lvar_map, lvar=None, value=None, old_state=None):

    for lv_key, constraints in list(self.lvar_constraints.items()):
        lv = reify(lv_key, lvar_map)
        constraints_rf = reify(tuple(constraints), lvar_map)

        for cs in constraints_rf:

            s = unify(lv, cs, {})

            if s is not False and not s:
                # They already unify, but with no unground logic variables,
                # so we have an immediate violation of the constraint.
                return False
            elif s is False:
                # They don't unify and have no unground logic variables, so
                # the constraint is immediately satisfied and there's no
                # reason to continue checking this constraint.
                constraints.discard(cs)
            else:
                # They unify when/if the unifications in `s` are made, so
                # let's add these as new constraints.
                for k, v in s.items():
                    self.add(k, v)

        if len(constraints) == 0:
            # This logic variable has no more unground constraints, so
            # remove it.
            del self.lvar_constraints[lv_key]

    return True

def nullo_goal(s):

    nonlocal args, default_ConsNull

    if refs is not None:
        refs_rf = reify(refs, s)
    else:
        refs_rf = ()

    args_rf = reify(args, s)

    arg_null_types = set(
        # Collect the types of the `ConsNull`-like arguments (an empty
        # instance of the chosen type is created further below)
        type(a)
        for a in args_rf + refs_rf
        # i.e. `ConsNull` types that are not literally `ConsPair`s
        if isinstance(a, (ConsPair, ConsNull)) and not issubclass(type(a), ConsPair))

    try:
        null_type = arg_null_types.pop()
    except KeyError:
        null_type = default_ConsNull

    if len(arg_null_types) > 0 and any(a != null_type for a in arg_null_types):
        # Mismatching null types: fail.
        return

    g = lall(*[eq(a, null_type()) for a in args_rf])

    yield from g(s)

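# Added usage sketch (not part of the original source). It assumes the
# enclosing kanren-style constructor `nullo(*args, refs=None,
# default_ConsNull=list)` that produces `nullo_goal` above, and that it is
# exported by the `kanren` package.
from kanren import nullo, run, var

_q = var()
# With no other information, the argument unifies with the default null
# (an empty `list`), or with an empty instance of the requested type.
print(run(0, _q, nullo(_q)))                          # expected ([],)
print(run(0, _q, nullo(_q, default_ConsNull=tuple)))  # expected ((),)
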
def test_basic_unify_reify():
    # Test reification with manually constructed replacements
    a = tf.compat.v1.placeholder(tf.float64, name='a')
    x_l = var('x_l')
    a_reif = reify(x_l, {x_l: mt(a)})
    assert a_reif.obj is not None
    # Confirm that identity is preserved (i.e. that the underlying object
    # was properly tracked and not unnecessarily reconstructed)
    assert a == a_reif.reify()

    test_expr = mt.add(tf.constant(1, dtype=tf.float64),
                       mt.mul(tf.constant(2, dtype=tf.float64), x_l))
    test_reify_res = reify(test_expr, {x_l: a})
    test_base_res = test_reify_res.reify()
    assert isinstance(test_base_res, tf.Tensor)

    with tf.Graph().as_default():
        a = tf.compat.v1.placeholder(tf.float64, name='a')
        expected_res = tf.add(tf.constant(1, dtype=tf.float64),
                              tf.multiply(tf.constant(2, dtype=tf.float64), a))
        assert_ops_equal(test_base_res, expected_res)

    # Simply make sure that unification succeeds
    meta_expected_res = mt(expected_res)
    s_test = unify(test_expr, meta_expected_res, {})
    assert len(s_test) == 3

    assert reify(test_expr, s_test) == meta_expected_res

def post_unify_check(self, lvar_map, lvar=None, value=None, old_state=None):

    for lv_key, constraints in list(self.lvar_constraints.items()):

        lv = reify(lv_key, lvar_map)
        is_lv_ground = self.constraint_isground(lv, lvar_map) or isground(lv, lvar_map)

        if not is_lv_ground:
            # This constraint isn't ready to be checked
            continue

        # if is_lv_ground and not self.cterm_type_check(lv):
        #     self.lvar_constraints[lv_key]
        #     return False

        constraint_grps = groupby(lambda x: isground(x, lvar_map),
                                  reify(iter(constraints), lvar_map))

        constraints_unground = constraint_grps.get(False, ())
        constraints_ground = constraint_grps.get(True, ())

        if len(constraints_ground) > 0 and not all(
                self.cparam_type_check(c) for c in constraints_ground):
            # Some constraint parameters aren't the correct type, so fail.
            # del self.lvar_constraints[lv_key]
            return False

        assert constraints_unground or constraints_ground

        if is_lv_ground and len(constraints_unground) == 0:

            if self.require_all_constraints and any(
                    not self.constraint_check(lv, t) for t in constraints_ground):
                return False
            elif not self.require_all_constraints and not any(
                    self.constraint_check(lv, t) for t in constraints_ground):
                return False

            # The instance and constraint parameters are all ground and the
            # constraint is satisfied, so, since nothing should change from
            # here on, we can remove the constraint.
            del self.lvar_constraints[lv_key]

    # Some types are unground, so we continue checking until they are
    return True

def allgoal(s):
    for i, g in enumerate(goals):
        try:
            goal = goaleval(reify(g, s))
        except EarlyGoalError:
            continue
        other_goals = tuple(goals[:i] + goals[i + 1:])
        return unique(interleave(
            goaleval(reify((lallfirst, ) + other_goals, ss))(ss)
            for ss in goal(s)), key=dicthash)
    else:
        raise EarlyGoalError()

def isinstanceo_goal(S):
    nonlocal u, u_type

    u_rf, u_type_rf = reify((u, u_type), S)

    if not isground(u_rf, S) or not isground(u_type_rf, S):

        if not isinstance(S, ConstrainedState):
            S = ConstrainedState(S)

        cs = S.constraints.setdefault(IsinstanceStore, IsinstanceStore())

        try:
            cs.add(u_rf, u_type_rf)
        except TypeError:
            # If the instance object can't be hashed, we can simply use a
            # logic variable to uniquely identify it.
            u_lv = var()
            S[u_lv] = u_rf
            cs.add(u_lv, u_type_rf)

        if cs.post_unify_check(S.data, u_rf, u_type_rf):
            yield S

    # elif isground(u_type, S):
    #     yield from lany(eq(u_type, u_t) for u_t in type(u).mro())(S)
    elif (isinstance(u_type_rf, type)
          # or (
          #     isinstance(u_type, Iterable)
          #     and all(isinstance(t, type) for t in u_type)
          # )
          ) and isinstance(u_rf, u_type_rf):
        yield S

def typeo_goal(S):
    nonlocal u, u_type

    u_rf, u_type_rf = reify((u, u_type), S)

    if not isground(u_rf, S) or not isground(u_type_rf, S):

        if not isinstance(S, ConstrainedState):
            S = ConstrainedState(S)

        cs = S.constraints.setdefault(TypeStore, TypeStore())

        try:
            cs.add(u_rf, u_type_rf)
        except TypeError:
            # If the instance object can't be hashed, we can simply use a
            # logic variable to uniquely identify it.
            u_lv = var()
            S[u_lv] = u_rf
            cs.add(u_lv, u_type_rf)

        if cs.post_unify_check(S.data, u_rf, u_type_rf):
            yield S

    elif isinstance(u_type_rf, type) and type(u_rf) == u_type_rf:
        yield S

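# Added usage sketch (not part of the original source), assuming the `typeo`
# constructor from `kanren.constraints` is what wraps `typeo_goal` above.
from kanren import membero, run, var
from kanren.constraints import typeo

_q = var()
# The constraint is stored while `_q` is unground and enforced as soon as
# `membero` grounds it, so only the exact-`int` members survive.
print(run(0, _q, typeo(_q, int), membero(_q, (1, "two", 3))))  # expected (1, 3)
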
def neq_goal(S):
    nonlocal u, v

    u_rf, v_rf = reify((u, v), S)

    # Get the unground logic variables that would unify the two objects;
    # these are all the logic variables that we can't let unify.
    s_uv = unify(u_rf, v_rf, {})

    if s_uv is False:
        # They don't unify and have no unground logic variables, so the
        # constraint is immediately satisfied.
        yield S
        return
    elif not s_uv:
        # They already unify, but with no unground logic variables, so we
        # have an immediate violation of the constraint.
        return

    if not isinstance(S, ConstrainedState):
        S = ConstrainedState(S)

    cs = S.constraints.setdefault(DisequalityStore, DisequalityStore())

    for lvar, obj in s_uv.items():
        cs.add(lvar, obj)

    # We need to check the current state for validity.
    if cs.post_unify_check(S.data):
        yield S

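# Added usage sketch (not part of the original source), assuming the `neq`
# (disequality) constructor from `kanren.constraints` wraps `neq_goal` above.
from kanren import membero, run, var
from kanren.constraints import neq

_q = var()
# `_q` may unify with anything in the sequence except 2.
print(run(0, _q, neq(_q, 2), membero(_q, (1, 2, 3))))  # expected (1, 3)
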
def appendo_goal(S):
    nonlocal lst, s, out

    l_rf, s_rf, out_rf = reify((lst, s, out), S)

    a, d, res = var(prefix="a"), var(prefix="d"), var(prefix="res")

    _nullo = partial(nullo, default_ConsNull=default_ConsNull)

    g = conde(
        [
            # All empty
            _nullo(s_rf, l_rf, out_rf),
        ],
        [
            # `lst` is empty
            conso(a, d, out_rf),
            eq(s_rf, out_rf),
            _nullo(l_rf, refs=(s_rf, out_rf)),
        ],
        [
            conso(a, d, l_rf),
            conso(a, res, out_rf),
            appendo(d, s_rf, res, default_ConsNull=default_ConsNull),
        ],
    )

    yield from g(S)

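# Added usage sketch (not part of the original source), assuming the standard
# kanren `appendo(lst, s, out, default_ConsNull=list)` constructor around
# `appendo_goal` above.
from kanren import appendo, run, var

_q = var()
# Forward: concatenate two ground lists.
print(run(0, _q, appendo([1, 2], [3, 4], _q)))  # expected ([1, 2, 3, 4],)

# Backward: enumerate the ways a ground list splits into two parts.
_a, _b = var(), var()
print(run(0, (_a, _b), appendo(_a, _b, [1, 2])))
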
def goal(substitution):
    newx, newy = reify((x, y), substitution)

    def apply_constrain(oldvar, newvar):
        if hasrange(oldvar):
            newvar = RangedVar.new_from_intersection(oldvar, newvar)
            if newvar:
                yield temp_assoc(substitution, oldvar, newvar)
        else:
            yield temp_assoc(substitution, oldvar, newvar)

    if isvar(newx):
        if isvar(newy):
            raise EarlyGoalError('two vars in comparison')
        elif isinstance(newy, Number):
            oldvar = newx
            newvar = RangedVar(RealRange([(newy, np.inf)]))
            yield from apply_constrain(oldvar, newvar)
        else:
            raise EarlyGoalError('Invalid constant type')
    elif isinstance(newx, Number):
        if isvar(newy):
            oldvar = newy
            newvar = RangedVar(RealRange([(-np.inf, newx)]))
            yield from apply_constrain(oldvar, newvar)
        elif isinstance(newy, Number):
            if newx > newy:
                yield substitution
        else:
            raise EarlyGoalError('Invalid constant type')
    else:
        raise EarlyGoalError('Invalid constant type')

def walko_goal(s):

    nonlocal goal, rator_goal, graph_in, graph_out, null_type, map_rel

    graph_in_rf, graph_out_rf = reify((graph_in, graph_out), s)

    rator_in, rands_in, rator_out, rands_out = var(), var(), var(), var()

    _walko = partial(walko, goal, rator_goal=rator_goal,
                     null_type=null_type, map_rel=map_rel)

    g = conde(
        # TODO: Use `Zzz`, if needed.
        [
            goal(graph_in_rf, graph_out_rf),
        ],
        [
            lall(
                applyo(rator_in, rands_in, graph_in_rf),
                applyo(rator_out, rands_out, graph_out_rf),
                rator_goal(rator_in, rator_out),
                map_rel(_walko, rands_in, rands_out, null_type=null_type),
            ) if rator_goal is not None else
            map_rel(_walko, graph_in_rf, graph_out_rf, null_type=null_type),
        ],
    )

    yield from g(s)

def goalify_goal(S):
    """
    This is the goal that's generated.

    Parameters
    ----------
    S: Mapping
        The miniKanren state (e.g. unification mappings/`dict`).

    Yields
    ------
    miniKanren states.
    """
    nonlocal args

    # 2. If you only want to confirm something in/about the state, `S`, then
    # simply `yield` it if the condition(s) are met:
    args_rf = reify(args, S)

    if func(*args_rf) == expected_result:
        yield S
    else:
        # If the condition isn't met, end the stream by returning/not
        # `yield`ing anything.
        return

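# Added, self-contained example (not part of the original source) of a goal
# written in the style documented above: reify the arguments, then `yield`
# the state unchanged only when the condition holds. The `positiveo` name is
# made up for illustration.
from unification import reify, var

from kanren import membero, run


def positiveo(x):
    def positiveo_goal(S):
        x_rf = reify(x, S)
        # Succeed (yield the state) only for ground, positive numbers;
        # otherwise end the stream by yielding nothing.
        if isinstance(x_rf, (int, float)) and x_rf > 0:
            yield S

    return positiveo_goal


_q = var()
print(run(0, _q, membero(_q, (-1, 2, -3, 4)), positiveo(_q)))  # expected (2, 4)
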
def _proofs_of(cls, term):
    # If term contains any variables then we rename them here so that they
    # don't collide with variable names used in this proof.
    term = rename_variables(term, '__parent__.')
    for rule in cls.get_rules():
        variables = unify(term, rule.conclusion)
        if variables is False:
            continue
        if not rule.premises:
            yield Proof(rule, variables)
            continue

        # If we reach here it means that there are premises to prove.
        reified_premises = [reify(x, variables) for x in rule.premises]
        for premise_proofs in cls._proofs_of_many(reified_premises):
            candidate_variables = variables
            for (premise, premise_proof) in zip(reified_premises, premise_proofs):
                candidate_variables = unify(premise, premise_proof.conclusion,
                                            candidate_variables)
                if candidate_variables is False:
                    break
            else:
                yield Proof(rule, candidate_variables, premise_proofs)

def rembero_goal(s):
    nonlocal x, lst, o

    x_rf, l_rf, o_rf = reify((x, lst, o), s)

    l_car, l_cdr, r = var(), var(), var()

    g = conde(
        [
            nullo(l_rf, o_rf, default_ConsNull=default_ConsNull),
        ],
        [
            conso(l_car, l_cdr, l_rf),
            eq(x_rf, l_car),
            eq(l_cdr, o_rf),
        ],
        [
            conso(l_car, l_cdr, l_rf),
            neq(l_car, x),
            conso(l_car, r, o_rf),
            rembero(x_rf, l_cdr, r, default_ConsNull=default_ConsNull),
        ],
    )

    yield from g(s)

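# Added usage sketch (not part of the original source). It assumes the
# enclosing `rembero(x, lst, o, default_ConsNull=list)` constructor that
# builds `rembero_goal` above is exported by `kanren` (the import path is a
# guess).
from kanren import rembero, run, var

_q = var()
# Relational "remove the first occurrence": drop the first 2 from the list.
print(run(0, _q, rembero(2, [1, 2, 3, 2], _q)))  # expected ([1, 3, 2],)
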
def membero_goal(S):
    nonlocal x, ls

    x_rf, ls_rf = reify((x, ls), S)

    a, d = var(), var()

    g = lall(conso(a, d, ls_rf), conde([eq(a, x_rf)], [membero(x_rf, d)]))

    yield from g(S)

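# Added usage sketch (not part of the original source): the `membero`
# relation implemented by the goal above enumerates members of a sequence,
# and can also solve for an unknown slot in the sequence.
from kanren import membero, run, var

_q = var()
print(run(0, _q, membero(_q, (1, 2, 3))))  # expected (1, 2, 3)
print(run(0, _q, membero(2, (1, _q, 3))))  # expected (2,)
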
def test_map_anyo_misc(): q_lv = var("q") res = run(0, q_lv, map_anyo(eq, [1, 2, 3], [1, 2, 3])) # TODO: Remove duplicate results assert len(res) == 7 res = run(0, q_lv, map_anyo(eq, [1, 2, 3], [1, 3, 3])) assert len(res) == 0 def one_to_threeo(x, y): return conde([eq(x, 1), eq(y, 3)]) res = run(0, q_lv, map_anyo(one_to_threeo, [1, 2, 4, 1, 4, 1, 1], q_lv)) assert res[0] == [3, 2, 4, 3, 4, 3, 3] assert (len( run(4, q_lv, map_anyo(math_reduceo, [etuple(mul, 2, var("x"))], q_lv))) == 0) test_res = run(4, q_lv, map_anyo(math_reduceo, [etuple(add, 2, 2), 1], q_lv)) assert test_res == ([etuple(mul, 2, 2), 1], ) test_res = run(4, q_lv, map_anyo(math_reduceo, [1, etuple(add, 2, 2)], q_lv)) assert test_res == ([1, etuple(mul, 2, 2)], ) test_res = run(4, q_lv, map_anyo(math_reduceo, q_lv, var("z"))) assert all(isinstance(r, list) for r in test_res) test_res = run(4, q_lv, map_anyo(math_reduceo, q_lv, var("z"), tuple)) assert all(isinstance(r, tuple) for r in test_res) x, y, z = var(), var(), var() def test_bin(a, b): return conde([eq(a, 1), eq(b, 2)]) res = run(10, (x, y), map_anyo(test_bin, x, y, null_type=tuple)) exp_res_form = ( ((1, ), (2, )), ((x, 1), (x, 2)), ((1, 1), (2, 2)), ((x, y, 1), (x, y, 2)), ((1, x), (2, x)), ((x, 1, 1), (x, 2, 2)), ((1, 1, 1), (2, 2, 2)), ((x, y, z, 1), (x, y, z, 2)), ((1, x, 1), (2, x, 2)), ((x, 1, y), (x, 2, y)), ) for a, b in zip(res, exp_res_form): s = unify(a, b) assert s is not False assert all(isvar(i) for i in reify((x, y, z), s))
def seq_apply_anyo_sub_goal(s):
    nonlocal i_any, null_type

    l_in_rf, l_out_rf = reify((l_in, l_out), s)

    i_car, i_cdr = var(), var()
    o_car, o_cdr = var(), var()

    conde_branches = []

    if i_any or (isvar(l_in_rf) and isvar(l_out_rf)):
        # Consider terminating the sequences when we've had at least
        # one successful goal or when both sequences are logic variables.
        conde_branches.append([eq(l_in_rf, null_type), eq(l_in_rf, l_out_rf)])

    # Extract the CAR and CDR of each argument sequence; this is how we
    # iterate through elements of the two sequences.
    cons_parts_branch = [
        goaleval(conso(i_car, i_cdr, l_in_rf)),
        goaleval(conso(o_car, o_cdr, l_out_rf)),
    ]

    conde_branches.append(cons_parts_branch)

    conde_relation_branches = []

    relation_branch = None

    if not skip_cars:
        relation_branch = [
            # This case tries the relation and continues on.
            relation(i_car, o_car),
            # In this conde clause, we can tell future calls to
            # `seq_apply_anyo` that we've had at least one successful
            # application of the relation (otherwise, this clause
            # would fail due to the above goal).
            _seq_apply_anyo(relation, i_cdr, o_cdr, True, null_type),
        ]

        conde_relation_branches.append(relation_branch)

    base_branch = [
        # This is the "base" case; it is used when, for example,
        # the given relation isn't satisfied.
        eq(i_car, o_car),
        _seq_apply_anyo(relation, i_cdr, o_cdr, i_any, null_type),
    ]

    conde_relation_branches.append(base_branch)

    cons_parts_branch.append(conde(*conde_relation_branches))

    g = conde(*conde_branches)
    g = goaleval(g)

    yield from g(s)

def test_objects():
    fact(commutative, Add)
    fact(associative, Add)

    assert tuple(goaleval(eq_assoccomm(add(1, 2, 3), add(3, 1, 2)))({}))
    assert tuple(goaleval(eq_assoccomm(add(1, 2, 3), add(3, 1, 2)))({}))

    x = var('x')
    assert reify(x, tuple(goaleval(eq_assoccomm(
        add(1, 2, 3), add(1, 2, x)))({}))[0]) == 3

    assert reify(x, next(goaleval(eq_assoccomm(
        add(1, 2, 3), add(x, 2, 1)))({}))) == 3

    v = add(1, 2, 3)
    with variables(v):
        x = add(5, 6)
        assert reify(v, next(goaleval(eq_assoccomm(v, x))({}))) == x

def test_sexp_unify_reify():
    """Make sure we can unify and reify etuples/S-exps."""
    # Unify `A . (x + y)`, for `x`, `y` logic variables
    A = tf.compat.v1.placeholder(tf.float64, name="A",
                                 shape=tf.TensorShape([None, None]))
    x = tf.compat.v1.placeholder(tf.float64, name="x",
                                 shape=tf.TensorShape([None, 1]))
    y = tf.compat.v1.placeholder(tf.float64, name="y",
                                 shape=tf.TensorShape([None, 1]))

    z = tf.matmul(A, tf.add(x, y))

    z_sexp = etuplize(z, shallow=False)

    # Let's just be sure that the original TF objects are preserved
    assert z_sexp[1].reify() == A
    assert z_sexp[2][1].reify() == x
    assert z_sexp[2][2].reify() == y

    A_lv, x_lv, y_lv = var(), var(), var()
    dis_pat = etuple(
        TFlowMetaOperator(mt.matmul.op_def, var()),
        A_lv,
        etuple(TFlowMetaOperator(mt.add.op_def, var()), x_lv, y_lv),
    )

    s = unify(dis_pat, z_sexp, {})

    assert s[A_lv] == mt(A)
    assert s[x_lv] == mt(x)
    assert s[y_lv] == mt(y)

    # Now, we construct a graph that reflects the distributive property and
    # reify with the substitutions from the un-distributed form
    out_pat = etuple(mt.add,
                     etuple(mt.matmul, A_lv, x_lv),
                     etuple(mt.matmul, A_lv, y_lv))
    z_dist = reify(out_pat, s)

    # Evaluate the tuple-expression and get a meta object/graph
    z_dist_mt = z_dist.eval_obj

    # If all the logic variables were reified, we should be able to
    # further reify the meta graph and get a concrete TF graph
    z_dist_tf = z_dist_mt.reify()

    assert isinstance(z_dist_tf, tf.Tensor)

    # Check the first part of `A . x + A . y` (i.e. `A . x`)
    assert z_dist_tf.op.inputs[0].op.inputs[0] == A
    assert z_dist_tf.op.inputs[0].op.inputs[1] == x
    # Now, the second, `A . y`
    assert z_dist_tf.op.inputs[1].op.inputs[0] == A
    assert z_dist_tf.op.inputs[1].op.inputs[1] == y

def itero_goal(S):
    nonlocal lst, nullo_refs, default_ConsNull

    l_rf = reify(lst, S)

    c, d = var(), var()

    g = conde(
        [nullo(l_rf, refs=nullo_refs, default_ConsNull=default_ConsNull)],
        [conso(c, d, l_rf),
         itero(d, default_ConsNull=default_ConsNull)],
    )

    yield from g(S)

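# Added usage sketch (not part of the original source), assuming the
# enclosing `itero(lst, nullo_refs=None, default_ConsNull=list)` constructor
# around `itero_goal` above (the import path is a guess). `itero` states that
# its argument is a proper, nil-terminated sequence, so asking for a few
# solutions enumerates progressively longer lists of fresh logic variables.
from kanren import itero, run, var

_q = var()
print(run(3, _q, itero(_q)))  # e.g. ([], [~_1], [~_1, ~_2]) with fresh variables
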
def apply_rule(graph, rule):
    LHS, RHS = rule
    matches = find_matches(graph, LHS)
    # remove matched nodes except for inputs
    remove = {n for match in matches for k, n in match.items() if k in LHS}
    # generate names for nodes to be added to the graph
    IDs = filter(lambda key: key not in graph, count(1))
    add = [reify(reindex(RHS, union(dict(zip(RHS.keys(), IDs)), match)), match)
           for match in matches]
    return union({k: v for k, v in graph.items() if k not in remove}, *add)

def _proofs_of_many(cls, terms):
    (first_term, *other_terms) = terms
    for proof in cls._proofs_of(first_term):
        if other_terms:
            reified_other_terms = reify(other_terms, proof.parent_variables)
            for other_proofs in cls._proofs_of_many(reified_other_terms):
                yield (proof, *other_proofs)
        else:
            yield (proof, )

def test_unification(): x, y, a, b = tt.dvectors("xyab") x_s = tt.scalar("x_s") y_s = tt.scalar("y_s") c_tt = tt.constant(1, "c") d_tt = tt.constant(2, "d") x_l = var("x_l") y_l = var("y_l") assert a == reify(x_l, {x_l: a}).reify() test_expr = mt.add(1, mt.mul(2, x_l)) test_reify_res = reify(test_expr, {x_l: a}) assert graph_equal(test_reify_res.reify(), 1 + 2 * a) z = tt.add(b, a) assert {x_l: z} == unify(x_l, z) assert b == unify(mt.add(x_l, a), mt.add(b, a))[x_l].reify() res = unify(mt.inv(mt.add(x_l, a)), mt.inv(mt.add(b, y_l))) assert res[x_l].reify() == b assert res[y_l].reify() == a mt_expr_add = mt.add(x_l, y_l) # The parameters are vectors tt_expr_add_1 = tt.add(x, y) assert graph_equal( tt_expr_add_1, reify(mt_expr_add, unify(mt_expr_add, tt_expr_add_1)).reify()) # The parameters are scalars tt_expr_add_2 = tt.add(x_s, y_s) assert graph_equal( tt_expr_add_2, reify(mt_expr_add, unify(mt_expr_add, tt_expr_add_2)).reify()) # The parameters are constants tt_expr_add_3 = tt.add(c_tt, d_tt) assert graph_equal( tt_expr_add_3, reify(mt_expr_add, unify(mt_expr_add, tt_expr_add_3)).reify())
def goal(substitution):
    args2 = reify(args, substitution)
    subsets = [self.index[key] for key in enumerate(args)
               if key in self.index]
    if subsets:  # we are able to reduce the pool early
        facts = intersection(*sorted(subsets, key=len))
    else:
        facts = self.facts
    for fact in facts:
        unified = unify(fact, args2, substitution)
        if unified != False:
            yield merge(unified, substitution)

def goal(s):
    for gs in goalseqs:
        if len(gs) == 1:
            print(gs)
            g = gs[0]
            evaled_g = goaleval(reify(g, s))
            print(evaled_g)
            new_s = Stream(evaled_g(s))
            if not new_s.empty():
                return new_s
        g, rgs = gs
        print(g, rgs)
        print('reify', s, reify(g, s))
        evaled_g = goaleval(reify(g, s))
        print(evaled_g)
        new_s = Stream(evaled_g(s))
        # new_s = Stream(goaleval(reify(g, s))(s))
        if new_s.empty():
            continue
        return unique(interleave(goaleval(lall(rgs))(ss) for ss in new_s))

def test_operator():
    s = unify(TFlowMetaOperator(var('a'), var('b')), mt.add)

    assert s[var('a')] == mt.add.op_def
    assert s[var('b')] == mt.add.node_def

    add_mt = reify(TFlowMetaOperator(var('a'), var('b')), s)

    assert add_mt == mt.add

    assert unify(mt.mul, mt.matmul) is False
    assert unify(mt.mul.op_def, mt.matmul.op_def) is False

def dbgo_goal(S):
    nonlocal args
    args = reify(args, S)

    if msg is not None:
        print(msg)

    pprint(args)

    import pdb
    pdb.set_trace()
    yield S

def test_unifiable_with_term(): add = Operator("add") t = Node(add, (1, 2)) assert arguments(t) == (1, 2) assert operator(t) == add assert term(operator(t), arguments(t)) == t x = var() s = unify(Node(add, (1, x)), Node(add, (1, 2)), {}) assert s == {x: 2} assert reify(Node(add, (1, x)), s) == Node(add, (1, 2))
def concat_goal(S):
    nonlocal a, b, out

    a_rf, b_rf, out_rf = reify((a, b, out), S)

    if isinstance(a_rf, str) and isinstance(b_rf, str):
        S_new = unify(out_rf, a_rf + b_rf, S)

        if S_new is not False:
            yield S_new
            return
    elif isinstance(a_rf, (Var, str)) and isinstance(b_rf, (Var, str)):
        yield S

def ground_order_goal(S):
    nonlocal in_args, out_args

    in_args_rf, out_args_rf = reify((in_args, out_args), S)

    S_new = unify(
        list(out_args_rf) if isinstance(out_args_rf, Sequence) else out_args_rf,
        sorted(in_args_rf, key=partial(ground_order_key, S)),
        S,
    )

    if S_new is not False:
        yield S_new

def _nullo(s):
    _s = reify(l, s)
    if isvar(_s):
        yield unify(_s, None, s)
    elif is_null(_s):
        yield s

def allgoal(s):
    g = goaleval(reify(goals[0], s))
    return unique(
        interleave(
            goaleval(reify((lallgreedy, ) + tuple(goals[1:]), ss))(ss)
            for ss in g(s)),
        key=dicthash)

def f(goals):
    for goal in goals:
        try:
            yield goaleval(reify(goal, s))(s)
        except EarlyGoalError:
            pass

def reify_term(obj, s):
    op, args = operator(obj), arguments(obj)
    op = reify(op, s)
    args = reify(args, s)
    new = term(op, args)
    return new
