def test_unwrap_base_functions():
    """Base-function constants should be eta-expanded into explicit lambdas."""
    ontology = _make_mock_ontology()
    cases = [
        (r"unique(sphere)",
         r"unique(\z1.sphere(z1))"),
        (r"cmp_pos(ax_x,unique(sphere),unique(cube))",
         r"cmp_pos(ax_x,unique(\z1.sphere(z1)),unique(\z1.cube(z1)))"),
    ]
    for source, expected in cases:
        unwrapped = ontology.unwrap_base_functions(Expression.fromstring(source))
        eq_(str(unwrapped), expected)
def test_valid_lambda_expr():
    """
    Regression test: valid_lambda_expr was rejecting this good sub-expression
    at c720b4
    """
    ontology = _make_mock_ontology()
    lambda_expr = Expression.fromstring(r"\b.ltzero(cmp_pos(ax_x,a,b))")
    # With no context-bound variables, `a` is free and the expression is rejected.
    eq_(ontology._valid_lambda_expr(lambda_expr, ctx_bound_vars=()), False)
    # Once `a` is bound by the context, the expression should be accepted.
    eq_(ontology._valid_lambda_expr(lambda_expr, ctx_bound_vars=(Variable('a'),)), True)
def tableau_test(c, ps=None, verbose=False):
    """
    Attempt to prove conclusion ``c`` from premises ``ps`` with the tableau
    prover, printing the sequent and the result.

    :param c: conclusion, as a logic string
    :param ps: optional list of premise strings (defaults to no premises)
    :param verbose: passed through to ``TableauProver.prove``
    """
    # Normalize the default once, instead of the original's two separate
    # falsiness checks (`... if ps else []` followed by `if not ps: ps = []`).
    if not ps:
        ps = []
    pc = Expression.fromstring(c)
    pps = [Expression.fromstring(p) for p in ps]
    print("%s |- %s: %s" % (", ".join(ps), pc, TableauProver().prove(pc, pps, verbose=verbose)))
def tableau_test(c, ps=None, verbose=False):
    """
    Attempt to prove conclusion ``c`` from premises ``ps`` with the tableau
    prover, printing the sequent and the result.

    :param c: conclusion, as a logic string
    :param ps: optional list of premise strings (defaults to no premises)
    :param verbose: passed through to ``TableauProver.prove``
    """
    # Normalize the default once, instead of the original's two separate
    # falsiness checks (`... if ps else []` followed by `if not ps: ps = []`).
    if not ps:
        ps = []
    pc = Expression.fromstring(c)
    pps = [Expression.fromstring(p) for p in ps]
    print(
        '%s |- %s: %s'
        % (', '.join(ps), pc, TableauProver().prove(pc, pps, verbose=verbose))
    )
def folmodel(quiet=False, trace=None):
    """Example of a first-order model.

    Builds the module-level model ``m2`` (with valuation ``val2``, domain
    ``dom2`` and assignment ``g2``) and, unless ``quiet``, prints the
    interpretations of some expressions and predicate applications.
    """
    global val2, v2, dom2, m2, g2

    v2 = [
        ("adam", "b1"),
        ("betty", "g1"),
        ("fido", "d1"),
        ("girl", set(["g1", "g2"])),
        ("boy", set(["b1", "b2"])),
        ("dog", set(["d1"])),
        ("love", set([("b1", "g1"), ("b2", "g2"), ("g1", "b1"), ("g2", "b1")])),
    ]
    val2 = Valuation(v2)
    dom2 = val2.domain
    m2 = Model(dom2, val2)
    g2 = Assignment(dom2, [("x", "b1"), ("y", "g2")])

    if not quiet:
        print()
        print("*" * mult)
        print("Models Demo")
        print("*" * mult)
        print("Model m2:\n", "-" * 14, "\n", m2)
        print("Variable assignment = ", g2)

        exprs = ["adam", "boy", "love", "walks", "x", "y", "z"]
        parsed_exprs = [Expression.fromstring(e) for e in exprs]
        print()
        for parsed in parsed_exprs:
            try:
                print("The interpretation of '%s' in m2 is %s" % (parsed, m2.i(parsed, g2)))
            except Undefined:
                print("The interpretation of '%s' in m2 is Undefined" % parsed)

        applications = [
            # BUG FIX: ("adam") was a bare string, not a 1-tuple, so the
            # argument loop below iterated over its characters. A 1-tuple is
            # intended, matching the ("walks", ("adam",)) entry; Valuation
            # stores unary relations as sets of 1-tuples.
            ("boy", ("adam",)),
            ("walks", ("adam", )),
            ("love", ("adam", "y")),
            ("love", ("y", "adam")),
        ]
        for (fun, args) in applications:
            try:
                funval = m2.i(Expression.fromstring(fun), g2)
                argsval = tuple(
                    m2.i(Expression.fromstring(arg), g2) for arg in args)
                print("%s(%s) evaluates to %s" % (fun, args, argsval in funval))
            except Undefined:
                print("%s(%s) evaluates to Undefined" % (fun, args))
def folmodel(quiet=False, trace=None):
    """Example of a first-order model.

    Builds the module-level model ``m2`` (with valuation ``val2``, domain
    ``dom2`` and assignment ``g2``) and, unless ``quiet``, prints the
    interpretations of some expressions and predicate applications.
    """
    global val2, v2, dom2, m2, g2

    v2 = [
        ('adam', 'b1'),
        ('betty', 'g1'),
        ('fido', 'd1'),
        ('girl', set(['g1', 'g2'])),
        ('boy', set(['b1', 'b2'])),
        ('dog', set(['d1'])),
        ('love', set([('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')])),
    ]
    val2 = Valuation(v2)
    dom2 = val2.domain
    m2 = Model(dom2, val2)
    g2 = Assignment(dom2, [('x', 'b1'), ('y', 'g2')])

    if not quiet:
        print()
        print('*' * mult)
        print("Models Demo")
        print("*" * mult)
        print("Model m2:\n", "-" * 14, "\n", m2)
        print("Variable assignment = ", g2)

        exprs = ['adam', 'boy', 'love', 'walks', 'x', 'y', 'z']
        parsed_exprs = [Expression.fromstring(e) for e in exprs]
        print()
        for parsed in parsed_exprs:
            try:
                print("The interpretation of '%s' in m2 is %s" % (parsed, m2.i(parsed, g2)))
            except Undefined:
                print("The interpretation of '%s' in m2 is Undefined" % parsed)

        applications = [
            # BUG FIX: ('adam') was a bare string, not a 1-tuple, so the
            # argument loop below iterated over its characters. A 1-tuple is
            # intended, matching the ('walks', ('adam',)) entry; Valuation
            # stores unary relations as sets of 1-tuples.
            ('boy', ('adam',)),
            ('walks', ('adam', )),
            ('love', ('adam', 'y')),
            ('love', ('y', 'adam')),
        ]
        for (fun, args) in applications:
            try:
                funval = m2.i(Expression.fromstring(fun), g2)
                argsval = tuple(
                    m2.i(Expression.fromstring(arg), g2) for arg in args)
                print("%s(%s) evaluates to %s" % (fun, args, argsval in funval))
            except Undefined:
                print("%s(%s) evaluates to Undefined" % (fun, args))
def example1():
    """Default-logic example 1: does young US citizen Henry read?"""
    parse = Expression.fromstring
    background_theory = [
        parse(u"UnitedStates(Henry)"),
        parse(u"Young(Henry)"),
    ]
    default_rules = [
        parse(u"all x.(Young(x) -> -Read(x))"),
        parse(u"all x.(UnitedStates(x) -> Read(x))"),
    ]
    goal = parse(u"-Read(Henry)")
    example_template(1, background_theory, default_rules, goal)
def do_case(index, left, semantics, right, expected_synt, expected_sem):
    """Check one PositionalForwardRaiseCombinator case: syntax and semantics."""
    combinator = PositionalForwardRaiseCombinator(index)
    left_cat = lex.parse_category(left)
    right_cat = lex.parse_category(right)

    # Syntactic check: the categories combine, and yield the expected result.
    ok_(combinator.can_combine(left_cat, right_cat))
    combined = next(iter(combinator.combine(left_cat, right_cat)))
    eq_(str(combined), expected_synt)

    # Semantic check: updating the semantics matches the simplified expectation.
    sem_expr = Expression.fromstring(semantics)
    expected = str(Expression.fromstring(expected_sem).simplify())
    eq_(str(combinator.update_semantics(sem_expr)), expected)
def test_prove(arguments):
    """
    Try some proofs and exhibit the results.
    """
    for (goal, assumptions) in arguments:
        parsed_goal = Expression.fromstring(goal)
        parsed_assumptions = [Expression.fromstring(a) for a in assumptions]
        result = Prover9Command(parsed_goal, assumptions=parsed_assumptions).prove()
        for assumption in parsed_assumptions:
            print("   %s" % assumption)
        print("|- %s: %s\n" % (parsed_goal, result))
def test_config():
    # Smoke-test Prover9 executable discovery/configuration.
    # Goal `walk(j)` follows trivially from the assumption `(walk(j) & sing(j))`.
    a = Expression.fromstring("(walk(j) & sing(j))")
    g = Expression.fromstring("walk(j)")
    p = Prover9Command(g, assumptions=[a])
    # Clear the cached binary path and the search-path list so the next
    # prove() call exercises the executable-discovery logic from scratch.
    p._executable_path = None
    p.prover9_search = []
    p.prove()
    # config_prover9('/usr/local/bin')
    # Calling prove() again should reuse the (re)discovered executable.
    print(p.prove())
    print(p.proof())
def test_prove(arguments):
    """
    Try some proofs and exhibit the results.
    """
    for (goal, assumptions) in arguments:
        parsed_goal = Expression.fromstring(goal)
        parsed_assumptions = [Expression.fromstring(a) for a in assumptions]
        result = Prover9Command(parsed_goal, assumptions=parsed_assumptions).prove()
        for assumption in parsed_assumptions:
            print('   %s' % assumption)
        print('|- %s: %s\n' % (parsed_goal, result))
def test_config():
    # Smoke-test Prover9 executable discovery/configuration.
    # Goal `walk(j)` follows trivially from the assumption `(walk(j) & sing(j))`.
    a = Expression.fromstring('(walk(j) & sing(j))')
    g = Expression.fromstring('walk(j)')
    p = Prover9Command(g, assumptions=[a])
    # Clear the cached binary path and the search-path list so the next
    # prove() call exercises the executable-discovery logic from scratch.
    p._executable_path = None
    p.prover9_search=[]
    p.prove()
    #config_prover9('/usr/local/bin')
    # Calling prove() again should reuse the (re)discovered executable.
    print(p.prove())
    print(p.proof())
def testResolutionProver():
    """Exercise the resolution prover on tautologies and simple entailments."""
    # Single-formula validity checks.
    for formula in [
        r'man(x)',
        r'(man(x) -> man(x))',
        r'(man(x) -> --man(x))',
        r'-(man(x) and -man(x))',
        r'(man(x) or -man(x))',
        r'(man(x) -> man(x))',
        r'-(man(x) and -man(x))',
        r'(man(x) or -man(x))',
        r'(man(x) -> man(x))',
        r'(man(x) iff man(x))',
        r'-(man(x) iff -man(x))',
        'all x.man(x)',
        '-all x.some y.F(x,y) & some x.all y.(-F(x,y))',
        'some x.all y.sees(x,y)',
    ]:
        resolution_test(formula)

    # Classic syllogism: all men are mortal; Socrates is a man.
    p1 = Expression.fromstring(r'all x.(man(x) -> mortal(x))')
    p2 = Expression.fromstring(r'man(Socrates)')
    c = Expression.fromstring(r'mortal(Socrates)')
    print('%s, %s |- %s: %s' % (p1, p2, c, ResolutionProver().prove(c, [p1,p2])))

    # Existential conclusion from a universal premise.
    p1 = Expression.fromstring(r'all x.(man(x) -> walks(x))')
    p2 = Expression.fromstring(r'man(John)')
    c = Expression.fromstring(r'some y.walks(y)')
    print('%s, %s |- %s: %s' % (p1, p2, c, ResolutionProver().prove(c, [p1,p2])))

    # Event-style entailment.
    p = Expression.fromstring(r'some e1.some e2.(believe(e1,john,e2) & walk(e2,mary))')
    c = Expression.fromstring(r'some e0.walk(e0,mary)')
    print('%s |- %s: %s' % (p, c, ResolutionProver().prove(c, [p])))
def testResolutionProver():
    """Exercise the resolution prover on tautologies and simple entailments."""
    # Single-formula validity checks.
    for formula in [
        r'man(x)',
        r'(man(x) -> man(x))',
        r'(man(x) -> --man(x))',
        r'-(man(x) and -man(x))',
        r'(man(x) or -man(x))',
        r'(man(x) -> man(x))',
        r'-(man(x) and -man(x))',
        r'(man(x) or -man(x))',
        r'(man(x) -> man(x))',
        r'(man(x) iff man(x))',
        r'-(man(x) iff -man(x))',
        'all x.man(x)',
        '-all x.some y.F(x,y) & some x.all y.(-F(x,y))',
        'some x.all y.sees(x,y)',
    ]:
        resolution_test(formula)

    # Classic syllogism: all men are mortal; Socrates is a man.
    p1 = Expression.fromstring(r'all x.(man(x) -> mortal(x))')
    p2 = Expression.fromstring(r'man(Socrates)')
    c = Expression.fromstring(r'mortal(Socrates)')
    print('%s, %s |- %s: %s' % (p1, p2, c, ResolutionProver().prove(c, [p1, p2])))

    # Existential conclusion from a universal premise.
    p1 = Expression.fromstring(r'all x.(man(x) -> walks(x))')
    p2 = Expression.fromstring(r'man(John)')
    c = Expression.fromstring(r'some y.walks(y)')
    print('%s, %s |- %s: %s' % (p1, p2, c, ResolutionProver().prove(c, [p1, p2])))

    # Event-style entailment.
    p = Expression.fromstring(
        r'some e1.some e2.(believe(e1,john,e2) & walk(e2,mary))')
    c = Expression.fromstring(r'some e0.walk(e0,mary)')
    print('%s |- %s: %s' % (p, c, ResolutionProver().prove(c, [p])))
def testResolutionProver():
    """Exercise the resolution prover on tautologies and simple entailments."""
    # Single-formula validity checks.
    for formula in [
        r"man(x)",
        r"(man(x) -> man(x))",
        r"(man(x) -> --man(x))",
        r"-(man(x) and -man(x))",
        r"(man(x) or -man(x))",
        r"(man(x) -> man(x))",
        r"-(man(x) and -man(x))",
        r"(man(x) or -man(x))",
        r"(man(x) -> man(x))",
        r"(man(x) iff man(x))",
        r"-(man(x) iff -man(x))",
        "all x.man(x)",
        "-all x.some y.F(x,y) & some x.all y.(-F(x,y))",
        "some x.all y.sees(x,y)",
    ]:
        resolution_test(formula)

    # Classic syllogism: all men are mortal; Socrates is a man.
    p1 = Expression.fromstring(r"all x.(man(x) -> mortal(x))")
    p2 = Expression.fromstring(r"man(Socrates)")
    c = Expression.fromstring(r"mortal(Socrates)")
    print(f"{p1}, {p2} |- {c}: {ResolutionProver().prove(c, [p1, p2])}")

    # Existential conclusion from a universal premise.
    p1 = Expression.fromstring(r"all x.(man(x) -> walks(x))")
    p2 = Expression.fromstring(r"man(John)")
    c = Expression.fromstring(r"some y.walks(y)")
    print(f"{p1}, {p2} |- {c}: {ResolutionProver().prove(c, [p1, p2])}")

    # Event-style entailment.
    p = Expression.fromstring(r"some e1.some e2.(believe(e1,john,e2) & walk(e2,mary))")
    c = Expression.fromstring(r"some e0.walk(e0,mary)")
    print(f"{p} |- {c}: {ResolutionProver().prove(c, [p])}")
def test_convert_to_prover9(expr):
    """
    Test that parsing works OK.
    """
    for formula_string in expr:
        parsed = Expression.fromstring(formula_string)
        print(convert_to_prover9(parsed))
def test_model_induced_functions():
    """
    Test evaluating a model with an ontology which has induced functions.
    """
    fake_scene = {"objects": ["foo", "bar"]}

    types = TypeSystem(["a"])
    functions = [
        types.new_function("test", ("a", "a"), lambda x: True),
        # `test2` is defined by a logical expression rather than a lambda.
        types.new_function("test2", ("a", "a"),
                           Expression.fromstring(r"\x.test(test(x))")),
    ]
    ontology = Ontology(types, functions, [])
    model = Model(scene=fake_scene, ontology=ontology)

    def check(msg, expr, expected):
        eq_(model.evaluate(Expression.fromstring(expr)), expected, msg=msg)

    cases = [
        ("Test basic call of an abstract function",
         r"\a.test2(a)", {"foo": True, "bar": True}),
        ("Test embedded call of an abstract function",
         r"\a.test(test2(a))", {"foo": True, "bar": True}),
    ]
    for msg, expr, expected in cases:
        yield check, msg, expr, expected
def __init__(self, meaning, glue, indices=None):
    """
    :param meaning: a logic ``Expression``, or a string parseable into one
    :param glue: a linear-logic ``Expression``, or a string parseable into one
    :param indices: optional set of indices; defaults to an empty set
    :raises RuntimeError: if ``meaning`` or ``glue`` has an unsupported type
    """
    if not indices:
        indices = set()

    # Accept either an already-parsed expression or its string form.
    if isinstance(meaning, Expression):
        self.meaning = meaning
    elif isinstance(meaning, str):
        self.meaning = Expression.fromstring(meaning)
    else:
        raise RuntimeError(
            "Meaning term neither string or expression: %s, %s"
            % (meaning, meaning.__class__)
        )

    if isinstance(glue, linearlogic.Expression):
        self.glue = glue
    elif isinstance(glue, str):
        self.glue = linearlogic.LinearLogicParser().parse(glue)
    else:
        raise RuntimeError(
            "Glue term neither string or expression: %s, %s"
            % (glue, glue.__class__)
        )

    self.indices = indices
def __init__(self, meaning, glue, indices=None):
    """
    :param meaning: a logic ``Expression``, or a string parseable into one
    :param glue: a linear-logic ``Expression``, or a string parseable into one
    :param indices: optional set of indices; defaults to an empty set
    :raises RuntimeError: if ``meaning`` or ``glue`` has an unsupported type
    """
    if not indices:
        indices = set()

    # Accept either an already-parsed expression or its string form.
    if isinstance(meaning, Expression):
        self.meaning = meaning
    elif isinstance(meaning, string_types):
        self.meaning = Expression.fromstring(meaning)
    else:
        raise RuntimeError(
            'Meaning term neither string or expression: %s, %s'
            % (meaning, meaning.__class__)
        )

    if isinstance(glue, linearlogic.Expression):
        self.glue = glue
    elif isinstance(glue, string_types):
        self.glue = linearlogic.LinearLogicParser().parse(glue)
    else:
        raise RuntimeError(
            'Glue term neither string or expression: %s, %s'
            % (glue, glue.__class__)
        )

    self.indices = indices
def do_test(expression, expected_members, expected_non_members):
    """Check presence/absence of application splits for `expression`."""
    parsed = Expression.fromstring(expression)

    # Gather every (part1, part2, direction) split the ontology yields.
    split_tuples = [
        (str(part1), str(part2), direction)
        for part1, part2, direction in ontology.iter_application_splits(parsed)
    ]

    all_parts = ({part1 for part1, _, _ in split_tuples}
                 | {part2 for _, part2, _ in split_tuples})

    for el in expected_members:
        if el[1] is None:
            # just want to assert that a logical expr appears *somewhere*
            ok_(el[0] in all_parts, el[0])
        else:
            # full split specified
            ok_(el in split_tuples, el)

    for el in expected_non_members:
        if el[1] is None:
            # just want to assert that a logical expr appears *nowhere*
            ok_(el[0] not in all_parts, el[0])
        else:
            # full split specified
            ok_(el not in split_tuples, el)
def do_test(expr, extra_signature, expected):
    """
    Typecheck `expr` under `extra_signature`.

    :param expr: logic string to parse and typecheck
    :param extra_signature: extra signature entries for the typechecker
    :param expected: the expected resulting type, or None when a
        TypeException is expected
    """
    expr = Expression.fromstring(expr)
    # Fixed: compare to None with `is`, not `==` (PEP 8; `==` can be hijacked
    # by operator overloading on Expression-like types).
    if expected is None:
        assert_raises(TypeException, ontology.typecheck, expr, extra_signature)
    else:
        ontology.typecheck(expr, extra_signature)
        eq_(expr.type, expected)
def do_test(expr, assert_in, assert_not_in):
    """Check which sub-expression strings do and do not appear for `expr`."""
    parsed = Expression.fromstring(expr)
    subexpr_strs = [str(sub) for sub, _ in get_subexpressions(parsed)]

    for expected in assert_in:
        ok_(expected in subexpr_strs, expected)
    for unexpected in assert_not_in:
        ok_(unexpected not in subexpr_strs, unexpected)
def demo():
    """Run the clausifier tests, then the resolution-prover demos."""
    test_clausify()
    print()
    testResolutionProver()
    print()

    formula = Expression.fromstring('man(x)')
    print(ResolutionProverCommand(formula, [formula]).prove())
def test_read_ec_sexpr_de_bruijn():
    """
    properly handle de Bruijn indexing in EC lambda expressions.
    """
    sexpr = "(lambda ((lambda ($0 (lambda $0))) (lambda ($1 $0))))"
    parsed, _bound_vars = read_ec_sexpr(sexpr)
    print(parsed)
    expected = Expression.fromstring(r"\A.((\B.B(\C.C))(\C.A(C)))")
    eq_(parsed, expected)
def test_read_ec_sexpr_nested():
    """
    read_ec_sexpr should support reading in applications where the function
    itself is an expression (i.e. there is some not-yet-reduced beta reduction
    candidate).
    """
    parsed, _bound_vars = read_ec_sexpr("(lambda ((lambda (foo $0)) $0))")
    expected = Expression.fromstring(r"\a.((\b.foo(b))(a))")
    eq_(parsed, expected)
def demo():
    """Run the clausifier tests, then the resolution-prover demos."""
    test_clausify()
    print()
    testResolutionProver()
    print()

    formula = Expression.fromstring("man(x)")
    print(ResolutionProverCommand(formula, [formula]).prove())
def test_extract_lambda():
    """
    `extract_lambda` should support all possible orderings of the variables it
    encounters.
    """
    source = Expression.fromstring(r"foo(\a.a,\a.a)")
    results = B.extract_lambda(source)
    eq_(len(results), 2)
def satdemo(trace=None):
    """Satisfiers of an open formula in a first order model."""
    print()
    print("*" * mult)
    print("Satisfiers Demo")
    print("*" * mult)

    folmodel(quiet=True)

    formulas = [
        "boy(x)",
        "(x = x)",
        "(boy(x) | girl(x))",
        "(boy(x) & girl(x))",
        "love(adam, x)",
        "love(x, adam)",
        "-(x = adam)",
        "exists z22. love(x, z22)",
        "exists y. love(y, x)",
        "all y. (girl(y) -> love(x, y))",
        "all y. (girl(y) -> love(y, x))",
        "all y. (girl(y) -> (boy(x) & love(y, x)))",
        "(boy(x) & all y. (girl(y) -> love(x, y)))",
        "(boy(x) & all y. (girl(y) -> love(y, x)))",
        "(boy(x) & exists y. (girl(y) & love(y, x)))",
        "(girl(x) -> dog(x))",
        "all y. (dog(y) -> (x = y))",
        "exists y. love(y, x)",
        "exists y. (love(adam, y) & love(y, x))",
    ]

    if trace:
        print(m2)

    # Echo each formula (parsing it once as a sanity check), then parse
    # them all for evaluation.
    for fmla in formulas:
        print(fmla)
        Expression.fromstring(fmla)

    parsed = [Expression.fromstring(fmla) for fmla in formulas]
    for formula in parsed:
        g2.purge()
        print("The satisfiers of '%s' are: %s" % (formula, m2.satisfiers(formula, "x", g2, trace)))
def test_iter_expressions_with_used_constants():
    """Constants already used in registered expressions must not be reused."""
    ontology = _make_simple_mock_ontology()
    ontology.register_expressions([Expression.fromstring(r"\z1.and_(foo(z1),baz)")])

    generated = set(ontology.iter_expressions(max_depth=3, use_unused_constants=True))
    expression_strs = [str(expression) for expression in generated]

    ok_(r"foo(qux)" in expression_strs, "Use of new constant variable")
    ok_(r"baz" not in expression_strs, "Cannot use used constant variable")
def satdemo(trace=None):
    """Satisfiers of an open formula in a first order model."""
    print()
    print('*' * mult)
    print("Satisfiers Demo")
    print('*' * mult)

    folmodel(quiet=True)

    formulas = [
        'boy(x)',
        '(x = x)',
        '(boy(x) | girl(x))',
        '(boy(x) & girl(x))',
        'love(adam, x)',
        'love(x, adam)',
        '-(x = adam)',
        'exists z22. love(x, z22)',
        'exists y. love(y, x)',
        'all y. (girl(y) -> love(x, y))',
        'all y. (girl(y) -> love(y, x))',
        'all y. (girl(y) -> (boy(x) & love(y, x)))',
        '(boy(x) & all y. (girl(y) -> love(x, y)))',
        '(boy(x) & all y. (girl(y) -> love(y, x)))',
        '(boy(x) & exists y. (girl(y) & love(y, x)))',
        '(girl(x) -> dog(x))',
        'all y. (dog(y) -> (x = y))',
        'exists y. love(y, x)',
        'exists y. (love(adam, y) & love(y, x))',
    ]

    if trace:
        print(m2)

    # Echo each formula (parsing it once as a sanity check), then parse
    # them all for evaluation.
    for fmla in formulas:
        print(fmla)
        Expression.fromstring(fmla)

    parsed = [Expression.fromstring(fmla) for fmla in formulas]
    for formula in parsed:
        g2.purge()
        print("The satisfiers of '%s' are: %s" % (formula, m2.satisfiers(formula, 'x', g2, trace)))
def test_read_ec_sexpr():
    """A three-lambda EC s-expression should parse to the expected logic form."""
    ontology = _make_simple_mock_ontology()
    sexpr = ("(lambda (lambda (lambda (and_ (threeplace $0 qux $1) "
             "(and_ (foo $2) baz)))))")
    parsed, bound_vars = ontology.read_ec_sexpr(sexpr)

    expected = Expression.fromstring(
        r"\a b c.and_(threeplace(c,qux,b),and_(foo(a),baz))")
    eq_(parsed, expected)
    eq_(len(bound_vars), 3)
def test_read_ec_sexpr_de_bruijn():
    """
    properly handle de Bruijn indexing in EC lambda expressions.
    """
    ontology = _make_simple_mock_ontology()
    sexpr = "(lambda ((lambda ($0 (lambda $0))) (lambda ($1 $0))))"
    parsed, bound_vars = ontology.read_ec_sexpr(sexpr)
    print(parsed)

    expected = Expression.fromstring(r"\A.((\B.B(\C.C))(\C.A(C)))")
    eq_(parsed, expected)
    eq_(len(bound_vars), 4)
def test_read_ec_sexpr_nested():
    """
    read_ec_sexpr should support reading in applications where the function
    itself is an expression (i.e. there is some not-yet-reduced beta reduction
    candidate).
    """
    ontology = _make_simple_mock_ontology()
    parsed, bound_vars = ontology.read_ec_sexpr(
        "(lambda ((lambda (abc $0)) $0))")

    expected = Expression.fromstring(r"\a.((\b.abc(b))(a))")
    eq_(parsed, expected)
    eq_(len(bound_vars), 2)
def test_model_stored_partial_application():
    """A stored partial application of a multi-arg function should evaluate."""
    types = TypeSystem(["obj"])
    functions = [
        types.new_function("lotsofargs", ("obj", "obj", "obj"), lambda a, b: b),
    ]
    constants = [
        types.new_constant("obj1", "obj"),
        types.new_constant("obj2", "obj"),
    ]
    ontology = Ontology(types, functions, constants)

    # Register a derived function that partially applies `lotsofargs`.
    partial_fn = types.new_function(
        "partial", ("obj", "obj"), Expression.fromstring(r"lotsofargs(obj2)"))
    ontology.add_functions([partial_fn])

    model = Model({"objects": []}, ontology)
    eq_(model.evaluate(Expression.fromstring(r"partial(obj1)")), "obj1")
def folmodel(quiet=False, trace=None):
    """Example of a first-order model.

    Builds the module-level model ``m2`` (with valuation ``val2``, domain
    ``dom2`` and assignment ``g2``) and, unless ``quiet``, prints the
    interpretations of some expressions and predicate applications.
    """
    global val2, v2, dom2, m2, g2

    v2 = [('adam', 'b1'), ('betty', 'g1'), ('fido', 'd1'),
          ('girl', set(['g1', 'g2'])), ('boy', set(['b1', 'b2'])),
          ('dog', set(['d1'])),
          ('love', set([('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')]))]
    val2 = Valuation(v2)
    dom2 = val2.domain
    m2 = Model(dom2, val2)
    g2 = Assignment(dom2, [('x', 'b1'), ('y', 'g2')])

    if not quiet:
        print()
        print('*' * mult)
        print("Models Demo")
        print("*" * mult)
        print("Model m2:\n", "-" * 14, "\n", m2)
        print("Variable assignment = ", g2)

        exprs = ['adam', 'boy', 'love', 'walks', 'x', 'y', 'z']
        parsed_exprs = [Expression.fromstring(e) for e in exprs]
        print()
        for parsed in parsed_exprs:
            try:
                print("The interpretation of '%s' in m2 is %s" % (parsed, m2.i(parsed, g2)))
            except Undefined:
                print("The interpretation of '%s' in m2 is Undefined" % parsed)

        # BUG FIX: ('adam') was a bare string, not a 1-tuple, so the argument
        # loop below iterated over its characters. A 1-tuple is intended,
        # matching the ('walks', ('adam',)) entry; Valuation stores unary
        # relations as sets of 1-tuples.
        applications = [('boy', ('adam',)), ('walks', ('adam',)),
                        ('love', ('adam', 'y')), ('love', ('y', 'adam'))]

        for (fun, args) in applications:
            try:
                funval = m2.i(Expression.fromstring(fun), g2)
                argsval = tuple(m2.i(Expression.fromstring(arg), g2) for arg in args)
                print("%s(%s) evaluates to %s" % (fun, args, argsval in funval))
            except Undefined:
                print("%s(%s) evaluates to Undefined" % (fun, args))
def _test_application_split_sound(expr, ontology):
    """
    Evaluate soundness of `iter_application_splits` for a particular
    expression: re-applying any yielded split must reproduce one of the
    expression's own sub-expressions.
    """
    if isinstance(expr, str):
        expr = Expression.fromstring(expr)
    subexprs = [str(sub) for sub, _ in get_subexpressions(expr)]

    for part1, part2, direction in ontology.iter_application_splits(expr):
        # The slash direction tells us which piece is the functor.
        if direction == "/":
            functor, argument = part1, part2
        else:
            functor, argument = part2, part1
        reapplied = str(ApplicationExpression(functor, argument).simplify())
        ok_(reapplied in subexprs,
            "%s %s %s --> %s" % (part1, direction, part2, reapplied))
def do_test(expr, assert_in):
    """Collect all application splits of `expr` and assert expected members."""
    parsed = Expression.fromstring(expr)

    splits = []
    for left, right, direction in ontology.iter_application_splits(parsed):
        splits.append((str(left), str(right), direction))
        print("\t", left, right, direction)

    for el in assert_in:
        ok_(el in splits, "%s not in splits" % (el, ))
def example2():
    """Default-logic example 2: is the personal interest deductible?"""
    parse = Expression.fromstring
    background_theory = [
        parse(u"Personal(y)"),
        parse(u"all x.(Personal(x) -> Interest(x))"),
        parse(u"all x.(QRI(x) -> Personal(x))"),
    ]
    default_rules = [
        parse(u"all x.(QRI(x) -> Deductible(x))"),
        parse(u"all x.(Personal(x) -> -Deductible(x))"),
        parse(u"all x.(Interest(x) -> Deductible(x))"),
    ]
    goal = parse(u"-Deductible(y)")
    example_template(2, background_theory, default_rules, goal)
def fromstring(lex_str, include_semantics=False):
    """
    Convert string representation into a lexicon for CCGs.

    Each non-comment line is either:
      * a primitive-category declaration (":- S, N, NP, VP" — the first
        category listed is the lexicon's target category),
      * a family definition ("Det :: NP/N"), or
      * a word definition ("which => (N\\N)/(S/NP)"), optionally carrying a
        semantic expression when `include_semantics` is True.
    """
    CCGVar.reset_id()
    primitives = []
    families = {}
    entries = defaultdict(list)
    for line in lex_str.splitlines():
        # Strip comments and leading/trailing whitespace.
        line = COMMENTS_RE.match(line).groups()[0].strip()
        if line == "":
            continue

        if line.startswith(':-'):
            # A line of primitive categories.
            # The first one is the target category
            # ie, :- S, N, NP, VP
            primitives = primitives + [
                prim.strip() for prim in line[2:].strip().split(',')
            ]
        else:
            # Either a family definition, or a word definition.
            # LEX_RE splits "ident (:: or =>) rhs"; RHS_RE splits the
            # category string from an optional semantics annotation.
            (ident, sep, rhs) = LEX_RE.match(line).groups()
            (catstr, semantics_str) = RHS_RE.match(rhs).groups()
            (cat, var) = augParseCategory(catstr, primitives, families)

            if sep == '::':
                # Family definition
                # ie, Det :: NP/N
                families[ident] = (cat, var)
            else:
                semantics = None
                if include_semantics is True:
                    if semantics_str is None:
                        # Semantics are mandatory in this mode; fail loudly.
                        raise AssertionError(
                            line + " must contain semantics because include_semantics is set to True"
                        )
                    else:
                        semantics = Expression.fromstring(
                            SEMANTICS_RE.match(semantics_str).groups()[0]
                        )
                # Word definition
                # ie, which => (N\N)/(S/NP)
                entries[ident].append(Token(ident, cat, semantics))
    # The target category is the first primitive declared.
    return CCGLexicon(primitives[0], primitives, families, entries)
def load_fol(s):
    """
    Temporarily duplicated from ``nltk.sem.util``.
    Convert a file of first order formulas into a list of ``Expression`` objects.

    :param s: the contents of the file
    :type s: str
    :return: a list of parsed formulas.
    :rtype: list(Expression)
    :raises ValueError: if a non-blank, non-comment line fails to parse
    """
    statements = []
    for linenum, line in enumerate(s.splitlines()):
        line = line.strip()
        # Skip comment lines and blank lines (idiomatic `not line`
        # instead of `line == ''`).
        if line.startswith('#') or not line:
            continue
        try:
            statements.append(Expression.fromstring(line))
        except Exception:
            raise ValueError('Unable to parse line %s: %s' % (linenum, line))
    return statements
def evaluate(self, expr, g, trace=None):
    """
    Read input expressions, and provide a handler for ``satisfy``
    that blocks further propagation of the ``Undefined`` error.

    :param expr: An ``Expression`` of ``logic``.
    :type g: Assignment
    :param g: an assignment to individual variables.
    :rtype: bool or 'Undefined'
    """
    try:
        value = self.satisfy(Expression.fromstring(expr), g, trace=trace)
    except Undefined:
        # Swallow Undefined and report it as a sentinel value instead.
        if trace:
            print()
            print("'%s' is undefined under M, %s" % (expr, g))
        return 'Undefined'
    if trace:
        print()
        print("'%s' evaluates to %s under M, %s" % (expr, value, g))
    return value
def resolution_test(e):
    """Parse `e`, attempt a resolution proof of it, and print the outcome."""
    formula = Expression.fromstring(e)
    proved = ResolutionProver().prove(formula)
    print('|- %s: %s' % (formula, proved))
def _get_transitions(self,
                     expression: Expression,
                     current_transitions: List[str]) -> List[str]:
    """
    Recursively append the grammar transitions for `expression` (and its
    sub-expressions) to `current_transitions`, and return that list.
    Transitions have the form "type -> [child types]" for internal nodes and
    "type -> name" for leaves.
    """
    # The way we handle curried functions in here is a bit of a mess, but it
    # works. For any function that takes more than one argument, the NLTK
    # Expression object will be curried, and so the standard "visitor" pattern
    # used by NLTK will result in action sequences that are also curried. We
    # need to detect these curried functions and uncurry them in the action
    # sequence. We do that by keeping around a dictionary mapping
    # multi-argument functions to the number of arguments they take. When we
    # see a multi-argument function, we check to see if we're at the
    # top-level, first instance of that function by checking its number of
    # arguments with NLTK's `uncurry()` function. If it is, we output an
    # action using those arguments. Otherwise, we're at an intermediate node
    # of a curried function, and we squelch the action that would normally be
    # generated.
    # TODO(mattg): There might be some way of removing the need for
    # `curried_functions` here, using instead the `argument_types()` function
    # I added to `ComplexType`, but my guess is that it would involve needing
    # to modify nltk, and I don't want to bother with figuring that out right
    # now.
    curried_functions = self._get_curried_functions()
    expression_type = expression.type

    try:
        # ``Expression.visit()`` takes two arguments: the first one is a
        # function applied on each sub-expression and the second is a
        # combinator that is applied to the list of values returned from the
        # function applications. We just want the list of all sub-expressions
        # here.
        sub_expressions = expression.visit(lambda x: x, lambda x: x)
        transformed_types = [sub_exp.type for sub_exp in sub_expressions]

        if isinstance(expression, LambdaExpression):
            # If the expression is a lambda expression, the list of sub
            # expressions does not include the "lambda x" term. We're adding
            # it here so that we will see transitions like
            #   <e,d> -> [\x, d] instead of
            #   <e,d> -> [d]
            transformed_types = ["lambda x"] + transformed_types
        elif isinstance(expression, ApplicationExpression):
            function, arguments = expression.uncurry()
            function_type = function.type
            if function_type in curried_functions:
                expected_num_arguments = curried_functions[function_type]
                if len(arguments) == expected_num_arguments:
                    # This is the initial application of a curried function.
                    # We'll use this node in the expression to generate the
                    # action for this function, using all of its arguments.
                    transformed_types = [function.type] + [argument.type for argument in arguments]
                else:
                    # We're at an intermediate node. We'll set
                    # `transformed_types` to `None` to indicate that we need
                    # to squelch this action.
                    transformed_types = None

        if transformed_types:
            transition = f"{expression_type} -> {transformed_types}"
            current_transitions.append(transition)
        # Recurse into each sub-expression regardless of whether an action was
        # emitted for this node.
        for sub_expression in sub_expressions:
            self._get_transitions(sub_expression, current_transitions)
    except NotImplementedError:
        # This means that the expression is a leaf. We simply make a
        # transition from its type to itself, translating the name back
        # through `reverse_name_mapping` when one is registered.
        original_name = str(expression)
        if original_name in self.reverse_name_mapping:
            original_name = self.reverse_name_mapping[original_name]
        transition = f"{expression_type} -> {original_name}"
        current_transitions.append(transition)
    return current_transitions