Example #1
    def test_nondet_query(self, input_file):
        program_parser = PrologParser()
        self.request_parser = PrologParser('request')
        program = program_parser.parse(read(input_file))
        # Print the input Prolog program.
        print('Input program:')
        for rule in program:
            print(f'rule: {rule}')

        while True:
            prefix = '?- '
            try:
                str_request = input(prefix)
            except KeyboardInterrupt:
                break
            if not str_request:
                break
            try:
                goal = self.request_parser.parse(prefix + str_request)
            except lark.exceptions.LarkError:
                print('Error: invalid syntax')
                continue
            try:
                # Print the input goal.
                print('Input goal:')
                print(f'Query: {goal.body}')

                goal_ = self.nondet_query(program, goal)
                # Print the solution goal_.
                print(f'Solution: {goal_}')
            except RecursionError:
                print('Error: stack overflow')
Example #2
    def __init__(self):

        self.config = misc.load_config('.nlprc')

        #
        # database
        #

        Session = sessionmaker(bind=model.engine)
        self.session = Session()

        #
        # logic DB
        #

        self.db = LogicDB(self.session)

        #
        # knowledge base
        #

        self.kb = HALKB()

        #
        # TensorFlow (deferred, as tf can take quite a bit of time to set up)
        #

        self.tf_session = None
        self.nlp_model = None

        #
        # module management, setup
        #

        self.modules = {}
        s = self.config.get('semantics', 'modules')
        self.all_modules = map(lambda s: s.strip(), s.split(','))

        for mn2 in self.all_modules:
            self.load_module(mn2)

        #
        # prolog environment setup
        #

        self.prolog_engine = PrologAIEngine(self.db)

        self.parser = PrologParser()
Example #3
Session = sessionmaker(bind=model.engine)

session = Session()

#
# main
#

db = LogicDB(session)

if options.clear_all:
    db.clear_all_modules()

first = True

parser = PrologParser()
for pl_fn in args:

    linecnt = 0
    with codecs.open(pl_fn, encoding='utf-8', errors='ignore', mode='r') as f:
        while f.readline():
            linecnt += 1
    print "%s: %d lines." % (pl_fn, linecnt)

    nlp_macros      = {}
    src             = None
    nlp_test_engine = PrologAIEngine(db)
    nlp_test_engine.set_trace(options.trace)

    with open (pl_fn, 'r') as f:
        parser.start(f, pl_fn)
Example #4
    def __init__(self, session):

        self.session = session
        self.enabled_modules = set()
        self.parser = PrologParser()
Example #5
class LogicDB(object):
    def __init__(self, session):

        self.session = session
        self.enabled_modules = set()
        self.parser = PrologParser()

    def clear_module(self, module):

        logging.info("Clearing %s ..." % module)
        self.session.query(
            model.ORMClause).filter(model.ORMClause.module == module).delete()
        self.session.query(model.ORMPredicateDoc).filter(
            model.ORMPredicateDoc.module == module).delete()
        self.session.query(model.ModuleDependency).filter(
            model.ModuleDependency.module == module).delete()
        logging.info("Clearing %s ... done." % module)

    def clear_all_modules(self):

        logging.info("Clearing all modules ...")
        self.session.query(model.ORMClause).delete()
        self.session.query(model.ORMPredicateDoc).delete()
        self.session.query(model.ModuleDependency).delete()
        logging.info("Clearing all modules ... done.")

    def store_module_requirements(self, module, requirements):
        for r in requirements:
            md = model.ModuleDependency(module=module, requires=r)
            self.session.add(md)

    def disable_all_modules(self):
        self.enabled_modules = set()

    def enable_module(self, module):

        # also enable required modules

        todo = [module]
        done = set()

        while len(todo) > 0:

            m = todo.pop()
            if m in done:
                continue

            # print "LogicDB: enabling module %s" % m
            self.enabled_modules.add(m)

            for req in self.session.query(model.ModuleDependency).filter(
                    model.ModuleDependency.module == m).all():
                todo.append(req.requires)

            done.add(m)

    def store(self, module, clause):

        ormc = model.ORMClause(module=module,
                               arity=len(clause.head.args),
                               head=clause.head.name,
                               prolog=unicode(clause))
        self.session.add(ormc)

    def store_doc(self, module, name, doc):

        ormd = model.ORMPredicateDoc(module=module, name=name, doc=doc)
        self.session.add(ormd)

    def lookup(self, name):

        # FIXME: caching ?

        # if name in self.clauses:
        #     return self.clauses[name]

        res = []

        for ormc in self.session.query(
                model.ORMClause).filter(model.ORMClause.head == name).all():

            if not ormc.module in self.enabled_modules:
                continue

            for c in self.parser.parse_line_clauses(ormc.prolog):
                res.append(c)

        return res

    #
    # manage stored contexts in db
    #

    def read_context(self, name, key):

        ctx = self.session.query(model.Context).filter(
            model.Context.name == name, model.Context.key == key).first()
        if not ctx:
            return None

        return self.parser.parse_line_clause_body(ctx.value)

    def write_context(self, name, key, value):

        v = unicode(value)

        ctx = self.session.query(model.Context).filter(
            model.Context.name == name, model.Context.key == key).first()
        if not ctx:
            ctx = model.Context(name=name, key=key, value=v, default_value=v)
            self.session.add(ctx)
        else:
            ctx.value = v

    def set_context_default(self, name, key, value):

        ctx = self.session.query(model.Context).filter(
            model.Context.name == name, model.Context.key == key).first()

        if not ctx:
            ctx = model.Context(name=name,
                                key=key,
                                value=value,
                                default_value=value)
            self.session.add(ctx)
        else:
            ctx.default_value = value

    def reset_context(self, name):

        for ctx in self.session.query(
                model.Context).filter(model.Context.name == name).all():
            ctx.value = ctx.default_value
Example #6
class NLPKernal(object):
    def __init__(self):

        self.config = misc.load_config('.nlprc')

        #
        # database
        #

        Session = sessionmaker(bind=model.engine)
        self.session = Session()

        #
        # logic DB
        #

        self.db = LogicDB(self.session)

        #
        # knowledge base
        #

        self.kb = HALKB()

        #
        # TensorFlow (deferred, as tf can take quite a bit of time to set up)
        #

        self.tf_session = None
        self.nlp_model = None

        #
        # module management, setup
        #

        self.modules = {}
        s = self.config.get('semantics', 'modules')
        self.all_modules = map(lambda s: s.strip(), s.split(','))

        for mn2 in self.all_modules:
            self.load_module(mn2)

        #
        # prolog environment setup
        #

        self.prolog_engine = PrologAIEngine(self.db)

        self.parser = PrologParser()

    # FIXME: this will work only on the first call
    def setup_tf_model(self, forward_only, load_model):

        if not self.tf_session:

            import tensorflow as tf

            # setup config to use BFC allocator
            config = tf.ConfigProto()
            config.gpu_options.allocator_type = 'BFC'

            self.tf_session = tf.Session(config=config)

        if not self.nlp_model:

            from nlp_model import NLPModel

            self.nlp_model = NLPModel(self.session)

            if load_model:

                self.nlp_model.load_dicts()

                # we need the inverse dict to reconstruct the output from tensor

                self.inv_output_dict = {
                    v: k
                    for k, v in self.nlp_model.output_dict.iteritems()
                }

                self.tf_model = self.nlp_model.create_tf_model(
                    self.tf_session, forward_only=forward_only)
                self.tf_model.batch_size = 1

                self.nlp_model.load_model(self.tf_session)

    def clean(self, module_names, clean_all, clean_logic, clean_discourses,
              clean_cronjobs, clean_kb):

        for module_name in module_names:

            if clean_logic or clean_all:
                logging.info('cleaning logic for %s...' % module_name)
                if module_name == 'all':
                    self.db.clear_all_modules()
                else:
                    self.db.clear_module(module_name)

            if clean_discourses or clean_all:
                logging.info('cleaning discourses for %s...' % module_name)
                if module_name == 'all':
                    self.session.query(model.Discourse).delete()
                else:
                    self.session.query(model.Discourse).filter(
                        model.Discourse.module == module_name).delete()

            if clean_cronjobs or clean_all:
                logging.info('cleaning cronjobs for %s...' % module_name)
                if module_name == 'all':
                    self.session.query(model.Cronjob).delete()
                else:
                    self.session.query(model.Cronjob).filter(
                        model.Cronjob.module == module_name).delete()

            if clean_kb or clean_all:
                logging.info('cleaning kb for %s...' % module_name)
                if module_name == 'all':
                    self.kb.clear_all_graphs()
                else:
                    graph = self._module_graph_name(module_name)
                    self.kb.clear_graphs(graph)

        self.session.commit()

    def load_module(self, module_name):

        if module_name in self.modules:
            return self.modules[module_name]

        logging.debug("loading module '%s'" % module_name)

        fp, pathname, description = imp.find_module(module_name, ['modules'])

        # print fp, pathname, description

        m = None

        try:
            m = imp.load_module(module_name, fp, pathname, description)

            self.modules[module_name] = m

            # print m
            # print getattr(m, '__all__', None)

            # for name in dir(m):
            #     print name

            for m2 in getattr(m, 'DEPENDS'):
                self.load_module(m2)

            if hasattr(m, 'CRONJOBS'):

                # update cronjobs in db

                old_cronjobs = set()
                for cronjob in self.session.query(model.Cronjob).filter(
                        model.Cronjob.module == module_name):
                    old_cronjobs.add(cronjob.name)

                new_cronjobs = set()
                for name, interval, f in getattr(m, 'CRONJOBS'):

                    logging.debug('registering cronjob %s' % name)

                    cj = self.session.query(model.Cronjob).filter(
                        model.Cronjob.module == module_name,
                        model.Cronjob.name == name).first()
                    if not cj:
                        cj = model.Cronjob(module=module_name,
                                           name=name,
                                           last_run=0)
                        self.session.add(cj)

                    cj.interval = interval
                    new_cronjobs.add(cj.name)

                for cjn in old_cronjobs:
                    if cjn in new_cronjobs:
                        continue
                    self.session.query(model.Cronjob).filter(
                        model.Cronjob.module == module_name,
                        model.Cronjob.name == cjn).delete()

                self.session.commit()

        except:
            logging.error(traceback.format_exc())

        finally:
            # Since we may exit via an exception, close fp explicitly.
            if fp:
                fp.close()

        return m

    def _module_graph_name(self, module_name):
        return GRAPH_PREFIX + module_name

    def import_kb(self, module_name):

        graph = self._module_graph_name(module_name)

        self.kb.register_graph(graph)

        self.kb.clear_graph(graph)

        m = self.modules[module_name]

        for kb_entry in getattr(m, 'KB_SOURCES'):

            if isinstance(kb_entry, basestring):

                kb_pathname = 'modules/%s/%s' % (module_name, kb_entry)

                logging.info('importing %s ...' % kb_pathname)

                self.kb.parse_file(graph, 'n3', kb_pathname)

            else:

                endpoint, nodes = kb_entry

                logging.info('importing nodes from %s...' % endpoint)

                for node in nodes:

                    logging.debug('importing %s from %s...' % (node, endpoint))

                    query = u"""
                             CONSTRUCT {
                                %s ?r ?n .
                             }
                             WHERE {
                                %s ?r ?n .
                             }
                             """ % (node, node)

                    res = self.kb.remote_sparql(endpoint,
                                                query,
                                                response_format='text/n3')

                    logging.debug("importing %s ?r ?n from %s: %d bytes." %
                                  (node, endpoint, len(res.text)))

                    self.kb.parse(context=graph, format='n3', data=res.text)

                    query = u"""
                             CONSTRUCT {
                                ?n ?r %s .
                             }
                             WHERE {
                                ?n ?r %s .
                             }
                             """ % (node, node)

                    res = self.kb.remote_sparql(endpoint,
                                                query,
                                                response_format='text/n3')
                    logging.debug("importing ?n ?r %s from %s: %d bytes" %
                                  (node, endpoint, len(res.text)))

                    self.kb.parse(context=graph, format='n3', data=res.text)

    def import_kb_multi(self, module_names):

        for module_name in module_names:

            if module_name == 'all':

                for mn2 in self.all_modules:
                    self.load_module(mn2)
                    self.import_kb(mn2)

            else:

                self.load_module(module_name)

                self.import_kb(module_name)

        self.session.commit()

    def compile_module(self,
                       module_name,
                       trace=False,
                       run_tests=False,
                       print_utterances=False):

        m = self.modules[module_name]

        self.db.clear_module(module_name)
        self.session.query(model.Discourse).filter(
            model.Discourse.module == module_name).delete()

        compiler = PrologCompiler(self.session, trace, run_tests,
                                  print_utterances)

        for pl_fn in getattr(m, 'PL_SOURCES'):

            pl_pathname = 'modules/%s/%s' % (module_name, pl_fn)

            compiler.do_compile(pl_pathname, module_name)

    def compile_module_multi(self,
                             module_names,
                             run_trace=False,
                             run_tests=False,
                             print_utterances=False):

        for module_name in module_names:

            if module_name == 'all':

                for mn2 in self.all_modules:
                    self.load_module(mn2)
                    self.compile_module(mn2, run_trace, run_tests,
                                        print_utterances)

            else:
                self.load_module(module_name)
                self.compile_module(module_name, run_trace, run_tests,
                                    print_utterances)

        self.session.commit()

    def run_cronjobs(self, module_name, force=False):

        m = self.modules[module_name]
        if not hasattr(m, 'CRONJOBS'):
            return

        graph = self._module_graph_name(module_name)

        self.kb.register_graph(graph)

        for name, interval, f in getattr(m, 'CRONJOBS'):

            cronjob = self.session.query(model.Cronjob).filter(
                model.Cronjob.module == module_name,
                model.Cronjob.name == name).first()

            t = time.time()

            next_run = cronjob.last_run + interval

            if force or t > next_run:

                logging.debug('running cronjob %s' % name)
                f(self.config, self.kb)

                cronjob.last_run = t

    def run_cronjobs_multi(self, module_names, force):

        for module_name in module_names:

            if module_name == 'all':

                for mn2 in self.all_modules:
                    self.load_module(mn2)
                    self.run_cronjobs(mn2, force=force)

            else:
                self.load_module(module_name)
                self.run_cronjobs(module_name, force=force)

        self.session.commit()

    def train(self, num_steps):

        self.setup_tf_model(False, False)
        self.nlp_model.train(num_steps)

    def process_line(self, line):

        self.setup_tf_model(True, True)
        from nlp_model import BUCKETS

        x = self.nlp_model.compute_x(line)

        logging.debug("x: %s -> %s" % (line, x))

        # which bucket does it belong to?
        bucket_id = min(
            [b for b in xrange(len(BUCKETS)) if BUCKETS[b][0] > len(x)])

        # get a 1-element batch to feed the sentence to the model
        encoder_inputs, decoder_inputs, target_weights = self.tf_model.get_batch(
            {bucket_id: [(x, [])]}, bucket_id)

        # print "encoder_inputs, decoder_inputs, target_weights", encoder_inputs, decoder_inputs, target_weights

        # get output logits for the sentence
        _, _, output_logits = self.tf_model.step(self.tf_session,
                                                 encoder_inputs,
                                                 decoder_inputs,
                                                 target_weights, bucket_id,
                                                 True)

        logging.debug("output_logits: %s" % repr(output_logits))

        # this is a greedy decoder - outputs are just argmaxes of output_logits.
        outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]

        # print "outputs", outputs

        preds = map(lambda o: self.inv_output_dict[o], outputs)
        logging.debug("preds: %s" % repr(preds))

        prolog_s = ''

        for p in preds:

            if p[0] == '_':
                continue  # skip _EOS

            if len(prolog_s) > 0:
                prolog_s += ', '
            prolog_s += p

        logging.debug('?- %s' % prolog_s)

        try:
            c = self.parser.parse_line_clause_body(prolog_s)
            logging.debug("Parse result: %s" % c)

            self.prolog_engine.reset_utterances()
            self.prolog_engine.reset_actions()

            self.prolog_engine.search(c)

            utts = self.prolog_engine.get_utterances()
            actions = self.prolog_engine.get_actions()

            return utts, actions

        except PrologError as e:

            logging.error("*** ERROR: %s" % e)

        return [], []
Example #7
print repr(dictionary)

model = nlp_model.create_keras_model()

model.load_weights(KERAS_WEIGHTS_FN)

#
# main
#

db = LogicDB(session)

engine = PrologEngine(db)
prolog_builtins.register_builtins(engine)

parser = PrologParser()

while True:

    line = raw_input('nlp> ')

    if line == 'quit' or line == 'exit':
        break

    try:

        x = nlp_model.compute_x(line)

        print 'x:', x

        batch_x = np.zeros([0, max_len], np.int32)
Example #8
    def do_compile(self, pl_fn, module_name):

        # quick source line count for progress output below

        linecnt = 0
        with codecs.open(pl_fn, encoding='utf-8', errors='ignore',
                         mode='r') as f:
            while f.readline():
                linecnt += 1
        logging.info("%s: %d lines." % (pl_fn, linecnt))

        # setup compiler / test environment

        self.db = LogicDB(self.session)

        parser = PrologParser()

        self.macro_engine = NLPMacroEngine()

        self.nlp_test_engine = PrologAIEngine(self.db)
        self.nlp_test_engine.set_trace(self.trace)
        self.nlp_test_engine.set_context_name('test')

        with codecs.open(pl_fn, encoding='utf-8', errors='ignore',
                         mode='r') as f:
            parser.start(f, pl_fn)

            while parser.cur_sym != SYM_EOF:
                clauses = parser.clause()

                for clause in clauses:
                    logging.debug(u"%7d / %7d (%3d%%) > %s" %
                                  (parser.cur_line, linecnt, parser.cur_line *
                                   100 / linecnt, unicode(clause)))

                    # compiler directive?

                    if clause.head.name == 'nlp_macro':
                        self.nlp_macro(clause)

                    elif clause.head.name == 'nlp_gen':
                        self.nlp_gen(module_name, clause)

                    elif clause.head.name == 'nlp_test':
                        if self.run_tests:
                            self.nlp_test(clause)

                    elif clause.head.name == 'set_context_default':
                        self.set_context_default(clause)

                    else:
                        self.db.store(module_name, clause)

                if parser.comment_pred:

                    self.db.store_doc(module_name, parser.comment_pred,
                                      parser.comment)

                    parser.comment_pred = None
                    parser.comment = ''

        logging.info("Compilation succeeded.")
Example #9
    def nlp_test(self, clause):

        args = clause.head.args

        lang = args[0].name

        # extract test rounds, look up matching discourses

        rounds = []  # [ (in, out, actions), ...]
        round_num = 0
        discourse_ids = set()

        for ivr in args[1:]:

            if ivr.name != 'ivr':
                raise PrologError('nlp_test: ivr predicate args expected.')

            test_in = ''
            test_out = ''
            test_actions = []

            for e in ivr.args:

                if e.name == 'in':
                    test_in = ' '.join(tokenize(e.args[0].s, lang))
                elif e.name == 'out':
                    test_out = ' '.join(tokenize(e.args[0].s, lang))
                elif e.name == 'action':
                    test_actions.append(e.args)
                else:
                    raise PrologError(
                        u'nlp_test: ivr predicate: unexpected arg: ' +
                        unicode(e))

            rounds.append((test_in, test_out, test_actions))

            # look up matching discourse_ids:

            d_ids = set()

            for dr in self.session.query(model.DiscourseRound).filter(model.DiscourseRound.inp_tokenized==test_in) \
                                                              .filter(model.DiscourseRound.round_num==round_num).all():
                d_ids.add(dr.discourse_id)

            if round_num == 0:
                discourse_ids = d_ids
            else:
                discourse_ids = discourse_ids & d_ids

            # print 'discourse_ids:', repr(discourse_ids)

            round_num += 1

        if len(discourse_ids) == 0:
            raise PrologError('nlp_test: %s: no matching discourse found.' %
                              clause.location)

        nlp_test_parser = PrologParser()

        # run the test(s): look up reaction to input in db, execute it, check result
        for did in discourse_ids:
            self.nlp_test_engine.reset_context()

            round_num = 0
            for dr in self.session.query(model.DiscourseRound).filter(model.DiscourseRound.discourse_id==did) \
                                                              .order_by(model.DiscourseRound.round_num):

                prolog_s = ','.join(dr.resp.split(';'))

                logging.info(
                    "nlp_test: %s round=%3d, %s => %s" %
                    (clause.location, round_num, dr.inp_tokenized, prolog_s))

                c = nlp_test_parser.parse_line_clause_body(prolog_s)
                # logging.debug( "Parse result: %s" % c)

                # logging.debug( "Searching for c: %s" % c )

                self.nlp_test_engine.reset_utterances()
                self.nlp_test_engine.reset_actions()
                solutions = self.nlp_test_engine.search(c)

                if len(solutions) == 0:
                    raise PrologError('nlp_test: %s no solution found.' %
                                      clause.location)

                # print "round %d utterances: %s" % (round_num, repr(nlp_test_engine.get_utterances()))

                # check actual utterances vs expected one

                test_in, test_out, test_actions = rounds[round_num]

                utterance_matched = False
                actual_out = ''
                utts = self.nlp_test_engine.get_utterances()

                if len(utts) > 0:
                    for utt in utts:
                        actual_out = ' '.join(
                            tokenize(utt['utterance'], utt['lang']))
                        if actual_out == test_out:
                            utterance_matched = True
                            break
                else:
                    utterance_matched = len(test_out) == 0

                if utterance_matched:
                    if len(utts) > 0:
                        logging.info(
                            "nlp_test: %s round=%3d *** UTTERANCE MATCHED!" %
                            (clause.location, round_num))
                else:
                    raise PrologError(
                        u'nlp_test: %s round=%3d actual utterance \'%s\' did not match expected utterance \'%s\'.'
                        % (clause.location, round_num, actual_out, test_out))

                # check actions

                if len(test_actions) > 0:

                    # print repr(test_actions)

                    actions_matched = True
                    acts = self.nlp_test_engine.get_actions()
                    for action in test_actions:
                        # every expected action must appear among the actual actions
                        if not any(action == act for act in acts):
                            actions_matched = False
                            break

                    if actions_matched:
                        logging.info(
                            "nlp_test: %s round=%3d *** ACTIONS MATCHED!" %
                            (clause.location, round_num))

                    else:
                        raise PrologError(
                            u'nlp_test: %s round=%3d ACTIONS MISMATCH.' %
                            (clause.location, round_num))

                round_num += 1
Example #10
class Interpreter:
    def __init__(self):
        pass

    '''
	Problem 1
	occurs_check (v, t) where v is of type Variable, t is of type Term.
	occurs_check (v, t) returns true if the Prolog Variable v occurs in t.
	Please see the lecture note Control in Prolog to revisit the concept of
	occurs-check.
	'''

    def occurs_check(self, v: Variable, t: Term) -> bool:
        pass

    # assert (occurs_check (var "E") (func "cons" [const "a"; const "b"; var "E"]))
    def test_occurs_check(self):
        v = Variable("E")
        t = Function("cons", [Atom("a"), Atom("b"), Variable("E")])
        # assert self.occurs_check(v, t)
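
    # A minimal hedged sketch of occurs_check (not part of the original
    # skeleton): v occurs in t iff t is v itself or v occurs in one of t's
    # arguments. It assumes Function exposes .args and that terms compare
    # structurally, as the tests above imply.
    def occurs_check_sketch(self, v: Variable, t: Term) -> bool:
        if t == v:
            return True
        if isinstance(t, Function):
            # recurse into the argument list of a compound term
            return any(self.occurs_check_sketch(v, a) for a in t.args)
        return False  # Atoms and Numbers cannot contain a Variable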

    '''
	Problem 2
	variables_of_term (t) where t is of type Term.
	variables_of_clause (c) where c is of type Rule.

	The function should return the Variables contained in a term or a rule
	using a Python set.

	The result must be saved in a Python set. The type of each element (a Prolog Variable)
	in the set is Variable.
	'''

    def variables_of_term(self, t: Term) -> set:
        pass

    def variables_of_clause(self, c: Rule) -> set:
        pass

    # The variables in a function f (X, Y, a) are [X; Y]
    def test_variables_of_term(self):
        t = Function("f", [Variable("X"), Variable("Y"), Atom("a")])
        # assert self.variables_of_term(t) == set([Variable("X"), Variable("Y")])

    # The variables in a Prolog rule p (X, Y, a) :- q (a, b, a) are [X; Y]
    def test_variables_of_clause(self):
        c = Rule(
            Function("p",
                     [Variable("X"), Variable("Y"),
                      Atom("a")]),
            RuleBody([Function(
                "q", [Atom("a"), Atom("b"), Atom("a")])]))
        # assert self.variables_of_clause(c) == set([Variable("X"), Variable("Y")])
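
    # A hedged sketch for Problem 2 (added for illustration): collect
    # Variables by structural recursion. Assumes Variable is hashable and
    # that Function/Rule expose .args, .head and .body as the tests imply;
    # the rule body may be a RuleBody (with .terms) or a plain list.
    def variables_of_term_sketch(self, t: Term) -> set:
        if isinstance(t, Variable):
            return {t}
        if isinstance(t, Function):
            vs = set()
            for a in t.args:
                vs |= self.variables_of_term_sketch(a)
            return vs
        return set()  # Atoms and Numbers contain no Variables

    def variables_of_clause_sketch(self, c: Rule) -> set:
        body = c.body.terms if hasattr(c.body, 'terms') else c.body
        vs = self.variables_of_term_sketch(c.head)
        for t in body:
            vs |= self.variables_of_term_sketch(t)
        return vs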

    '''
	Problem 3
	substitute_in_term (s, t) where s is of type dictionary and t is of type Term
	substitute_in_clause (s, c) where s is of type dictionary and c is of type Rule.

	The substitution s is a Python dictionary whose keys are of type Variable
	and whose values are of type Term: a map from variables to terms.

	The function should return t_ obtained by applying substitution s to t.

	Use a Python dictionary to represent a substitution map.
	'''

    def substitute_in_term(self, s: dict, t: Term) -> Term:
        pass

    def substitute_in_clause(self, s: dict, c: Rule) -> Rule:
        pass

    # Function substitution - f (X, Y, a) [Y/0, X/Y] = f (Y, 0, a)
    def test_substitute_in_term(self):
        s = {Variable("Y"): Number(0), Variable("X"): Variable("Y")}
        t = Function("f", [Variable("X"), Variable("Y"), Atom("a")])
        t_ = Function("f", [Variable("Y"), Number(0), Atom("a")])
        # assert self.substitute_in_term(s, t) == t_

    # Given a Prolog rule, p (X, Y, a) :- q (a, b, a), after doing substitution [Y/0, X/Y],
    #            we have p (Y, 0, a) :- q (a, b, a)
    def test_substitute_in_clause(self):
        s = {Variable("Y"): Number(0), Variable("X"): Variable("Y")}
        p = Function("p", [Variable("X"), Variable("Y"), Atom("a")])
        q = Function("q", [Atom("a"), Atom("b"), Atom("a")])
        p_ = Function("p", [Variable("Y"), Number(0), Atom("a")])
        q_ = Function("q", [Atom("a"), Atom("b"), Atom("a")])
        r = Rule(p, [q])
        r_ = Rule(p_, [q_])
        # assert self.substitute_in_clause(s, r) == r_
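
    # A hedged sketch for Problem 3: simultaneous substitution, i.e. each
    # Variable occurrence is replaced exactly once (so f(X, Y, a)[Y/0, X/Y]
    # yields f(Y, 0, a), not f(0, 0, a)). Attribute names (.name, .args,
    # .head, .body) are assumptions based on the tests above.
    def substitute_in_term_sketch(self, s: dict, t: Term) -> Term:
        if isinstance(t, Variable):
            return s.get(t, t)  # one lookup only: no chained rewriting
        if isinstance(t, Function):
            return Function(t.name,
                            [self.substitute_in_term_sketch(s, a) for a in t.args])
        return t  # Atoms and Numbers are unchanged

    def substitute_in_clause_sketch(self, s: dict, c: Rule) -> Rule:
        body = c.body.terms if hasattr(c.body, 'terms') else c.body
        new_body = [self.substitute_in_term_sketch(s, t) for t in body]
        if hasattr(c.body, 'terms'):
            new_body = RuleBody(new_body)  # preserve the body's container type
        return Rule(self.substitute_in_term_sketch(s, c.head), new_body)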

    '''
	Problem 4
	unify (t1, t2) where t1 is of type Term and t2 is of type Term.
	The function should return a substitution map of type dict,
	which is a unifier of the given terms. You may find the pseudocode
	of unify in the lecture note Control in Prolog useful.

	The function should raise the exception Not_unifiable()
	if the given terms are not unifiable.

	Use a Python dictionary to represent a substitution map.
	'''

    def unify(self, t1: Term, t2: Term) -> dict:
        pass

    def test_unify(self):
        t1 = Function("f", [Variable("X"), Variable("Y"), Variable("Y")])
        t2 = Function("f", [Variable("Y"), Variable("Z"), Atom("a")])
        u = {
            Variable("X"): Atom("a"),
            Variable("Y"): Atom("a"),
            Variable("Z"): Atom("a")
        }
        # assert self.unify(t1, t2) == u
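
    # A hedged sketch for Problem 4, following the textbook algorithm:
    # apply the substitution built so far before comparing, bind variables
    # only after an occurs check, and recurse over matching Functions. It
    # builds on the *_sketch helpers above; Not_unifiable is the exception
    # named in the docstring.
    def unify_sketch(self, t1: Term, t2: Term) -> dict:
        def compose(s, v, t):
            # bind v to t, rewriting the existing bindings with the new one
            out = {k: self.substitute_in_term_sketch({v: t}, u)
                   for k, u in s.items()}
            out[v] = t
            return out

        def go(t1, t2, s):
            t1 = self.substitute_in_term_sketch(s, t1)
            t2 = self.substitute_in_term_sketch(s, t2)
            if t1 == t2:
                return s
            if isinstance(t1, Variable) and not self.occurs_check_sketch(t1, t2):
                return compose(s, t1, t2)
            if isinstance(t2, Variable) and not self.occurs_check_sketch(t2, t1):
                return compose(s, t2, t1)
            if (isinstance(t1, Function) and isinstance(t2, Function)
                    and t1.name == t2.name and len(t1.args) == len(t2.args)):
                for a, b in zip(t1.args, t2.args):
                    s = go(a, b, s)
                return s
            raise Not_unifiable()

        return go(t1, t2, {})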

    '''
	Problem 5
	Follow the pseudocode of the abstract interpreter in the lecture note
	Control in Prolog to implement a nondeterministic Prolog interpreter.

	nondet_query (program, goal) where
		the first argument is the program which is a list of Rules
		the second argument is the goal of type RuleBody (a conjunction of Functions/Terms).

	The function returns a list of Terms (results), which is an instance of the original goal and is
	a logical consequence of the program. See the test cases for expected results.
	'''

    def nondet_query(self, program: List[Rule], goal: RuleBody) -> RuleBody:
        pass
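
    # A hedged sketch for Problem 5 (one possible reading of the pseudocode):
    # pick a goal term and a program rule at random, rename the rule apart,
    # unify, and resolve until the resolvent is empty; on a dead end, restart.
    # It builds on the *_sketch helpers above and assumes goal exposes .terms
    # (a RuleBody, per the signature above); the "_G<n>" fresh-name scheme and
    # the returned list of Terms are assumptions of this sketch.
    def nondet_query_sketch(self, program: List[Rule], goal: RuleBody) -> list:
        import itertools
        import random
        counter = itertools.count()

        def rename_apart(rule):
            # map every variable of the rule to a (presumed) fresh one
            fresh = {v: Variable("_G%d" % next(counter))
                     for v in self.variables_of_clause_sketch(rule)}
            return self.substitute_in_clause_sketch(fresh, rule)

        while True:
            resolvent = list(goal.terms)
            solution = list(goal.terms)
            while resolvent:
                g = random.choice(resolvent)
                rule = rename_apart(random.choice(program))
                try:
                    theta = self.unify_sketch(rule.head, g)
                except Not_unifiable:
                    break  # dead end: restart the outer loop
                resolvent.remove(g)
                body = (rule.body.terms
                        if hasattr(rule.body, 'terms') else rule.body)
                resolvent = [self.substitute_in_term_sketch(theta, t)
                             for t in resolvent + list(body)]
                solution = [self.substitute_in_term_sketch(theta, t)
                            for t in solution]
            if not resolvent:
                return solution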

    # This test differs from the ones above (and from the OCaml version) in that
    #   (1) it reads a program from a file, and
    #   (2) it accepts interactive queries and prints the results.
    def test_nondet_query(self, input_file):
        program_parser = PrologParser()
        self.request_parser = PrologParser('request')
        program = program_parser.parse(read(input_file))
        # Print the input Prolog program.
        print('Input program:')
        for rule in program:
            print(f'rule: {rule}')

        while True:
            prefix = '?- '
            try:
                str_request = input(prefix)
            except KeyboardInterrupt:
                break
            if not str_request:
                break
            try:
                goal = self.request_parser.parse(prefix + str_request)
            except lark.exceptions.LarkError:
                print('Error: invalid syntax')
                continue
            try:
                # Print the input goal.
                print('Input goal:')
                print(f'Query: {goal.body}')

                goal_ = self.nondet_query(program, goal)
                # Print the solution goal_.
                print(f'Solution: {goal_}')
            except RecursionError:
                print('Error: stack overflow')
Example #11
    def __init__(self, session):

        self.session = session
        self.parser = PrologParser()
Example #12
class LogicDB(object):
    def __init__(self, session):

        self.session = session
        self.parser = PrologParser()

    def clear_module(self, module):

        logging.info("Clearing %s ..." % module)
        self.session.query(
            model.ORMClause).filter(model.ORMClause.module == module).delete()
        self.session.query(model.ORMPredicateDoc).filter(
            model.ORMPredicateDoc.module == module).delete()
        logging.info("Clearing %s ... done." % module)

    def clear_all_modules(self):

        logging.info("Clearing all modules ...")
        self.session.query(model.ORMClause).delete()
        self.session.query(model.ORMPredicateDoc).delete()
        self.session.query(model.Context).delete()
        logging.info("Clearing all modules ... done.")

    def store(self, module, clause):

        ormc = model.ORMClause(module=module,
                               arity=len(clause.head.args),
                               head=clause.head.name,
                               prolog=unicode(clause))

        # print unicode(clause)

        self.session.add(ormc)

    def store_doc(self, module, name, doc):

        ormd = model.ORMPredicateDoc(module=module, name=name, doc=doc)
        self.session.add(ormd)

    def lookup(self, name):

        # FIXME: caching ?

        # if name in self.clauses:
        #     return self.clauses[name]

        res = []

        for ormc in self.session.query(
                model.ORMClause).filter(model.ORMClause.head == name).all():

            for c in self.parser.parse_line_clauses(ormc.prolog):
                res.append(c)

        return res

    #
    # manage stored contexts in db
    #

    def read_context(self, name, key):

        ctx = self.session.query(model.Context).filter(
            model.Context.name == name, model.Context.key == key).first()
        if not ctx:
            return None

        return self.parser.parse_line_clause_body(ctx.value)

    def write_context(self, name, key, value):

        v = unicode(value)

        ctx = self.session.query(model.Context).filter(
            model.Context.name == name, model.Context.key == key).first()
        if not ctx:
            ctx = model.Context(name=name, key=key, value=v, default_value=v)
            self.session.add(ctx)
        else:
            ctx.value = v

    def set_context_default(self, name, key, value):

        ctx = self.session.query(model.Context).filter(
            model.Context.name == name, model.Context.key == key).first()

        if not ctx:
            ctx = model.Context(name=name,
                                key=key,
                                value=value,
                                default_value=value)
            self.session.add(ctx)
        else:
            ctx.default_value = value

    def reset_context(self, name):

        for ctx in self.session.query(
                model.Context).filter(model.Context.name == name).all():
            ctx.value = ctx.default_value