Example #1
def add_character_info(character_name: str, room_name: str, is_alive: bool, has_marks_on_neck: bool):
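    # Build a French sentence for each known fact about the character, parse it
    # with the matching FCFG grammar, and add the resulting semantic clause to
    # the inference engine `a` (see Example #16).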
    if is_alive:
        s = [character_name + ' est vivant']
        print(s[0])
        a.add_any_clause(getStringResults(nltk.interpret_sents(s, '635/personne_vivant.fcfg')))
    else:
        s = [character_name + ' est morte']
        print(s[0])
        a.add_any_clause(getStringResults(nltk.interpret_sents(s, '635/personne_morte.fcfg')))

    if has_marks_on_neck:
        s = [character_name + ' a des marques au cou']
        print(s[0])
        a.add_any_clause(getStringResults(nltk.interpret_sents(s, '635/personne_marque.fcfg')))

    s = [character_name + ' est dans le ' + room_name]
    print(s[0])
    a.add_any_clause(getStringResults(nltk.interpret_sents(s, '635/personne_piece.fcfg')))
Example #2
def sem_parser(sents, syntax, verbose=False, is_cs=False):
    """
    Parses sentences with an FCFG grammar and returns a dictionary mapping
    each sentence to its semantic representations.

    Parameters:
    sents: a list of sentences to be parsed.
    syntax: the FCFG grammar used for parsing, e.g. the path to an .fcfg
            file whose rules carry a SEM feature.
    verbose: boolean, default `False`. If True, the results are displayed.
    is_cs: boolean, default `False`. Indicates whether Cooper storage is used.
    Returns:
    dict: a dictionary mapping each sentence to a list of its
          semantic representations.
    """
    sents_reps = {sent: [] for sent in sents}
    for sent, results in zip(sents, nltk.interpret_sents(sents, syntax)):
        if verbose:
            display(Markdown(f"----\n{sent}: {len(results)} result(s)"))
        for j, (synrep, semrep) in enumerate(results):
            if is_cs:
                cs_semrep = cs.CooperStore(semrep)
                cs_semrep.s_retrieve(trace=False)
                for reading in cs_semrep.readings:
                    sents_reps[sent].append(reading)
                    if verbose:
                        display_latex(reading)  # display the retrieved reading
                if verbose:
                    display_tree(synrep)  # show the parse tree
            else:
                sents_reps[sent].append(semrep)
                if verbose:
                    display_latex(semrep)  # prints the SEM feature of a tree
                    display_tree(synrep)  # show the parse tree
    return sents_reps
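

# Usage sketch (not part of the original listing): it assumes `import nltk` and
# the sem_parser() definition above are in scope, and uses the SEM-annotated toy
# grammar shipped with the NLTK data package (the same one as in Example #5).
reps = sem_parser(['Irene walks', 'Cyril bites an ankle'],
                  'grammars/book_grammars/simple-sem.fcfg')
for sent, readings in reps.items():
    print(sent, '->', [str(r) for r in readings])  # e.g. 'Irene walks' -> ['walk(irene)']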
Example #3

#example4('Jean tua Marie avec une corde dans la cuisine')


def example5(s1):
    ep = load_parser('635/quant01.fcfg')
    for tree in ep.parse(s1.split()):
        tree.draw()


#example5('tous les cochons sont gros')

sents1 = ['La victime a été tuée à 22h']
sents2 = ['Jean tua Marie avec une corde']
sents3 = ['une corde tua Marie avec une corde dans la cuisine']
sents4 = ['tous les cochons sont gros']


def printResults(results):
    for result in results:
        for (synrep, semrep) in result:
            print(semrep)


printResults(nltk.interpret_sents(sents1, 'heure_crime.fcfg'))
#printResults(nltk.interpret_sents(sents2, '635/sem02app.fcfg'))
#printResults(nltk.interpret_sents(sents3, '635/sem03app.fcfg'))
#printResults(nltk.interpret_sents(sents3, '635/sem04.fcfg'))
#printResults(nltk.interpret_sents(sents4, '635/quant01.fcfg'))
Example #4
    def to_fol(self, fact, grammar):

        results = nltk.interpret_sents([fact,], grammar)
        sent = self.results_as_string(results)
        return sent
Example #5
print(expr.simplify())  # β-reduction

# simple-sem.fcfg contains a small set of rules for parsing and translating simple examples
from nltk import load_parser

parser = load_parser('grammars/book_grammars/simple-sem.fcfg', trace=0)  # set trace=2 to print the chart-parsing steps
sentence = 'Angus gives a bone to every dog'
tokens = sentence.split()
for tree in parser.parse(tokens):
    print(tree.label()['SEM'])

# interpret_sents() interprets a batch of input sentences; for each parse it yields the syntactic representation (synrep) and the semantic representation (semrep)
sents = ['Irene walks', 'Cyril bites an ankle']
grammar_file = 'grammars/book_grammars/simple-sem.fcfg'
for results in nltk.interpret_sents(sents, grammar_file):
    for (synrep, semrep) in results:
        print("语法表达:")
        print(synrep)
        print("语义表达:")
        print(semrep)

# evaluate_sents() evaluates a batch of input sentences; for each parse it yields the syntactic representation (synrep), the semantic representation (semrep), and the truth value (value)
v = """
bertie=>b
olive=>o
cyril=>c
boy=>{b}
girl=>{o}
dog=>{c}
walk=>{o,c}
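
# Sketch of the evaluate_sents() step the truncated snippet above leads up to
# (assumptions: the valuation mirrors the entries shown above and is closed off;
# the model/assignment setup follows the NLTK book conventions).
import nltk

v = """
bertie => b
olive => o
cyril => c
boy => {b}
girl => {o}
dog => {c}
walk => {o, c}
"""
val = nltk.Valuation.fromstring(v)
g = nltk.Assignment(val.domain)  # variable assignment over the model's domain
m = nltk.Model(val.domain, val)  # first-order model built from the valuation

# Each sentence yields a list of (synrep, semrep, value) triples.
for results in nltk.evaluate_sents(['Cyril walks'], 'grammars/book_grammars/simple-sem.fcfg', m, g):
    for (synrep, semrep, value) in results:
        print(semrep, value)  # e.g. walk(cyril) True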
Example #6
    def to_fol(self, fact, grammar):
        sent = self.results_as_string(nltk.interpret_sents(fact, grammar))
        # print(sent)
        return sent
Example #7
    def who_was_with_who(self, sentence):
        # take the semantic representation (semrep) of the first parse of the input sentence
        result = str(nltk.interpret_sents(sentence, self.was_with_who_grammar)[0][0][1])
        return self.parse_result_was_with(result)
Example #8
    def where_in_room_in_hour(self, sentence):
        result = str(nltk.interpret_sents(sentence, self._location_hour_grammar_file)[0][0][1])
        return self.parse_result_room_hour(result)
Example #9
    def what_is_the_crime_hour(self, sentence):
        return 'H' + str(nltk.interpret_sents(sentence, self._crime_hour_grammar_file)[0][0][1])
Example #10
    def where_is_person(self, sentence):
        result = str(nltk.interpret_sents(sentence, self._location_person_grammar)[0][0][1])
        return self.parse_result_person_location(result)
Example #11
    def where_am_i(self, sentence):
        return str(nltk.interpret_sents(sentence, self._location_grammar_file)[0][0][1])
Example #12
    def who_is_the_victime(self, sentence):
        return str(nltk.interpret_sents(sentence, self._victim_grammar_file)[0][0][1])
Example #13
def add_weapon_found_fact(weapon_name: str, room_name: str):
    s = ['Le ' + weapon_name + ' est dans le ' + room_name]
    print(s[0])
    a.add_any_clause(getStringResults(nltk.interpret_sents(s, '635/arme_piece.fcfg')))
Example #14
def add_hour_of_death(victim_name: str, hour_of_death: int):
    s = [victim_name + ' est morte à ' + str(hour_of_death) + 'h']
    print(s[0])
    a.add_any_clause(getStringResults(nltk.interpret_sents(s, '635/personne_morte_heure.fcfg')))
    a.add_any_clause('HeureCrimePlusOne({})'.format(hour_of_death + 1))
Example #15
def add_room_after_crime(character_name: str, une_heure_apres: int, room_after_crime: str):
    s = [character_name + ' était dans le ' + room_after_crime + ' à ' + str(une_heure_apres) + 'h']
    print(s[0])
    a.add_any_clause(getStringResults(nltk.interpret_sents(s, '635/personne_piece_heure.fcfg')))
Example #16
from src import inference
import nltk

a = inference.InferenceCrime()


def getStringResults(results):
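    # Concatenate the semantic representations produced by interpret_sents()
    # into a single string that can be passed to the inference engine.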
    res = ''
    for result in results:
        for (synrep, semrep) in result:
            res += str(semrep)
    return res


sents1 = ['Scarlet est morte']
a.add_any_clause(getStringResults(nltk.interpret_sents(sents1, '635/personne_morte.fcfg')))

sents2 = ['Mustard est vivant']
a.add_any_clause(getStringResults(nltk.interpret_sents(sents2, '635/personne_vivant.fcfg')))

sents3 = ['Peacock est vivant']
a.add_any_clause(getStringResults(nltk.interpret_sents(sents3, '635/personne_vivant.fcfg')))

sents4 = ['Plum est vivant']
a.add_any_clause(getStringResults(nltk.interpret_sents(sents4, '635/personne_vivant.fcfg')))

sents5 = ['White est vivant']
a.add_any_clause(getStringResults(nltk.interpret_sents(sents5, '635/personne_vivant.fcfg')))

#####   step 1 : Bureau