def tag_sentences(self, text, hyp, verbose=False): """ Tag a RTEPair as to whether the hypothesis can be inferred from the text. """ text_drs_list = drt_glue.parse_to_meaning(text, dependency=True) if text_drs_list: text_ex = text_drs_list[0].simplify().toFol() else: if verbose: print 'ERROR: No readings were be generated for the Text' hyp_drs_list = drt_glue.parse_to_meaning(hyp, dependency=True) if hyp_drs_list: hyp_ex = hyp_drs_list[0].simplify().toFol() else: if verbose: print 'ERROR: No readings were be generated for the Hypothesis' #1. proof T -> H #2. proof (BK & T) -> H #3. proof :(BK & T) #4. proof :(BK & T & H) #5. satisfy BK & T #6. satisfy BK & T & H result = inference.get_prover(hyp_ex, [text_ex]).prove() if verbose: print 'prove: T -> H: %s' % result if not result: bk = tagger._generate_BK(text, hyp, verbose) bk_exs = [bk_pair[0] for bk_pair in bk] if verbose: print 'Generated Background Knowledge:' for bk_ex in bk_exs: print bk_ex.infixify() result = inference.get_prover(hyp_ex, [text_ex]+bk_exs).prove() if verbose: print 'prove: (T & BK) -> H: %s' % result if not result: consistent = self.check_consistency(bk_exs+[text_ex]) if verbose: print 'consistency check: (BK & T): %s' % consistent if consistent: consistent = self.check_consistency(bk_exs+[text_ex, hyp_ex]) if verbose: print 'consistency check: (BK & T & H): %s' % consistent return result
def demo_drt_glue_remove_duplicates(show_example=-1): from nltk_contrib.gluesemantics import drt_glue examples = ['David sees Mary', 'David eats a sandwich', 'every man chases a dog', 'John chases himself', 'John likes a cat', 'John likes every cat', 'he likes a dog', 'a dog walks and he leaves'] example_num = 0 hit = False for sentence in examples: if example_num==show_example or show_example==-1: print '[[[Example %s]]] %s' % (example_num, sentence) readings = drt_glue.parse_to_meaning(sentence, True) for j in range(len(readings)): reading = readings[j].simplify().resolve_anaphora() print reading print '' hit = True example_num += 1 if not hit: print 'example not found'
def _exampleList_select(self, event):
    """
    Handle a selection event on the example listbox: parse the chosen
    example sentence and redraw the readings.

    :param event: the Tkinter selection event (unused directly; the
        current selection is read from the widget)
    """
    chosen = self._exampleList.curselection()
    if len(chosen) != 1:
        return
    self._curExample = int(chosen[0])
    example = self._examples[self._curExample]

    if not example:
        # Reset the example selections.
        self._exampleList.selection_clear(0, 'end')
        return

    self._exampleList.selection_clear(0, 'end')
    self._exampleList.selection_set(self._curExample)
    self._readings = drt_glue.parse_to_meaning(example,
                                               self._remove_duplicates)
    self._populate_readingListbox()
    self._drs = None
    self._redraw()
def _exampleList_select(self, event):
    # NOTE(review): this method is a byte-for-byte duplicate of another
    # _exampleList_select definition in this file -- confirm which one is
    # live and consider removing the other.
    """
    Handle a selection event on the example listbox: parse the selected
    example sentence and redraw the readings.

    :param event: the Tkinter selection event (unused directly; the
        current selection is read from the widget)
    """
    selection = self._exampleList.curselection()
    if len(selection) != 1: return
    self._curExample = int(selection[0])
    example = self._examples[self._curExample]
    if example:
        # Re-select the chosen row, reparse it, and clear any cached DRS
        # before redrawing.
        self._exampleList.selection_clear(0, 'end')
        self._exampleList.selection_set(self._curExample)
        self._readings = drt_glue.parse_to_meaning(example, self._remove_duplicates)
        self._populate_readingListbox()
        self._drs = None
        self._redraw()
    else:
        # Reset the example selections.
        self._exampleList.selection_clear(0, 'end')
def demo_inference_tagger(verbose=False): tagger = RTEInferenceTagger() text = 'John see a car' print 'Text: ', text hyp = 'John watch an auto' print 'Hyp: ', hyp # text_ex = logic.LogicParser().parse('some e x y.((david x) and ((own e) and ((subj e x) and ((obj e y) and (car y)))))))') # hyp_ex = logic.LogicParser().parse('some e x y.((david x) and ((have e) and ((subj e x) and ((obj e y) and (auto y)))))))') text_drs_list = drt_glue.parse_to_meaning(text, dependency=True, verbose=verbose) if text_drs_list: text_ex = text_drs_list[0].simplify().toFol() else: print 'ERROR: No readings were be generated for the Text' hyp_drs_list = drt_glue.parse_to_meaning(hyp, dependency=True, verbose=verbose) if hyp_drs_list: hyp_ex = hyp_drs_list[0].simplify().toFol() else: print 'ERROR: No readings were be generated for the Hypothesis' print 'Text: ', text_ex print 'Hyp: ', hyp_ex print '' #1. proof T -> H #2. proof (BK & T) -> H #3. proof :(BK & T) #4. proof :(BK & T & H) #5. satisfy BK & T #6. satisfy BK & T & H result = inference.get_prover(hyp_ex, [text_ex]).prove() print 'prove: T -> H: %s' % result if result: print 'Logical entailment\n' else: print 'No logical entailment\n' bk = tagger._generate_BK(text, hyp, verbose) bk_exs = [bk_pair[0] for bk_pair in bk] print 'Generated Background Knowledge:' for bk_ex in bk_exs: print bk_ex.infixify() print '' result = inference.get_prover(hyp_ex, [text_ex]+bk_exs).prove() print 'prove: (T & BK) -> H: %s' % result if result: print 'Logical entailment\n' else: print 'No logical entailment\n' # Check if the background knowledge axioms are inconsistant result = inference.get_prover(assumptions=bk_exs+[text_ex]).prove() print 'prove: (BK & T): %s' % result if result: print 'Inconsistency -> Entailment unknown\n' else: print 'No inconsistency\n' result = inference.get_prover(assumptions=bk_exs+[text_ex, hyp_ex]).prove() print 'prove: (BK & T & H): %s' % result if result: print 'Inconsistency -> Entailment unknown\n' else: print 'No 
inconsistency\n' result = inference.get_model_builder(assumptions=bk_exs+[text_ex]).build_model() print 'satisfy: (BK & T): %s' % result if result: print 'No inconsistency\n' else: print 'Inconsistency -> Entailment unknown\n' result = inference.get_model_builder(assumptions=bk_exs+[text_ex, hyp_ex]).build_model() print 'satisfy: (BK & T & H): %s' % result if result: print 'No inconsistency\n' else: print 'Inconsistency -> Entailment unknown\n'