Ejemplo n.º 1
0
    def _init_glue(self):
        """Build the DRT glue-semantics engine used by this tagger.

        Constructs a small regexp-based POS tagger covering the demo
        vocabulary, wires it into a MaltParser dependency parser, and
        stores the resulting DrtGlue instance on ``self._glue``.
        """
        tagger = RegexpTagger([
            ('^(David|Mary|John)$', 'NNP'),
            # BUGFIX: 'chases' was listed twice in this alternation; the
            # redundant duplicate has been removed (matching is unchanged).
            ('^(walks|sees|eats|chases|believes|gives|sleeps|persuades|tries|seems|leaves)$',
             'VB'),
            ('^(go|order|vanish|find|approach)$', 'VB'),
            ('^(a)$', 'ex_quant'),
            ('^(every)$', 'univ_quant'),
            ('^(sandwich|man|dog|pizza|unicorn|cat|senator)$', 'NN'),
            ('^(big|gray|former)$', 'JJ'),
            ('^(him|himself)$', 'PRP')
        ])

        depparser = MaltParser(tagger=tagger)
        self._glue = DrtGlue(depparser=depparser, remove_duplicates=False)
Ejemplo n.º 2
0
 def __init__(self, semtype_file=None, remove_duplicates=False,
              depparser=None):
     """
     Wrap a DrtGlue instance configured from the given options.

     :param semtype_file: name of file where grammar can be loaded;
         falls back to 'drt_glue.semtype' when None
     :param remove_duplicates: should duplicate readings be removed?
     :param depparser: the dependency parser
     """
     chosen_file = 'drt_glue.semtype' if semtype_file is None else semtype_file
     self._glue = DrtGlue(semtype_file=chosen_file,
                          remove_duplicates=remove_duplicates,
                          depparser=depparser)
Ejemplo n.º 3
0
    def tag_sentences(self, text, hyp, verbose=False):
        """
        Tag an RTE pair as to whether the hypothesis can be inferred from
        the text.

        :param text: the text sentence
        :param hyp: the hypothesis sentence
        :param verbose: if True, print the outcome of each proof step
        :return: True if entailment was proven, False otherwise

        Proof strategy:
        1. prove T -> H
        2. prove (BK & T) -> H
        3. consistency check of (BK & T)
        4. consistency check of (BK & T & H)
        """
        glueclass = DrtGlue()

        text_drs_list = glueclass.parse_to_meaning(text)
        if text_drs_list:
            text_ex = text_drs_list[0].simplify().toFol()
        else:
            # BUGFIX: previously execution fell through with text_ex
            # unbound, raising NameError at the prover call below.
            if verbose: print 'ERROR: No readings were generated for the Text'
            return False

        hyp_drs_list = glueclass.parse_to_meaning(hyp)
        if hyp_drs_list:
            hyp_ex = hyp_drs_list[0].simplify().toFol()
        else:
            # BUGFIX: same unbound-local hazard for hyp_ex.
            if verbose:
                print 'ERROR: No readings were generated for the Hypothesis'
            return False

        # 1. prove T -> H
        result = inference.Prover9().prove(hyp_ex, [text_ex])
        if verbose: print 'prove: T -> H: %s' % result

        if not result:
            bk = self._generate_BK(text, hyp, verbose)
            bk_exs = [bk_pair[0] for bk_pair in bk]

            if verbose:
                print 'Generated Background Knowledge:'
                for bk_ex in bk_exs:
                    print bk_ex

            # 2. prove (BK & T) -> H
            result = inference.Prover9().prove(hyp_ex, [text_ex] + bk_exs)
            if verbose: print 'prove: (T & BK) -> H: %s' % result

            if not result:
                # 3. consistency of (BK & T)
                consistent = self.check_consistency(bk_exs + [text_ex])
                if verbose:
                    print 'consistency check: (BK & T): %s' % consistent

                if consistent:
                    # 4. consistency of (BK & T & H)
                    consistent = self.check_consistency(bk_exs +
                                                        [text_ex, hyp_ex])
                    if verbose:
                        print 'consistency check: (BK & T & H): %s' % consistent

        return result
Ejemplo n.º 4
0
def demo_inference_tagger(verbose=False):
    tagger = RTEInferenceTagger()

    text = 'John see a car'
    print 'Text: ', text
    hyp = 'John watch an auto'
    print 'Hyp:  ', hyp

    #    text_ex = LogicParser().parse('exists e x y.(david(x) & own(e)  & subj(e,x) & obj(e,y) & car(y))')
    #    hyp_ex  = LogicParser().parse('exists e x y.(david(x) & have(e) & subj(e,x) & obj(e,y) & auto(y))')

    glueclass = DrtGlue(verbose=verbose)
    text_drs_list = glueclass.parse_to_meaning(text)
    if text_drs_list:
        text_ex = text_drs_list[0].simplify().toFol()
    else:
        print 'ERROR: No readings were be generated for the Text'

    hyp_drs_list = glueclass.parse_to_meaning(hyp)
    if hyp_drs_list:
        hyp_ex = hyp_drs_list[0].simplify().toFol()
    else:
        print 'ERROR: No readings were be generated for the Hypothesis'

    print 'Text: ', text_ex
    print 'Hyp:  ', hyp_ex
    print ''

    #1. proof T -> H
    #2. proof (BK & T) -> H
    #3. proof :(BK & T)
    #4. proof :(BK & T & H)
    #5. satisfy BK & T
    #6. satisfy BK & T & H

    result = inference.Prover9().prove(hyp_ex, [text_ex])
    print 'prove: T -> H: %s' % result
    if result:
        print 'Logical entailment\n'
    else:
        print 'No logical entailment\n'

    bk = tagger._generate_BK(text, hyp, verbose)
    bk_exs = [bk_pair[0] for bk_pair in bk]

    print 'Generated Background Knowledge:'
    for bk_ex in bk_exs:
        print bk_ex
    print ''

    result = inference.Prover9().prove(hyp_ex, [text_ex] + bk_exs)
    print 'prove: (T & BK) -> H: %s' % result
    if result:
        print 'Logical entailment\n'
    else:
        print 'No logical entailment\n'

    # Check if the background knowledge axioms are inconsistent
    result = inference.Prover9().prove(assumptions=bk_exs + [text_ex]).prove()
    print 'prove: (BK & T): %s' % result
    if result:
        print 'Inconsistency -> Entailment unknown\n'
    else:
        print 'No inconsistency\n'

    result = inference.Prover9().prove(assumptions=bk_exs + [text_ex, hyp_ex])
    print 'prove: (BK & T & H): %s' % result
    if result:
        print 'Inconsistency -> Entailment unknown\n'
    else:
        print 'No inconsistency\n'

    result = inference.Mace().build_model(assumptions=bk_exs + [text_ex])
    print 'satisfy: (BK & T): %s' % result
    if result:
        print 'No inconsistency\n'
    else:
        print 'Inconsistency -> Entailment unknown\n'

    result = inference.Mace().build_model(assumptions=bk_exs +
                                          [text_ex, hyp_ex]).build_model()
    print 'satisfy: (BK & T & H): %s' % result
    if result:
        print 'No inconsistency\n'
    else:
        print 'Inconsistency -> Entailment unknown\n'