def check_consistency(self, assumptions, verbose=False):
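        """
        Check whether the given assumptions are mutually consistent by running
        a theorem prover (which tries to derive a contradiction from them) and
        a model builder (which tries to find a satisfying model) in parallel,
        returning as soon as either one reports a verdict.

        Sketch of intended usage, where 'tagger' is assumed to be an
        RTEInferenceTagger instance; the formulas are illustrative only and
        assume the logic.LogicParser syntax used elsewhere in this module:

            parse = logic.LogicParser().parse
            tagger.check_consistency([parse('(walk john)'), parse('(not (walk john))')])
            # -> False: the prover derives a contradiction
            tagger.check_consistency([parse('(walk john)')])
            # -> True: the model builder finds a satisfying model
        """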
        # Set up two threads, a Prover and a ModelBuilder, to run in parallel
        prover = inference.get_prover(assumptions=assumptions)
        model_builder = inference.get_model_builder(assumptions=assumptions)
        
        prover_result = [None]
        prover_thread = ProverThread(prover, prover_result, verbose)
        model_builder_result = [None]
        model_builder_thread = ModelBuilderThread(model_builder, model_builder_result, verbose)
        
        prover_thread.start()
        model_builder_thread.start()
        
        while not prover_result[0] and not model_builder_result[0]:
            # busy-wait until either the prover or the model builder reports a result
            pass
    
        if prover_result[0]:
            consistency = prover_result[0]
        else:
            consistency = model_builder_result[0]

        return (consistency == 'consistent')

def demo_inference_tagger(verbose=False):
    tagger = RTEInferenceTagger()
    
    text = 'John see a car'
    print 'Text: ', text
    hyp = 'John watch an auto'
    print 'Hyp:  ', hyp

#    text_ex = logic.LogicParser().parse('some e x y.((david x) and ((own e) and ((subj e x) and ((obj e y) and (car y)))))))')
#    hyp_ex = logic.LogicParser().parse('some e x y.((david x) and ((have e) and ((subj e x) and ((obj e y) and (auto y)))))))')

    text_drs_list = drt_glue.parse_to_meaning(text, dependency=True, verbose=verbose)
    if text_drs_list:
        text_ex = text_drs_list[0].simplify().toFol()
    else:
        print 'ERROR: No readings were generated for the Text'
        return
    
    hyp_drs_list = drt_glue.parse_to_meaning(hyp, dependency=True, verbose=verbose)
    if hyp_drs_list:
        hyp_ex = hyp_drs_list[0].simplify().toFol()
    else:
        print 'ERROR: No readings were generated for the Hypothesis'
        return

    print 'Text: ', text_ex
    print 'Hyp:  ', hyp_ex
    print ''

    #1. prove T -> H
    #2. prove (BK & T) -> H
    #3. prove -(BK & T), i.e. show that BK & T is inconsistent
    #4. prove -(BK & T & H), i.e. show that BK & T & H is inconsistent
    #5. satisfy BK & T
    #6. satisfy BK & T & H
        
    result = inference.get_prover(hyp_ex, [text_ex]).prove()
    print 'prove: T -> H: %s' % result
    if result:
        print 'Logical entailment\n'
    else:
        print 'No logical entailment\n'

    bk = tagger._generate_BK(text, hyp, verbose)
    bk_exs = [bk_pair[0] for bk_pair in bk]
    
    print 'Generated Background Knowledge:'
    for bk_ex in bk_exs:
        print bk_ex.infixify()
    print ''
        
    result = inference.get_prover(hyp_ex, [text_ex]+bk_exs).prove()
    print 'prove: (T & BK) -> H: %s' % result
    if result:
        print 'Logical entailment\n'
    else:
        print 'No logical entailment\n'

    # 3. Check whether the background knowledge axioms together with the text
    #    are inconsistent: the prover is given no goal, so a successful proof
    #    means a contradiction follows from the assumptions
    result = inference.get_prover(assumptions=bk_exs+[text_ex]).prove()
    print 'prove: (BK & T): %s' % result
    if result:
        print 'Inconsistency -> Entailment unknown\n'
    else:
        print 'No inconsistency\n'
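
    # 4. Repeat the inconsistency check with the hypothesis added to the theory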

    result = inference.get_prover(assumptions=bk_exs+[text_ex, hyp_ex]).prove()
    print 'prove: (BK & T & H): %s' % result
    if result:
        print 'Inconsistency -> Entailment unknown\n'
    else:
        print 'No inconsistency\n'
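
    # 5-6. Dual check with a model builder: finding a satisfying model
    #      shows that the assumptions are consistent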

    result = inference.get_model_builder(assumptions=bk_exs+[text_ex]).build_model()
    print 'satisfy: (BK & T): %s' % result
    if result:
        print 'No inconsistency\n'
    else:
        print 'Inconsistency -> Entailment unknown\n'

    result = inference.get_model_builder(assumptions=bk_exs+[text_ex, hyp_ex]).build_model()
    print 'satisfy: (BK & T & H): %s' % result
    if result:
        print 'No inconsistency\n'
    else:
        print 'Inconsistency -> Entailment unknown\n'
def _parallel_prove_satisfy(self, goal=None, assumptions=[], verbose=False):
    # Run a prover and a model builder over the same input in parallel and
    # return the shared result lists so the caller can poll for an answer.
    prover = inference.get_prover(goal, assumptions=assumptions)
    model_builder = inference.get_model_builder(goal, assumptions=assumptions)

    prover_result = [None]
    model_builder_result = [None]
    ProverThread(prover, prover_result, verbose).start()
    ModelBuilderThread(model_builder, model_builder_result, verbose).start()
    return prover_result, model_builder_result
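
# A caller could then poll the shared result lists, much as check_consistency
# does above. Sketch only: it assumes ProverThread and ModelBuilderThread write
# their verdict into slot 0 of the list they are given, and it reuses the
# demo's bk_exs and text_ex formula lists purely for illustration.
#
#   prover_result, model_builder_result = tagger._parallel_prove_satisfy(
#       assumptions=bk_exs + [text_ex])
#   while not prover_result[0] and not model_builder_result[0]:
#       pass
#   verdict = prover_result[0] or model_builder_result[0]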