Example #1
0
def start_inference(question):
    """Initialise the global inference state, then answer *question* with it.

    Side effects: rebinds the module-level ``inference_object`` and
    ``inference_helper`` so that later calls skip this bootstrap.
    """
    global inference_helper, inference_object

    # One-time start-up: keep the helper's return value module-wide so the
    # replacement helper below can reuse it on every call.
    inference_object = do_start_inference(out_dir, hparams)

    def _run(text):
        # Late-bound lookup of the global inference_object, same as the
        # original lambda form.
        return do_inference(text, *inference_object)

    inference_helper = _run

    # Load BPE merge pairs only when the preprocessing config asks for them.
    if preprocessing['use_bpe']:
        apply_bpe_load()

    # Finally answer the question that triggered the bootstrap.
    return inference_helper(question)
Example #2
0
def start_inference(question):
    """Answer *question*, setting up the global inference parameters first.

    Rebinds the module-level ``inference_object`` and ``inference_helper``
    as a side effect so that subsequent calls use the real inference path.
    """
    global inference_helper, inference_object

    # Set global tuple with model, flags and hparams.
    inference_object = setup_inference_parameters(out_dir, hparams)

    # Parameter setup is done — swap in the actual inference function.
    # (Renamed the lambda parameter so it no longer shadows *question*.)
    inference_helper = lambda q: do_inference(q, *inference_object)

    # Load BPE join pairs when preprocessing is configured to use BPE.
    if preprocessing['use_bpe']:
        apply_bpe_load()

    # Finally start inference.
    return inference_helper(question)
Example #3
0
def start_inference(question):
    """Bootstrap the inference machinery and answer *question*.

    The first inference() call lands here; afterwards the module-level
    ``inference_helper`` points directly at the real inference routine.
    """
    global inference_helper, inference_object

    # Start inference: cache the global tuple with model, flags and hparams.
    inference_object = do_start_inference(out_dir, hparams)

    def _answer(text):
        # Forward to the real routine with the cached inference state.
        return do_inference(text, *inference_object)

    # Everything is running now, so replace the bootstrap helper with the
    # actual function call.
    inference_helper = _answer

    # Load BPE join pairs if the preprocessing config enables BPE.
    if preprocessing['use_bpe']:
        apply_bpe_load()

    # Rerun the inference() call through the new helper.
    return inference_helper(question)