Example #1
0
def test():
    """Run the reading-comprehension predictor and the ElasticSearch Wikidata
    guesser over all answerable bonus-pair examples, then pickle the
    (prediction, guess, answer) triples to ``results.pkl``.
    """
    # Load the first enabled guesser from its serialized output directory.
    gspec = AbstractGuesser.list_enabled_guessers()[0]
    guesser_dir = AbstractGuesser.output_path(gspec.guesser_module,
            gspec.guesser_class, '')
    guesser = ElasticSearchWikidataGuesser.load(guesser_dir)

    torch.cuda.set_device(0)  # NOTE: hard-coded to GPU 0
    predictor = Predictor()
    predictor.cuda()

    dataset = BonusPairsDataset()
    # Keep only examples whose answer span was actually located in the content
    # (start == -1 marks "answer not found").
    examples = [x for x in dataset.examples if x['start'] != -1]

    guesses = []
    for example in tqdm(examples):
        document = example['content']
        question = example['query']
        predictions = predictor.predict(document, question, top_n=1)
        prediction = predictions[0][0]

        # Pick the highest-scoring guess directly instead of sorting the
        # whole score dict and reversing it.
        scored = guesser.guess_single(question)
        best_page = max(scored.items(), key=lambda kv: kv[1])[0]
        guess = best_page.replace('_', ' ')

        guesses.append((prediction, guess, example['answer']))

    with open('results.pkl', 'wb') as f:
        pickle.dump(guesses, f)
Example #2
0
def process(document, question, candidates=None, top_n=1):
    """Return the single best answer span for *question* over *document*.

    Args:
        document: Raw text to read.
        question: Question to answer.
        candidates: Optional candidate spans to restrict prediction to.
        top_n: How many predictions to request from the predictor
            (only the first is returned, kept for interface compatibility).

    Returns:
        The top-ranked span text.

    Raises:
        IndexError: If the predictor returns no predictions
            (same behavior as the original ``val[0]`` access).
    """
    predictor = Predictor(None, 'spacy', num_workers=0, normalize=True)
    predictions = predictor.predict(document, question, candidates, top_n)
    # predictions are (span, score) pairs ranked best-first; the original
    # looped over all of them only to return the first span, so index
    # directly instead of building a throwaway list.
    return predictions[0][0]
Example #3
0
def process():
    """Flask route handler: answer a question against a document.

    Expects a JSON body with ``document`` and ``question`` keys and
    returns a JSON array of predicted span texts (top-1 requested, so
    the array holds at most one element).
    """
    data = request.json
    document = data['document']
    question = data['question']
    predictor = Predictor(None, 'spacy', num_workers=0, normalize=True)
    predictions = predictor.predict(document, question, None, 1)
    # predictions are (span, score) pairs; keep only the span text.
    val = [p[0] for p in predictions]
    return jsonify(val)