Code example #1
File: interpreter.py  Project: dbpedia/neural-qa
def interpret(input_dir, query):
    # Restore the most recent training checkpoint of the saved model.
    model_dir = input_dir + '/training_checkpoints'
    config = NeuralMT.load(input_dir)
    neural_mt = NeuralMT(config)
    checkpoint = neural_mt.checkpoint
    checkpoint.restore(tf.train.latest_checkpoint(model_dir)).expect_partial()

    # Build a small report: the input query, the raw (encoded) translation,
    # and the decoded SPARQL with fixed URIs.
    finaltrans = "input query: \n"
    finaltrans += query

    finaltrans += "\n \n \n output query: \n"
    finaltranso = translate(query, input_dir, config, neural_mt)
    finaltrans += finaltranso

    finaltrans += '\n \n \n output query decoded: \n'
    finaltranso = decode(finaltranso)
    finaltranso = fix_URI(finaltranso)
    print('Decoded translation: {}'.format(finaltranso))
    finaltrans += finaltranso

    # Write the report to <input_dir>/output_query.txt.
    with open(input_dir + '/output_query.txt', 'w', encoding="utf8") as outputfile:
        outputfile.writelines([finaltrans])
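A minimal invocation sketch (the model directory and the question below are hypothetical; interpret expects input_dir to contain a training_checkpoints subdirectory and writes its report to input_dir/output_query.txt):

interpret('./models/en_dbpedia', 'what is the population of berlin')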
Code example #2
File: interpreter.py  Project: dbpedia/neural-qa
def interpreter(val):
    # Python 2 idiom: reload sys so the default string encoding can be set.
    reload(sys)
    sys.setdefaultencoding("utf-8")
    # Turn an encoded model output back into a valid SPARQL query.
    encoded_sparql = val
    decoded_sparql = decode(encoded_sparql)
    decoded_sparql = fix_URI(decoded_sparql)
    return decoded_sparql
Code example #3
def test_encoding():
    original = 'SELECT ?city WHERE { ?m skos:broader dbc:Cities_in_Germany . ?city dct:subject ?m . ?city dbo:areaTotal ?area . ?b dbo:artist dbr:John_Halsey_(musician) } order by asc (?area)'
    expected_encoding = 'SELECT var_city WHERE brack_open var_m skos_broader dbc_Cities_in_Germany sep_dot var_city dct_subject var_m sep_dot var_city dbo_areaTotal var_area sep_dot var_b dbo_artist dbr_John_Halsey_ attr_open musician attr_close  brack_close _oba_ var_area '

    result = generator_utils.encode(original)

    assert result == expected_encoding
    assert str.strip(generator_utils.decode(result)) == original
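The test above documents the token mapping used by generator_utils: variables, brackets, dots, and prefixed names are rewritten into plain tokens (?city becomes var_city, { becomes brack_open, and so on) so that a SPARQL query can be treated as an ordinary sentence by the translation model. A minimal round-trip sketch, assuming the same mapping (the query itself is made up):

from generator_utils import encode, decode

query = 'SELECT ?x WHERE { ?x dbo:capital dbr:Berlin }'
encoded = encode(query)                      # e.g. 'SELECT var_x WHERE brack_open ...'
assert str.strip(decode(encoded)) == query   # decoding restores the original SPARQL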
Code example #4
def get_bot_response():
    # Pass the user's question to the model, then decode the prediction into SPARQL.
    userText = request.args.get('msg')
    finaltranso = brain(userText)
    finaltranso = decode(finaltranso)
    print("decoded : " + finaltranso)
    finaltranso = fix_URI(finaltranso)
    print("fixed uri : " + finaltranso)
    # Escape angle brackets so the query renders safely in HTML.
    return finaltranso.replace('<', '&lt;').replace('>', '&gt;')
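get_bot_response reads the msg request parameter and returns an HTML-escaped query, which suggests it serves as a web view. A hypothetical wiring sketch, assuming Flask-style request handling (the /get route is an assumption, not taken from the project):

from flask import Flask, request

app = Flask(__name__)

# Register the view defined above at a hypothetical /get endpoint.
app.add_url_rule('/get', view_func=get_bot_response)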
Code example #5
File: interpreter.py  Project: chiennv2000/neural-qa
#!/usr/bin/env python
"""

Neural SPARQL Machines - Interpreter module.

'SPARQL as a Foreign Language' by Tommaso Soru and Edgard Marx et al., SEMANTiCS 2017
https://arxiv.org/abs/1708.07624

Version 1.0.0

"""
import sys
import re

from generator_utils import decode, fix_URI
import importlib

if __name__ == '__main__':
    importlib.reload(sys)
    encoded_sparql = sys.argv[1]
    decoded_sparql = decode(encoded_sparql)
    decoded_sparql = fix_URI(decoded_sparql)
    print(decoded_sparql)
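A usage sketch (the encoded query is made up): running python interpreter.py "select var_x where brack_open var_x dbo_capital dbr_Berlin brack_close" prints the decoded, URI-fixed SPARQL query to stdout.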
Code example #6
File: interpreter.py  Project: iteachmachines/NSpM
def translate(sentence, ou_dir):
    result, sentence, attention_plot = evaluate(sentence)

    print('Input: %s' % (sentence))
    print('Predicted translation: {}'.format(result))

    # Trim the attention matrix to the actual output/input lengths and plot it.
    attention_plot = attention_plot[:len(result.split(' ')), :len(sentence.split(' '))]
    plot_attention(attention_plot, sentence.split(' '), result.split(' '), ou_dir)
    return result


# Restore the latest checkpoint, translate the input string, and build a report
# containing the input, the raw (encoded) translation, and the decoded SPARQL.
inputs = args.inputstr
model_dir = input_dir + '/training_checkpoints'
checkpoint.restore(tf.train.latest_checkpoint(model_dir))

finaltrans = "input query: \n"
finaltrans += inputs
finaltrans += "\n \n \n output query: \n"
finaltranso = translate(inputs, input_dir)
finaltrans += finaltranso
finaltrans += '\n \n \n output query decoded: \n'
finaltranso = decode(finaltranso)
finaltranso = fix_URI(finaltranso)
print('Decoded translation: {}'.format(finaltranso))
finaltrans += finaltranso

with open(input_dir + '/output_query.txt', 'w', encoding="utf8") as outputfile:
    outputfile.writelines([finaltrans])
Code example #7
import re
import sys

from generator_utils import decode, fix_URI, query_dbpedia

if __name__ == '__main__':
    # Python 2 idiom: reload sys so the default string encoding can be set.
    reload(sys)
    sys.setdefaultencoding("utf-8")

    # python2 interpreter.py ./fairseq/results/$1_$2.out.sys ./fairseq/results/$1_$2.out.ref
    with open(sys.argv[1], 'r') as f_sys, open(sys.argv[2], 'r') as f_ref, \
        open(sys.argv[1] + '.decoded2', 'w') as f_decoded, open(sys.argv[1] + ".decoded.final2", 'w') as f_out:

        count_false = 0

        # Decode each system output line, run it against DBpedia, and count
        # predictions whose reference contains <unk> or that return no results.
        for encoded_sparql_sys, encoded_sparql_ref in zip(f_sys, f_ref):
            decoded_sparql = decode(encoded_sparql_sys)
            f_decoded.write(decoded_sparql)
            decoded_sparql = fix_URI(decoded_sparql)
            return_json = query_dbpedia(decoded_sparql)
            is_sparql_right = "True"

            if '<unk>' in encoded_sparql_ref or ("results" in return_json.keys() and len(return_json["results"]["bindings"]) == 0):
                #print(encoded_sparql_sys)
                #print(encoded_sparql_ref)
                #print("false"+ str(count_false))
                is_sparql_right = "False"
                count_false = count_false + 1

            if count_false % 100 == 0:
                print(count_false)
Code example #8
        # (Tail of evaluate(): return the generated tokens and the attention
        # weights accumulated so far.)
        return decoded_words, decoder_attentions[:di + 1]


# Evaluate random sentences from the training set and print the input, target,
# and output to make some subjective quality judgements.
def evaluateRandomly(encoder, decoder, n=10):
    for i in range(n):
        pair = random.choice(pairs)
        print('>', pair[0])
        print('=', pair[1])
        output_words, attentions = evaluate(encoder, decoder, pair[0])
        output_sentence = ' '.join(output_words)
        print('<', output_sentence)
        print('')


# Initialize the network, load the trained weights, and translate the
# command-line argument.
hidden_size = 256
encoder1 = EncoderRNN(input_lang.n_words, hidden_size).to(device)
attn_decoder1 = AttnDecoderRNN(hidden_size, output_lang.n_words,
                               dropout_p=0.1).to(device)
encoder1.load_state_dict(torch.load('encoder1.pt'))
attn_decoder1.load_state_dict(torch.load('attn_decoder1.pt'))
output_words, attentions = evaluate(encoder1, attn_decoder1, sys.argv[1])
# plt.matshow(attentions.numpy())
print(output_words)
print(" ".join(output_words))
# Drop the final token (the end-of-sequence marker) before decoding to SPARQL.
print(decode(" ".join(output_words[:-1])))
with open('output.txt', 'w') as f:
    print(decode(" ".join(output_words[:-1])), file=f)
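A usage sketch (the script name and the example question are placeholders; the checkpoints encoder1.pt and attn_decoder1.pt must already exist): python torch_interpreter.py "what is the capital of germany" prints the predicted token sequence and its decoded SPARQL form, and also writes the decoded query to output.txt.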
Code example #9
import argparse
from generator_utils import decode, encode

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('mode',
                        nargs='?',
                        choices=['encode', 'decode'],
                        default='decode')
    parser.add_argument('input_path')
    args = parser.parse_args()

    with open(args.input_path, 'r') as input_file:
        for line in input_file:
            if args.mode == 'decode':
                print(decode(line.strip()))
            elif args.mode == 'encode':
                print(encode(line.strip()))
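A usage sketch (file names are hypothetical): python codec_cli.py decode encoded_queries.txt prints one decoded SPARQL query per input line, while python codec_cli.py encode plain_queries.txt does the reverse; when the mode argument is omitted it defaults to decode.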