Code Example #1
File: main.py  Project: zhaohu19870717/ChatBot
def generate_samples():
    # Pull the next batch of (context, reply) pairs from the data generator.
    _data_c, _data_x = zip(*gen.next())
    # Run the generator network to sample fake replies for the real contexts.
    samples = fake_sample.eval(session=session,
                               feed_dict={
                                   real_inputs_discrete_c: _data_c,
                                   real_inputs_discrete_x: _data_x
                               })
    # Decode token IDs back to text, emitting each question/answer pair.
    decoded_samples = []
    for i in xrange(len(samples)):
        decoded_samples.append('Q: ' + Parser.decodeData(_data_c[i]))
        decoded_samples.append('A: ' + Parser.decodeData(samples[i]))
        decoded_samples.append('\n')
    return decoded_samples
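As a usage sketch, this function would plausibly be called from the project's training loop to log sample output; the iteration counter and filename below are hypothetical, not from the source:

# Hypothetical training-loop hook: periodically dump decoded Q/A samples.
if iteration % 100 == 0:
    with open('samples_%d.txt' % iteration, 'w') as f:
        f.write('\n'.join(generate_samples()))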
Code Example #2
File: chatbot.py  Project: haanjack/ChatBot
import sys
import pickle

import numpy as np

import Parser
import tensorNet

# SEQ_LEN is defined elsewhere in the project.
if len(sys.argv) != 3:
    print 'Usage: python chatbot.py [Text pickle] [TRT Model]'
    sys.exit(0)

# Restore the word map (char -> ID) and its inverse (ID -> char).
with open(sys.argv[1]) as f:
    wmap, iwmap = pickle.load(f)
Parser.runtimeLoad(wmap, iwmap)
print '[ChatBot] load word map from ' + sys.argv[1]

# Build a TensorRT engine from the serialized UFF model.
engine = tensorNet.createTrtFromUFF(sys.argv[2])
tensorNet.prepareBuffer(engine)
print '[ChatBot] create tensorrt engine from ' + sys.argv[2]

# Simple REPL: read a question, encode it, run inference, decode the answer.
while True:
    b = raw_input('\n\n\x1b[1;105;97m' +
                  'Please write your question (q to quit):' + '\x1b[0m')
    if b == 'q':
        print 'Bye Bye!!'
        break
    elif len(b) > 0:
        # Pad the question out to SEQ_LEN characters, then map chars to IDs.
        raw = Parser.runtimeParser(b + ' ', SEQ_LEN)
        question = [[wmap[c] for c in raw]]

        _input = np.array(question[0], np.int32)
        _output = np.zeros([SEQ_LEN], np.int32)
        tensorNet.inference(engine, _input, _output)
        print 'Q: ' + '\x1b[1;39;94m' + Parser.decodeData(
            question[0]) + '\x1b[0m'
        print 'A: ' + '\x1b[1;39;92m' + Parser.decodeData(
            _output.tolist()) + '\x1b[0m'
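For readers without the project source, here is a rough sketch of what the Parser helpers appear to do in this excerpt. Character-level tokenization with whitespace padding is an inference from the surrounding code, not the project's actual implementation:

def runtimeParser_sketch(text, seq_len, pad=' '):
    # Truncate or right-pad the raw input so it is exactly seq_len chars.
    return (text + pad * seq_len)[:seq_len]

def decodeData_sketch(ids):
    # Map each integer ID back to a character via the inverse word map.
    return ''.join(iwmap[i] for i in ids if i in iwmap)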
Code Example #3
File: chatbot.py  Project: haanjack/ChatBot
# Query the CHW dimensions of the second LSTM layer's hidden/cell state
# bindings and of the final output binding. (dims_enc, dims_dec, dims_h0
# and dims_c0 come from earlier get_binding_dimensions calls not shown in
# this excerpt.)
dims_h1 = engine.get_binding_dimensions(4).to_DimsCHW()
dims_c1 = engine.get_binding_dimensions(5).to_DimsCHW()
dims_ot = engine.get_binding_dimensions(10).to_DimsCHW()

# Page-locked host buffer for the network output, sized for the max batch.
output_ot = cuda.pagelocked_empty(dims_ot.C() * dims_ot.H() * dims_ot.W() * MAX_BATCHSIZE, dtype=np.float32)

# Device allocations for every input binding plus the output.
d_in_enc = cuda.mem_alloc(MAX_BATCHSIZE * dims_enc.C() * dims_enc.H() * dims_enc.W() * _enc_text.dtype.itemsize)
d_in_dec = cuda.mem_alloc(MAX_BATCHSIZE * dims_dec.C() * dims_dec.H() * dims_dec.W() * _dec_text.dtype.itemsize)
d_in_h0  = cuda.mem_alloc(MAX_BATCHSIZE * dims_h0.C()  * dims_h0.H()  * dims_h0.W()  * _h0.dtype.itemsize)
d_in_c0  = cuda.mem_alloc(MAX_BATCHSIZE * dims_c0.C()  * dims_c0.H()  * dims_c0.W()  * _c0.dtype.itemsize)
d_in_h1  = cuda.mem_alloc(MAX_BATCHSIZE * dims_h1.C()  * dims_h1.H()  * dims_h1.W()  * _h1.dtype.itemsize)
d_in_c1  = cuda.mem_alloc(MAX_BATCHSIZE * dims_c1.C()  * dims_c1.H()  * dims_c1.W()  * _c1.dtype.itemsize)
d_ot_ot  = cuda.mem_alloc(MAX_BATCHSIZE * dims_ot.C()  * dims_ot.H()  * dims_ot.W()  * output_ot.dtype.itemsize)

# The hidden/cell state buffers appear twice: once as inputs and once as
# outputs, so each step's output state is written back in place and carried
# over to the next inference call.
bindings = [int(d_in_enc), int(d_in_dec), int(d_in_h0), int(d_in_c0), int(d_in_h1), int(d_in_c1),
            int(d_in_h0),  int(d_in_c0),  int(d_in_h1), int(d_in_c1),  int(d_ot_ot)]
stream = cuda.Stream()

# Same REPL as Example #2, but driving the hand-built TensorRT buffers.
while True:
    b = raw_input('\n\n\x1b[1;105;97m' +
                  'Please write your question (q to quit):' + '\x1b[0m')
    if b == 'q':
        print 'Bye Bye!!'
        break
    elif len(b) > 0:
        raw = Parser.runtimeParser(b + ' ', SEQ_LEN)
        question = [[wmap[c] for c in raw]]
        question = np.array(question, dtype='int32')
        print 'Q: ' + '\x1b[1;39;94m' + Parser.decodeData(question[0]) + '\x1b[0m'
        print 'A: ' + '\x1b[1;39;92m' + Parser.decodeData(inference(question[0])) + '\x1b[0m'
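The inference() helper invoked on the last line is not part of this excerpt. Below is a minimal sketch of the PyCUDA/TensorRT pattern it likely follows; the execution context, the greedy argmax decode, and the omission of the decoder-input and state-reset steps are all assumptions, and the exact execute call varies across TensorRT versions:

# Hypothetical completion of the setup above, not the project's actual code.
context = engine.create_execution_context()

def inference(token_ids):
    # Copy the encoder input to the device. The LSTM state buffers are
    # updated in place through the bindings list, so only the text moves.
    _enc = np.ascontiguousarray(token_ids, dtype=np.int32)
    cuda.memcpy_htod_async(d_in_enc, _enc, stream)
    # Run one pass; older TensorRT releases spell this context.enqueue(...).
    context.execute_async(1, bindings, stream.handle, None)
    # Fetch the output scores and wait for the stream to drain.
    cuda.memcpy_dtoh_async(output_ot, d_ot_ot, stream)
    stream.synchronize()
    # Greedy decode: highest-scoring vocabulary entry at each time step.
    return output_ot.reshape(SEQ_LEN, -1).argmax(axis=1).tolist()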