import data_preprocessing
import data_utils_1
import data_utils_2
import seq2seq_wrapper


def chatbot(txt):
    # Importing the dataset
    metadata, idx_q, idx_a = data_preprocessing.load_data(PATH = './')
    # Splitting the dataset into the Training set and the Test set
    (trainX, trainY), (testX, testY), (validX, validY) = data_utils_1.split_dataset(idx_q, idx_a)
    # Embedding
    xseq_len = trainX.shape[-1]
    yseq_len = trainY.shape[-1]
    batch_size = 16
    vocab_twit = metadata['idx2w']
    xvocab_size = len(metadata['idx2w'])  
    yvocab_size = xvocab_size
    emb_dim = 1024
    idx2w, w2idx, limit = data_utils_2.get_metadata()
    # Building the seq2seq model
    model = seq2seq_wrapper.Seq2Seq(xseq_len = xseq_len,
                                yseq_len = yseq_len,
                                xvocab_size = xvocab_size,
                                yvocab_size = yvocab_size,
                                ckpt_path = './weights',
                                emb_dim = emb_dim,
                                num_layers = 3)
    # Loading the weights and Running the session
    session = model.restore_last_session()
    # Getting the ChatBot predicted answer
    def respond(question):
        encoded_question = data_utils_2.encode(question, w2idx, limit['maxq'])
        answer = model.predict(session, encoded_question)[0]
        return data_utils_2.decode(answer, idx2w) 
    # Answering the caller's question
    question = txt
    return respond(question)
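
A minimal console driver for chatbot() might look like the sketch below; the prompt strings and the 'good bye' exit phrase are illustrative assumptions. Note that chatbot() reloads the dataset and the checkpoint on every call, so a loop like this pays that cost per question; a long-running service would hoist the model setup out of the function.

# Sketch: simple read-eval loop around chatbot(). The prompts and the
# 'good bye' exit phrase are illustrative assumptions.
if __name__ == '__main__':
    while True:
        question = input("You: ")
        if question == 'good bye':
            print('Ok Bye')
            break
        print('ChatBot:', chatbot(question))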
Example #2
def load_data():
    # Importing the dataset
    metadata, idx_q, idx_a = data_preprocessing.load_data(PATH='./')

    # Splitting the dataset into the Training set and the Test set
    (trainX, trainY), (testX, testY), (validX, validY) = data_utils_1.split_dataset(idx_q, idx_a)

    # Embedding
    xseq_len = trainX.shape[-1]
    yseq_len = trainY.shape[-1]
    batch_size = 16
    vocab_twit = metadata['idx2w']
    xvocab_size = len(metadata['idx2w'])
    yvocab_size = xvocab_size
    emb_dim = 1024
    idx2w, w2idx, limit = data_utils_2.get_metadata()

    return (xseq_len, yseq_len, xvocab_size, yvocab_size, emb_dim)
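
For illustration, the tuple returned by load_data() lines up with the keyword arguments of the Seq2Seq constructor used in the other examples on this page, so a caller might wire the two together as in this sketch (the checkpoint path and layer count are assumptions carried over from Example #1):

import seq2seq_wrapper

xseq_len, yseq_len, xvocab_size, yvocab_size, emb_dim = load_data()
# Build the model from the loaded dimensions; ckpt_path and num_layers
# are assumptions mirroring Example #1 on this page.
model = seq2seq_wrapper.Seq2Seq(xseq_len=xseq_len,
                                yseq_len=yseq_len,
                                xvocab_size=xvocab_size,
                                yvocab_size=yvocab_size,
                                ckpt_path='./weights',
                                emb_dim=emb_dim,
                                num_layers=3)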
Example #3
import imp

import data_preprocessing
import data_utils_1
import data_utils_2
import seq2seq_wrapper
from flask import Flask, jsonify, render_template, request

imp.reload(seq2seq_wrapper)


########## PART 1 - DATA PREPROCESSING ##########



# Importing the dataset
metadata, idx_q, idx_a = data_preprocessing.load_data(PATH = './')

# Splitting the dataset into the Training set and the Test set
(trainX, trainY), (testX, testY), (validX, validY) = data_utils_1.split_dataset(idx_q, idx_a)

# Embedding
xseq_len = trainX.shape[-1]
yseq_len = trainY.shape[-1]
batch_size = 16
vocab_twit = metadata['idx2w']
xvocab_size = len(metadata['idx2w'])  
yvocab_size = xvocab_size
emb_dim = 1024
idx2w, w2idx, limit = data_utils_2.get_metadata()



########## PART 2 - BUILDING THE SEQ2SEQ MODEL ##########
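
The snippet ends at the PART 2 header. Mirroring the other examples on this page (and putting the Flask imports above to use), the continuation would plausibly look like the sketch below; the checkpoint path, route name, and query parameter are illustrative assumptions rather than code from the original.

# Sketch of PART 2: build the model and serve answers over HTTP.
# ckpt_path, the '/ask' route, and the 'question' parameter are assumptions.
model = seq2seq_wrapper.Seq2Seq(xseq_len=xseq_len,
                                yseq_len=yseq_len,
                                xvocab_size=xvocab_size,
                                yvocab_size=yvocab_size,
                                ckpt_path='./weights',
                                emb_dim=emb_dim,
                                num_layers=3)
session = model.restore_last_session()

app = Flask(__name__)

@app.route('/ask')
def ask():
    # Encode the incoming question, run the model, decode the reply,
    # following the encode/predict/decode chain from Example #1.
    question = request.args.get('question', '')
    encoded = data_utils_2.encode(question, w2idx, limit['maxq'])
    answer = model.predict(session, encoded)[0]
    return jsonify(answer=data_utils_2.decode(answer, idx2w))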
Example #4
import data_prep
import data_utils_1
import seq2seq_wrapper

#data preprocess
data_prep.process_data()

# load data from pickle and npy files
metadata, idx_q, idx_a = data_prep.load_data(PATH='./')
(trainX, trainY), (testX, testY), (validX, validY) = data_utils_1.split_dataset(idx_q, idx_a)

# parameters
xseq_len = trainX.shape[-1]
yseq_len = trainY.shape[-1]
batch_size = 32
xvocab_size = len(metadata['idx2w'])
yvocab_size = xvocab_size
emb_dim = 1024

model = seq2seq_wrapper.Seq2Seq(xseq_len=xseq_len,
                                yseq_len=yseq_len,
                                xvocab_size=xvocab_size,
                                yvocab_size=yvocab_size,
                                ckpt_path='./ckpt/',
                                emb_dim=emb_dim,
                                num_layers=3)

val_batch_gen = data_utils_1.rand_batch_gen(validX, validY, 32)
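
The snippet stops after creating the validation batch generator; a training call usually follows. Assuming the wrapper exposes a train() method that consumes such generators (as in the widely circulated practical_seq2seq-style wrapper this code resembles), the next step might be:

# Assumption: a train() method taking (train_gen, valid_gen) is guessed
# from the practical_seq2seq-style API; verify it against your
# seq2seq_wrapper before relying on this signature.
train_batch_gen = data_utils_1.rand_batch_gen(trainX, trainY, batch_size)
model.train(train_batch_gen, val_batch_gen)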