Example #1
def get_bot_response():
    """Handle a POST chat message and return the bot's reply as JSON.

    Expects a JSON payload containing a 'message' key. The message is
    normalized, tokenized, and converted to a bag-of-words vector that is
    fed to the module-level model. If the predicted intent tag's softmax
    probability exceeds 0.80, a random canned response for that tag is
    returned; otherwise a fallback message is returned.

    Relies on module-level names defined elsewhere in the application:
    request, jsonify, model, device, word_list, tags, data.
    """
    if request.method != "POST":
        # Bug fix: the original fell through and returned None (an HTTP
        # 500 in Flask) for non-POST requests.
        return jsonify("Unsupported request method.")

    user_data = request.json
    sentence = tokenization(normalization(user_data['message']))

    # Build the model input: a (1, vocab_size) bag-of-words row vector.
    x = bag_of_words(sentence, word_list)
    x = torch.from_numpy(x).reshape(1, -1).to(device)

    output, _hidden = model(x)
    _, predicted = torch.max(output, dim=1)
    tag = tags[predicted.item()]

    # Softmax over the logits gives the confidence of the predicted tag.
    prob = torch.softmax(output, dim=1)
    probability = prob[0][predicted.item()]

    if probability.item() > 0.80:
        for intent in data['data']:
            if tag == intent['tag']:
                return jsonify(random.choice(intent['bot_responses']))
    # Bug fix: the original returned None when the model was confident
    # but no intent entry matched the predicted tag.
    return jsonify("I do not understand...")
Example #2
def get_bot_response2():
    """Handle a POST chat message using the checkpoint in
    dataserialized2.pth and the intents in data2.json; reply as JSON.

    Loads the serialized LSTM and intent data on every call
    (NOTE(review): consider caching these at module level — reloading
    per request is expensive), then runs the same normalize → tokenize →
    bag-of-words → predict pipeline as get_bot_response.

    Relies on module-level names defined elsewhere: request, jsonify,
    json, LSTM, and the pre-processing helpers.
    """
    try:
        device = torch.device("cpu")

        with open('data2.json', 'r') as instances:
            data = json.load(instances)

        FILE = "dataserialized2.pth"
        dataserialized = torch.load(FILE)

        model = LSTM(
            dataserialized["seq_length"],
            dataserialized["input_size"],
            dataserialized["hidden_size"],
            dataserialized["num_layers"],
            dataserialized["num_classes"],
        ).to(device)
        model.load_state_dict(dataserialized["model_state"])
        model.eval()

        word_list = dataserialized["word_list"]
        tags = dataserialized["tags"]
    except Exception as e:
        # Bug fix: the original printed the error and then fell through
        # into code referencing the now-undefined model/word_list/tags,
        # raising a NameError that masked the real failure. Fail fast
        # with an explicit response instead.
        print(e)
        return jsonify("Bot is unavailable right now.")

    if request.method != "POST":
        # Bug fix: non-POST requests previously returned None (HTTP 500).
        return jsonify("Unsupported request method.")

    user_data = request.json
    sentence = tokenization(normalization(user_data['message']))

    # (1, vocab_size) bag-of-words row vector on the CPU device.
    x = bag_of_words(sentence, word_list)
    x = torch.from_numpy(x).reshape(1, -1).to(device)

    output, _hidden = model(x)
    _, predicted = torch.max(output, dim=1)
    tag = tags[predicted.item()]

    prob = torch.softmax(output, dim=1)
    probability = prob[0][predicted.item()]

    if probability.item() > 0.80:
        for intent in data['data']:
            if tag == intent['tag']:
                return jsonify(random.choice(intent['bot_responses']))
    # Bug fix: previously returned None when confident but no tag matched.
    return jsonify("I do not understand...")
Example #3
import json

import numpy as np

from model import LSTM
from pre_processing import normalization, tokenization, stemming, bag_of_words

# Load the intent definitions and collect the training vocabulary.
with open('data3.json', 'r') as instances:
    data = json.load(instances)

word_list = []  # every token seen across all user responses
tags = []       # one tag per intent entry
xy = []         # (tokenized user response, intent tag) pairs

for intent in data['data']:
    tag = intent['tag']
    tags.append(tag)
    for user_response in intent['user_responses']:
        tokens = tokenization(normalization(user_response))
        word_list.extend(tokens)
        xy.append((tokens, tag))

# Stem the vocabulary, drop duplicates, and keep a stable sorted order.
word_list = sorted({stemming(word) for word in word_list})
print(tags)
print(word_list)
print(xy)

# Build the training inputs from the (tokens, tag) pairs collected above.
x_train = []  # bag-of-words vectors, one per user response
y_train = []  # NOTE(review): populated beyond this excerpt, presumably
              # with each tag's label index — confirm against full file
for (tokenized, tag) in xy:
    bag = bag_of_words(tokenized, word_list)
    x_train.append(bag)