def sms_reply():
    """Twilio SMS webhook: classify the incoming message body with the
    trained intent model and reply with a matching canned response.

    Returns:
        str: the TwiML response to send back to Twilio.
    """
    with open('dis.json', 'r') as f:
        intents = json.load(f)

    # Remap GPU-saved tensors onto whatever device is actually available.
    if torch.cuda.is_available():
        map_location = lambda storage, loc: storage.cuda()
    else:
        map_location = 'cpu'

    data = torch.load("data.pth", map_location=map_location)
    all_words = data["all_words"]
    tags = data["tags"]
    model = NeuralNet(data["input_size"], data["hidden_size"], data["output_size"])
    model.load_state_dict(data["model_state"])
    model.eval()

    # Fetch and vectorize the incoming SMS body.
    msg = tokenize(request.form.get('Body'))
    X = bag_of_words(msg, all_words)
    X = torch.from_numpy(X.reshape(1, X.shape[0]))

    output = model(X)
    _, predicted = torch.max(output, dim=1)
    tag = tags[predicted.item()]
    prob = torch.softmax(output, dim=1)[0][predicted.item()]

    # Bug fix: `resp` is now always bound before `return` — the original could
    # raise UnboundLocalError when prob > 0.75 but no intent matched the tag.
    # (The original also wrapped this in a `while True` that always returned
    # on the first pass; the loop added nothing and has been removed.)
    resp = MessagingResponse()
    if prob.item() > 0.75:
        for intent in intents["intents"]:
            if tag == intent["tag"]:
                # .format(msg) fills any {} placeholder in the canned response.
                resp.message(random.choice(intent['responses']).format(msg))
                break
    else:
        resp.message("I do not understand...")
    return str(resp)
def input_process(user_input):
    """Classify *user_input* against the trained intent model and return a
    canned response for the best-matching intent.

    Args:
        user_input: the raw user sentence (str).

    Returns:
        str: a random response for the predicted intent when confidence
        exceeds 0.75, otherwise "I do not understand...".
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    with open('intents.json', 'r') as json_data:
        intents = json.load(json_data)

    data = torch.load("data.pth")
    all_words = data['all_words']
    tags = data['tags']
    model = NeuralNet(data["input_size"], data["hidden_size"],
                      data["output_size"]).to(device)
    model.load_state_dict(data["model_state"])
    model.eval()

    # Vectorize the sentence as a bag-of-words row vector (1, len(all_words)).
    sentence = tokenize(user_input)
    X = bag_of_words(sentence, all_words)
    X = torch.from_numpy(X.reshape(1, X.shape[0])).to(device)

    output = model(X)
    _, predicted = torch.max(output, dim=1)
    tag = tags[predicted.item()]
    prob = torch.softmax(output, dim=1)[0][predicted.item()]

    if prob.item() > 0.75:
        for intent in intents['intents']:
            if tag == intent["tag"]:
                return random.choice(intent['responses'])
    # Bug fix: the original wrapped the body in `while True`, which looped
    # forever when prob > 0.75 but no intent matched the predicted tag.
    # Debug prints of the tokenized input/vocabulary were also removed.
    return "I do not understand..."
def get_prediction(text, embedding_matrix):
    """Run the sentiment model on *text* and return a Korean label.

    Returns '긍정' (positive) when the sigmoid score is >= 0.5,
    '부정' (negative) otherwise.
    """
    # Build the network on CPU and restore the best checkpoint.
    net = NeuralNet(embedding_matrix)
    net.to(torch.device("cpu"))
    net.load_state_dict(torch.load('./pytorch_model/best_model.pt'))
    net.eval()

    # Forward pass without gradient tracking.
    with torch.no_grad():
        raw = net(text)

    # Single scalar probability from the first (only) output.
    score = sigmoid(raw.detach().cpu().numpy())[0][0]
    label = np.where(score >= 0.5, 1, 0)
    if label == 1:
        return '긍정'
    return '부정'
def load():
    """Populate the module-level model state (all_words, ids, model, nodes)
    from the persisted training artifacts.

    Failures are logged rather than raised, preserving the original
    best-effort contract (the caller keeps running without a loaded model).
    """
    global all_words, ids, model, nodes
    try:
        nodes = retrieve('nodes')
        data = torch.load("data.pth")
        all_words = data['all_words']
        ids = data['ids']
        model = NeuralNet(data["input_size"], data["hidden_size"],
                          data["output_size"]).to(device)
        model.load_state_dict(data["model_state"])
        model.eval()
    except Exception as exc:
        # Bug fix: was a bare `except:` printing a generic message — that
        # swallowed KeyboardInterrupt/SystemExit and hid the actual error.
        print(f"An exception occurred while loading the model: {exc}")
def test(args, ckpt_file):
    """Evaluate the checkpointed model on the test split.

    Args:
        args: experiment config dict; this function reads "test_size" and
            "EXPT_DIR" (processData reads the rest).
        ckpt_file: checkpoint basename; ".pth" is appended.

    Returns:
        dict: {"predictions": [...], "labels": [...]} with one entry per test
        sample — predictions are argmax class indices, labels the raw targets.
    """
    print("========== In the test step ==========")
    # Removed unused locals (batch_size/lr/momentum/epochs/train_split were
    # read from args but never used; processData consumes args directly).
    loader = processData(args, stageFor="test")

    net = NeuralNet().to(device=device)
    net.load_state_dict(
        torch.load(os.path.join(args["EXPT_DIR"], ckpt_file + ".pth"))["model"]
    )
    net.eval()

    predix = 0
    predictions = {}
    truelabels = {}
    n_val = args["test_size"]

    with tqdm(total=n_val, desc="Testing round", unit="batch", leave=False) as pbar:
        for step, (batch_x, batch_y) in enumerate(loader):
            with torch.no_grad():
                batch_x = batch_x.to(device)
                batch_y = batch_y.to(device)
                prediction = net(batch_x)
            for logit, label in zip(prediction, batch_y):
                truelabels[predix] = label.cpu().numpy().tolist()
                class_probabilities = logit.cpu().numpy().tolist()
                # Store the argmax class index, not the raw logits.
                predictions[predix] = np.argmax(class_probabilities)
                predix += 1
            pbar.update()

    # Dicts preserve insertion order (ascending predix), so values() keeps
    # sample order — equivalent to the original items() comprehension.
    return {"predictions": list(predictions.values()),
            "labels": list(truelabels.values())}
def solve(msg):
    """Answer chat message *msg* using the trained intent classifier.

    Side effects on module globals (grounded in the code below):
      - btags: appended with "<tag> - " the first time each tag is answered.
      - bques: appended with "<msg> - " the first time an unanswered
        question is seen.
      - bnon:  set True when the model is not confident about *msg*.

    Returns the canned response plus a trailing blank line, a Vietnamese
    fallback message for low-confidence questions, or "" when the tag or
    question was already handled in this session.
    """
    with open('intents.json', 'r', encoding='utf-8', errors='ignore') as f:
        intents = json.load(f)
    FILE = "data.pth"
    data = torch.load(FILE)
    input_size = data["input_size"]
    hidden_size = data["hidden_size"]
    output_size = data["output_size"]
    all_words = data["all_words"]
    tags = data["tags"]
    model_state = data["model_state"]
    # `device` is defined at module level (not visible in this chunk).
    model = NeuralNet(input_size, hidden_size, output_size).to(device)
    model.load_state_dict(model_state)
    model.eval()
    global btags
    global bques
    sentence = msg
    sentence = tokenize(sentence)
    # Bag-of-words row vector shaped (1, len(all_words)).
    X = bag_of_words(sentence, all_words)
    X = X.reshape(1, X.shape[0])
    X = torch.from_numpy(X).to(dtype=torch.float).to(device)
    output = model(X)
    _, predicted = torch.max(output, dim=1)
    tag = tags[predicted.item()]
    probs = torch.softmax(output, dim=1)
    # Confidence of the predicted intent.
    prob = probs[0][predicted.item()]
    if prob.item() > 0.75:
        for intent in intents["intents"]:
            if tag == intent["tag"]:
                if tag not in btags:
                    # First time answering this tag: record it and reply.
                    btags += tag + " - "
                    response = random.choice(intent['responses'])
                    return (response) + "\n\n"
                else:
                    # Tag already answered this session: suppress a repeat.
                    return ""
            else:
                continue
    else:
        if msg not in bques:
            # Unseen low-confidence question: record it, raise the bnon flag,
            # and return the Vietnamese fallback ("no answer yet; please leave
            # your phone number and question in the report section...").
            bques += msg + " - "
            global bnon
            bnon = True
            return msg + ": Mình tạm thời chưa có đáp án \n Bạn hãy để lại sđt và câu hỏi tại mục report hoặc liên hệ trực tiếp số điện thoại 0868355415" + "\n\n"
        else:
            # Question already recorded: stay silent.
            return ""
def chatbot(question):
    """Answer *question* using the trained 3-hidden-layer intent classifier.

    Args:
        question: the user's sentence (str).

    Returns:
        str: a random canned response for the predicted intent when the
        softmax confidence exceeds 0.75, otherwise an apology string.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    with open('intents.json', 'r', encoding="utf8") as json_data:
        intents = json.load(json_data)

    data = torch.load("data.pth", map_location='cpu')
    all_words = data['all_words']
    tags = data['tags']
    model = NeuralNet(data["input_size"], data["hidden_size00"],
                      data["hidden_size01"], data["hidden_size02"],
                      data["output_size"]).to(device)
    model.load_state_dict(data["model_state"])
    model.eval()

    sentence = tokenize(question)
    X = bag_of_words(sentence, all_words)
    X = torch.from_numpy(X.reshape(1, X.shape[0])).to(device)

    output = model(X)
    _, predicted = torch.max(output, dim=1)
    tag = tags[predicted.item()]
    prob = torch.softmax(output, dim=1)[0][predicted.item()]

    if prob.item() > 0.75:
        for intent in intents['intents']:
            if tag == intent["tag"]:
                return random.choice(intent['responses'])
    # Bug fix: the original `while True` looped forever when prob > 0.75 but
    # no intents entry matched the predicted tag; now we fall through to the
    # same apology used for low confidence.
    return 'Sorry I do not understand..'
def infer(args, unlabeled, ckpt_file):
    """Run inference on the given *unlabeled* sample indices.

    Args:
        args: experiment config dict; this function reads "EXPT_DIR"
            (processData consumes the rest directly).
        unlabeled: list of dataset indices to score.
        ckpt_file: checkpoint basename; ".pth" is appended.

    Returns:
        dict: {"outputs": {index: {"pre_softmax": [...], "prediction": argmax}}}.
    """
    print("========== In the inference step ==========")
    # Removed unused locals (batch_size/lr/momentum/epochs/train_split were
    # read from args but never used).
    loader = processData(args, stageFor="infer", indices=unlabeled)

    net = NeuralNet().to(device=device)
    net.load_state_dict(
        torch.load(os.path.join(args["EXPT_DIR"], ckpt_file + ".pth"))["model"]
    )
    net.eval()

    n_val = len(unlabeled)
    predictions = {}
    predix = 0
    with tqdm(total=n_val, desc="Inference round", unit="batch", leave=False) as pbar:
        for step, (batch_x, batch_y) in enumerate(loader):
            with torch.no_grad():
                batch_x = batch_x.to(device)
                batch_y = batch_y.to(device)
                prediction = net(batch_x)
            for logit in prediction:
                class_probabilities = logit.cpu().numpy().tolist()
                # Key by the original dataset index, not the loop counter.
                predictions[unlabeled[predix]] = {
                    "pre_softmax": class_probabilities,
                    "prediction": np.argmax(class_probabilities),
                }
                predix += 1
            pbar.update()
    return {"outputs": predictions}
def load_chatbot():
    """Load the chatbot artifacts from disk.

    Returns:
        tuple: (model, all_words, tags, intents) — the model is in eval mode
        on the best available device.
    """
    # Use the GPU when we have CUDA support.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    with open('CB.json', 'r') as f:
        intents = json.load(f)

    data = torch.load("data.pth")
    all_words = data["all_words"]
    tags = data["tags"]

    # Bug fix: `device` was computed but never used — the model always stayed
    # on CPU even when CUDA was available.
    model = NeuralNet(data["input_size"], data["hidden_size"],
                      data["output_size"]).to(device)
    model.load_state_dict(data["model_state"])
    model.eval()
    return model, all_words, tags, intents
def chatbot():
    """Flask endpoint: classify the "msg" query parameter (sent by the page's
    JS) and return the bot's reply as a plain string.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    with open('intents.json', 'r') as f:
        intents = json.load(f)

    data = torch.load("data.pth")
    all_words = data['all_words']
    tags = data['tags']
    model = NeuralNet(data["input_size"], data["hidden_size"],
                      data["output_size"]).to(device)
    model.load_state_dict(data["model_state"])
    model.eval()

    # Get data from input — the page's JS sends it as ?msg=...
    sentence = tokenize(request.args.get("msg"))
    X = bag_of_words(sentence, all_words)
    # Bug fix: the input tensor must live on the same device as the model;
    # the original left it on CPU, which crashes when CUDA is available.
    X = torch.from_numpy(X.reshape(1, X.shape[0])).to(device)

    output = model(X)
    _, predicted = torch.max(output, dim=1)
    tag = tags[predicted.item()]
    prob = torch.softmax(output, dim=1)[0][predicted.item()]
    print(prob.item())

    # Default reply covers both low confidence and the exact-0.75 edge the
    # original's `if < 0.75 / elif > 0.75` pair left to the default anyway.
    bot = "I do not know...try something different😊"
    if prob.item() > 0.75:
        for intent in intents['intents']:
            if tag == intent["tag"]:
                bot = random.choice(intent['responses'])
    return bot
def process(s):
    """Classify sentence *s* and return the predicted intent tag.

    Args:
        s: the raw user sentence (str).

    Returns:
        str: the predicted tag, or "sorry" when the softmax probability of
        the top class is below 0.45.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # NOTE: the original also loaded intents.json and set bot_name = "Zoey",
    # but both were only used by commented-out code; removed as dead weight.
    data = torch.load("data.pth")
    all_words = data["all_words"]
    tags = data["tags"]
    model = NeuralNet(data["input_size"], data["hidden_size"],
                      data["output_size"]).to(device)
    model.load_state_dict(data["model_state"])
    model.eval()

    sentence = tokenize(s)
    x = bag_of_words(sentence, all_words)
    # Bug fix: move the input onto the model's device — the original left it
    # on CPU, which crashes when the model is on CUDA.
    x = torch.from_numpy(x.reshape(1, x.shape[0])).to(device)

    output = model(x)
    _, predicted = torch.max(output, dim=1)
    probability = torch.softmax(output, dim=1)[0][predicted.item()]
    if probability < .45:
        return "sorry"
    return tags[predicted.item()]
class NeuralNetworkTagDecider(TagDecider):
    """Maps spoken text to a feature tag using a bag-of-words NeuralNet.

    On construction it validates intents.json against the registered
    features and, when they disagree (or no trained model exists),
    regenerates the file and retrains the model before loading it.
    """

    def __init__(self, intents_file_path=None, trained_model_path=None, features=None, model_name=None):
        """Build (and if necessary retrain) the intent classifier.

        Args:
            intents_file_path: path to the intents JSON file.
            trained_model_path: path to the serialized model data
                (torch.load format with sizes, vocab, tags and weights).
            features: feature objects providing .tag_name and .patterns.
            model_name: forwarded to IntentsTrainer.
        """
        super().__init__()
        self.intents_file_path = intents_file_path
        self.trained_model_path = trained_model_path
        self.model_name = model_name
        self.features = features if features else []
        self.intents_json = None
        # Accessing intents.json file.
        # ----------------------------
        try:
            # Check to see that intents.json file exists.
            with open(self.intents_file_path, 'r') as json_data:
                intents_json = json.load(json_data)
            # Check whether any features have been added/removed or if
            # no trained model is present.
            assert (len(self.features) == len(intents_json['intents']))
            assert (os.path.exists(self.trained_model_path))
        except (FileNotFoundError, AssertionError):
            # remove intents file if it exists
            try:
                print('Detected modification in feature list.')
                os.remove(self.intents_file_path)
            except OSError:
                print('intents.json file not found.')
            # Update intents.json if features have been added/removed
            # or the file does not exist.
            spinner.start(text='Generating new intents json file...')
            intents = {}
            intents['intents'] = []
            for x, feature in enumerate(self.features):
                tag = {}
                tag["tag"] = feature.tag_name
                tag["patterns"] = feature.patterns
                tag["index"] = x
                intents['intents'].append(tag)
            # NOTE(review): here intents_json becomes a JSON *string*
            # (json.dumps), whereas the try-branch leaves it as a parsed
            # dict — consumers of self.intents_json therefore see two
            # different types. Confirm which one downstream code expects.
            intents_json = json.dumps(intents, indent=4)
            with open(self.intents_file_path, 'w') as f:
                f.write(intents_json)
            spinner.succeed(text=f'{self.intents_file_path} file generated.')
            self.trainer = IntentsTrainer(self.intents_file_path, model_name=self.model_name)
            # Retrain the NeuralNet
            spinner.start(text='Training NeuralNet.')
            self.trainer.train()
            spinner.succeed('NeuralNet trained.')
        finally:
            # Save the intents json file (dict or string — see note above).
            self.intents_json = intents_json
        # Prepping the Neural Net to be used.
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        self.model_data = torch.load(self.trained_model_path)
        input_size = self.model_data["input_size"]
        hidden_size = self.model_data["hidden_size"]
        output_size = self.model_data["output_size"]
        self.all_words = self.model_data["all_words"]
        self.tags = self.model_data["tags"]
        self.model_state = self.model_data["model_state"]
        self.model = NeuralNet(input_size, hidden_size, output_size).to(self.device)
        self.model.load_state_dict(self.model_state)
        self.model.eval()

    def decide(self, spoken_text):
        """
        This function utilizes a neural network to determine which feature
        to run based on the input text.

        Returns the predicted tag when its softmax probability is >= 0.8,
        otherwise the 'chatbot' fallback tag.

        See for More: https://www.techwithtim.net/tutorials/ai-chatbot/
        """
        text = tokenize(spoken_text)
        # Bag-of-words row vector shaped (1, len(self.all_words)).
        x = bag_of_words(text, self.all_words)
        x = x.reshape(1, x.shape[0])
        x = torch.from_numpy(x).to(self.device)
        output = self.model(x)
        _, predicted = torch.max(output, dim=1)
        probs = torch.softmax(output, dim=1)
        prob = probs[0][predicted.item()]
        predicted_tag = self.tags[predicted.item()]
        predicted_tag_probability = prob.item()
        # if no accurate action is found from
        # spoken_text, default to chatbot feature.
        tag = predicted_tag if predicted_tag_probability >= 0.8 else 'chatbot'
        return tag
def train():
    """K-fold training loop for the binary NeuralNet (BCE-with-logits).

    NOTE(review): this function reads many names that are neither parameters
    nor declared global (splits, x_train, y_train, features, batch_size,
    n_epochs, df_test, test_loader, test_features, train_preds, test_preds,
    avg_losses_f, avg_val_losses_f, sigmoid). Statements such as
    `x_train = np.array(x_train)` and `test_preds += ...` make those names
    *local*, so as written they raise UnboundLocalError — this body looks
    like it was lifted from module-level script code. Confirm before use.
    """
    for i, (train_idx, valid_idx) in enumerate(splits):
        # split data in train / validation according to the KFold indeces
        # also, convert them to a torch tensor and store them on the GPU (done with .cuda())
        x_train = np.array(x_train)
        y_train = np.array(y_train)
        features = np.array(features)
        x_train_fold = torch.tensor(x_train[train_idx.astype(int)], dtype=torch.long).cuda()
        y_train_fold = torch.tensor(y_train[train_idx.astype(int), np.newaxis], dtype=torch.float32).cuda()
        kfold_X_features = features[train_idx.astype(int)]
        kfold_X_valid_features = features[valid_idx.astype(int)]
        x_val_fold = torch.tensor(x_train[valid_idx.astype(int)], dtype=torch.long).cuda()
        y_val_fold = torch.tensor(y_train[valid_idx.astype(int), np.newaxis], dtype=torch.float32).cuda()

        # model = BiLSTM(lstm_layer=2,hidden_dim=40,dropout=DROPOUT).cuda()
        model = NeuralNet()
        # make sure everything in the model is running on the GPU
        model.cuda()

        # define binary cross entropy loss
        # note that the model returns logit to take advantage of the log-sum-exp trick
        # for numerical stability in the loss
        loss_fn = torch.nn.BCEWithLogitsLoss(reduction='sum')

        step_size = 300
        base_lr, max_lr = 0.001, 0.003
        optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=max_lr)
        # Cyclic learning-rate schedule stepped once per batch (batch_step below).
        scheduler = CyclicLR(optimizer, base_lr=base_lr, max_lr=max_lr,
                             step_size=step_size, mode='exp_range', gamma=0.99994)

        train = torch.utils.data.TensorDataset(x_train_fold, y_train_fold)
        valid = torch.utils.data.TensorDataset(x_val_fold, y_val_fold)
        # MyDataset wraps the TensorDataset; presumably it also yields the
        # sample index (the loaders below unpack (x, y, index)) — confirm.
        train = MyDataset(train)
        valid = MyDataset(valid)

        # No need to shuffle the data again here. Shuffling happens when splitting for kfolds.
        train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True)
        valid_loader = torch.utils.data.DataLoader(valid, batch_size=batch_size, shuffle=False)

        print(f'Fold {i + 1}')
        for epoch in range(n_epochs):
            # set train mode of the model. This enables operations which are only applied during training like dropout
            start_time = time.time()
            model.train()
            avg_loss = 0.
            # NOTE(review): the inner enumerate rebinds `i`, clobbering the
            # fold index for the rest of this fold's iteration.
            for i, (x_batch, y_batch, index) in enumerate(train_loader):
                # Forward pass: compute predicted y by passing x to the model.
                f = kfold_X_features[index]
                y_pred = model([x_batch, f])

                if scheduler:
                    scheduler.batch_step()

                # Compute and print loss.
                loss = loss_fn(y_pred, y_batch)

                # Before the backward pass, use the optimizer object to zero all of the
                # gradients for the Tensors it will update (which are the learnable weights
                # of the model)
                optimizer.zero_grad()

                # Backward pass: compute gradient of the loss with respect to model parameters
                loss.backward()

                # Calling the step function on an Optimizer makes an update to its parameters
                optimizer.step()
                avg_loss += loss.item() / len(train_loader)

            # set evaluation mode of the model. This disabled operations which are only applied during training like dropout
            model.eval()

            # predict all the samples in y_val_fold batch per batch
            valid_preds_fold = np.zeros((x_val_fold.size(0)))
            test_preds_fold = np.zeros((len(df_test)))
            avg_val_loss = 0.
            for i, (x_batch, y_batch, index) in enumerate(valid_loader):
                f = kfold_X_valid_features[index]
                y_pred = model([x_batch, f]).detach()
                avg_val_loss += loss_fn(y_pred, y_batch).item() / len(valid_loader)
                valid_preds_fold[i * batch_size:(i+1) * batch_size] = sigmoid(y_pred.cpu().numpy())[:, 0]

            elapsed_time = time.time() - start_time
            print('Epoch {}/{} \t loss={:.4f} \t val_loss={:.4f} \t time={:.2f}s'.format(
                epoch + 1, n_epochs, avg_loss, avg_val_loss, elapsed_time))

        avg_losses_f.append(avg_loss)
        avg_val_losses_f.append(avg_val_loss)

        # predict all samples in the test set batch per batch
        for i, (x_batch,) in enumerate(test_loader):
            f = test_features[i * batch_size:(i+1) * batch_size]
            y_pred = model([x_batch, f]).detach()
            test_preds_fold[i * batch_size:(i+1) * batch_size] = sigmoid(y_pred.cpu().numpy())[:, 0]

        # Out-of-fold predictions for this fold; test predictions are
        # averaged across all folds.
        train_preds[valid_idx] = valid_preds_fold
        test_preds += test_preds_fold / len(splits)

    print('All \t loss={:.4f} \t val_loss={:.4f} \t '.format(np.average(avg_losses_f), np.average(avg_val_losses_f)))
def infer(args, unlabeled, ckpt_file):
    """Run binary inference (mushroom dataset) on the *unlabeled* train indices.

    The model's last layer is a sigmoid, so each output is P(class=1); it is
    thresholded at 0.5 for the hard prediction and mapped back through
    logit_fn for the "pre_softmax" entry.

    Returns:
        dict: {"outputs": {index: {"prediction": 0|1,
                                   "pre_softmax": [[logit_p, logit_1mp]]}}}.
    """
    print("========== In the inference step ==========")
    batch_size = args["batch_size"]
    train_split = args["split_train"]
    # Removed unused locals (lr/momentum/epochs were read but never used).

    dataset = MushroomDataset("./data/mushrooms.csv")
    # The labeled pool is the leading train_split fraction of the dataset;
    # `unlabeled` indexes into that subset.
    train_dataset = torch.utils.data.Subset(
        dataset, list(range(int(train_split * len(dataset))))
    )
    train_subset = Subset(train_dataset, unlabeled)
    train_loader = DataLoader(train_subset, batch_size=batch_size, shuffle=False)

    net = NeuralNet().to(device=device)
    net.load_state_dict(
        torch.load(os.path.join(args["EXPT_DIR"], ckpt_file + ".pth"))["model"]
    )
    net.eval()

    predictions = {}
    predix = 0
    with tqdm(total=len(unlabeled), desc="Inference round", unit="batch", leave=False) as pbar:
        for step, (batch_x, batch_y) in enumerate(train_loader):
            with torch.no_grad():
                batch_x = batch_x.to(device)
                batch_y = batch_y.to(device)
                # Bug fix: renamed from `prediction` — the original rebound
                # that name inside the inner loop, shadowing the batch output
                # it was iterating over.
                batch_out = net(batch_x)
            for logit in batch_out:
                p = logit.cpu().numpy()[0]
                predictions[unlabeled[predix]] = {
                    "prediction": 1 if p > 0.5 else 0,
                    "pre_softmax": [[logit_fn(p), logit_fn(1 - p)]],
                }
                predix += 1
            pbar.update()
    return {"outputs": predictions}
def test(args, ckpt_file):
    """Evaluate the binary (mushroom) model on the held-out tail split.

    Returns:
        dict: {"predictions": [0|1, ...], "labels": [label, ...]} where each
        prediction thresholds the model's sigmoid output at 0.5.
    """
    print("========== In the test step ==========")
    batch_size = args["batch_size"]
    train_split = args["split_train"]
    # Removed unused locals (lr/momentum/epochs were read but never used).

    dataset = MushroomDataset("./data/mushrooms.csv")
    # Test set = everything after the leading train_split fraction.
    test_dataset = torch.utils.data.Subset(
        dataset, list(range(int(train_split * len(dataset)), len(dataset)))
    )
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

    net = NeuralNet().to(device=device)
    net.load_state_dict(
        torch.load(os.path.join(args["EXPT_DIR"], ckpt_file + ".pth"))["model"]
    )
    net.eval()

    # Build the final lists directly instead of the original's dict-then-
    # convert second pass; output is identical.
    predictions = []
    truelabels = []
    with tqdm(total=len(test_dataset), desc="Testing round", unit="batch", leave=False) as pbar:
        for step, (batch_x, batch_y) in enumerate(test_loader):
            with torch.no_grad():
                batch_x = batch_x.to(device)
                batch_y = batch_y.to(device)
                batch_out = net(batch_x)
            for logit, label in zip(batch_out, batch_y):
                # Sigmoid output thresholded at 0.5 -> hard 0/1 prediction.
                predictions.append(1 if logit.cpu().numpy().tolist()[0] > 0.5 else 0)
                truelabels.append(label.cpu().numpy().tolist()[0])
                pbar.update()

    return {"predictions": predictions, "labels": truelabels}
# Console chat loop ("Corby" bot): load the trained intent model, then read
# lines from stdin until the user types "quit".
# NOTE(review): this chunk ends immediately after `output = model(X)` — the
# rest of the loop body (softmax / response selection) is not visible here.
with open('intents.json', 'r') as json_data:
    intents = json.load(json_data)

FILE = "data.pth"
data = torch.load(FILE)

input_size = data["input_size"]
hidden_size = data["hidden_size"]
output_size = data["output_size"]
all_words = data['all_words']
tags = data['tags']
model_state = data["model_state"]

# `device` must be defined earlier in the file (not visible in this chunk).
model = NeuralNet(input_size, hidden_size, output_size).to(device)
model.load_state_dict(model_state)
model.eval()

bot_name = "Corby"
print("Let's chat! (type 'quit' to exit)")
while True:
    sentence = input("You: ")
    if sentence == "quit":
        break

    # Vectorize the sentence as a bag-of-words row vector (1, len(all_words)).
    sentence = tokenize(sentence)
    X = bag_of_words(sentence, all_words)
    X = X.reshape(1, X.shape[0])
    X = torch.from_numpy(X).to(device)

    output = model(X)
def response():
    """Flask endpoint: classify the 'Globalquery' form field (sent by the
    Flutter client) and return a JSON reply ({"response": ...}).

    When the model's confidence is <= 0.70 a random small-talk filler /
    emoji is returned instead of an intent response.
    """
    app.logger.info('start')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    app.logger.info(device)
    # Open the JSON file with the command/intent definitions.
    with open('intents.json', 'r') as json_data:
        intents = json.load(json_data)
    app.logger.info(intents)
    FILE = "data.pth"
    # Load the already-trained model artifacts.
    data = torch.load(FILE)
    app.logger.info(data)
    input_size = data["input_size"]  # input layer size
    hidden_size = data["hidden_size"]
    output_size = data["output_size"]  # output layer size
    all_words = data['all_words']  # bag-of-words vocabulary from training
    tags = data['tags']  # intent tags from training
    model_state = data["model_state"]  # trained weights
    model = NeuralNet(input_size, hidden_size, output_size).to(device)
    model.load_state_dict(model_state)
    model.eval()
    # Data sent from the Flutter client under the 'Globalquery' key.
    query = dict(request.form)['Globalquery']
    res = query
    res = tokenize(res)  # tokenize the client text
    X = bag_of_words(res, all_words)  # build the bag-of-words vector
    X = X.reshape(1, X.shape[0])  # row vector shaped (1, vocab_size)
    X = torch.from_numpy(X).to(device)
    output = model(X)
    _, predicted = torch.max(output, dim=1)
    # Tag of the highest-scoring class.
    tag = tags[predicted.item()]
    probs = torch.softmax(output, dim=1)
    # Probability of the predicted tag.
    prob = probs[0][predicted.item()]
    if prob.item() > 0.70:  # confident enough (> 70%) to answer from intents
        app.logger.info('%d logged in successfully', prob.item())
        app.logger.info(intents['intents'])
        for intent in intents['intents']:
            if tag == intent["tag"]:
                if intent["tag"] == "goodbye":
                    # NOTE(review): both open() results are immediately
                    # discarded (and the handles leak); purpose unclear —
                    # confirm whether these were meant to trigger something.
                    f = open("database.py")
                    f = open("randomDatabase.py")
                    # Retrain the model on "goodbye" before replying.
                    os.system('python train.py')
                    return jsonify(
                        {"response": random.choice(intent['responses'])})
                else:
                    return jsonify(
                        {"response": random.choice(intent['responses'])})
        # NOTE(review): if no intent matches the predicted tag the function
        # falls through and returns None (an HTTP 500 in Flask) — confirm
        # `tags` always mirrors intents.json.
    else:
        # Low confidence: reply with a random filler / emoji.
        return jsonify({
            "response":
            random.choice([
                'I siee...', 'mmmmmm', 'ops..', 'O_O', 'jumm..', 'okeyyy',
                'ok', 'tell me more',
                '\N{slightly smiling face} \N{slightly smiling face} \N{slightly smiling face}',
                '\N{thinking face} \N{thinking face}',
                '\N{face without mouth} ',
                '\N{lying face} \N{lying face} jajaj',
                '\N{relieved face} \N{relieved face}',
                '\N{face with open mouth} \N{face with open mouth} \N{face with open mouth}',
                'ou \N{flexed biceps} \N{flexed biceps}',
                '.. \N{eyes} \N{eyes} ...'
            ])
        })