def classify():
    """Classify an incoming query against the intent model and return a
    Facebook-style fulfillment payload.

    Reads ``query`` (and optional ``context``) from the Flask request JSON,
    scores the sentence with the trained model, and answers with a random
    response string for the best-matching intent, or a fallback message when
    no prediction clears the confidence threshold.
    """
    global stress
    ERROR_THRESHOLD = 0.75  # minimum model confidence to accept a prediction
    sentence = request.json['query']
    # Sentiment is computed but unused in this handler; kept in case the
    # analyzer call has logging side effects. TODO confirm.
    sentiment = sentiment_analysis.sentiment_analyzer(sentence)
    if "context" in request.json:
        context = request.json['context']
    else:
        print("Context not present!")
        context = None

    # Generate class probabilities from the model for this sentence.
    input_data = pd.DataFrame([bow(sentence, words)],
                              dtype=float, index=['input'])
    results = model.predict([input_data])[0]
    # Keep only predictions above the threshold, strongest first.
    results = [[i, r] for i, r in enumerate(results) if r > ERROR_THRESHOLD]
    results.sort(key=lambda x: x[1], reverse=True)

    def _speech_payload(text):
        # One-message Facebook fulfillment envelope shared by every branch.
        return jsonify({
            "result": {
                "fulfillment": {
                    "messages": [{
                        "type": 0,
                        "platform": "facebook",
                        "speech": text
                    }]
                }
            }
        })

    if not results:
        return _speech_payload(random.choice(fallback_dict))

    for r in results:
        if context is not None:
            # NOTE(review): overrides the shared `classes` table with the
            # caller-supplied context — mutates module state across requests.
            # Confirm this is intended.
            classes[r[0]] = context
        for x_tend in intents['intents']:
            if classes[r[0]] == x_tend['tag']:
                return _speech_payload(random.choice(x_tend['responses']))

    # No intent tag matched any surviving prediction; previously this fell
    # through and returned None (a Flask error) — answer with a fallback.
    return _speech_payload(random.choice(fallback_dict))
def classify():
    """Classify a sentence via the LUIS NLU service, update the global
    stress model, and return the response payload as JSON.

    Request JSON must contain ``sentence``. The LUIS top-scoring intent
    drives the branch: fallback, extraction-complete, repeat warning, or a
    normal intent response looked up in the local intents file.
    """
    global stress
    context = None
    sentence = request.json['sentence']
    nlu = json.loads(luis.luis_api(sentence))
    return_list = []
    entities = None  # this LUIS path performs no local entity extraction

    sentiment = sentiment_analysis.sentiment_analyzer(
        sentence,
        nlu['sentimentAnalysis']['score'],
        nlu['sentimentAnalysis']['label'])

    intent = nlu['topScoringIntent']['intent']
    probability = "{0:.2f}".format(nlu['topScoringIntent']['score'])

    # Update the global stress model from this turn's polarity and intent
    # ("fallback" when LUIS could not score an intent).
    stress_payload = stress_analysis.stress_analyzer(
        sentiment['polarity'],
        'fallback' if intent == "None" else intent,
        stress)
    stress = stress_payload['stress']
    trigger = stress_payload['trigger']
    responsive = stress_payload['responsive']
    reaction = stress_payload['reaction']
    completion = stress_payload['completion']

    def _entry(tag, response_text, completed):
        # Every branch emits the same envelope; only the intent tag, the
        # response text and the completion flag differ.
        return {
            "query": sentence,
            "intent": tag,
            "response": response_text,
            "context": context,
            "probability": probability,
            "entities": entities,
            "sentiment": sentiment,
            "stress": stress,
            "trigger": trigger,
            "responsive": responsive,
            "reaction": reaction,
            'completion': completed
        }

    if intent == "None":
        # Fallback: the emitted completion flag is hard-coded False even
        # though the stress payload's flag still drives the reset below.
        return_list.append(_entry("fallback", random.choice(fallback_dict), False))
    elif completion is True:
        # Extraction completion event.
        return_list.append(_entry(intent, random.choice(extraction_dict), completion))
    elif stress_payload['repeat'] is not None:
        # The user keeps repeating the same intent — nudge them along.
        return_list.append(_entry(
            intent,
            random.choice(repeat_dict) + " " + "You are only talking about "
            + intent.replace("_", " "),
            completion))
    else:
        for x_tend in intents['intents']:
            if intent == x_tend['tag']:
                return_list.append(_entry(
                    intent, random.choice(x_tend['responses']), completion))

    response = jsonify({"result": return_list, "error": None})
    if completion:
        # Level complete: reset the global stress model to its default.
        stress = DEFAULT_STRESS
    print("GLOBAL STRESS LEVEL : {}".format(stress))
    return response
def classify():
    """Classify a sentence with the local intent model and return every
    matching response (plus sentiment, entities and stress state) as JSON.

    Request JSON must contain ``sentence`` and may contain ``context``,
    which overrides the predicted class label when present.
    """
    global stress
    ERROR_THRESHOLD = 0.75  # minimum model confidence to accept a prediction
    sentence = request.json['sentence']
    sentiment = sentiment_analysis.sentiment_analyzer(sentence)
    if "context" in request.json:
        context = request.json['context']
    else:
        context = None
    entities = entity_extraction.named_entity_extraction(sentence)

    # Generate class probabilities from the model for this sentence.
    input_data = pd.DataFrame([bow(sentence, words)],
                              dtype=float, index=['input'])
    results = model.predict([input_data])[0]
    # Keep only predictions above the threshold, strongest first.
    results = [[i, r] for i, r in enumerate(results) if r > ERROR_THRESHOLD]
    results.sort(key=lambda x: x[1], reverse=True)

    return_list = []
    # NOTE(review): output_context is never assigned a non-None value in this
    # handler, so every entry's "context" field is emitted as None.
    output_context = None

    if not results:
        # Fallback entry: fixed trigger/reaction values, stress unchanged.
        return_list.append({
            "query": sentence,
            "intent": "fallback",
            "response": random.choice(fallback_dict),
            "context": None,
            "probability": "0.00",
            "entities": None,
            "sentiment": sentiment,
            "stress": stress,
            "trigger": 'confused',
            "responsive": True,
            "reaction": None,
            'completion': False
        })
    else:
        for r in results:
            if context is not None:
                # NOTE(review): overrides the shared `classes` table with the
                # caller-supplied context — mutates module state across
                # requests. Confirm this is intended.
                classes[r[0]] = context
            for x_tend in intents['intents']:
                if classes[r[0]] == x_tend['tag']:
                    # Normalize an empty entity list to None.
                    if not entities:
                        entities = None
                    # Update the global stress model for this intent.
                    stress_payload = stress_analysis.stress_analyzer(
                        sentiment['polarity'], classes[r[0]], stress)
                    stress = stress_payload['stress']
                    trigger = stress_payload['trigger']
                    responsive = stress_payload['responsive']
                    reaction = stress_payload['reaction']
                    completion = stress_payload['completion']
                    return_list.append({
                        "query": sentence,
                        "intent": classes[r[0]],
                        "response": random.choice(x_tend['responses']),
                        "context": output_context,
                        "probability": str(round(r[1], 2)),
                        "entities": entities,
                        "sentiment": sentiment,
                        "stress": stress,
                        "trigger": trigger,
                        "responsive": responsive,
                        "reaction": reaction,
                        'completion': completion
                    })

    response = jsonify({"result": return_list, "error": None})
    print("GLOBAL STRESS LEVEL : {}".format(stress))
    return response
def classify():
    """Classify a sentence with the local intent model, update the global
    stress model, and return all generated responses as JSON.

    Request JSON must contain ``sentence`` and may contain ``context``,
    which overrides the predicted class label. A single match can emit a
    completion entry, a repeat/shock entry, and the normal response entry.
    """
    global stress
    ERROR_THRESHOLD = THRESHOLD  # module-level confidence cut-off
    sentence = request.json['sentence']
    sentiment = sentiment_analysis.sentiment_analyzer(sentence)
    if "context" in request.json:
        context = request.json['context']
    else:
        context = None
    entities = entity_extraction.named_entity_extraction(sentence)

    # Generate class probabilities from the model for this sentence.
    input_data = pd.DataFrame([bow(sentence, words)],
                              dtype=float, index=['input'])
    results = model.predict([input_data])[0]
    # Keep only predictions above the threshold, strongest first.
    results = [[i, r] for i, r in enumerate(results) if r > ERROR_THRESHOLD]
    results.sort(key=lambda x: x[1], reverse=True)

    return_list = []
    output_context = None
    # Guard: without this, `completion` is unbound at the reset check below
    # when results are non-empty but no intent tag matches (UnboundLocalError
    # in the original).
    completion = False

    def _entry(response_text):
        # Shared envelope for in-loop entries; reads the loop/stress state
        # current at call time.
        return {
            "query": sentence,
            "intent": classes[r[0]],
            "response": response_text,
            "context": output_context,
            "probability": "{0:.5f}".format(r[1]),
            "entities": entities,
            "sentiment": sentiment,
            "stress": stress,
            "trigger": trigger,
            "responsive": responsive,
            "reaction": reaction,
            'completion': completion
        }

    if not results:
        # No prediction cleared the threshold — log and fall back.
        stress_payload = stress_analysis.stress_analyzer(
            sentiment['polarity'], 'fallback', stress)
        stress = stress_payload['stress']
        trigger = stress_payload['trigger']
        responsive = stress_payload['responsive']
        reaction = stress_payload['reaction']
        completion = stress_payload['completion']
        fallback_logger(sentence)
        return_list.append({
            "query": sentence,
            "intent": "fallback",
            "response": random.choice(fallback_dict),
            "context": None,
            "probability": "0.00",
            "entities": None,
            "sentiment": sentiment,
            "stress": stress,
            "trigger": trigger,
            "responsive": responsive,
            "reaction": reaction,
            'completion': False
        })
    else:
        for r in results:
            if context is not None:
                # NOTE(review): overrides the shared `classes` table with the
                # caller-supplied context — mutates module state across
                # requests. Confirm this is intended.
                classes[r[0]] = context
            for x_tend in intents['intents']:
                if classes[r[0]] == x_tend['tag']:
                    if x_tend['context'] == "":
                        output_context = None
                    # Normalize an empty entity list to None.
                    if not entities:
                        entities = None
                    # Update the global stress model for this intent.
                    stress_payload = stress_analysis.stress_analyzer(
                        sentiment['polarity'], classes[r[0]], stress)
                    stress = stress_payload['stress']
                    trigger = stress_payload['trigger']
                    responsive = stress_payload['responsive']
                    reaction = stress_payload['reaction']
                    completion = stress_payload['completion']
                    if completion is True:
                        print("Extraction completion event triggered!")
                        return_list.append(_entry(random.choice(extraction_dict)))
                    # NOTE(review): a completion entry does not suppress the
                    # entries below — one match may append several results.
                    # Confirm this stacking is intended.
                    if stress_payload['repeat'] is not None:
                        # The user keeps repeating the same intent.
                        return_list.append(_entry(
                            random.choice(repeat_dict) + " "
                            + "You are only talking about "
                            + classes[r[0]].replace("_", " ")))
                    elif reaction == 'shock':
                        # Shock reaction: deliberately empty response text.
                        return_list.append(_entry(""))
                    normal_response = random.choice(x_tend['responses'])
                    normal_intent = classes[r[0]]
                    conversation_logger(sentence, normal_intent, normal_response)
                    return_list.append(_entry(normal_response))

    response = jsonify({"result": return_list, "error": None})
    if completion:
        # Level complete: reset the global stress model to its default.
        stress = DEFAULT_STRESS
    return response