def post(self):
    """Handle a POST request on "/": parse the JSON body, run intent
    detection and the dialogue manager, and return the bot response.

    Returns a singleton list with the response serialized to JSON (a bot
    may in general produce multiple response objects per turn).
    """
    # We assume the body of the incoming request is JSON; force=True
    # parses it even without a JSON Content-Type header
    request_data = request.get_json(force=True)
    # Wrap the dictionary so nested keys can be accessed via dot-notation
    request_data = DictQuery(request_data)
    # Extract the processed user utterance from the dialogue state
    user_utterance = request_data.get(
        "current_state.state.nlu.annotations.processed_text")
    # Run intent detection on the utterance
    intent = send(user_utterance)
    # use the logger instead of bare print() so the output lands in the log
    logger.info("intent: %s", intent["intent"]["name"])
    last_bot = request_data.get("current_state.state.last_bot")
    logger.info("------- Turn info ----------")
    logger.info("User utterance: {}".format(user_utterance))
    logger.info("Last bot: {}".format(last_bot))
    logger.info("---------------------------")
    # The 'result' member is intended as the actual response of the bot
    output = Manager.manager(user_utterance, intent)
    logger.debug("Manager output: %s", output)
    self.response.result = output
    # we store in the dictionary 'bot_params' the current time. Remember that
    # this information will be stored in the database only if the bot is selected
    self.response.bot_params["time"] = str(datetime.now())
    # The response generated by the bot is always considered as a list (we allow
    # a bot to generate multiple response objects for the same turn). Here we
    # create a singleton list with the response in JSON format
    return [self.response.toJSON()]
def post(self):
    """Entry point for POST requests on "/" (see 'add_resource').

    Parses the JSON request body, extracts the processed user utterance,
    forwards it to the Rasa agent, and returns the reply as a singleton
    list of JSON-serialized response objects.
    """
    # force=True: parse the body as JSON regardless of the Content-Type
    payload = DictQuery(request.get_json(force=True))

    # Pull the relevant pieces of the dialogue state
    user_utterance = payload.get(
        "current_state.state.nlu.annotations.processed_text")
    last_bot = payload.get("current_state.state.last_bot")

    logger.info("------- Turn info ----------")
    logger.info("User utterance: {}".format(user_utterance))
    logger.info("Last bot: {}".format(last_bot))
    logger.info("---------------------------")

    # the 'result' member is intended as the actual response of the bot
    self.response.result = self._call_rasa_agent(user_utterance)

    # A bot may produce several response objects for the same turn;
    # here we always return a singleton list
    return [self.response.toJSON()]
def post(self):
    """Handle a POST request: compute an answer from the current dialogue
    state and request a lock whenever a non-empty answer was produced."""
    # TODO populate the attributes in the constructor
    state = DictQuery(request.get_json(force=True))

    answer = self.get_answer(state.get('current_state'))
    self.response.result = answer
    # A non-empty answer means this bot wants to keep the floor next turn
    if answer:
        self.response.lock_requested = True

    logger.debug("Bot_params: {}".format(self.bot_attributes))
    return [self.response.toJSON()]
def load(self):
    """Configure logging, load the YAML config, and wire up the main
    Alana resource on "/"; returns the WSGI application object."""
    opts = self.options
    utils.log.set_logger_params('alana-' + BRANCH,
                                logfile=opts['logfile'],
                                file_level=opts['file_verbosity'],
                                console_level=opts['console_verbosity'])

    # NOTE(review): yaml.load without an explicit Loader is unsafe on
    # untrusted input (PyYAML >= 5.1 deprecates it) — confirm the config
    # file is trusted, or switch to yaml.safe_load
    with open(opts['config_file'], 'r', encoding='UTF-8') as fh:
        config = DictQuery(yaml.load(fh))

    # Build the components shared by all request handlers
    resource_kwargs = {
        "config": config,
        "ranker": create_selection_strategy(config),
        "postprocessor": Postprocessor(filter_attr=config.SENTENCE_FILTER),
        "bucket_filter": BatchFilter(config.SENTENCE_FILTER),
        "emotional_model": EmotionalModel(logger=self.logger),
    }
    api.add_resource(AlanaMain, "/", resource_class_kwargs=resource_kwargs)
    return self.application
def post(self):
    """POST endpoint: unpack the JSON request body and delegate every
    field of interest to get_answer()."""
    body = DictQuery(request.get_json(force=True))
    return self.get_answer(
        session_id=body.get('session_id'),
        asr_hypotheses=body.get('asr_hypotheses'),
        text=body.get('question'),
        timestamp=body.get('timestamp'),
        user_id=body.get('user_id'),
        debug_info_requested=body.get('request_debug_info', False))
def post(self):
    """POST endpoint: parse the JSON body and delegate to _get_answer().

    Any unexpected exception is logged and surfaced to the caller as a
    500 response whose body carries the formatted traceback.
    """
    # (removed a stray debug print('\n') that wrote to stdout per request)
    try:
        return self._get_answer(DictQuery(request.get_json(force=True)))
    except Exception:
        # Top-level boundary handler: log the full traceback and return
        # it to the client instead of crashing the worker
        exc_message = traceback.format_exc(chain=False)
        logger.critical(exc_message)
        return {
            "traceback": exc_message
        }, 500, {
            'Content-Type': 'application/json'
        }
def post(self):
    """Handle a POST request on "/": run the action-language processor on
    the processed user utterance and return the bot response.

    Returns a singleton list with the response serialized to JSON (a bot
    may in general produce multiple response objects per turn).
    """
    # force=True parses the body as JSON regardless of the Content-Type
    request_data = DictQuery(request.get_json(force=True))

    # Extract the processed user utterance and the last selected bot
    user_utterance = request_data.get(
        "current_state.state.nlu.annotations.processed_text")
    last_bot = request_data.get("current_state.state.last_bot")

    logger.info("------- Turn info ----------")
    logger.info("User utterance: {}".format(user_utterance))
    logger.info("Last bot: {}".format(last_bot))
    logger.info("---------------------------")

    # NOTE(review): a new ActionLanguageProcessor (and presumably a new
    # MongoDB connection) is created on every request — consider building
    # it once at resource-creation time if the class allows reuse
    alp = ActionLanguageProcessor(mongodb_url=MONGODB_URL, model_file=None)
    # the 'result' member is intended as the actual response of the bot
    self.response.result = alp.analyse_utterance(user_utterance)

    # we store in the dictionary 'bot_params' the current time. Remember that
    # this information will be stored in the database only if the bot is selected
    self.response.bot_params["time"] = str(datetime.now())

    # The response generated by the bot is always considered as a list; here we
    # create a singleton list with the response in JSON format
    return [self.response.toJSON()]
def __call__(self, *args, **kwargs):
    """Annotate the current user utterance with linked entities.

    Expects 'annotations' (NLU output for the utterance) and optionally
    'context' (dialogue state) in kwargs. Returns a dict carrying the
    'entity_linking' annotation, or None when no annotations were given.
    """
    annotations = kwargs.get("annotations", None)
    if not annotations:
        return None
    p_annotation = annotations.get("processed_text")
    context = DictQuery(kwargs.get("context", {}))
    # entity linking for the current user utterance; skipped when the
    # user says their name, to avoid spuriously linking person names
    user_ents = None
    if p_annotation and not self._name_intent.search(p_annotation):
        user_ents = self._get_entity_mentions(p_annotation, context=context)
    # NOTE: linking entities from the last bot utterance ('bot_entities')
    # is currently disabled; see version history for the old implementation
    return {
        "entity_linking": user_ents,
    }
def cli(args):
    """Run an interactive command-line conversation loop against Alana.

    Reads user utterances from stdin until "stop", an empty line, or
    Ctrl-C is entered, printing Alana's reply for each turn.
    """
    utils.log.set_logger_params('alana-' + BRANCH,
                                logfile=args.logfile,
                                file_level=args.file_verbosity,
                                console_level=args.console_verbosity)
    # NOTE(review): yaml.load without an explicit Loader is unsafe on
    # untrusted input — confirm the config is trusted or use yaml.safe_load
    with open(args.config_file, 'r', encoding='UTF-8') as fh:
        config = DictQuery(yaml.load(fh))
    ranker = create_selection_strategy(config)
    postprocessor = Postprocessor(filter_attr=config.SENTENCE_FILTER)
    bucket_filter = BatchFilter(config.SENTENCE_FILTER)
    emotional_model = EmotionalModel()
    # Use a fixed session ID if one was supplied, else generate a fresh one
    session_id = args.override_sessid if args.override_sessid else 'CLI-' + str(
        uuid.uuid4())
    user_id = args.userid
    alana_params = {
        "config": config,
        "ranker": ranker,
        "postprocessor": postprocessor,
        "bucket_filter": bucket_filter,
        "emotional_model": emotional_model
    }
    while True:
        try:
            text = input("> ")
            # BUGFIX: the original test `text == "stop" or ""` parsed as
            # `(text == "stop") or ""`, so the empty-line exit never fired
            if text in ("stop", ""):
                break
            alana = AlanaMain(**alana_params)
            result = alana.get_answer(session_id=session_id,
                                      text=text,
                                      user_id=user_id)
            print("Alana: %s" % result.get('result'))
        except KeyboardInterrupt:
            break
    print("Completed conversation with session ID {}".format(session_id))
def get_answer(self, state):
    """Drive the task-handling bot for one turn.

    The input text is either a plain user utterance or a JSON-encoded
    command dict (from the task supervisor). User utterances may start a
    new task (matched against self.codes) or advance a task that is
    'waiting' on the stack; command dicts address a task by its task_id.
    Updates self.stack (persisted via bot_params['task_stack']) and
    returns the response text (or a decoded JSON structure).
    """
    result = ''
    state = DictQuery(state)
    # text = state.get('state.nlu.annotations.processed_text')
    text = state.get('state.input.text')
    intent = state.get('state.nlu.annotations.intents.intent')
    # previous system response: first (bot_name, response) pair of the
    # last state's response dict, if any
    prev_resp_list = list(
        state.get('last_state', {}).get('state',
                                        {}).get('response', {}).items())
    try:
        prev_sys, prev_sys_response = prev_resp_list[0]
    except IndexError:
        prev_sys, prev_sys_response = (None, None)
    self.annotated_intents = state.get('state.nlu.annotations.intents')
    last_bot = state.get('state.last_bot')
    # this bot's persisted attributes from the previous turn
    self.bot_attributes = state.get('state.bot_states', {}) \
        .get(self.response.bot_name, {}) \
        .get('bot_attributes', {})
    self.user_id = state.get('user_id')
    # stack of open tasks, each entry is {task_id: task_state_dict}
    self.stack = self.bot_attributes.get('task_stack', [])
    #self.tasks = self.bot_attributes.get('tasks', [])
    code = self.codes.get(intent, {}).get('code', {})
    task_intent = ''
    print()
    logger.info("==================================")
    logger.info("Input: {}".format(text))
    logger.debug("Task Stask: {}".format(self.stack))
    # Check if the input was a user utterance or command
    # (commands arrive as JSON-encoded dicts)
    try:
        text = json.loads(text)
    except:
        pass
    text = str(text) if isinstance(text, int) else text
    if not isinstance(text, dict):
        # --- plain user utterance ---
        # Start a new task if the intent has an entry in self.codes
        if not result and intent in list(
                self.codes.keys()
        ):  # and (not task.get('status') or task.get('action_name') != intent):
            task_id = str(uuid.uuid4())  # New task ID
            logger.info("Generating new task ID: %s", task_id)
            result = code.format(confirmation=self.codes.get(
                intent, {}).get('confirmation', ''),
                                 user_id=self.user_id,
                                 task_id=task_id,
                                 **self.annotated_intents)
            # logger.info("New node: %s", result)
            # Save task id in the stack (in the format {task_id: last question asked})
            # 'unique' tasks: drop any older stack entries with the same
            # action_name before inserting the new one
            unique = self.codes.get(intent, {}).get('unique')
            if unique is not None and unique:
                del_list = []
                for idx, task in enumerate(self.stack):
                    t = list(task.values())[0]
                    if t and "action_name" in t and t[
                            "action_name"] == intent:
                        del_list.append(idx)
                # delete back-to-front so earlier indices stay valid
                for idx in del_list[::-1]:
                    del self.stack[idx]
            self.stack.insert(0, {task_id: None})  # Append this (initialised) task to the bot_attributes
            #new_task = {task_id: {}}
            #self.tasks.append(new_task)
        if not result:
            # No new task started: advance the first stacked task that is
            # in a 'waiting-*' status
            #for task in self.tasks:
            for task in self.stack:
                found = False
                for k, v in list(task.items()):
                    logger.debug("TASK ID %s, STATUS %s", k, v.get('status'))
                    if v.get('status') and v.get('status',
                                                 '').startswith('waiting'):
                        # text = (text + " " + prev_sys_response) if prev_sys_response is not None else text
                        # logger.debug(f"Previous System Response: {prev_sys_response}")
                        # logger.debug(f"Concat text: {text}")
                        logger.info("STATUS: %s", v.get('status'))
                        logger.info("TASK ID: %s", k)
                        task_id = k
                        # Get the status from the previous turn (since input text is simple string)
                        status = v.get('status', '').split('-')[-1]
                        logger.info("STATUS: %s", status)
                        task_intent = (intent if
                                       (intent and intent.startswith('task'))
                                       else v.get('action_name'))
                        logger.debug("INTENT %s, TASK_INTENT %s", intent,
                                     task_intent)
                        result, task_status, task_params = self.status_handler(
                            v,
                            task_intent,
                            task_id=task_id,
                            return_value=v.get('params'),
                            param=self._code_part(text, 'params.place_frame')
                            if self._code_part(text, 'params.place_frame')
                            else "",
                            status=status,
                            text=text)
                        logger.info("Task Outcome: %s", result)
                        if result:
                            values = {
                                'status': task_status,
                                'params': task_params,
                                'action_name': task_intent,
                            }
                            #self.update_task(task_id, values)
                            self.update_stack(task_id, values)
                        found = True
                if found:
                    break
    else:
        # --- command dict from the supervisor ---
        # First get the correct task using the provided id
        status = self._code_part(text, 'status')
        logger.info("STATUS %s", status)
        task_id = self._code_part(text, 'task_id')
        try:
            #task = [list(x.values())[0] for x in self.tasks if task_id in list(x.keys())][0]
            task = [
                list(x.values())[0] for x in self.stack
                if task_id in list(x.keys())
            ][0]
        except IndexError:
            task = {}
        task = task if task is not None else {}
        # logger.debug("+++++++ %s", state.get('last_state.state.nlu.annotations.intents.intent'))
        task_intent = intent if (intent and intent.startswith('task')
                                 ) else task.get('action_name')
        logger.debug("INTENT %s, TASK_INTENT %s", intent, task_intent)
        result, task_status, task_params = self.status_handler(
            task,
            task_intent,
            self._code_part(text, 'return_value'),
            param=self._code_part(text, 'params.place_frame'),
            status=status,
            text=text,
            task_id=task_id)
        if result:
            values = {
                'status': task_status,
                'params': task_params,
                'action_name': task_intent,
            }
            #self.update_task(task_id, values)
            self.update_stack(task_id, values)
    # Unwrap (possibly multiply) JSON-encoded results; stop at the first
    # value that is no longer a JSON string
    while True:
        try:
            result = json.loads(result)
        except:
            logger.debug("Output %s was a String", result)
            break
    print("RESULT: ", result, type(result))
    # Persist the task stack so the next turn sees it in bot_attributes
    self.response.bot_params = {
        'task_stack': self.stack  #,
        # 'tasks': self.tasks
    }
    return result
def _code_part(self, input, key):
    """Look up *key* on *input*, wrapping plain dicts in DictQuery so
    dotted-path keys (e.g. 'params.place_frame') resolve correctly."""
    source = DictQuery(input) if isinstance(input, dict) else input
    return source.get(key)
def status_handler(self,
                   task,
                   intent,
                   return_value,
                   status,
                   param,
                   text=None,
                   task_id=None):
    """Advance one task through its status graph.

    Looks up the node for (intent, status) in self.codes and either
    (a) produces a new TTS response when the task has no status yet or
    the supervisor sent an overriding terminal status, or (b) matches
    the user's reply against the node's 'resolve' patterns when the
    task was 'waiting-for-<status>'. Tasks that reach 'succeeded' or
    'failed' are removed from the stack.

    Returns a (result, new_status, params) tuple; any element may be
    None when the corresponding update is not applicable.
    """
    logger.debug("Intent %s", intent)
    logger.debug("Param %s", param)
    logger.debug(">>>> status %s", status)
    node = self.codes.get(intent).get('status')
    node = DictQuery(DictQuery(node).get(status))
    logger.debug("NODE: %s TYPE %s", node, type(node))
    # logger.debug(node.get('return_tts.text'))
    logger.debug("return_value %s - %s", return_value, type(return_value))
    logger.debug("Task ID: %s", task_id)
    result = None
    params = None
    new_status = None
    logger.debug("Status: {} | task_status: {}".format(
        status, task.get('status')))
    # NOTE(review): "succeded" here is spelled differently from the
    # 'succeeded' checked at the bottom of this function — confirm which
    # spelling the supervisor actually sends
    if not task.get('status') or status in (
            "succeded", "preempted", "failed"
    ):  # If I am not waiting for anything from the user from last turn or supervisor sent a new (overriding) status
        logger.debug("Found a new status")
        # Pick a TTS template; if the node defines 'return_tts.value',
        # evaluate it (formatted with the return value) to fill {value}
        # NOTE(review): eval() on a config-built string is a code-injection
        # risk if self.codes can contain untrusted content — confirm
        result = random.choice(node.get('return_tts.text')).format(
            task_id=task_id,
            value=eval(
                node.get('return_tts.value', '').format(
                    return_value=return_value,
                    options=node.get('return_tts.options')))) if node.get(
                        'return_tts.value') else (
                            random.choice(node.get('return_tts.text'))
                            if isinstance(node.get('return_tts.text'), list)
                            else node.get('return_tts.text'))
        if 'return_cmd' in node:  # and 'execute' not in status:
            # The node issues a command: remember we are now waiting for
            # the user, and stash the last question asked for this task
            new_status = 'waiting-for-' + status
            #try:
            self.update_stack(
                task_id=task_id,
                values={"prev_response": result.split(".")[-1]})
            #TASK_STACK[task_id] = result  # Also add the last question asked for this task_id to the stack
            #except:
            #    pass
            params = return_value
    elif task.get('status') == 'waiting-for-' + status:
        logger.debug("Found waiting for status")
        # Try each resolution pattern against the user's reply; the first
        # match produces the outgoing command
        for k, p in self.compile_resolution_patterns(node.get('resolve'),
                                                     value=return_value,
                                                     frame=return_value):
            logger.debug(f"Searching for pattern: {p.search(text)}")
            if p.search(text) is not None:
                result = node.get('return_cmd', '').format(
                    task_id=task_id,
                    result=json.dumps(k) if
                    (k is not None and not isinstance(k, str)) else
                    k if k is not None else p.search(text).group(0),
                    confirmation=random.choice(
                        node.get('confirmation', 'null')),
                    intent=intent,
                    param=param)
                break
    # If task completed succesfully - remove it from the stack
    if status == 'succeeded' or status == 'failed':
        #self.update_task(task_id=task_id, delete=True)
        self.update_stack(task_id=task_id, delete=True)
    return result, new_status, params
def _get_answer(self, request_data):
    """Produce this bot's response for one turn.

    Extracts NLU annotations and dialogue history from the request,
    guards against low-confidence ASR, then tries in order: intro
    response, builtin intent handlers, and finally a driver. Returns a
    singleton list with the JSON-serialized response (user_attributes
    included).
    """
    nlu_data = DictQuery(
        request_data.get('current_state.state.nlu.annotations'))
    # Reset the locking
    self.response.lock_requested = False
    question = nlu_data.get('processed_text', '')
    intent = nlu_data.get('intents', {}).get('intent')
    intent_param = nlu_data.get('intents', {}).get('param')
    topic = nlu_data.get('topics')
    postags = nlu_data.get('postag')
    # True if the utterance contains any linked entity or NER mention
    nes_in_q = bool(
        list(nlu_data.get('entity_linking', {}).keys())
        or list(y for x in nlu_data.get('ner', {}).values() for y in x))
    # Restore this bot's persisted attributes from the previous turn
    self.response.bot_params = request_data.get(
        'current_state.state.bot_states',
        {}).get(BOT_NAME, {}).get('bot_attributes', {})
    self.user_attributes = request_data.get('user_attributes', {})
    turn = int(request_data.get('current_state.state.turn_no'))
    last_bot = request_data.get('current_state.state.last_bot')
    last_state = request_data.get('history')
    last_state = DictQuery(last_state[-1]) if last_state else None
    # Previous system response text (first value of the response dict)
    last_response = next(
        iter(last_state.get('state.response', {
            '': None
        }).values())) if last_state else None
    last_intent = last_state.get(
        'state.nlu.annotations.intents.intent') if last_state else None
    asr_hyps = request_data.get('current_state.state.input.hypotheses', [])
    self.session_id = request_data.get('current_state.session_id')
    # Check if a rapport driver was used in the last turn
    if last_bot == BOT_NAME and self.response.bot_params.get(
            'driver_type') == 'RAPPORT':
        self.response.bot_params['rapport_last_turn'] = turn - 1
    # Check whether the asr confidence is sufficient enough to process the utterance
    # but do not do it 2 times in a row
    if is_low_confidence_asr(
            asr_hyps) and last_response not in ASR_RESPONSES:
        self.response.result = random.choice(ASR_RESPONSES)
        self.response.lock_requested = True
    else:
        if not self.response.result:
            # try intro (will return nothing on later turns)
            self.response.result = self.get_intro_response(
                question, turn, intent, intent_param, last_bot, last_intent,
                topic, postags)
        # try handling builtin intents
        if not self.response.result:
            self.response.result = self.handle_intents(
                intent, intent_param, question, turn, topic, last_bot,
                last_response, last_intent, postags, nes_in_q)
        if self.response.result:
            # if we have intro/intent response, we should lock
            # NOTE(review): the comment says "lock" but the code sets
            # lock_requested = False — confirm which is intended
            self.response.lock_requested = False
        else:
            # always offer a driver
            self.response.result = self.get_driver(question, turn, last_bot,
                                                   topic, intent,
                                                   intent_param)
    result = self.response.toJSON()
    result['user_attributes'] = self.user_attributes
    logger.debug("Bot Params: {}".format(self.response.bot_params))
    return [result]
def get_answer(
        self,
        event=None,
        # NOTE(review): these strftime defaults are evaluated once at
        # module import, so session_id/timestamp are frozen for the
        # process lifetime when not supplied — confirm this is intended
        session_id='CLI-%s' % time.strftime('%Y-%m-%d--%H-%M-%S'),
        asr_hypotheses=None,
        text=None,
        timestamp=time.strftime('%Y-%m-%dT%H:%M:%SZ'),
        user_id='dummy-user',
        debug_info_requested=False,
):
    """
    The main exposed function to the lambda function. Can either get as input an ASK event
    of individual attributes (e.g. if lambda function extract these or for CLI

    Pipeline for one turn: build the state (from the event or the
    individual arguments), run NLU, fan the state out to the bot
    ensemble, filter and rank the bucket of candidate responses,
    postprocess (drivers, username, emotion tags), persist the state,
    and return the selected response as JSON.
    """
    # Create the state object from input + db
    if isinstance(event, dict):
        event = DictQuery(event)
        self.state_manager.prepare_state(event)
    else:
        self.state_manager.prepare_state_from_external(
            session_id=session_id,
            text=text,
            user_id=user_id,
            timestamp=timestamp,
            hypotheses=asr_hypotheses,
            hostname=HOSTNAME,
            hub_version=VERSION)
    logger.info("\n\n")
    logger.info("--------- Current turn ----------")
    logger.info("Session ID: %s Timestamp %s Host %s Version %s" %
                (session_id, timestamp, HOSTNAME, VERSION))
    logger.info("System emotional state: {}".format(
        self.emotional_model.em_state))
    logger.info("Input text: %s" % text)
    if asr_hypotheses:
        # Log confidence statistics for the top ASR hypothesis
        logger.debug("Avg ASR token confidence score: %g" %
                     (sum(asr_hypotheses[0]['token_conf']) /
                      len(asr_hypotheses[0]['token_conf'])))
        logger.debug("Min ASR token confidence score: %g" %
                     min(asr_hypotheses[0]['token_conf']))
        logger.info("Amazon ASR confidence score: %g" %
                    asr_hypotheses[0]["confidence"])
        logger.debug("ASR hypotheses:\n" + "\n".join([
            "-- [%.3f]: " % hyp['confidence'] + " ".join([
                "%s %.3f" % (tok, conf)
                for tok, conf in zip(hyp['tokens'], hyp['token_conf'])
            ]) for hyp in asr_hypotheses
        ]))
    else:
        logger.info("No ASR information in chat-based mode.")
    # Forward data to the NLU pipeline (https://github.com/WattSocialBot/mercury-nlu/blob/master/notes.md)
    history = self.state_manager.get_history(
        self.state_manager.current_state.get('session_id'))
    user_attributes = self.state_manager.get_user_attributes(
        self.state_manager.current_state.get('user_id'))
    logger.debug("User attributes: %s" % str(user_attributes))
    # Run RegexClassifier on each item on the n-best list
    # (prefer an ASR alternative that carries a non-'stop' intent)
    if asr_hypotheses:
        for candidate, score in self.state_manager.hypotheses_list:
            nlu_data = DictQuery(
                self.nlu_wrapper.annotate(candidate,
                                          modules=[
                                              'Preprocessor', 'RegexIntents',
                                              'PersonaRegexTopicClassifier'
                                          ]))
            if nlu_data and nlu_data.get('annotations.intents.intent') and \
                    not nlu_data.get('annotations.intents.intent') == 'stop':
                # self.state_manager.hypotheses_list[0][1] - score < 0.2:
                # Just to notify us if an alternative was found
                if self.state_manager.hypotheses_list.index(
                        (candidate, score)) > 0:
                    logger.debug("Alternative intent found: {}".format(
                        nlu_data.get('annotations.intents.intent')))
                self.state_manager.text = candidate
                break
    # Payload for the external NLU service
    nlu_data = {
        "state": {
            "utterance": self.state_manager.text,
            "context": {
                "current_state": self.state_manager.current_state
            }
        }
    }
    if history:
        nlu_data["state"]["context"]["history"] = history
    if user_attributes:
        nlu_data["state"]["context"]["user_attributes"] = user_attributes
    nlu_annotations = call_module(self.config.SERVICES["nlu"],
                                  nlu_data,
                                  self.config.BOT_TIMEOUT,
                                  module_name='NLU')
    # we can only continue if NLU worked
    if nlu_annotations is not None:
        logger.debug("Executed NLU modules: %s",
                     ",".join(nlu_annotations["modules"]))
        # Update state with NLU annotations
        self.state_manager.add_annotations_to_state(nlu_annotations)
        logger.info("Linker: {}".format(
            format_linker_info(nlu_annotations["annotations"].get(
                'entity_linking', {}))))
        logger.info("Topic: {}".format(
            nlu_annotations['annotations'].get('topics')))
        logger.info("Intents: {}".format(
            nlu_annotations['annotations'].get('intents')))
        logger.info("Multi-turn intents: {}".format(
            nlu_annotations['annotations'].get('multi_turn_intents')))
        logger.info("NPs: {}".format(
            nlu_annotations['annotations'].get('nps')))
        logger.info("Bot NER: {} / user NER: {}".format(
            nlu_annotations['annotations'].get('bot_ner'),
            nlu_annotations['annotations'].get('ner')))
        logger.info("Processed text: {}".format(
            nlu_annotations['annotations'].get('processed_text', '')))
        # logger.info("--------------------------------")
        # Forward state to bot ensemble
        bucket = self.call_bots(self.state_manager.current_state,
                                history=history,
                                user_attributes=user_attributes)
    # NLU fail -- can't call bots, just go to empty bucket
    else:
        bucket = None
    # Send populated bucket to the selection strategy module to pick a response
    # Since now coherence_bot is being called on each turn, but only written to the db if used, if no other
    # bot has a response the coherence_bot will
    if not bucket:
        # if the bucket is empty generates a message for the user
        response = Response({
            'result': random.choice(no_response_templates),
            'bot_name': 'empty_bucket',
            'user_attributes': {},
            'bot_params': {},
            'lock_requested': False
        })
    else:
        logger.debug("---------------- Bucket -----------------------")
        logger.info('Bots in bucket: %s' %
                    ', '.join([x.bot_name for x in bucket]))
        logger.debug("Bucket: \n{}".format("\n".join(
            map(
                lambda x: "-- [{}]{}: {}".format(
                    x.bot_name, "[L]"
                    if x.lock_requested else "", x.result), bucket))))
        # Filter bucket for profanity, length, etc.
        bucket = self.bucket_filter.filter(bucket, history, nlu_annotations)
        logger.info('Bots in filtered bucket: %s' %
                    ', '.join([x.bot_name for x in bucket]))
        # logger.debug("Filtered bucket: \n{}".format(
        #     "\n".join(map(lambda x: "[{}]: {}".format(x.bot_name, x.result), bucket)))
        # )
        # Send populated bucket to the selection strategy module to pick a response
        response = self.ranker.select_response(
            bucket, self.state_manager.current_state, history)
        logger.debug("----------------------------------------------")
    # adding drivers if needed
    response.result, coherence_attributes, coherence_lock = self.postprocessor.fix_drivers(
        response.result, bucket)
    # Update state with info from selected bot
    self.state_manager.update_state_with_response(
        result=response.result,
        bot_name=response.bot_name,
        lock_requested=response.lock_requested,
        bot_state=response.bot_params,
        user_attr=response.user_attributes)
    # replacing user name in the response
    response.result = self.postprocessor.replace_username(
        response.result,
        response.user_attributes.get('user_name',
                                     self.state_manager.user_name))
    ################### EMOTIONAL STUFF ###################
    # get the emotional output tag and shift if present
    # TODO: (IDEA) Proccess multiple tags in the same sentence
    response.result, tts_emotion, emotion_shift = self.postprocessor.emotion_postprocess(
        response.result)
    if tts_emotion:
        response.emotion = tts_emotion
        logger.debug("Response is said as: %s" % tts_emotion)
    if emotion_shift:
        self.emotional_model.adjust_emotion(emotion_shift)
        self.state_manager.system_emotion = self.emotional_model.check_state(
        )
        logger.debug("Emotion shifted by: {}. System emotion is {}".format(
            emotion_shift, self.state_manager.system_emotion))
    # updating coherence attributes if a driver was added
    if coherence_attributes:
        self.state_manager.bot_states['coherence_bot'] = {
            'lock_requested': coherence_lock,
            'bot_attributes': coherence_attributes
        }
        # self.state_manager.current_state['state']['bot_states']['coherence_bot'] = coherence_attributes
        self.state_manager.set_response_edits(
            {'driver_added': coherence_attributes.get('driver', '')})
    self.state_manager.save_current_state()
    # if requested, add NLU annotations + list of bots in bucket to the returned value
    if debug_info_requested:
        response.debug_info = self._prepare_debug_info(
            nlu_annotations, bucket)
    logger.info("Selected response: [{}]: {}".format(
        response.bot_name, response.result))
    # logger.debug(json.dumps(self.state_manager.current_state, indent=4))
    logger.debug(response.toJSON())
    return response.toJSON()