def __init__(self):
    """Connect to MongoDB and subscribe to the topics being logged."""
    try:
        self.mongodb = get_mongodb()
    except Exception as ex:
        # Fall back to an unconnected wrapper so later code can still
        # check ``self.mongodb.client``.
        self.mongodb = MongoDB()
        logger.error(ex)
    self.run_id = rospy.get_param('/run_id', '')
    # Mirror the blender and face-recognizer topics into the database.
    for topic, msg_type, handler in (
            ('/blender_api/set_emotion_state', EmotionState, self.log_emotion),
            ('/blender_api/set_gesture', SetGesture, self.log_gesture),
            ('face_recognizer/faces', Faces, self.log_faces)):
        rospy.Subscriber(topic, msg_type, handler)
def __init__(self):
    """Set up the TTS node: client, executor, parameters, DB and ROS wiring."""
    self.client = Client()
    self.executor = TTSExecutor()
    # Emotion-modulated speech stays off until explicitly enabled.
    self.emo_enabled = False
    self.emotion_params = {}
    self.tts_params = {}
    self.voices = rospy.get_param('voices', {})
    try:
        self.mongodb = get_mongodb()
    except Exception as ex:
        logger.error(ex)
        # Unconnected placeholder; callers check ``self.mongodb.client``.
        self.mongodb = MongoDB()
    self.run_id = rospy.get_param('/run_id', '')
    self.service = rospy.Service('tts_length', TTSLength, self.tts_length)
    topic = rospy.get_param('tts_topic', 'chatbot_responses')
    rospy.Subscriber(topic, TTS, self.say)
def __init__(self):
    """Initialize the chatbot node: chatbot client, sentiment state,
    persistence, and all ROS publishers/subscribers."""
    self.botname = rospy.get_param('botname', 'sophia')
    self.client = Client(
        HR_CHATBOT_AUTHKEY, self.botname, response_listener=self,
        stdout=Console())
    self.client.chatbot_url = rospy.get_param(
        'chatbot_url', 'http://localhost:8001')
    # The chatbot saves a bit of simple state to handle sentiment
    # analysis: after formulating a response it saves it in a buffer
    # if S.A. is active.  Simple state transition - initialized to
    # 'wait_client'; after getting the client response, if S.A. is
    # active, go to 'wait_emo'; the affect_express callback publishes
    # the response and resets to 'wait_client'.
    self._response_buffer = ''
    self._state = 'wait_client'
    # Sentiment analysis must be activated by argument.
    self._sentiment_active = False
    # sentiment dictionary
    self.polarity = Polarity()
    self._polarity_threshold = 0.2
    self.speech = False
    self.enable = True
    self.mute = False
    self.insert_behavior = False
    self._locker = Locker()
    try:
        self.mongodb = get_mongodb()
    except Exception as ex:
        # FIX: the exception was silently swallowed here; log it like the
        # other nodes in this package do, then fall back to an
        # unconnected MongoDB wrapper.
        logger.error(ex)
        self.mongodb = MongoDB()
    self.node_name = rospy.get_name()
    self.output_dir = os.path.join(
        HR_CHATBOT_REQUEST_DIR,
        dt.datetime.strftime(dt.datetime.utcnow(), '%Y%m%d'))
    if not os.path.isdir(self.output_dir):
        os.makedirs(self.output_dir)
    self.requests_fname = os.path.join(
        self.output_dir, '{}.csv'.format(str(uuid.uuid1())))
    self.input_stack = []
    self.timer = None
    self.delay_response = rospy.get_param('delay_response', False)
    self.recover = False
    self.delay_time = rospy.get_param('delay_time', 5)
    self.run_id = rospy.get_param('/run_id', '')
    self.client.set_run_id(self.run_id)
    logger.info("Set run_id %s", self.run_id)
    rospy.Subscriber('chatbot_speech', ChatMessage, self._request_callback)
    # robot starts to speak
    rospy.Subscriber('speech_events', String, self._speech_event_callback)
    # user starts to speak
    rospy.Subscriber('chat_events', String, self._chat_event_callback)
    rospy.Subscriber('audio_sensors', audiodata, self._audio_sensors_callback)
    self.tts_ctrl_pub = rospy.Publisher('tts_control', String, queue_size=1)
    self._response_publisher = rospy.Publisher(
        'chatbot_responses', TTS, queue_size=1)
    # send communication non-verbal blink message to behavior
    self._blink_publisher = rospy.Publisher(
        'chatbot_blink', String, queue_size=1)
    # Perceived emotional content; and emotion to express.
    # Perceived: based on what the chatbot heard, this is how the robot
    # should feel.  Expressed: the emotional content that the chatbot
    # should put into what it says.
    self._affect_publisher = rospy.Publisher(
        'chatbot_affect_perceive', String, queue_size=1)
    # Echo chat messages as plain strings.
    self._echo_publisher = rospy.Publisher(
        'perceived_text', String, queue_size=1)
    rospy.Subscriber('chatbot_speech', ChatMessage, self._echo_callback)
    rospy.set_param('node_status/chatbot', 'running')
    self.btree_publisher = rospy.Publisher(
        '/behavior_switch', String, queue_size=1)
    self._gesture_publisher = rospy.Publisher(
        '/blender_api/set_gesture', SetGesture, queue_size=1)
    self._look_at_publisher = rospy.Publisher(
        '/blender_api/set_face_target', Target, queue_size=1)
    # r2_perception
    self._perception_assign_publisher = rospy.Publisher(
        'perception/api/assign', Assign, queue_size=1)
    self._perception_forget_publisher = rospy.Publisher(
        'perception/api/forget', Forget, queue_size=1)
    self._perception_forget_all_publisher = rospy.Publisher(
        'perception/api/forget_all', ForgetAll, queue_size=1)
    self._perception_state_subscriber = rospy.Subscriber(
        'perception/state', State, self._perception_state_callback)
    self.perception_users = {}
    self.face_cache = []
    self.main_face = None
    self.faces = {}  # faceid(session) -> face
    self.current_user = None
import logging
import os
import traceback
import uuid
from collections import defaultdict

from config import HISTORY_DIR, TEST_HISTORY_DIR, SESSION_REMOVE_TIMEOUT
from response_cache import ResponseCache
from chatbot.server.character import TYPE_AIML
from chatbot.db import get_mongodb, MongoDB

logger = logging.getLogger('hr.chatbot.server.session')
report_logger = logging.getLogger('hr.chatbot.server.session.report')

try:
    mongodb = get_mongodb()
except Exception as ex:
    # FIX: was ``except ImportError`` — get_mongodb() fails with
    # connection errors, not ImportError, so those crashed the module at
    # import time.  Catch Exception and fall back to an unconnected
    # wrapper, consistent with the other modules in this package.
    mongodb = MongoDB()
    logger.error(ex)

# FIX: ``os`` was used here without ever being imported (NameError at
# import time); ``import os`` added above.
ROBOT_NAME = os.environ.get('NAME', 'default')


class SessionContext(dict):
    """Per-session key/value context.

    NOTE(review): although this subclasses ``dict``, item access is
    routed through the instance ``__dict__`` rather than the dict's own
    storage, so the inherited dict stays empty — confirm this is
    intentional before changing it.
    """

    def __init__(self):
        # Per-key sub-dictionaries, created on demand.
        self.context = defaultdict(dict)

    def __setitem__(self, key, item):
        self.__dict__[key] = item

    def __getitem__(self, key):
        return self.__dict__[key]
class MongoLogger(object):
    """Logs blender and face-recognizer ROS messages to MongoDB."""

    def __init__(self):
        """Connect to MongoDB and subscribe to the topics being logged."""
        try:
            self.mongodb = get_mongodb()
        except Exception as ex:
            # Fall back to an unconnected wrapper; the log_* methods
            # check ``self.mongodb.client`` before writing.
            self.mongodb = MongoDB()
            logger.error(ex)
        self.run_id = rospy.get_param('/run_id', '')
        rospy.Subscriber('/blender_api/set_emotion_state', EmotionState,
                         self.log_emotion)
        rospy.Subscriber('/blender_api/set_gesture', SetGesture,
                         self.log_gesture)
        rospy.Subscriber('face_recognizer/faces', Faces, self.log_faces)

    def _log(self, collection, record):
        """Insert record into collection; on any failure, deactivate
        mongodb for this process by clearing the client."""
        try:
            # FIX: dropped unused ``result`` binding.
            collection.insert_one(record)
            logger.info("Added record to mongodb")
        except Exception:
            # One failed write disables all further inserts.
            self.mongodb.client = None
            logger.error(traceback.format_exc())
            # FIX: logger.warn() is a deprecated alias of warning().
            logger.warning("Deactivate mongodb")

    def log_emotion(self, msg):
        """Log a blender EmotionState message."""
        record = {
            'Datetime': dt.datetime.utcnow(),
            'RunID': self.run_id,
            'Name': msg.name,
            'Magnitude': msg.magnitude,
            'Duration': msg.duration.nsecs,
        }
        if self.mongodb.client is not None:
            collection = self.mongodb.client[self.mongodb.dbname][
                ROBOT_NAME]['blender']['emotion_state']
            self._log(collection, record)

    def log_gesture(self, msg):
        """Log a blender SetGesture message."""
        record = {
            'Datetime': dt.datetime.utcnow(),
            'RunID': self.run_id,
            'Name': msg.name,
            'Repeat': msg.repeat,
            'Speed': msg.speed,
            'Magnitude': msg.magnitude,
        }
        if self.mongodb.client is not None:
            collection = self.mongodb.client[self.mongodb.dbname][
                ROBOT_NAME]['blender']['gesture']
            self._log(collection, record)

    def log_faces(self, msg):
        """Log every face in a Faces message and mirror each record to
        the shared collection."""
        time = dt.datetime.utcnow()
        for face in msg.faces:
            record = {
                'Datetime': time,
                'RunID': self.run_id,
                'FaceID': face.faceid,
                'Left': face.left,
                'Top': face.top,
                'Right': face.right,
                'Bottom': face.bottom,
                'Confidence': face.confidence,
            }
            # Re-checked every iteration on purpose: a failed write in
            # _log clears the client and stops further inserts.
            if self.mongodb.client is not None:
                collection = self.mongodb.client[self.mongodb.dbname][
                    ROBOT_NAME]['face_recognizer']['faces']
                sharecollection = self.mongodb.get_share_collection()
                self._log(collection, record)
                self._log(sharecollection,
                          {'node': 'face_recognizer', 'msg': record})