def show_emotion(self, expression, intensity, duration, trigger):
    # Try to avoid showing more than one expression at once
    now = time.time()
    since = self.blackboard["show_expression_since"]
    durat = self.blackboard["current_emotion_duration"]
    if since is not None and (now - since < 0.7 * durat):
        return

    # Update the blackboard
    self.blackboard["current_emotion"] = expression
    self.blackboard["current_emotion_intensity"] = intensity
    self.blackboard["current_emotion_duration"] = duration

    # Create the message
    exp = EmotionState()
    exp.name = self.blackboard["current_emotion"]
    exp.magnitude = self.blackboard["current_emotion_intensity"]
    intsecs = int(duration)
    exp.duration.secs = intsecs
    # nsecs is an integer field; truncate the fractional remainder
    exp.duration.nsecs = int(1000000000 * (duration - intsecs))

    # emotion_pub goes to blender and tts
    if self.do_pub_emotions:
        self.emotion_pub.publish(exp)
        self.write_log(exp.name, time.time(), trigger)

    print "----- Show expression: " + expression + " (" + str(intensity)[:5] + ") for " + str(duration)[:4] + " seconds"
    self.blackboard["show_expression_since"] = time.time()
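# Usage note for the debounce above (hypothetical call sites; 'tree' stands for whatever
# object owns this method and its blackboard):
#   tree.show_emotion("happy", 0.8, 4.0, "greeting")      # published
#   tree.show_emotion("surprised", 0.6, 3.0, "new_face")  # dropped if fewer than 0.7 * 4.0 seconds have elapsed
# i.e. a new expression is only accepted once 70% of the current expression's duration has passed.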
def sad(self):
    # Create the message
    exp = EmotionState()
    exp.name = 'sad'
    exp.magnitude = 1.0
    exp.duration.secs = 5
    exp.duration.nsecs = 250 * 1000000
    self.emotion_pub.publish(exp)
    print "Just published: ", exp.name
def expression(self, name, intensity, duration):
    # Create the message
    exp = EmotionState()
    exp.name = name
    exp.magnitude = intensity
    exp.duration.secs = int(duration)
    # nsecs is an integer field; truncate the fractional remainder
    exp.duration.nsecs = int(1000000000 * (duration - int(duration)))
    self.emotion_pub.publish(exp)
    print "Publish expression: ", exp.name
def expression(self, name, intensity, duration):
    if 'noop' == name or not (self.control_mode & self.C_EXPRESSION):
        return
    # Create the message
    exp = EmotionState()
    exp.name = name
    exp.magnitude = intensity
    exp.duration.secs = int(duration)
    exp.duration.nsecs = int(1000000000 * (duration - int(duration)))
    self.expression_pub.publish(exp)
    print "Publish facial expression:", exp.name
def expression(self, name, intensity, duration):
    if 'noop' == name or not (self.control_mode & self.C_EXPRESSION):
        return
    # Create the message
    exp = EmotionState()
    exp.name = name
    exp.magnitude = intensity
    exp.duration.secs = int(duration)
    exp.duration.nsecs = int(1000000000 * (duration - int(duration)))
    self.emotion_pub.publish(exp)
    print "Publish expression:", exp.name
def expression(self, name, intensity, duration):
    if 'noop' == name:
        return
    # Create the message
    exp = EmotionState()
    exp.name = name
    exp.magnitude = intensity
    exp.duration.secs = int(duration)
    exp.duration.nsecs = int(1000000000 * (duration - int(duration)))
    self.emotion_pub.publish(exp)
    print "Publish expression:", exp.name
def update_head_position(self, event):
    rospy.logdebug(' '.join([
        '%s: %.3f' % (EMOTIONS[n], STATES[n]) for n in xrange(len(EMOTIONS))
    ]))
    for i, emo in enumerate(EMOTIONS):
        expression = EmotionState()
        expression.name = EXPRESSIONS[i]
        expression.magnitude = STATES[i]
        if expression.magnitude > 0.005:
            self.expressions_pub.publish(expression)
def update_robot_emotions_cb(self, event):
    with self.states_lock:
        rospy.logdebug(' '.join([
            '%s: %.3f' % (Mirroring.PERSON_EMOTIONS[n], self.states[n])
            for n in xrange(len(Mirroring.PERSON_EMOTIONS))
        ]))
        for i, emo in enumerate(Mirroring.PERSON_EMOTIONS):
            expression = EmotionState()
            expression.name = Mirroring.ROBOT_EMOTIONS[i]
            expression.magnitude = self.states[i]
            if expression.magnitude > 0.005:
                self.emotion_pub.publish(expression)
def sendEmotion(self, emotion):
    msg = EmotionState()
    # The 'name' field may encode "name[,magnitude[,duration]]"
    args = emotion['name'].split(',', 2)
    logger.info(args)
    msg.magnitude = 1
    msg.duration.secs = 1
    if len(args) >= 1:
        msg.name = str(args[0])
    if len(args) >= 2:
        msg.magnitude = float(args[1])
    if len(args) >= 3:
        # duration.secs/nsecs are integer fields; split the float value accordingly
        secs = float(args[2])
        msg.duration.secs = int(secs)
        msg.duration.nsecs = int(1000000000 * (secs - int(secs)))
    logger.info("Send emotion {}".format(msg))
    self.emotion_topic.publish(msg)
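# Examples of the comma-separated convention parsed above (hypothetical payloads):
#   {'name': 'happy'}             -> name='happy',     magnitude=1,   duration=1s (defaults)
#   {'name': 'sad,0.4'}           -> name='sad',       magnitude=0.4, duration=1s
#   {'name': 'surprised,0.9,2.5'} -> name='surprised', magnitude=0.9, duration=2.5s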
def show_emotion(self, expression, intensity, duration):
    # Update the blackboard
    self.blackboard["current_emotion"] = expression
    self.blackboard["current_emotion_intensity"] = intensity
    self.blackboard["current_emotion_duration"] = duration

    # Create the message
    exp = EmotionState()
    exp.name = self.blackboard["current_emotion"]
    exp.magnitude = self.blackboard["current_emotion_intensity"]
    intsecs = int(duration)
    exp.duration.secs = intsecs
    # nsecs is an integer field; truncate the fractional remainder
    exp.duration.nsecs = int(1000000000 * (duration - intsecs))
    self.emotion_pub.publish(exp)

    print "----- Show expression: " + expression + " (" + str(intensity)[:5] + ") for " + str(duration)[:4] + " seconds"
    self.blackboard["show_expression_since"] = time.time()
def emote(self, name, magnitude, duration, blend):
    """
    Set the robot's emotional state

    :param str name: the id of the emotion
    :param float magnitude: the magnitude of the emotion from 0.0 to 1.0
    :param float duration: the time in seconds that the emotion lasts for
    :param bool blend: blend the emotion with other emotions that also have
        blend=True. If an emotion is sent with blend=False, then it will
        overwrite all previously sent and active blendable emotions.
    :return: None
    """
    msg = EmotionState()
    msg.name = name
    msg.magnitude = magnitude
    msg.duration.secs = int(duration)
    msg.duration.nsecs = int(1000000000 * (duration - int(duration)))
    if blend:
        self.emotion_value_pub.publish(msg)
    else:
        self.emotion_state_pub.publish(msg)
    rospy.logdebug("published emote(name={}, magnitude={}, duration={})".format(
        name, magnitude, duration))
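# Usage sketch for emote() (hypothetical robot handle; names are illustrative only):
#   robot.emote('happy', 0.6, 4.0, blend=True)       # layered with other blendable emotions
#   robot.emote('sad', 0.3, 4.0, blend=True)         # blends with the 'happy' state above
#   robot.emote('surprised', 1.0, 2.0, blend=False)  # overwrites all active blendable emotions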
def chatbot_affect_perceive_callback(self, emo):
    rospy.loginfo('chatbot perceived emo class = ' + emo.data)
    # For now, pass through to blender using a random positive or non-positive class;
    # in the future we want something more cognitive / behavioral.
    # Picking a random emotion may not do anything, depending on the random number,
    # so pass the optional force argument.
    force = True
    if emo.data == 'happy':
        chosen_emo = self.pick_random_expression("positive_emotions", force)
    else:
        chosen_emo = self.pick_random_expression("frustrated_emotions", force)

    # Publish this message to cause chatbot to emit a response if it is waiting
    exp = EmotionState()
    # Getting this from the blackboard seems to be inconsistent with the expected state
    exp.name = self.blackboard["current_emotion"]
    exp.magnitude = 0.5
    # Nominal duration; duration.secs is an integer field, tts can compute the real value if needed
    exp.duration.secs = 3
    exp.duration.nsecs = 0
    self.affect_pub.publish(exp)
    rospy.loginfo('picked and expressed ' + chosen_emo.name)
def start(self, run_time):
    self.runner.topics['emotion'].publish(
        EmotionState(self.data['emotion'],
                     self._magnitude(self.data['magnitude']),
                     rospy.Duration.from_sec(self.data['duration'])))
def HandleTimer(self, data):
    # This is the heart of the synthesizer: the lookat and eyecontact state machines
    # decide where the robot is looking, and random expressions and gestures are
    # triggered to make it look more alive (similar to the RealSense Tracker).
    ts = data.current_expected

    # ==== handle lookat
    if self.lookat == LookAt.IDLE:
        # no specific target, let Blender do its soma cycle thing
        pass

    elif self.lookat == LookAt.AVOID:
        # TODO: find out where there is no saliency, hand or face
        # TODO: head_focus_pub
        pass

    elif self.lookat == LookAt.SALIENCY:
        self.saliency_counter -= 1
        if self.saliency_counter == 0:
            self.InitSaliencyCounter()
            self.SelectNextSaliency()
        if self.current_saliency_ts != 0:
            cursaliency = self.saliencies[self.current_saliency_ts]
            self.UpdateGaze(cursaliency.direction)

    elif self.lookat == LookAt.HAND:
        # stare at hand
        if self.hand != None:
            self.UpdateGaze(self.hand.position)

    elif self.lookat == LookAt.AUDIENCE:
        self.audience_counter -= 1
        if self.audience_counter == 0:
            self.InitAudienceCounter()
            self.SelectNextAudience()
            # TODO: self.UpdateGaze()

    elif self.lookat == LookAt.SPEAKER:
        # TODO: look at the speaker, according to speaker ROI
        pass

    else:
        if self.lookat == LookAt.ALL_FACES:
            self.faces_counter -= 1
            if self.faces_counter == 0:
                self.InitFacesCounter()
                self.SelectNextFace()

        # take the current face
        if self.current_face_id != 0:
            curface = self.faces[self.current_face_id]
            face_pos = curface.position

            # ==== handle eyecontact (only for LookAt.ONE_FACE and LookAt.ALL_FACES)
            # calculate where left eye, right eye and mouth are on the current face
            left_eye_pos = Float32XYZ()
            right_eye_pos = Float32XYZ()
            mouth_pos = Float32XYZ()
            # all are 5cm in front of the center of the face
            left_eye_pos.x = face_pos.x - 0.05
            right_eye_pos.x = face_pos.x - 0.05
            mouth_pos.x = face_pos.x - 0.05
            left_eye_pos.y = face_pos.y + 0.03   # left eye is 3cm to the left of the center
            right_eye_pos.y = face_pos.y - 0.03  # right eye is 3cm to the right of the center
            mouth_pos.y = face_pos.y             # mouth is dead center
            left_eye_pos.z = face_pos.z + 0.06   # left eye is 6cm above the center
            right_eye_pos.z = face_pos.z + 0.06  # right eye is 6cm above the center
            mouth_pos.z = face_pos.z - 0.04      # mouth is 4cm below the center

            if self.eyecontact == EyeContact.IDLE:
                # look at center of the head
                self.UpdateGaze(face_pos)

            elif self.eyecontact == EyeContact.LEFT_EYE:
                # look at left eye
                self.UpdateGaze(left_eye_pos)

            elif self.eyecontact == EyeContact.RIGHT_EYE:
                # look at right eye
                self.UpdateGaze(right_eye_pos)

            elif self.eyecontact == EyeContact.BOTH_EYES:
                # switch between eyes back and forth
                self.eyes_counter -= 1
                if self.eyes_counter == 0:
                    self.InitEyesCounter()
                    if self.current_eye == 1:
                        self.current_eye = 0
                    else:
                        self.current_eye = 1
                # look at that eye
                if self.current_eye == 0:
                    cur_eye_pos = left_eye_pos
                else:
                    cur_eye_pos = right_eye_pos
                self.UpdateGaze(cur_eye_pos)

            elif self.eyecontact == EyeContact.TRIANGLE:
                # cycle between eyes and mouth
                self.eyes_counter -= 1
                if self.eyes_counter == 0:
                    self.InitEyesCounter()
                    if self.current_eye == 2:
                        self.current_eye = 0
                    else:
                        self.current_eye += 1
                # look at that eye (or the mouth)
                if self.current_eye == 0:
                    cur_eye_pos = left_eye_pos
                elif self.current_eye == 1:
                    cur_eye_pos = right_eye_pos
                elif self.current_eye == 2:
                    cur_eye_pos = mouth_pos
                self.UpdateGaze(cur_eye_pos)

            # mirroring
            msg = pau()
            msg.m_coeffs = []
            msg.m_shapekeys = []

            if self.mirroring == Mirroring.EYEBROWS or self.mirroring == Mirroring.EYES or self.mirroring == Mirroring.MOUTH_EYEBROWS or self.mirroring == Mirroring.ALL:
                # mirror eyebrows
                left_brow = curface.left_brow
                right_brow = curface.right_brow
                msg.m_coeffs.append("brow_outer_UP.L")
                msg.m_shapekeys.append(left_brow)
                msg.m_coeffs.append("brow_inner_UP.L")
                msg.m_shapekeys.append(left_brow * 0.8)
                msg.m_coeffs.append("brow_outer_DN.L")
                msg.m_shapekeys.append(1.0 - left_brow)
                msg.m_coeffs.append("brow_outer_up.R")
                msg.m_shapekeys.append(right_brow)
                msg.m_coeffs.append("brow_inner_UP.R")
                msg.m_shapekeys.append(right_brow * 0.8)
                msg.m_coeffs.append("brow_outer_DN.R")
                msg.m_shapekeys.append(1.0 - right_brow)

            if self.mirroring == Mirroring.EYELIDS or self.mirroring == Mirroring.EYES or self.mirroring == Mirroring.MOUTH_EYELIDS or self.mirroring == Mirroring.ALL:
                # mirror eyelids
                eyes_closed = ((1.0 - curface.left_eyelid) + (1.0 - curface.right_eyelid)) / 2.0
                msg.m_coeffs.append("eye-blink.UP.R")
                msg.m_shapekeys.append(eyes_closed)
                msg.m_coeffs.append("eye-blink.UP.L")
                msg.m_shapekeys.append(eyes_closed)
                msg.m_coeffs.append("eye-blink.LO.R")
                msg.m_shapekeys.append(eyes_closed)
                msg.m_coeffs.append("eye-blink.LO.L")
                msg.m_shapekeys.append(eyes_closed)

            if self.mirroring == Mirroring.MOUTH or self.mirroring == Mirroring.MOUTH_EYEBROWS or self.mirroring == Mirroring.MOUTH_EYELIDS:
                # mirror mouth
                mouth_open = curface.mouth_open
                msg.m_coeffs.append("lip-JAW.DN")
                msg.m_shapekeys.append(mouth_open)

            if self.mirroring != Mirroring.IDLE:
                self.StartPauMode()
                self.setpau_pub.publish(msg)

    # start random gestures
    self.gesture_counter -= 1
    if self.gesture_counter == 0:
        self.InitGestureCounter()
        if self.animations != None:
            # list all gestures that would fire right now according to probability
            firing = []
            for g in self.animations[self.current_gestures_name]:
                if random.uniform(0.0, 1.0) <= g["probability"]:
                    firing.append(g)
            # start one randomly from that list
            if len(firing) > 0:
                g = firing[random.randint(0, len(firing) - 1)]
                msg = SetGesture()
                msg.name = g["name"]
                msg.repeat = False
                msg.speed = random.uniform(g["speed_min"], g["speed_max"])
                msg.magnitude = random.uniform(g["magnitude_min"], g["magnitude_max"])
                self.gestures_pub.publish(msg)

    # start random expressions
    self.expression_counter -= 1
    if self.expression_counter == 0:
        self.InitExpressionCounter()
        if self.animations != None:
            # list all expressions that would fire right now according to probability
            firing = []
            for g in self.animations[self.current_expressions_name]:
                if random.uniform(0.0, 1.0) <= g["probability"]:
                    firing.append(g)
            # start one randomly from that list
            if len(firing) > 0:
                g = firing[random.randint(0, len(firing) - 1)]
                msg = EmotionState()
                msg.name = g["name"]
                msg.magnitude = random.uniform(g["magnitude_min"], g["magnitude_max"])
                msg.duration = rospy.Duration(random.uniform(g["duration_min"], g["duration_max"]))
                self.expressions_pub.publish(msg)

    prune_before_time = ts - rospy.Duration.from_sec(self.keep_time)

    # flush faces dictionary, update current face accordingly
    to_be_removed = []
    for face in self.faces.values():
        if face.ts < prune_before_time:
            to_be_removed.append(face.cface_id)
    # remove the elements
    for key in to_be_removed:
        del self.faces[key]
        # make sure the selected face is always valid
        if self.current_face_id == key:
            self.SelectNextFace()

    # remove hand if it is too old
    if self.hand != None:
        if self.hand.ts < prune_before_time:
            self.hand = None

    # flush saliency dictionary
    to_be_removed = []
    for key in self.saliencies.keys():
        if key < prune_before_time:
            to_be_removed.append(key)
    # remove the elements
    for key in to_be_removed:
        del self.saliencies[key]
        # make sure the selected saliency is always valid
        if self.current_saliency_ts == key:
            self.SelectNextSaliency()

    # decay from FOCUSED to IDLE if hand was not seen for a while
    if self.state == State.FOCUSED and self.last_hand_ts < ts - rospy.Duration.from_sec(self.hand_state_decay):
        self.SetState(State.IDLE)
        self.UpdateStateDisplay()

    # decay from SPEAKING or LISTENING to IDLE
    if ((self.state == State.SPEAKING) or (self.state == State.LISTENING)) and self.last_talk_ts < ts - rospy.Duration.from_sec(self.face_state_decay):
        self.SetState(State.IDLE)
        self.UpdateStateDisplay()

    # have gaze or head follow head or gaze after a while
    if self.gaze_delay_counter > 0 and self.gaze_pos != None:
        self.gaze_delay_counter -= 1
        if self.gaze_delay_counter == 0:
            if self.gaze == Gaze.GAZE_LEADS_HEAD:
                self.SetHeadFocus(self.gaze_pos, self.gaze_speed)
                self.gaze_delay_counter = int(self.gaze_delay * self.synthesizer_rate)
            elif self.gaze == Gaze.HEAD_LEADS_GAZE:
                self.SetGazeFocus(self.gaze_pos, self.gaze_speed)
                self.gaze_delay_counter = int(self.gaze_delay * self.synthesizer_rate)

    # when speaking, sometimes look at all faces
    if self.state == State.SPEAKING:
        if self.lookat == LookAt.AVOID:
            self.all_faces_start_counter -= 1
            if self.all_faces_start_counter == 0:
                self.InitAllFacesStartCounter()
                self.SetLookAt(LookAt.ALL_FACES)
                self.UpdateStateDisplay()
        elif self.lookat == LookAt.ALL_FACES:
            self.all_faces_duration_counter -= 1
            if self.all_faces_duration_counter == 0:
                self.InitAllFacesDurationCounter()
                self.SetLookAt(LookAt.AVOID)
                self.UpdateStateDisplay()
def emotion(emo, magnitude=1, duration=1):
    msg = EmotionState()
    msg.name = emo
    msg.magnitude = magnitude
    msg.duration = rospy.Duration.from_sec(duration)
    emotions_pub.publish(msg)
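# Minimal setup sketch for the helper above. Assumptions (adjust to your stack): the
# EmotionState message is provided by blender_api_msgs, and the Blender head listens
# on '/blender_api/set_emotion_state'; both the import and the topic name may differ
# in your installation.
import rospy
from blender_api_msgs.msg import EmotionState

rospy.init_node('emotion_demo')
emotions_pub = rospy.Publisher('/blender_api/set_emotion_state', EmotionState, queue_size=1)
rospy.sleep(0.5)  # give the publisher a moment to connect before sending

emotion('happy', magnitude=0.7, duration=3.5)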