def nod(self):
		ges = SetGesture()
		ges.name = 'nod-2'
		ges.magnitude = 1.0
		ges.repeat = False
		ges.speed = 1.0
		self.gesture_pub.publish(ges)
		print "Just published gesture: ", ges.name
	def gesture(self, name, intensity, repeat, speed):
		# Create the message
		ges = SetGesture()
		ges.name = name
		ges.magnitude = intensity
		ges.repeat = repeat
		ges.speed = speed
		self.gesture_pub.publish(ges)
		print "Published gesture: ", ges.name
Example #3
	def show_gesture(self, gesture, intensity, repeat, speed):
		ges = SetGesture()
		ges.name = gesture
		ges.magnitude = intensity
		ges.repeat = repeat
		ges.speed = speed
		self.gesture_pub.publish(ges)

		print "----- Show gesture: " + gesture + " (" + str(intensity)[:5] + ")"
	def gesture(self, name, intensity, repeat, speed):
		if 'noop' == name or (not self.control_mode & self.C_GESTURE):
			return
		# Create the message
		ges = SetGesture()
		ges.name = name
		ges.magnitude = intensity
		ges.repeat = repeat
		ges.speed = speed
		self.gesture_pub.publish(ges)
		print "Published gesture: ", ges.name
Example #5
    def _request_callback(self, chat_message):
        if not self.enable:
            logger.info("Chatbot is disabled")
            return
        if self.speech:
            logger.info("In speech, ignore the question")
            return
        if 'shut up' in chat_message.utterance.lower():
            logger.info("Robot's talking wants to be interruptted")
            self.tts_ctrl_pub.publish("shutup")
            rospy.sleep(0.5)
            self._affect_publisher.publish(String('sad'))
            if not self.mute:
                self._response_publisher.publish(
                    TTS(text='Okay', lang=chat_message.lang))
            return

        # Handle chatbot command
        cmd, arg, line = self.client.parseline(chat_message.utterance)
        func = None
        try:
            if cmd is not None:
                func = getattr(self.client, 'do_' + cmd)
        except AttributeError as ex:
            pass
        if func:
            try:
                func(arg)
            except Exception as ex:
                logger.error("Executing command {} error {}".format(func, ex))
            return

        chat_message.utterance = self.handle_control(chat_message.utterance)

        # Blink to acknowledge that we heard the request; the 'chat_heard'
        # probability is defined in the blink callback
        self._blink_publisher.publish('chat_heard')

        if self.delay_response:
            logger.info("Add input: {}".format(chat_message.utterance))
            self.input_stack.append((time.clock(), chat_message))
            self._gesture_publisher.publish(SetGesture('nod-2', 0, 1, 1))
            self._gesture_publisher.publish(
                SetGesture('blink-relaxed', 0, 1, 1))
            self.reset_timer()
        else:
            self.ask([chat_message])
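The command handling above uses parseline from Python's cmd module, which splits an input line into (command, args, line) and pairs naturally with do_* handler methods. A small standalone sketch of that dispatch pattern; the ChatCommands class and its do_shutdown handler are hypothetical:

# Standalone sketch of the cmd.Cmd parseline/do_* dispatch used above;
# the ChatCommands class and do_shutdown handler are hypothetical.
import cmd

class ChatCommands(cmd.Cmd):
    def do_shutdown(self, arg):
        print("shutting down: " + arg)

client = ChatCommands()
command, arg, line = client.parseline("shutdown now")
handler = getattr(client, 'do_' + command, None) if command else None
if handler:
    handler(arg)  # prints "shutting down: now"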
Example #6
def record(device, serial_port_data_file, pau_data_file, gesture, duration):
    raw_data_file = os.path.join(CWD, 'serial_port_data.raw')
    recorder = SerialPortRecorder(device, raw_data_file)

    rospy.init_node("record")
    rospy.wait_for_service('/blender_api/get_param')
    rospy.wait_for_service('/blender_api/set_param')
    blender_get_param = rospy.ServiceProxy('/blender_api/get_param', GetParam)
    blender_set_param = rospy.ServiceProxy('/blender_api/set_param', SetParam)

    blender_set_param("bpy.context.scene['commandListenerActive']", "False")
    time.sleep(2)
    os.system('cat < {}'.format(device))

    blender_set_param("bpy.context.scene['commandListenerActive']", "True")
    recorder.start()
    print "Start recording"

    # publish animation message
    queue = MessageQueue()
    queue.subscribe('/blender_api/get_pau', pau)
    pub = rospy.Publisher('/blender_api/set_gesture', SetGesture, latch=True)
    pub.publish(SetGesture(gesture, 1, 1, 1))
    timestamps = OrderedDict()
    timestamps['publish_set_gesture_msg'] = time.time()

    current_gestures = None
    while current_gestures is None:
        current_gestures = eval(blender_get_param("self.getGestures()").value)
        time.sleep(0.01)
    timestamps['blender_play_gestures'] = time.time()

    motor_msg = rospy.wait_for_message('/sophia/safe/head/command', MotorCommand)
    timestamps['motor_command_arrive'] = time.time()

    time.sleep(duration) # wait for animation to finish
    blender_set_param("bpy.context.scene['commandListenerActive']", "False")
    recorder.stop()
    print "Stop recording"

    pau_messages = queue.tolist()
    shapekeys = eval(blender_get_param("self.getFaceData()").value)
    pau_df = pd.DataFrame(columns = shapekeys.keys())
    for pau_message in pau_messages:
        pau_df.loc[len(pau_df)] = pau_message.m_coeffs
    pau_df.to_csv(pau_data_file, index=False)
    print "Write pau message to %s" % pau_data_file

    timestamps['first_serial_port_data'] = recorder.start_time
    timestamps['last_serial_port_data'] = recorder.stop_time
    for k, v in timestamps.items():
        print k, datetime.fromtimestamp(v)

    parse_raw_data(raw_data_file, serial_port_data_file)
    os.remove(raw_data_file)
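A possible invocation of record(); the device path, output file names, gesture name and duration below are illustrative values, not taken from the original script:

# Hypothetical call of record(); all argument values are illustrative.
record('/dev/ttyUSB0', 'serial_port_data.csv', 'pau_data.csv', 'nod-2', 10)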
def play(gesture, speed=1, magnitude=1, repeat=1):
    msg = SetGesture()
    msg.speed = speed
    msg.magnitude = magnitude
    msg.repeat = repeat
    msg.name = gesture
    gestures_pub.publish(msg)
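play() relies on a module-level gestures_pub publisher. A possible setup and call, reusing the /blender_api/set_gesture topic and the nod-2 gesture name seen elsewhere in these examples:

# Assumed module-level publisher for play(); topic, message package and
# gesture name are taken from or modeled on the other examples here.
import rospy
from blender_api_msgs.msg import SetGesture

rospy.init_node('gesture_player')
gestures_pub = rospy.Publisher('/blender_api/set_gesture', SetGesture,
                               queue_size=1, latch=True)
play('nod-2', speed=1.0, magnitude=1.0, repeat=1)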
Example #8
    def blink(self):
        msg = SetGesture()
        msg.name = random.choice(self.blink_animations)
        msg.speed = 1.0
        msg.magnitude = 1.0
        msg.repeat = 0

        self.gesture_pub.publish(msg)
Example #9
 def sendGesture(self, gesture):
     msg = SetGesture()
     args = gesture['name'].split(',', 2)
     logger.info(args)
     msg.speed = 1
     msg.magnitude = 1
     if len(args) >= 1:
         msg.name = str(args[0])
     if len(args) >= 2:
         msg.speed = float(args[1])
     if len(args) >= 3:
         msg.magnitude = float(args[2])
     logger.info("Send gesture {}".format(msg))
     self.gesture_topic.publish(msg)
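sendGesture() expects gesture['name'] to be a comma-separated "name,speed,magnitude" string, with speed and magnitude optional. An illustrative payload showing how the split() parsing above sees it; nod-2 is a gesture name used elsewhere, the numbers are made up:

# Illustrative payload for sendGesture(); the comma-separated format
# mirrors the split(',', 2) parsing above.
gesture = {'name': 'nod-2,0.8,0.5'}   # name, speed, magnitude
args = gesture['name'].split(',', 2)  # -> ['nod-2', '0.8', '0.5']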
Example #10
 def gesture(self, name, intensity, repeat, speed):
     if 'noop' == name or (not self.control_mode & self.C_GESTURE):
         return
     # Create the message
     ges = SetGesture()
     ges.name = name
     ges.magnitude = intensity
     ges.repeat = repeat
     ges.speed = speed
     self.gesture_pub.publish(ges)
     print "Published gesture: ", ges.name
Example #11
	def show_gesture(self, gesture, intensity, repeat, speed, trigger):
		ges = SetGesture()
		ges.name = gesture
		ges.magnitude = intensity
		ges.repeat = repeat
		ges.speed = speed
		if (self.do_pub_gestures) :
			self.gesture_pub.publish(ges)
			self.write_log(ges.name, time.time(), trigger)

		print "----- Show gesture: " + gesture + " (" + str(intensity)[:5] + ")"
Example #12
    def gesture(self, name, speed, magnitude, repeat):
        """ Set a pose on the robot's face

        :param str name: the id of the gesture
        :param float speed:
        :param float magnitude: the magnitude of the gesture from 0.0 to 1.0
        :param int32 repeat: number of times to repeat
        :return: None
        """

        msg = SetGesture()
        msg.name = name
        msg.speed = speed
        msg.magnitude = magnitude
        msg.repeat = repeat

        self.gesture_pub.publish(msg)
        rospy.logdebug("published gesture(name={}, speed={}, magnitude={}, repeat={})".format(name, speed, magnitude,
                                                                                              repeat))
Example #13
 def start(self, run_time):
     self.runner.topics['gesture'].publish(
         SetGesture(self.data['gesture'], 1, float(self.data['speed']),
                    self._magnitude(self.data['magnitude'])))
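start() reads its parameters from self.data. A plausible shape for that dictionary, with illustrative values; the exact schema comes from the surrounding performance framework and is assumed here:

# Illustrative self.data payload for start(); keys match the lookups above,
# values are made up.
data = {
    'gesture': 'nod-2',   # gesture name
    'speed': '1.0',       # parsed with float() above
    'magnitude': '0.5',   # passed through self._magnitude()
}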
Example #14
    def HandleTimer(self,data):

        # This is the heart of the synthesizer: the lookat and eyecontact state
        # machines decide where the robot is looking, and random expressions and
        # gestures are triggered to make the robot appear more alive (as with
        # the RealSense Tracker).

        ts = data.current_expected

        # ==== handle lookat
        if self.lookat == LookAt.IDLE:
            # no specific target, let Blender do its soma cycle thing
            pass

        elif self.lookat == LookAt.AVOID:
            # TODO: find out where there is no saliency, hand or face
            # TODO: head_focus_pub
            pass

        elif self.lookat == LookAt.SALIENCY:
            self.saliency_counter -= 1
            if self.saliency_counter == 0:
                self.InitSaliencyCounter()
                self.SelectNextSaliency()
            if self.current_saliency_ts != 0:
                cursaliency = self.saliencies[self.current_saliency_ts]
                self.UpdateGaze(cursaliency.direction)

        elif self.lookat == LookAt.HAND:
            # stare at hand
            if self.hand != None:
                self.UpdateGaze(self.hand.position)

        elif self.lookat == LookAt.AUDIENCE:
            self.audience_counter -= 1
            if self.audience_counter == 0:
                self.InitAudienceCounter()
                self.SelectNextAudience()
                # TODO: self.UpdateGaze()

        elif self.lookat == LookAt.SPEAKER:
            # TODO: look at the speaker, according to speaker ROI
            pass

        else:
            if self.lookat == LookAt.ALL_FACES:
                self.faces_counter -= 1
                if self.faces_counter == 0:
                    self.InitFacesCounter()
                    self.SelectNextFace()

            # take the current face
            if self.current_face_id != 0:
                curface = self.faces[self.current_face_id]
                face_pos = curface.position

                # ==== handle eyecontact (only for LookAt.ONE_FACE and LookAt.ALL_FACES)

                # calculate where left eye, right eye and mouth are on the current face
                left_eye_pos = Float32XYZ()
                right_eye_pos = Float32XYZ()
                mouth_pos = Float32XYZ()

                # all are 5cm in front of the center of the face
                left_eye_pos.x = face_pos.x - 0.05
                right_eye_pos.x = face_pos.x - 0.05
                mouth_pos.x = face_pos.x - 0.05

                left_eye_pos.y = face_pos.y + 0.03  # left eye is 3cm to the left of the center
                right_eye_pos.y = face_pos.y - 0.03  # right eye is 3cm to the right of the center
                mouth_pos.y = face_pos.y  # mouth is dead center

                left_eye_pos.z = face_pos.z + 0.06  # left eye is 6cm above the center
                right_eye_pos.z = face_pos.z + 0.06  # right eye is 6cm above the center
                mouth_pos.z = face_pos.z - 0.04  # mouth is 4cm below the center

                if self.eyecontact == EyeContact.IDLE:
                    # look at center of the head
                    self.UpdateGaze(face_pos)

                elif self.eyecontact == EyeContact.LEFT_EYE:
                    # look at left eye
                    self.UpdateGaze(left_eye_pos)

                elif self.eyecontact == EyeContact.RIGHT_EYE:
                    # look at right eye
                    self.UpdateGaze(right_eye_pos)

                elif self.eyecontact == EyeContact.BOTH_EYES:
                    # switch between eyes back and forth
                    self.eyes_counter -= 1
                    if self.eyes_counter == 0:
                        self.InitEyesCounter()
                        if self.current_eye == 1:
                            self.current_eye = 0
                        else:
                            self.current_eye = 1
                    # look at that eye
                    if self.current_eye == 0:
                        cur_eye_pos = left_eye_pos
                    else:
                        cur_eye_pos = right_eye_pos
                    self.UpdateGaze(cur_eye_pos)

                elif self.eyecontact == EyeContact.TRIANGLE:
                    # cycle between eyes and mouth
                    self.eyes_counter -= 1
                    if self.eyes_counter == 0:
                        self.InitEyesCounter()
                        if self.current_eye == 2:
                            self.current_eye = 0
                        else:
                            self.current_eye += 1
                    # look at that eye
                    if self.current_eye == 0:
                        cur_eye_pos = left_eye_pos
                    elif self.current_eye == 1:
                        cur_eye_pos = right_eye_pos
                    elif self.current_eye == 2:
                        cur_eye_pos = mouth_pos
                    self.UpdateGaze(cur_eye_pos)

                # mirroring
                msg = pau()
                msg.m_coeffs = [ ]
                msg.m_shapekeys = [ ]

                if self.mirroring == Mirroring.EYEBROWS or self.mirroring == Mirroring.EYES or self.mirroring == Mirroring.MOUTH_EYEBROWS or self.mirroring == Mirroring.ALL:
                    # mirror eyebrows
                    left_brow = curface.left_brow
                    right_brow = curface.right_brow
                    msg.m_coeffs.append("brow_outer_UP.L")
                    msg.m_shapekeys.append(left_brow)
                    msg.m_coeffs.append("brow_inner_UP.L")
                    msg.m_shapekeys.append(left_brow * 0.8)
                    msg.m_coeffs.append("brow_outer_DN.L")
                    msg.m_shapekeys.append(1.0 - left_brow)
                    msg.m_coeffs.append("brow_outer_up.R")
                    msg.m_shapekeys.append(right_brow)
                    msg.m_coeffs.append("brow_inner_UP.R")
                    msg.m_shapekeys.append(right_brow * 0.8)
                    msg.m_coeffs.append("brow_outer_DN.R")
                    msg.m_shapekeys.append(1.0 - right_brow)

                if self.mirroring == Mirroring.EYELIDS or self.mirroring == Mirroring.EYES or self.mirroring == Mirroring.MOUTH_EYELIDS or self.mirroring == Mirroring.ALL:
                    # mirror eyelids
                    eyes_closed = ((1.0 - curface.left_eyelid) + (1.0 - curface.right_eyelid)) / 2.0
                    msg.m_coeffs.append("eye-blink.UP.R")
                    msg.m_shapekeys.append(eyes_closed)
                    msg.m_coeffs.append("eye-blink.UP.L")
                    msg.m_shapekeys.append(eyes_closed)
                    msg.m_coeffs.append("eye-blink.LO.R")
                    msg.m_shapekeys.append(eyes_closed)
                    msg.m_coeffs.append("eye-blink.LO.L")
                    msg.m_shapekeys.append(eyes_closed)

                if self.mirroring == Mirroring.MOUTH or self.mirroring == Mirroring.MOUTH_EYEBROWS or self.mirroring == Mirroring.MOUTH_EYELIDS:
                    # mirror mouth
                    mouth_open = curface.mouth_open
                    msg.m_coeffs.append("lip-JAW.DN")
                    msg.m_shapekeys.append(mouth_open)

                if self.mirroring != Mirroring.IDLE:
                    self.StartPauMode()
                    self.setpau_pub.publish(msg)


        # start random gestures
        self.gesture_counter -= 1
        if self.gesture_counter == 0:
            self.InitGestureCounter()

            if self.animations != None:

                # list all gestures that would fire right now according to probability
                firing = []
                for g in self.animations[self.current_gestures_name]:
                    if random.uniform(0.0,1.0) <= g["probability"]:
                        firing.append(g)

                # start randomly from that list
                if len(firing) > 0:
                    g = firing[random.randint(0,len(firing) - 1)]
                    msg = SetGesture()
                    msg.name = g["name"]
                    msg.repeat = False
                    msg.speed = random.uniform(g["speed_min"],g["speed_max"])
                    msg.magnitude = random.uniform(g["magnitude_min"],g["magnitude_max"])
                    self.gestures_pub.publish(msg)

        # start random expressions
        self.expression_counter -= 1
        if self.expression_counter == 0:
            self.InitExpressionCounter()

            if self.animations != None:

                # list all expressions that would fire right now according to probability
                firing = []
                for g in self.animations[self.current_expressions_name]:
                    if random.uniform(0.0,1.0) <= g["probability"]:
                        firing.append(g)

                # start randomly from that list
                if len(firing) > 0:
                    g = firing[random.randint(0,len(firing) - 1)]
                    msg = EmotionState()
                    msg.name = g["name"]
                    msg.magnitude = random.uniform(g["magnitude_min"],g["magnitude_max"])
                    msg.duration = rospy.Duration(random.uniform(g["duration_min"],g["duration_max"]))
                    self.expressions_pub.publish(msg)

        prune_before_time = ts - rospy.Duration.from_sec(self.keep_time)

        # flush faces dictionary, update current face accordingly
        to_be_removed = []
        for face in self.faces.values():
            if face.ts < prune_before_time:
                to_be_removed.append(face.cface_id)
        # remove the elements
        for key in to_be_removed:
            del self.faces[key]
            # make sure the selected face is always valid
            if self.current_face_id == key:
                self.SelectNextFace()
                
        # remove hand if it is too old
        if self.hand != None:
            if self.hand.ts < prune_before_time:
                self.hand = None

        # flush saliency dictionary
        to_be_removed = []
        for key in self.saliencies.keys():
            if key < prune_before_time:
                to_be_removed.append(key)
        # remove the elements
        for key in to_be_removed:
            del self.saliencies[key]
            # make sure the selected saliency is always valid
            if self.current_saliency_ts == key:
                self.SelectNextSaliency()

        # decay from FOCUSED to IDLE if hand was not seen for a while
        if self.state == State.FOCUSED and self.last_hand_ts < ts - rospy.Duration.from_sec(self.hand_state_decay):
            self.SetState(State.IDLE)
            self.UpdateStateDisplay()

        # decay from SPEAKING or LISTENING to IDLE
        if ((self.state == State.SPEAKING) or (self.state == State.LISTENING)) and self.last_talk_ts < ts - rospy.Duration.from_sec(self.face_state_decay):
            self.SetState(State.IDLE)
            self.UpdateStateDisplay()

        # have gaze or head follow head or gaze after a while
        if self.gaze_delay_counter > 0 and self.gaze_pos != None:

            self.gaze_delay_counter -= 1
            if self.gaze_delay_counter == 0:

                if self.gaze == Gaze.GAZE_LEADS_HEAD:
                    self.SetHeadFocus(self.gaze_pos,self.gaze_speed)
                    self.gaze_delay_counter = int(self.gaze_delay * self.synthesizer_rate)

                elif self.gaze == Gaze.HEAD_LEADS_GAZE:
                    self.SetGazeFocus(self.gaze_pos,self.gaze_speed)
                    self.gaze_delay_counter = int(self.gaze_delay * self.synthesizer_rate)


        # when speaking, sometimes look at all faces
        if self.state == State.SPEAKING:

            if self.lookat == LookAt.AVOID:

                self.all_faces_start_counter -= 1
                if self.all_faces_start_counter == 0:
                    self.InitAllFacesStartCounter()
                    self.SetLookAt(LookAt.ALL_FACES)
                    self.UpdateStateDisplay()

            elif self.lookat == LookAt.ALL_FACES:

                self.all_faces_duration_counter -= 1
                if self.all_faces_duration_counter == 0:
                    self.InitAllFacesDurationCounter()
                    self.SetLookAt(LookAt.AVOID)
                    self.UpdateStateDisplay()
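HandleTimer decrements several counters once per tick, so the Init*Counter helpers presumably convert an interval in seconds into a tick count using synthesizer_rate, the same conversion used for gaze_delay_counter above. A hedged sketch of that pattern; the gesture_interval_min/max attributes are assumptions, not names taken from the code:

    # Hedged sketch of a counter-initialization helper; gesture_interval_min
    # and gesture_interval_max are assumed configuration attributes.
    def InitGestureCounter(self):
        interval = random.uniform(self.gesture_interval_min,
                                  self.gesture_interval_max)
        # HandleTimer runs synthesizer_rate times per second, so convert
        # the interval in seconds into a number of timer ticks.
        self.gesture_counter = max(1, int(interval * self.synthesizer_rate))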