def callback(self, gaze_msg, voice_msg, person_msg):
    """Fuse synchronized gaze, voice and person messages into the monitor state.

    Per cycle this method:
      1. resets the per-cycle state (distances, speaking/look-at maps,
         proximity buckets), keeping the previous cycle's values in
         ``self.previous_*`` at the very end;
      2. PERSON: when active, finds the closest person (remapped to id 0),
         updates the recognition-id tables, creates/updates a camera node and
         a face mesh node per identified person from the tf transform;
      3. VOICE / GAZE: records who is speaking and who looks at whom;
      4. publishes gaze/voice/reactive attention points plus a prioritized
         ``TargetWithPriority``.

    :param gaze_msg: message whose ``.data`` holds per-person gaze estimates
    :param voice_msg: message whose ``.data`` holds per-person voice activity
    :param person_msg: message whose ``.data`` holds tracked person detections

    NOTE(review): the original file's indentation was lost; the nesting below
    is reconstructed from the statement order — confirm against upstream.
    """
    nodes_to_update = []  # scene nodes created/refreshed this cycle, pushed in one batch
    gaze_attention_point = None
    voice_attention_point = None
    reactive_attention_point = None
    min_person_id = None  # id of the closest perceived person, if any
    # Reset per-cycle state; previous values are snapshotted at the end.
    self.human_distances = {}
    self.human_speaking = []
    self.human_speakingto = {}
    self.human_lookat = {}
    self.human_perceived = []
    self.human_close = []
    self.human_near = []
    # PERSON
    if self.is_active:
        min_dist = 10000  # sentinel larger than any realistic head distance (meters assumed — TODO confirm)
        # First pass: find the closest person by head distance.
        if len(person_msg.data) > 0:
            for i, person in enumerate(person_msg.data):
                if person.head_distance < min_dist:
                    min_dist = person.head_distance
                    min_person_id = person.person_id
        # Second pass: process every person; the closest one is remapped to id 0
        # (mutates the message object in place).
        for i, person in enumerate(person_msg.data):
            if min_person_id == person.person_id:
                person.person_id = 0
            human_id = person.person_id
            # Count consecutive detections per human (first sighting counts as 0).
            if human_id not in self.nb_gaze_detected:
                self.nb_gaze_detected[human_id] = 0
            else:
                self.nb_gaze_detected[human_id] += 1
            # Maintain forward and inverse alias tables between the current id
            # and the recognizer's alternate ids.
            self.reco_id_table["human-"+str(person.person_id)] = ["human-"+str(person_id) for person_id in person.alternate_ids]
            for person_id in person.alternate_ids:
                if person_id not in self.inv_reco_id_table:
                    self.inv_reco_id_table[person_id] = []
                if "human-"+str(person.person_id) not in self.inv_reco_id_table[person_id]:
                    self.inv_reco_id_table[person_id].append("human-"+str(person.person_id))
            # NOTE(review): `is_identified` is compared against MIN_NB_DETECTION,
            # so it presumably carries a detection count, not a boolean — confirm.
            if person.is_identified > MIN_NB_DETECTION:
                try:
                    # Camera (point-of-view) node for this human; reuse its id when known.
                    new_node = self.create_human_pov(human_id)
                    if human_id in self.human_cameras_ids:
                        new_node.id = self.human_cameras_ids[human_id]
                    # Head pose in the sensor frame.
                    t = [person.head_pose.position.x, person.head_pose.position.y, person.head_pose.position.z]
                    q = [person.head_pose.orientation.x, person.head_pose.orientation.y, person.head_pose.orientation.z, person.head_pose.orientation.w]
                    dist = math.sqrt(t[0] * t[0] + t[1] * t[1] + t[2] * t[2])  # distance from sensor origin
                    # Sensor frame -> reference frame transform from tf2.
                    msg = self.tf_buffer.lookup_transform(self.reference_frame, person_msg.header.frame_id, rospy.Time())
                    trans = [msg.transform.translation.x, msg.transform.translation.y, msg.transform.translation.z]
                    rot = [msg.transform.rotation.x, msg.transform.rotation.y, msg.transform.rotation.z, msg.transform.rotation.w]
                    # Fixed rotation offset applied to align the camera node's axes.
                    offset = euler_matrix(0, math.radians(90), math.radians(90), "rxyz")
                    transform = numpy.dot(transformation_matrix(trans, rot), transformation_matrix(t, q))
                    new_node.transformation = numpy.dot(transform, offset)
                    self.publish_perception_frame(person, person_msg.header)
                    # The closest person (id 0) provides the gaze attention point.
                    if person.person_id == 0:
                        gaze_attention_point = PointStamped()
                        gaze_attention_point.header.frame_id = "human_head_gaze-"+str(person.person_id)
                        gaze_attention_point.header.stamp = rospy.Time.now()
                        gaze_attention_point.point = Point(0, 0, 0)
                    self.human_distances["human-"+str(human_id)] = dist
                    self.human_cameras_ids[human_id] = new_node.id
                    nodes_to_update.append(new_node)
                    self.human_perceived.append("human-"+str(human_id))
                    if human_id not in self.human_bodies:
                        self.human_bodies[human_id] = {}
                    if "face" not in self.human_bodies[human_id]:
                        # First time: create the face mesh node under the camera node.
                        # (rebinds `new_node`; the camera node is already appended above)
                        new_node = Mesh(name="human_face-"+str(human_id))
                        new_node.properties["mesh_ids"] = self.human_meshes["face"]
                        new_node.properties["aabb"] = self.human_aabb["face"]
                        new_node.parent = self.human_cameras_ids[human_id]
                        offset = euler_matrix(math.radians(90), math.radians(0), math.radians(90), 'rxyz')
                        new_node.transformation = numpy.dot(new_node.transformation, offset)
                        self.human_bodies[human_id]["face"] = new_node.id
                        nodes_to_update.append(new_node)
                    else:
                        # Face node already registered: if the scene still holds it as a
                        # bare ENTITY, re-create the mesh node under the same id.
                        node_id = self.human_bodies[human_id]["face"]
                        try:
                            if self.target.scene.nodes[node_id].type == ENTITY:
                                new_node = Mesh(name="human_face-" + str(human_id))
                                new_node.properties["mesh_ids"] = self.human_meshes["face"]
                                new_node.properties["aabb"] = self.human_aabb["face"]
                                new_node.parent = self.human_cameras_ids[human_id]
                                offset = euler_matrix(math.radians(90), math.radians(0), math.radians(90), 'rxyz')
                                new_node.transformation = numpy.dot(new_node.transformation, offset)
                                new_node.id = node_id
                                nodes_to_update.append(new_node)
                        except Exception as e:
                            # NOTE(review): swallows all errors from the scene lookup;
                            # consider at least logging `e`.
                            pass
                except (tf2_ros.TransformException, tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as e:
                    # tf not available for this frame yet: skip this person this cycle.
                    rospy.logwarn("[multimodal_human_monitor] Exception occurred : %s" % str(e))
    # VOICE
    min_dist = 10000  # reused sentinel: now tracks the closest *speaking* human
    for j, voice in enumerate(voice_msg.data):
        # Apply the same closest-person -> id 0 remapping as above.
        if min_person_id is not None:
            if voice.person_id == min_person_id:
                voice.person_id = 0
        if voice.person_id in self.human_cameras_ids:
            if voice.is_speaking:
                try:
                    self.human_speaking.append("human-" + str(voice.person_id))
                    # The closest speaking human provides the voice attention point.
                    if "human-" + str(voice.person_id) in self.human_distances:
                        if self.human_distances["human-" + str(voice.person_id)] < min_dist:
                            min_dist = self.human_distances["human-" + str(voice.person_id)]
                            voice_attention_point = PointStamped()
                            voice_attention_point.header.frame_id = "human_head_gaze-" + str(voice.person_id)
                            voice_attention_point.point = Point(0, 0, 0)
                except:
                    # NOTE(review): bare except silently drops any failure here;
                    # narrow it to the expected exception and/or log it.
                    pass
    #GAZE
    for j, gaze in enumerate(gaze_msg.data):
        # Same closest-person -> id 0 remapping.
        if min_person_id is not None:
            if gaze.person_id == min_person_id:
                gaze.person_id = 0
        if gaze.person_id in self.human_cameras_ids:
            if gaze.probability_looking_at_robot > LOOK_AT_THRESHOLD:
                self.human_lookat["human-" + str(gaze.person_id)] = "robot"
            else:
                # NOTE(review): looking at the *screen* is also recorded as "robot" —
                # possibly intentional (screen mounted on the robot?); confirm.
                if gaze.probability_looking_at_screen > LOOK_AT_THRESHOLD:
                    self.human_lookat["human-" + str(gaze.person_id)] = "robot"
                else:
                    # Otherwise record the most likely human-to-human look-at target.
                    for attention in gaze.attentions:
                        if attention.target_id in self.human_cameras_ids:
                            if attention.probability_looking_at_target > LOOK_AT_THRESHOLD:
                                self.human_lookat["human-"+str(gaze.person_id)] = "human-"+str(attention.target_id)
    # Bucket perceived humans by proximity.
    for human, dist in self.human_distances.items():
        if dist < NEAR_MAX_DISTANCE:
            if dist < CLOSE_MAX_DISTANCE:
                self.human_close.append(human)
            else:
                self.human_near.append(human)
    # if min_person_id is not None:
    #     # update
    #     if "human-"+str(min_person_id) in self.human_close:
    #         self.human_close.append("human-0")
    #         self.human_close.remove("human-" + str(min_person_id))
    #
    #     if "human-"+str(min_person_id) in self.human_perceived:
    #         self.human_perceived.append("human-0")
    #         self.human_perceived.remove("human-" + str(min_person_id))
    #
    #     if "human-"+str(min_person_id) in self.human_near:
    #         self.human_near.append("human-0")
    #         self.human_near.remove("human-"+str(min_person_id))
    #
    #     if "human-"+str(min_person_id) in self.human_speaking:
    #         self.human_speaking.append("human-0")
    #         self.human_speaking.remove("human-" + str(min_person_id))
    #
    #     if "human-" + str(min_person_id) in self.human_lookat:
    #         self.human_lookat["human-0"] = self.human_lookat["human-" + str(min_person_id)]
    #         del self.human_lookat["human-" + str(min_person_id)]
    #
    #     for node in nodes_to_update:
    #         if node.name == "human-"+str(min_person_id) or node.name == "human_face-"+str(min_person_id):
    #             node.name = "human-0"
    #             self.human_cameras_ids["human-0"] = node.id
    #
    #     for i, person in enumerate(person_msg.data):
    #         if person.person_id == min_person_id:
    #             person.person_id = 0
    #             self.publish_perception_frame(person, person_msg.header)
    # computing speaking to
    # A speaker is "speaking to" whatever they are currently looking at.
    for human in self.human_speaking:
        if human in self.human_lookat:
            self.human_speakingto[human] = self.human_lookat[human]
    # Push all node changes to the scene in one batch, then recompute situations.
    if nodes_to_update:
        self.target.scene.nodes.update(nodes_to_update)
    self.compute_situations()
    priority = MONITORING_DEFAULT_PRIORITY
    if gaze_attention_point:
        # If an active isLookingAt(...) situation exists for this frame, redirect
        # the attention point to the looked-at object (joint attention).
        sit_regex = "^isLookingAt\(%s" % gaze_attention_point.header.frame_id
        for sit in self.current_situations_map.values():
            if re.match(sit_regex, sit.desc):
                if sit.endtime == 0:  # situation still ongoing
                    #if time.time() - sit.starttime > JOINT_ATTENTION_MIN_DURATION:
                    _,_,obj = self.parse_situation_desc(sit.desc)
                    if obj != "screen" and obj != "robot":
                        gaze_attention_point.header.frame_id = obj
                        gaze_attention_point.point = Point()
                        priority = JOINT_ATTENTION_PRIORITY
        self.ros_pub["gaze"].publish(gaze_attention_point)
        reactive_attention_point = gaze_attention_point
    # Voice wins over gaze as the reactive attention point.
    if voice_attention_point:
        self.ros_pub["voice"].publish(voice_attention_point)
        reactive_attention_point = voice_attention_point
        priority = VOICE_ATTENTION_PRIORITY
    if reactive_attention_point:
        #rospy.logwarn("TEST")
        self.ros_pub["reactive"].publish(reactive_attention_point)
    if reactive_attention_point:
        target_with_priority = TargetWithPriority()
        target_with_priority.target = reactive_attention_point
        target_with_priority.priority = priority
        self.ros_pub["monitoring_attention_point"].publish(target_with_priority)
    # Snapshot this cycle's state for the next cycle.
    # NOTE(review): these are aliases, not copies — safe only because the
    # self.human_* containers are rebound (not mutated) at the next call.
    self.previous_human_perceived = self.human_perceived
    self.previous_human_speaking = self.human_speaking
    self.previous_human_speakingto = self.human_speakingto
    self.previous_human_lookat = self.human_lookat
    self.previous_human_close = self.human_close
    self.previous_human_near = self.human_near
def callbackGaze(self, msg):
    """Update the scene from a gaze-tracking message.

    For each tracked gaze: count detections, time how long the tracker id
    matches the person id (recognition duration bookkeeping), and — once the
    head gaze is available and seen often enough — place/update a human
    point-of-view camera node and its face mesh node from the tf transform.
    All touched nodes are pushed to the scene in one batch at the end.

    :param msg: message whose ``.data`` holds per-person gaze estimates

    NOTE(review): the original file's indentation was lost; the nesting below
    is reconstructed from the statement order — confirm against upstream.
    """
    nodes_to_update = []  # scene nodes created/refreshed this cycle
    if msg.data:
        for i, gaze in enumerate(msg.data):
            human_id = gaze.person_id
            track_id = gaze.track_id
            # Count consecutive detections per human (first sighting counts as 0).
            if human_id not in self.nb_gaze_detected:
                self.nb_gaze_detected[human_id] = 0
            else:
                self.nb_gaze_detected[human_id] += 1
            # Recognition-duration state machine: start timing while the tracker
            # id matches the person id; on divergence, record the elapsed time once.
            if track_id == human_id:
                self.detection_time = time.time()
                self.record_time = True
            else:
                if self.record_time:
                    self.reco_durations.append(time.time() - self.detection_time)
                self.record_time = False
            # Only place a node once the gaze is usable and seen often enough.
            if gaze.head_gaze_available and self.nb_gaze_detected[human_id] > MIN_NB_DETECTION:
                # Camera (point-of-view) node for this human; reuse its id when known.
                new_node = self.create_human_pov(human_id)
                if human_id in self.human_cameras_ids:
                    new_node.id = self.human_cameras_ids[human_id]
                else:
                    self.human_cameras_ids[human_id] = new_node.id
                # Head gaze pose in the sensor frame.
                t = [gaze.head_gaze.position.x, gaze.head_gaze.position.y, gaze.head_gaze.position.z]
                q = [gaze.head_gaze.orientation.x, gaze.head_gaze.orientation.y, gaze.head_gaze.orientation.z, gaze.head_gaze.orientation.w]
                # Too close to the sensor: likely spurious, skip this detection.
                if math.sqrt(t[0]*t[0]+t[1]*t[1]+t[2]*t[2]) < MIN_DIST_DETECTION:
                    continue
                # Sensor frame -> reference frame, composed with the head pose,
                # plus a fixed rotation offset to align the camera node's axes.
                (trans, rot) = self.lookupTransform(self.reference_frame, msg.header.frame_id, rospy.Time(0))
                offset = euler_matrix(0, math.radians(90), math.radians(90), 'rxyz')
                transform = numpy.dot(transformation_matrix(trans, rot), transformation_matrix(t, q))
                new_node.transformation = numpy.dot(transform, offset)
                # Reject detections above MAX_HEIGHT (z in the reference frame).
                if translation_from_matrix(new_node.transformation)[2] > MAX_HEIGHT:
                    continue
                self.added_human_id.append(human_id)
                nodes_to_update.append(new_node)
                if human_id not in self.human_bodies:
                    self.human_bodies[human_id] = {}
                if "face" not in self.human_bodies[human_id]:
                    # First time: create the face mesh node under the camera node.
                    # (rebinds `new_node`; the camera node is already appended above)
                    new_node = Mesh(name="human_face-"+str(human_id))
                    new_node.properties["mesh_ids"] = self.human_meshes["face"]
                    new_node.properties["aabb"] = self.human_aabb["face"]
                    new_node.parent = self.human_cameras_ids[human_id]
                    offset = euler_matrix(math.radians(90), math.radians(0), math.radians(90), 'rxyz')
                    new_node.transformation = numpy.dot(new_node.transformation, offset)
                    self.human_bodies[human_id]["face"] = new_node.id
                    nodes_to_update.append(new_node)
                #if gaze.probability_looking_at_robot >= LOOK_AT_THRESHOLD:
                #    self.target.timeline.start(Situation(desc="lookat(human-%s,%s)" % (str(gaze.person_id), self.robot_name)))
    # Push all node changes to the scene in one batch.
    if nodes_to_update:
        self.target.scene.nodes.update(nodes_to_update)