def create(radius, diffuse=(1, 1, 1, 1)):
    """Build a sphere mesh of the given radius, centered at the origin.

    The unit-sphere template vertices are scaled by ``radius``; faces and
    normals are reused from the template.

    :param radius: sphere radius used to scale the template vertices
    :param diffuse: RGBA diffuse colour forwarded to the Mesh
    :return: the Mesh, with its ``aabb`` attribute set
    """
    scaled_vertices = []
    for vertex in Sphere.vertices:
        scaled_vertices.append([coord * radius for coord in vertex])
    mesh = Mesh(scaled_vertices, Sphere.faces, Sphere.normals, diffuse)
    # NOTE(review): the AABB half-extent is 2*radius, i.e. twice the scaled
    # vertex extent — looks oversized, but kept as-is (behaviour preserved).
    extent = 2 * radius
    mesh.aabb = ((extent, extent, extent), (-extent, -extent, -extent))
    return mesh
def create(radius, diffuse=(1, 1, 1, 1)):
    """
    Creates a sphere of given radius.
    Origin at the center of the sphere.
    """
    def _scale(vertex):
        # Scale one template vertex of the unit sphere by the radius.
        return [radius * c for c in vertex]

    mesh = Mesh(list(map(_scale, Sphere.vertices)), Sphere.faces, Sphere.normals, diffuse)
    # NOTE(review): half-extent of 2*radius is twice the vertex extent —
    # preserved unchanged from the original.
    upper = (2 * radius, 2 * radius, 2 * radius)
    lower = tuple(-e for e in upper)
    mesh.aabb = (upper, lower)
    return mesh
def create(sizex, sizey, sizez, diffuse=(1, 1, 1, 1)):
    """
    Creates a box, centered around the origin.
    To create a cube, set sizex=sizey=sizez.
    """
    # Work with half-extents: the template vertices span [-1, 1] per axis.
    half_extents = (float(sizex) / 2, float(sizey) / 2, float(sizez) / 2)
    scaled = [[c * h for c, h in zip(vertex, half_extents)] for vertex in Box.vertices]
    mesh = Mesh(scaled, Box.faces, Box.normals, diffuse)
    mesh.aabb = (half_extents, tuple(-h for h in half_extents))
    return mesh
def create(sizex, sizey, sizez, diffuse=(1, 1, 1, 1)):
    """Create an axis-aligned box mesh centered on the origin.

    Pass equal extents for a cube.

    :param sizex: full extent along x
    :param sizey: full extent along y
    :param sizez: full extent along z
    :param diffuse: RGBA diffuse colour forwarded to the Mesh
    :return: the Mesh, with its ``aabb`` attribute set to the half-extents
    """
    hx = float(sizex) / 2
    hy = float(sizey) / 2
    hz = float(sizez) / 2
    vertices = []
    for unit in Box.vertices:
        vertices.append([unit[0] * hx, unit[1] * hy, unit[2] * hz])
    mesh = Mesh(vertices, Box.faces, Box.normals, diffuse)
    mesh.aabb = ((hx, hy, hz), (-hx, -hy, -hz))
    return mesh
def create_object(self, frame):
    """Build a Mesh node for the AR object registered under *frame*.

    Loads the object's mesh file through ModelLoader and copies the
    mesh ids / AABB of the loaded MESH node onto a fresh Mesh node.

    :param frame: key into ``self.ar_objects``
    :return: the populated Mesh node, or None if the frame is unknown
             or loading raised an exception
    """
    # NOTE(review): indentation reconstructed from a collapsed source line;
    # `return node` is placed after the mesh loop — verify against original.
    if frame in self.ar_objects:
        node = Mesh(name=self.ar_objects[frame]["name"])
        try:
            nodes_loaded = ModelLoader().load(
                self.mesh_dir + self.ar_objects[frame]["mesh"],
                self.ctx,
                world=self.output_world_name,
                root=None,
                only_meshes=True,
                scale=self.ar_objects[frame]["scale"])
            # Copy geometry properties from the loaded MESH node(s); if
            # several match, the last one wins.
            for n in nodes_loaded:
                if n.type == MESH:
                    node.properties["mesh_ids"] = n.properties["mesh_ids"]
                    node.properties["aabb"] = n.properties["aabb"]
            return node
        except Exception as e:
            # Best-effort: log and fall through to None so one bad model
            # does not kill the provider.
            rospy.logwarn(
                "[env_provider] Exception occurred with %s : %s"
                % (self.ar_objects[frame]["name"], str(e)))
    return None
def mesh(self, id):
    """Fetch the mesh with the given id over RPC and deserialize it."""
    request = gRPC.MeshInContext(client=gRPC.Client(id=self.id),
                                 mesh=gRPC.Mesh(id=id))
    serialized = self.rpc.getMesh(request, _TIMEOUT_SECONDS_MESH_LOADING)
    return Mesh.deserialize(serialized)
def callback(self, gaze_msg, voice_msg, person_msg):
    """Fuse synchronized person/voice/gaze detections into scene updates.

    Rebuilds the per-cycle human state (distances, speaking, look-at,
    proximity), updates the target scene nodes, and publishes gaze /
    voice / reactive attention points with a priority.

    NOTE(review): indentation reconstructed from a collapsed source —
    confirm block nesting (especially which sections sit under
    ``self.is_active``) against the original file.
    """
    nodes_to_update = []
    gaze_attention_point = None
    voice_attention_point = None
    reactive_attention_point = None
    min_person_id = None
    # Reset the per-cycle human state; previous values are archived at the end.
    self.human_distances = {}
    self.human_speaking = []
    self.human_speakingto = {}
    self.human_lookat = {}
    self.human_perceived = []
    self.human_close = []
    self.human_near = []
    # PERSON
    if self.is_active:
        min_dist = 10000
        if len(person_msg.data) > 0:
            # First pass: find the closest person (by head distance).
            for i, person in enumerate(person_msg.data):
                if person.head_distance < min_dist:
                    min_dist = person.head_distance
                    min_person_id = person.person_id
            for i, person in enumerate(person_msg.data):
                # The closest person is re-mapped to the canonical id 0.
                if min_person_id == person.person_id:
                    person.person_id = 0
                human_id = person.person_id
                # Count consecutive detections per human.
                if human_id not in self.nb_gaze_detected:
                    self.nb_gaze_detected[human_id] = 0
                else:
                    self.nb_gaze_detected[human_id] += 1
                # Maintain the forward and inverse recognition-id tables.
                self.reco_id_table["human-"+str(person.person_id)] = ["human-"+str(person_id) for person_id in person.alternate_ids]
                for person_id in person.alternate_ids:
                    if person_id not in self.inv_reco_id_table:
                        self.inv_reco_id_table[person_id] = []
                    if "human-"+str(person.person_id) not in self.inv_reco_id_table[person_id]:
                        self.inv_reco_id_table[person_id].append("human-"+str(person.person_id))
                if person.is_identified > MIN_NB_DETECTION:
                    try:
                        # Camera node representing the human's point of view.
                        new_node = self.create_human_pov(human_id)
                        if human_id in self.human_cameras_ids:
                            new_node.id = self.human_cameras_ids[human_id]
                        t = [person.head_pose.position.x, person.head_pose.position.y, person.head_pose.position.z]
                        q = [person.head_pose.orientation.x, person.head_pose.orientation.y, person.head_pose.orientation.z, person.head_pose.orientation.w]
                        dist = math.sqrt(t[0] * t[0] + t[1] * t[1] + t[2] * t[2])
                        # Express the head pose in the reference frame.
                        msg = self.tf_buffer.lookup_transform(self.reference_frame, person_msg.header.frame_id, rospy.Time())
                        trans = [msg.transform.translation.x, msg.transform.translation.y, msg.transform.translation.z]
                        rot = [msg.transform.rotation.x, msg.transform.rotation.y, msg.transform.rotation.z, msg.transform.rotation.w]
                        # Axis correction between the sensor and camera conventions.
                        offset = euler_matrix(0, math.radians(90), math.radians(90), "rxyz")
                        transform = numpy.dot(transformation_matrix(trans, rot), transformation_matrix(t, q))
                        new_node.transformation = numpy.dot(transform, offset)
                        self.publish_perception_frame(person, person_msg.header)
                        # Only the closest person (id 0) drives the gaze attention point.
                        if person.person_id == 0:
                            gaze_attention_point = PointStamped()
                            gaze_attention_point.header.frame_id = "human_head_gaze-"+str(person.person_id)
                            gaze_attention_point.header.stamp = rospy.Time.now()
                            gaze_attention_point.point = Point(0, 0, 0)
                        self.human_distances["human-"+str(human_id)] = dist
                        self.human_cameras_ids[human_id] = new_node.id
                        nodes_to_update.append(new_node)
                        self.human_perceived.append("human-"+str(human_id))
                        if human_id not in self.human_bodies:
                            self.human_bodies[human_id] = {}
                        if "face" not in self.human_bodies[human_id]:
                            # First sighting: attach a face mesh under the POV camera.
                            new_node = Mesh(name="human_face-"+str(human_id))
                            new_node.properties["mesh_ids"] = self.human_meshes["face"]
                            new_node.properties["aabb"] = self.human_aabb["face"]
                            new_node.parent = self.human_cameras_ids[human_id]
                            offset = euler_matrix(math.radians(90), math.radians(0), math.radians(90), 'rxyz')
                            new_node.transformation = numpy.dot(new_node.transformation, offset)
                            self.human_bodies[human_id]["face"] = new_node.id
                            nodes_to_update.append(new_node)
                        else:
                            # Face node already known: if it degraded to a plain
                            # ENTITY in the target scene, re-create it as a Mesh
                            # under the same node id.
                            node_id = self.human_bodies[human_id]["face"]
                            try:
                                if self.target.scene.nodes[node_id].type == ENTITY:
                                    new_node = Mesh(name="human_face-" + str(human_id))
                                    new_node.properties["mesh_ids"] = self.human_meshes["face"]
                                    new_node.properties["aabb"] = self.human_aabb["face"]
                                    new_node.parent = self.human_cameras_ids[human_id]
                                    offset = euler_matrix(math.radians(90), math.radians(0), math.radians(90), 'rxyz')
                                    new_node.transformation = numpy.dot(new_node.transformation, offset)
                                    new_node.id = node_id
                                    nodes_to_update.append(new_node)
                            except Exception as e:
                                # Best-effort: node may be missing from the scene.
                                pass
                    except (tf2_ros.TransformException, tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as e:
                        rospy.logwarn("[multimodal_human_monitor] Exception occurred : %s" % str(e))
    # VOICE
    min_dist = 10000
    for j, voice in enumerate(voice_msg.data):
        if min_person_id is not None:
            if voice.person_id == min_person_id:
                voice.person_id = 0
        if voice.person_id in self.human_cameras_ids:
            if voice.is_speaking:
                try:
                    self.human_speaking.append("human-" + str(voice.person_id))
                    # The closest speaking human provides the voice attention point.
                    if "human-" + str(voice.person_id) in self.human_distances:
                        if self.human_distances["human-" + str(voice.person_id)] < min_dist:
                            min_dist = self.human_distances["human-" + str(voice.person_id)]
                            voice_attention_point = PointStamped()
                            voice_attention_point.header.frame_id = "human_head_gaze-" + str(voice.person_id)
                            voice_attention_point.point = Point(0, 0, 0)
                except:
                    # NOTE(review): bare except kept from the original (best-effort).
                    pass
    # GAZE
    for j, gaze in enumerate(gaze_msg.data):
        if min_person_id is not None:
            if gaze.person_id == min_person_id:
                gaze.person_id = 0
        if gaze.person_id in self.human_cameras_ids:
            if gaze.probability_looking_at_robot > LOOK_AT_THRESHOLD:
                self.human_lookat["human-" + str(gaze.person_id)] = "robot"
            else:
                # NOTE(review): looking at the screen is also recorded as
                # "robot" — looks intentional but worth confirming.
                if gaze.probability_looking_at_screen > LOOK_AT_THRESHOLD:
                    self.human_lookat["human-" + str(gaze.person_id)] = "robot"
                else:
                    for attention in gaze.attentions:
                        if attention.target_id in self.human_cameras_ids:
                            if attention.probability_looking_at_target > LOOK_AT_THRESHOLD:
                                self.human_lookat["human-"+str(gaze.person_id)] = "human-"+str(attention.target_id)
    # Bucket humans by distance.
    for human, dist in self.human_distances.items():
        if dist < NEAR_MAX_DISTANCE:
            if dist < CLOSE_MAX_DISTANCE:
                self.human_close.append(human)
            else:
                self.human_near.append(human)
    # if min_person_id is not None:
    #     # update
    #     if "human-"+str(min_person_id) in self.human_close:
    #         self.human_close.append("human-0")
    #         self.human_close.remove("human-" + str(min_person_id))
    #
    #     if "human-"+str(min_person_id) in self.human_perceived:
    #         self.human_perceived.append("human-0")
    #         self.human_perceived.remove("human-" + str(min_person_id))
    #
    #     if "human-"+str(min_person_id) in self.human_near:
    #         self.human_near.append("human-0")
    #         self.human_near.remove("human-"+str(min_person_id))
    #
    #     if "human-"+str(min_person_id) in self.human_speaking:
    #         self.human_speaking.append("human-0")
    #         self.human_speaking.remove("human-" + str(min_person_id))
    #
    #     if "human-" + str(min_person_id) in self.human_lookat:
    #         self.human_lookat["human-0"] = self.human_lookat["human-" + str(min_person_id)]
    #         del self.human_lookat["human-" + str(min_person_id)]
    #
    #     for node in nodes_to_update:
    #         if node.name == "human-"+str(min_person_id) or node.name == "human_face-"+str(min_person_id):
    #             node.name = "human-0"
    #             self.human_cameras_ids["human-0"] = node.id
    #
    #     for i, person in enumerate(person_msg.data):
    #         if person.person_id == min_person_id:
    #             person.person_id = 0
    #             self.publish_perception_frame(person, person_msg.header)
    # computing speaking to
    for human in self.human_speaking:
        if human in self.human_lookat:
            self.human_speakingto[human] = self.human_lookat[human]
    if nodes_to_update:
        self.target.scene.nodes.update(nodes_to_update)
    self.compute_situations()
    priority = MONITORING_DEFAULT_PRIORITY
    if gaze_attention_point:
        # If an isLookingAt situation is ongoing toward a real object,
        # redirect the gaze attention point to that object and raise priority.
        sit_regex = "^isLookingAt\(%s" % gaze_attention_point.header.frame_id
        for sit in self.current_situations_map.values():
            if re.match(sit_regex, sit.desc):
                if sit.endtime == 0:
                    #if time.time() - sit.starttime > JOINT_ATTENTION_MIN_DURATION:
                    _,_,obj = self.parse_situation_desc(sit.desc)
                    if obj != "screen" and obj != "robot":
                        gaze_attention_point.header.frame_id = obj
                        gaze_attention_point.point = Point()
                        priority = JOINT_ATTENTION_PRIORITY
        self.ros_pub["gaze"].publish(gaze_attention_point)
        reactive_attention_point = gaze_attention_point
    if voice_attention_point:
        # Voice wins over gaze as the reactive attention point.
        self.ros_pub["voice"].publish(voice_attention_point)
        reactive_attention_point = voice_attention_point
        priority = VOICE_ATTENTION_PRIORITY
    if reactive_attention_point:
        #rospy.logwarn("TEST")
        self.ros_pub["reactive"].publish(reactive_attention_point)
    if reactive_attention_point:
        target_with_priority = TargetWithPriority()
        target_with_priority.target = reactive_attention_point
        target_with_priority.priority = priority
        self.ros_pub["monitoring_attention_point"].publish(target_with_priority)
    # Archive this cycle's state for the next callback.
    self.previous_human_perceived = self.human_perceived
    self.previous_human_speaking = self.human_speaking
    self.previous_human_speakingto = self.human_speakingto
    self.previous_human_lookat = self.human_lookat
    self.previous_human_close = self.human_close
    self.previous_human_near = self.human_near
def monitor_robot(self):
    """Read the robot frames from /tf and update the robot's camera and
    3D model nodes in the output world.

    Nodes are only pushed to the target scene when their transform changed
    by more than EPSILON since the last update (cached in
    ``self.frames_transform``). Any tf lookup failure aborts the whole
    cycle silently (best-effort monitoring).

    NOTE(review): indentation reconstructed from a collapsed source —
    confirm block nesting against the original file.
    @return : None
    """
    try:
        nodes_to_update = []
        # Robot perspective camera node.
        node = Camera(name=self.robot_name)
        node.properties["clipplanenear"] = DEFAULT_CLIP_PLANE_NEAR
        node.properties["clipplanefar"] = DEFAULT_CLIP_PLANE_FAR
        node.properties["horizontalfov"] = math.radians(DEFAULT_HORIZONTAL_FOV)
        node.properties["aspect"] = DEFAULT_ASPECT
        msg = self.tfBuffer.lookup_transform(self.reference_frame, self.perspective_frame, rospy.Time(0))
        trans = [msg.transform.translation.x, msg.transform.translation.y, msg.transform.translation.z]
        rot = [msg.transform.rotation.x, msg.transform.rotation.y, msg.transform.rotation.z, msg.transform.rotation.w]
        transform = transformation_matrix(trans, rot)
        # Apply the fixed camera-axis correction.
        node.transformation = numpy.dot(transform, self.cam_rot)
        if node.name in self.already_created_node_ids:
            # Known node: reuse its id, push only if the pose moved.
            node.id = self.already_created_node_ids[node.name]
            if not numpy.allclose(self.frames_transform[node.name], node.transformation, rtol=0, atol=EPSILON):
                self.frames_transform[node.name] = node.transformation
                nodes_to_update.append(node)
        else:
            # New node: register id and pose, always push.
            self.already_created_node_ids[node.name] = node.id
            self.frames_transform[node.name] = node.transformation
            nodes_to_update.append(node)
        # One Mesh node per robot model frame, parented to the camera node.
        for frame in self.model_map:
            node = Mesh(name=frame)
            node.properties["mesh_ids"] = [mesh_id for mesh_id in self.model_map[frame]]
            node.properties["aabb"] = self.aabb_map[frame]
            msg = self.tfBuffer.lookup_transform(self.perspective_frame, frame, rospy.Time(0))
            trans = [msg.transform.translation.x, msg.transform.translation.y, msg.transform.translation.z]
            rot = [msg.transform.rotation.x, msg.transform.rotation.y, msg.transform.rotation.z, msg.transform.rotation.w]
            node.transformation = transformation_matrix(trans, rot)
            node.parent = self.already_created_node_ids[self.robot_name]
            if node.name in self.already_created_node_ids:
                node.id = self.already_created_node_ids[frame]
                if not numpy.allclose(self.frames_transform[node.name], node.transformation, rtol=0, atol=EPSILON):
                    self.frames_transform[node.name] = node.transformation
                    nodes_to_update.append(node)
            else:
                self.already_created_node_ids[node.name] = node.id
                self.frames_transform[node.name] = node.transformation
                nodes_to_update.append(node)
        # Mirror the remaining source-scene nodes into the target scene.
        for node in self.source.scene.nodes:
            if node != self.source.scene.rootnode:
                new_node = node.copy()
                if node.id in self.node_mapping:
                    new_node.id = self.node_mapping[node.id]
                    if new_node in self.target.scene.nodes:
                        if not numpy.allclose(self.target.scene.nodes[new_node.id].transformation, node.transformation, rtol=0, atol=EPSILON):
                            # NOTE(review): appends `node` here but `new_node`
                            # in the first-sighting branch — confirm this
                            # asymmetry is intentional.
                            nodes_to_update.append(node)
                else:
                    self.node_mapping[node.id] = new_node.id
                    self.frames_transform[new_node.name] = new_node.transformation
                    nodes_to_update.append(new_node)
        if nodes_to_update:
            self.target.scene.nodes.update(nodes_to_update)
            self.previous_nodes_to_update = nodes_to_update
    except (tf2_ros.TransformException, tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
        # Best-effort: a missing transform just skips this monitoring cycle.
        pass
def callbackGaze(self, msg):
    """Process a gaze-detection message: create/update a POV camera node and
    a face mesh for each sufficiently-detected human.

    Detections are filtered out when the head is closer than
    MIN_DIST_DETECTION or ends up above MAX_HEIGHT in the reference frame.
    Also tracks re-identification timing in ``self.reco_durations``.

    NOTE(review): indentation reconstructed from a collapsed source —
    confirm block nesting against the original file.
    """
    nodes_to_update = []
    if msg.data:
        for i, gaze in enumerate(msg.data):
            human_id = gaze.person_id
            track_id = gaze.track_id
            # Count detections per human; POV is only created after
            # MIN_NB_DETECTION consecutive sightings.
            if human_id not in self.nb_gaze_detected:
                self.nb_gaze_detected[human_id] = 0
            else:
                self.nb_gaze_detected[human_id] += 1
            if track_id == human_id:
                # Tracker agrees with identity: start timing the recognition.
                self.detection_time = time.time()
                self.record_time = True
            else:
                if self.record_time:
                    self.reco_durations.append(time.time() - self.detection_time)
                self.record_time = False
            if gaze.head_gaze_available and self.nb_gaze_detected[human_id] > MIN_NB_DETECTION:
                new_node = self.create_human_pov(human_id)
                if human_id in self.human_cameras_ids:
                    new_node.id = self.human_cameras_ids[human_id]
                else:
                    self.human_cameras_ids[human_id] = new_node.id
                t = [gaze.head_gaze.position.x, gaze.head_gaze.position.y, gaze.head_gaze.position.z]
                q = [gaze.head_gaze.orientation.x, gaze.head_gaze.orientation.y, gaze.head_gaze.orientation.z, gaze.head_gaze.orientation.w]
                # Discard implausibly close detections.
                if math.sqrt(t[0]*t[0]+t[1]*t[1]+t[2]*t[2]) < MIN_DIST_DETECTION:
                    continue
                (trans, rot) = self.lookupTransform(self.reference_frame, msg.header.frame_id, rospy.Time(0))
                # Axis correction between the sensor and camera conventions.
                offset = euler_matrix(0, math.radians(90), math.radians(90), 'rxyz')
                transform = numpy.dot(transformation_matrix(trans, rot), transformation_matrix(t, q))
                new_node.transformation = numpy.dot(transform, offset)
                # Discard detections above the plausible head height.
                if translation_from_matrix(new_node.transformation)[2] > MAX_HEIGHT:
                    continue
                self.added_human_id.append(human_id)
                nodes_to_update.append(new_node)
                if human_id not in self.human_bodies:
                    self.human_bodies[human_id] = {}
                if "face" not in self.human_bodies[human_id]:
                    # First sighting: attach a face mesh under the POV camera.
                    new_node = Mesh(name="human_face-"+str(human_id))
                    new_node.properties["mesh_ids"] = self.human_meshes["face"]
                    new_node.properties["aabb"] = self.human_aabb["face"]
                    new_node.parent = self.human_cameras_ids[human_id]
                    offset = euler_matrix(math.radians(90), math.radians(0), math.radians(90), 'rxyz')
                    new_node.transformation = numpy.dot(new_node.transformation, offset)
                    self.human_bodies[human_id]["face"] = new_node.id
                    nodes_to_update.append(new_node)
    #if gaze.probability_looking_at_robot >= LOOK_AT_THRESHOLD:
    #    self.target.timeline.start(Situation(desc="lookat(human-%s,%s)" % (str(gaze.person_id), self.robot_name)))
    if nodes_to_update:
        self.target.scene.nodes.update(nodes_to_update)
def mesh(self, id):
    """Retrieve a mesh by id from the RPC service and return it deserialized."""
    ctx = gRPC.MeshInContext(
        client=gRPC.Client(id=self.id),
        mesh=gRPC.Mesh(id=id),
    )
    return Mesh.deserialize(self.rpc.getMesh(ctx, _TIMEOUT_SECONDS_MESH_LOADING))