def execute(self, userdata):
    """Turn towards the latest sound source localization (SSL) yaw.

    Looks at a point in the estimated direction of the sound and rotates
    the base to face it.

    :param userdata: smach userdata (unused)
    :return: "done" in all cases
    """
    yaw = self._robot.ssl.get_last_yaw(1.0)
    look_distance = 2.0  # [m] distance of the look-at target from base_link
    height = 1.7  # [m] approximate face height of a standing person

    # BUG FIX: check against None instead of truthiness; a perfectly valid
    # yaw of 0.0 (sound straight ahead) was previously discarded.
    if yaw is not None:
        rospy.loginfo("SSL Yaw: %.2f", yaw)
        lookat_vector = kdl_conversions.VectorStamped(
            x=look_distance * math.cos(yaw),
            y=look_distance * math.sin(yaw),
            z=height,
            frame_id="/%s/base_link" % self._robot.robot_name).projectToFrame("map", self._robot.tf_listener)
        self._robot.head.look_at_point(lookat_vector, pan_vel=2.0)

        # Normalize yaw to [-pi, pi] so the robot turns the short way around
        if yaw > math.pi:
            yaw -= 2 * math.pi
        if yaw < -math.pi:
            yaw += 2 * math.pi

        vyaw = 1.0  # [rad/s] rotational speed
        if yaw != 0.0:  # guard: avoid division by zero when the sound is dead ahead
            self._robot.base.force_drive(0, 0, (yaw / abs(yaw)) * vyaw, abs(yaw) / vyaw)
    else:
        rospy.loginfo("No SSL Yaw found ...")

    return "done"
def execute(self, userdata=None):
    """Resolve the selected entity and, when its type is unknown, inspect
    and classify it once more.

    :param userdata: smach userdata (unused)
    :return: "failed" when the selected entity cannot be resolved,
             "self" otherwise
    """
    # ED needs a moment to incorporate its latest updates
    rospy.sleep(0.1)

    entity = self._selected_entity_designator.resolve()
    if not entity:
        rospy.logerr("Could not resolve the selected entity!")
        return "failed"

    rospy.loginfo("The type of the entity is '%s'" % entity.type)

    # Unknown type: look straight at the entity and try to classify again
    if entity.type in ("", "unknown"):
        rospy.loginfo("entity: {}".format(entity))
        position = entity.pose.frame.p
        self._robot.head.look_at_point(
            kdl.VectorStamped(position.x(), position.y(), 0.8, "/map"), timeout=10)

        # The head is not entirely still when look_at_point finishes
        rospy.sleep(1)

        # Re-inspect the entity, then re-classify it
        self._robot.ed.update_kinect(entity.id)
        try:
            entity.type = self._robot.ed.classify(ids=[entity.id])[0].type
            rospy.loginfo("We classified the entity again; type = %s" % entity.type)
        except Exception as e:
            rospy.logerr(e)

    return "self"
def detect_persons(_):
    """Sweep the head over a wide arc and collect 3D person detections.

    Appends entries to the global PERSON_DETECTIONS list (each holds the
    raw detection, the RGB image and the position transformed to the map
    frame) until at least four detections have been gathered.

    :param _: smach userdata (unused)
    :return: 'done'
    """
    global PERSON_DETECTIONS
    global NUM_LOOKS

    look_angles = np.linspace(-np.pi / 2, np.pi / 2, 8)  # From -pi/2 to +pi/2 to scan 180 degrees wide
    # Targets 100 m away so only the direction matters, not the distance
    head_goals = [kdl_conversions.VectorStamped(x=100 * math.cos(angle),
                                                y=100 * math.sin(angle),
                                                z=1.5,
                                                frame_id="/%s/base_link" % robot.robot_name)
                  for angle in look_angles]

    sentences = deque([
        "Hi there mates, where are you, please look at me!",
        "I am looking for my mates! Dippi dee doo! Pew pew!",
        "You are all looking great today! Keep looking at my camera. I like it when everybody is staring at me!"
    ])

    while len(PERSON_DETECTIONS) < 4 and not rospy.is_shutdown():
        for _look in range(NUM_LOOKS):  # renamed: do not shadow the `_` parameter
            sentences.rotate(1)
            robot.speech.speak(sentences[0], block=False)
            for head_goal in head_goals:
                robot.speech.speak("please look at me", block=False)
                robot.head.look_at_point(head_goal)
                robot.head.wait_for_motion_done()
                now = time.time()
                rgb, depth, depth_info = robot.perception.get_rgb_depth_caminfo()
                try:
                    persons = robot.perception.detect_person_3d(rgb, depth, depth_info)
                except Exception as e:
                    rospy.logerr(e)
                    rospy.sleep(2.0)
                else:
                    for person in persons:
                        if person.face.roi.width > 0 and person.face.roi.height > 0:
                            try:
                                PERSON_DETECTIONS.append({
                                    "map_ps": robot.tf_listener.transformPoint("map", PointStamped(
                                        header=rgb.header,
                                        point=person.position
                                    )),
                                    "person_detection": person,
                                    "rgb": rgb
                                })
                            except Exception as e:
                                # Include the reason, consistent with detect_people();
                                # a bare message made transform failures hard to debug
                                rospy.logerr("Failed to transform valid person detection to map frame: {}".format(e))

                rospy.loginfo("Took %.2f, we have %d person detections now",
                              time.time() - now, len(PERSON_DETECTIONS))

    rospy.loginfo("Detected %d persons", len(PERSON_DETECTIONS))
    return 'done'
def execute(self, userdata):
    """
    In this function an operator is located and their face is learned

    :param userdata: contains operator which is initially none, for global definition purposes
    :return: failed in case something went wrong
             done if an operator is found
             aborted if preempt is requested
    """
    start_time = rospy.Time.now()
    self._robot.head.look_at_standing_person()
    operator = userdata.operator_learn_in

    r = rospy.Rate(1.0)  # hoisted: was rebuilt on every loop iteration
    while not operator:
        if self.preempt_requested():
            return 'aborted'

        if (rospy.Time.now() - start_time).to_sec() > self._operator_timeout:
            return 'failed'

        # Look for a person roughly 1 m in front of the robot
        operator = self._robot.ed.get_closest_laser_entity(
            radius=0.5,
            center_point=kdl_conversions.VectorStamped(
                x=1.0, y=0, z=1,
                frame_id="/{}/base_link".format(self._robot.robot_name)))
        rospy.loginfo("Operator: {op}".format(op=operator))

        if not operator:
            options = ["Please stand in front of me.",
                       "My laser can't see you, please get closer.",
                       "Where are you? Please get closer."]
            sentence = random.choice(options)
            self._robot.speech.speak(sentence)
        else:
            # Found someone: learn their face until the detection threshold
            # is reached, or give up (and retry) on timeout
            self._robot.speech.speak("Please look at me while I learn to recognize you.", block=False)
            self._robot.head.look_at_standing_person()
            learn_person_start_time = rospy.Time.now()
            detection_counter = 0
            while detection_counter < self._detection_threshold:
                if self._robot.perception.learn_person(self._operator_name):
                    rospy.loginfo("Successfully detected you %i times" % (detection_counter + 1))
                    detection_counter += 1
                elif (rospy.Time.now() - learn_person_start_time).to_sec() > self._learn_person_timeout:
                    self._robot.speech.speak("Please stand in front of me and look at me")
                    operator = None
                    break
        r.sleep()

    rospy.loginfo("We have a new operator: %s" % operator.id)
    self._robot.speech.speak("Who is that handsome person? Oh, it is you!", mood='Excited')
    self._robot.speech.speak(
        "I will follow you now, please say , {}, stop , when we are at the car.".format(self._robot.robot_name))
    self._robot.head.close()
    userdata.operator_learn_out = operator
    return 'done'
def _backup_register(self):
    """This only happens when the operator was just registered, and never tracked"""
    # Two identical laser-entity queries in a row, differing only in what
    # gets logged before each attempt
    for message in (
            "Operator already lost. Getting closest possible person entity at 1.5 m in front, radius = 1",
            "Operator still lost. Getting closest possible laser entity at 1.5 m in front, radius = 1"):
        rospy.loginfo(message)
        self._operator = self._robot.ed.get_closest_laser_entity(
            radius=1,
            center_point=kdl_conversions.VectorStamped(
                x=1.5, y=0, z=1,
                frame_id="/%s/base_link" % self._robot.robot_name))
        if self._operator:
            return True

    # Still nothing: ask the operator to register again from scratch
    rospy.loginfo("Trying to register operator again")
    self._robot.speech.speak("Oops, let's try this again...", block=False)
    self._register_operator()
    self._operator = self._robot.ed.get_entity(id=self._operator_id)
    if self._operator:
        self._last_operator = self._operator
        return True

    return False
query_type = "trashbin"
radius_2 = 10
close_entities_of_type = robot.ed.get_entities(type=query_type, center_point=robot_loc, radius=radius_2)

count_sentence = "There are {count} {type}s within {radius} meters of my base".format(
    count=len(close_entities_of_type), radius=radius_2, type=query_type)
robot.speech.speak(count_sentence)

#######################
# Test Closest Entity #
#######################
# TODO: center_point should also be a VectorStamped
# A default-constructed VectorStamped is implicitly in /map
closest = robot.ed.get_closest_entity(center_point=kdl_conversions.VectorStamped(), radius=2.0)
closest_sentence = "The closest entity to the center of the arena is {id}, of type {type}".format(
    id=closest.id[:10], type=closest.type)
robot.speech.speak(closest_sentence)

closest2 = robot.ed.get_closest_entity(type=query_type, center_point=kdl_conversions.VectorStamped(), radius=10.0)
closest2_sentence = "The closest {type} to the center of the arena is {id}".format(
    id=closest2.id, type=query_type)
robot.speech.speak(closest2_sentence)
if closest2.type != query_type:
    failed_actions += ["get_closest_entity with type, center_point and radius"]

#######################
# Test Get Entity #
def execute(self, userdata):
    """Try to recover the operator by scanning for their face.

    :param userdata: smach userdata; recovered_operator is written on success
    :return: 'Failed' when taking too long, when the RGBD roi cannot be
             projected, or when preempt is requested;
             'follow' when the operator was successfully recovered
    """
    self._robot.head.look_at_standing_person()
    self._robot.speech.speak("%s, please look at me while I am looking for you" % self._operator_name, block=False)

    start_time = rospy.Time.now()

    look_distance = 2.0
    look_angles = [0.0, math.pi / 6, math.pi / 4, math.pi / 2.3,
                   0.0, -math.pi / 6, -math.pi / 4, -math.pi / 2.3]
    head_goals = [kdl_conversions.VectorStamped(x=look_distance * math.cos(angle),
                                                y=look_distance * math.sin(angle),
                                                z=1.7,
                                                frame_id="/%s/base_link" % self._robot.robot_name)
                  for angle in look_angles]

    i = 0
    while (rospy.Time.now() - start_time).to_sec() < self._lost_timeout:
        if self.preempt_requested():
            return 'Failed'

        self._robot.head.look_at_point(head_goals[i])
        i += 1
        if i == len(head_goals):
            i = 0
        self._robot.head.wait_for_motion_done()

        raw_detections = self._robot.perception.detect_faces()
        best_detection = self._robot.perception.get_best_face_recognition(raw_detections, "operator",
                                                                          probability_threshold=3.0)

        rospy.loginfo("best_detection = {}".format(best_detection))
        if best_detection:
            roi = best_detection.roi
            try:
                operator_pos_kdl = self._robot.perception.project_roi(roi=roi, frame_id="map")
            except Exception as e:
                rospy.logerr("head.project_roi failed: %s", e)
                return 'Failed'
            operator_pos_ros = kdl_conversions.kdl_vector_stamped_to_point_stamped(operator_pos_kdl)
            self._face_pos_pub.publish(operator_pos_ros)

            recovered_operator = self._robot.ed.get_closest_laser_entity(radius=self._lost_distance,
                                                                         center_point=operator_pos_kdl,
                                                                         ignore_z=True)
            if recovered_operator:
                # Use rospy logging instead of Python 2 print statements
                rospy.loginfo("Found one!")
                self._operator_id = recovered_operator.id
                rospy.loginfo("Recovered operator id: %s", self._operator_id)
                self._operator = recovered_operator
                self._robot.speech.speak("There you are! Go ahead, I'll follow you again", block=False)
                self._robot.head.close()
                self._time_started = rospy.Time.now()
                userdata.recovered_operator = recovered_operator
                return 'follow'
            else:
                rospy.loginfo("Could not find an entity {} meter near {}".format(self._lost_distance,
                                                                                operator_pos_kdl))

    self._robot.head.close()
    return 'Failed'
def execute(self, userdata=None):
    """Scan for people and filter them by the configured properties and
    query entity.

    :param userdata: smach userdata (unused)
    :return: 'found' when at least one person matches (written to the
             found-people designator, if any), 'failed' otherwise
    """
    person_label = None
    if self._properties:
        # Check whether a named person is queried for in the properties;
        # if not, the lookup raises and there is nothing to be done with it
        try:
            person_label = self._properties["id"]
            ds.check_type(person_label, "str")
            person_label = person_label.resolve() if hasattr(person_label, 'resolve') else person_label
            rospy.loginfo("Trying to find {}".format(person_label))
        except Exception:
            pass

    if self._speak:
        self._robot.speech.speak(
            "{}please look at me while I am looking for you".format(
                person_label + ', ' if person_label else ''),
            block=False)

    start_time = rospy.Time.now()

    head_goals = [kdl_conversions.VectorStamped(x=self._look_distance * math.cos(angle),
                                                y=self._look_distance * math.sin(angle),
                                                z=1.5,
                                                frame_id="/%s/base_link" % self._robot.robot_name)
                  for angle in self._look_angles]

    i = 0
    attempts = 0
    while not rospy.is_shutdown() and attempts < self._attempts and \
            (rospy.Time.now() - start_time).to_sec() < self._search_timeout:
        if self.preempt_requested():
            return 'failed'

        self._robot.head.look_at_point(head_goals[i])
        i += 1
        if i == len(head_goals):
            i = 0
            attempts += 1
        self._robot.head.wait_for_motion_done()

        found_people_ids = []
        for _ in range(2):  # TODO: parametrize
            self._image_data = self._robot.perception.get_rgb_depth_caminfo()
            if self._image_data:
                success, found_ids = self._robot.ed.detect_people(*self._image_data)
            else:
                rospy.logwarn("Could not get_rgb_depth_caminfo")
                success, found_ids = False, []
            found_people_ids += found_ids

        # Use only unique IDs in the odd case ED sees the same people twice
        found_people = [self._robot.ed.get_entity(eid) for eid in set(found_people_ids)]

        rospy.loginfo("Found {} people: {}".format(len(found_people), found_people))
        found_people = [p for p in found_people if p]
        rospy.loginfo("{} people remaining after None-check".format(len(found_people)))

        robot_pose = self._robot.base.get_location()
        # BUG FIX: list comprehensions instead of filter(); on Python 3
        # filter() is a lazy iterator, which breaks the len() calls below
        found_people = [p for p in found_people
                        if (p.pose.frame.p - robot_pose.frame.p).Norm() < self._look_distance]
        rospy.loginfo("{} people remaining after distance < {}-check".format(len(found_people),
                                                                             self._look_distance))

        if self._properties:
            for k, v in self._properties.items():
                found_people = [p for p in found_people if self._check_person_property(p, k, v)]
                rospy.loginfo("{} people remaining after {}={} check".format(len(found_people), k, v))

        result_people = []
        if self._query_entity_designator:
            query_entity = self._query_entity_designator.resolve()
            if query_entity:
                result_people = [p for p in found_people
                                 if query_entity.in_volume(p.pose.extractVectorStamped(), 'in')]
                rospy.loginfo("{} result_people remaining after 'in'-'{}' check".format(len(result_people),
                                                                                        query_entity.id))

                # If people are not in the query entity, try whether the query
                # entity is in the people instead. This is for a future
                # feature when object recognition becomes more advanced
                if not result_people:
                    try:
                        result_people = [p for p in found_people
                                         if p.in_volume(query_entity.pose.extractVectorStamped(), 'in')]
                        rospy.loginfo(
                            "{} result_people remaining after 'in'-'{}' check".format(len(result_people),
                                                                                      query_entity.id))
                    except Exception:
                        pass
        else:
            result_people = found_people

        if result_people:
            if self._nearest:
                result_people.sort(key=lambda e: (e.pose.frame.p - robot_pose.frame.p).Norm())
            if person_label and \
                    any(self._check_person_property(p, "id", person_label) for p in result_people) and \
                    self._speak:
                # BUG FIX: block=False was passed to str.format() instead of speak()
                self._robot.speech.speak("I think I found {}.".format(person_label), block=False)
            self._robot.head.close()

            if self._found_people_designator:
                self._found_people_designator.write(result_people)

            return 'found'
        else:
            rospy.logwarn("Could not find people meeting the requirements")

    rospy.logwarn("Exceeded trial or time limit")
    self._robot.head.close()
    rospy.sleep(2.0)
    return 'failed'
def _recover_operator(self):
    """Scan with the head for the operator's face and try to re-acquire the
    matching laser entity.

    Sets self._operator and self._operator_id on success.

    :return: True when the operator was recovered, False otherwise
    """
    rospy.loginfo("Trying to recover the operator")
    self._robot.head.look_at_standing_person()
    self._robot.speech.speak(
        "%s, please look at me while I am looking for you" % self._operator_name,
        block=False)

    # Wait for the operator and find his/her face
    operator_recovery_timeout = self._lost_timeout
    start_time = rospy.Time.now()
    recovered_operator = None

    look_distance = 2.0
    look_angles = [
        0.0, math.pi / 6, math.pi / 4, math.pi / 2.3,
        0.0, -math.pi / 6, -math.pi / 4, -math.pi / 2.3
    ]
    head_goals = [
        kdl_conversions.VectorStamped(x=look_distance * math.cos(angle),
                                      y=look_distance * math.sin(angle),
                                      z=1.7,
                                      frame_id="/%s/base_link" % self._robot.robot_name)
        for angle in look_angles
    ]

    i = 0
    while (rospy.Time.now() - start_time).to_sec() < operator_recovery_timeout:
        self._robot.head.look_at_point(head_goals[i])
        i += 1
        if i == len(head_goals):
            i = 0
        self._robot.head.wait_for_motion_done()

        # raw_detections is a list of Recognitions
        # a recognition contains a CategoricalDistribution
        # a CategoricalDistribution is a list of CategoryProbabilities
        # a CategoryProbability has a label and a float
        raw_detections = self._robot.head.detect_faces()
        best_detection = self._robot.head.get_best_face_recognition(
            raw_detections, "operator")

        rospy.loginfo("best_detection = {}".format(best_detection))
        if best_detection:
            roi = best_detection.roi
            try:
                operator_pos_kdl = self._robot.head.project_roi(
                    roi=roi, frame_id="map")
            except Exception as e:
                rospy.logerr("head.project_roi failed: %s", e)
                return False
            operator_pos_ros = kdl_conversions.kdlVectorStampedToPointStamped(
                operator_pos_kdl)
            self._face_pos_pub.publish(operator_pos_ros)

            recovered_operator = self._robot.ed.get_closest_laser_entity(
                radius=self._lost_distance,
                center_point=operator_pos_kdl)
            if recovered_operator:
                # Use rospy logging instead of Python 2 print statements
                rospy.loginfo("Found one!")
                self._operator_id = recovered_operator.id
                rospy.loginfo("Recovered operator id: %s", self._operator_id)
                self._operator = recovered_operator
                self._robot.speech.speak(
                    "There you are! Go ahead, I'll follow you again",
                    block=False)
                self._robot.head.close()
                self._time_started = rospy.Time.now()
                return True
            else:
                rospy.loginfo("Could not find an entity {} meter near {}".format(
                    self._lost_distance, operator_pos_kdl))

    self._robot.head.close()
    self._turn_towards_operator()
    self._update_navigation()
    rospy.sleep(2.0)
    return False
def _register_operator(self):
    """Robots looks at the operator and asks whether the operator should follow.
    If he says yes, then set self._operator. Also adds the operator to the
    breadcrumb list

    :return: True when an operator was registered, False on timeout or when
             the operator answered "no"
    """
    start_time = rospy.Time.now()
    self._robot.head.look_at_standing_person()

    if self._operator_id:
        operator = self._robot.ed.get_entity(id=self._operator_id)
    else:
        operator = None

    while not operator:
        if (rospy.Time.now() - start_time).to_sec() > self._operator_timeout:
            return False

        if self._ask_follow:
            sentence = "Should I follow you?"
            self._robot.speech.speak(sentence, block=True)
            try:
                answer = self._robot.hmi.query(sentence, "T -> yes | no", "T")
            except TimeoutException:
                self._robot.speech.speak("I did not hear you!")
                rospy.sleep(2)
            else:
                if answer.sentence == "yes":
                    operator = self._robot.ed.get_closest_laser_entity(
                        radius=0.5,
                        center_point=kdl_conversions.VectorStamped(
                            x=1.0, y=0, z=1,
                            frame_id="/%s/base_link" % self._robot.robot_name))
                    rospy.loginfo("Operator: {op}".format(op=operator))
                    if not operator:
                        self._robot.speech.speak("Please stand in front of me")
                    else:
                        if self._learn_face:
                            self._robot.speech.speak(
                                "Please look at me while I learn to recognize you.",
                                block=True)
                            self._robot.speech.speak("Just in case...", block=False)
                            self._robot.head.look_at_standing_person()
                            learn_person_start_time = rospy.Time.now()
                            learn_person_timeout = 10.0  # TODO: Parameterize
                            num_detections = 0
                            while num_detections < 5:
                                if self._robot.head.learn_person(self._operator_name):
                                    num_detections += 1
                                elif (rospy.Time.now() - learn_person_start_time
                                      ).to_sec() > learn_person_timeout:
                                    self._robot.speech.speak(
                                        "Please stand in front of me and look at me")
                                    operator = None
                                    break
                else:
                    # Operator explicitly declined
                    return False
        else:
            operator = self._robot.ed.get_closest_laser_entity(
                radius=1,
                center_point=kdl_conversions.VectorStamped(
                    x=1.5, y=0, z=1,
                    frame_id="/%s/base_link" % self._robot.robot_name))
            if not operator:
                rospy.sleep(1)

    # Use rospy logging instead of Python 2 print statements; the "NOW!!!"
    # debug print leftover has been removed
    rospy.loginfo("We have a new operator: %s", operator.id)
    self._robot.speech.speak("Gotcha! I will follow you!", block=False)
    self._operator_id = operator.id
    self._operator = operator
    self._breadcrumbs.append(operator)
    self._robot.head.close()
    rospy.sleep(3)
    return True
def detect_people(user_data):
    """Sweep the head over a set of angles, run 3D person detection at each
    pose and store the detections (with map-frame positions) on
    user_data.raw_detections.

    :param user_data: smach userdata; raw_detections is written to it
    :return: 'done'
    """
    detections = []

    # Scan from -pi/4 to +pi/4; goals lie 100 m out so only direction matters
    scan_angles = np.linspace(-np.pi / 4, np.pi / 4, 4)
    scan_goals = [kdl_conversions.VectorStamped(x=100 * math.cos(a),
                                                y=100 * math.sin(a),
                                                z=1.5,
                                                frame_id="/%s/base_link" % robot.robot_name)
                  for a in scan_angles]

    sentences = deque([
        "Hi there mates, where are you, please look at me!",
        "I am looking for my mates! Dippi dee doo! Pew pew!",
        "You are all looking great today! Keep looking at my camera.",
        "I like it when everybody is staring at me!"
    ])
    look_at_me_sentences = deque([
        "Please look at me",
        "Let me check you out",
        "Your eyes are beautiful",
        "Try to look pretty, your face will pop up on the screen later!"
    ])

    for _ in range(NUM_LOOKS):
        sentences.rotate(1)
        robot.speech.speak(sentences[0], block=False)
        for goal in scan_goals:
            look_at_me_sentences.rotate(1)
            robot.speech.speak(look_at_me_sentences[0], block=False)

            robot.head.look_at_point(goal)
            robot.head.wait_for_motion_done()

            t_start = time.time()
            rgb, depth, depth_info = robot.perception.get_rgb_depth_caminfo()
            if rgb:
                try:
                    persons = robot.perception.detect_person_3d(rgb, depth, depth_info)
                except Exception as e:
                    rospy.logerr(e)
                    rospy.sleep(2.0)
                else:
                    for detection in persons:
                        face_roi = detection.face.roi
                        # Only keep detections that actually contain a face
                        if face_roi.width > 0 and face_roi.height > 0:
                            try:
                                map_ps = robot.tf_listener.transformPoint(
                                    "map",
                                    PointStamped(header=rgb.header, point=detection.position))
                            except Exception as e:
                                rospy.logerr(
                                    "Failed to transform valid person detection to map frame: {}".format(e))
                            else:
                                detections.append({
                                    "map_ps": map_ps,
                                    "person_detection": detection,
                                    "rgb": rgb
                                })

            rospy.loginfo("Took %.2f, we have %d person detections now",
                          time.time() - t_start, len(detections))

    rospy.loginfo("Detected %d persons", len(detections))
    user_data.raw_detections = detections
    return 'done'
import sys

from robot_skills.util import kdl_conversions
from robot_skills import get_robot
from robot_smach_states.navigation.guidance import _detect_operator_behind_robot

# Small manual test: make the robot look behind itself and try to detect an
# operator there. Usage: <script> <robot_name>
robot = get_robot(sys.argv[1])

# Look-at target 1 m behind the robot at 1 m height, in its own base frame
a = kdl_conversions.VectorStamped(-1, 0, 1, '/{}/base_link'.format(sys.argv[1]))
robot.head.look_at_point(a)  # BUG FIX: was `hero.head.look_at_point(a)` — `hero` is undefined here
_detect_operator_behind_robot(robot)
def execute(self, userdata=None):
    """Scan for people and look for the person labelled self._person_label.

    :param userdata: smach userdata (unused)
    :return: 'found' when the person was found (and written to the found
             entity designator, if any), 'failed' otherwise
    """
    person_label = self._person_label.resolve() if hasattr(
        self._person_label, 'resolve') else self._person_label

    rospy.loginfo("Trying to find {}".format(person_label))
    self._robot.head.look_at_standing_person()
    self._robot.speech.speak(
        "{}, please look at me while I am looking for you".format(person_label),
        block=False)
    start_time = rospy.Time.now()

    look_distance = 2.0  # magic number 4
    head_goals = [
        kdl_conversions.VectorStamped(x=look_distance * math.cos(angle),
                                      y=look_distance * math.sin(angle),
                                      z=1.3,
                                      frame_id="/%s/base_link" % self._robot.robot_name)
        for angle in self._look_angles
    ]

    i = 0
    rate = rospy.Rate(2)
    while (rospy.Time.now() - start_time).to_sec() < self._search_timeout and not rospy.is_shutdown():
        if self.preempt_requested():
            return 'failed'

        self._robot.head.look_at_point(head_goals[i])
        i += 1
        if i == len(head_goals):
            i = 0
        self._robot.head.wait_for_motion_done()

        self._image_data = self._robot.perception.get_rgb_depth_caminfo()
        if self._image_data:
            success, found_people_ids = self._robot.ed.detect_people(*self._image_data)
        else:
            rospy.logwarn("Could not get_rgb_depth_caminfo")
            success, found_people_ids = False, []

        # eid: do not shadow the builtin `id`
        found_people = [self._robot.ed.get_entity(eid) for eid in found_people_ids]

        rospy.loginfo("Found {} people: {}".format(len(found_people), found_people))
        found_people = [p for p in found_people if p]
        rospy.loginfo("{} people remaining after None-check".format(len(found_people)))

        found_names = {person.id: person for person in found_people}

        found_person = None
        if self._discard_other_labels:
            found_person = found_names.get(person_label, None)
        elif found_people:  # guard: min() on an empty sequence raises ValueError
            # Find which of those is closest
            robot_pose = self._robot.base.get_location()
            # BUG FIX: compare distances via .Norm(); KDL vectors are not orderable
            found_person = min(found_people,
                               key=lambda person: (person.pose.frame.p - robot_pose.frame.p).Norm())

        # BUG FIX: also require found_person; in_volume(None...) raised AttributeError
        if found_person and self._room:
            room_entity = self._robot.ed.get_entity(id=self._room)
            if not room_entity.in_volume(found_person.pose.extractVectorStamped(), 'in'):
                # If the person is not in the room we are looking for, ignore the person
                rospy.loginfo(
                    "We found a person '{}' but was not in desired room '{}' so ignoring that person"
                    .format(found_person.id, room_entity.id))
                found_person = None

        if found_person:
            # BUG FIX: block=False was mistakenly passed into str.format() here
            rospy.loginfo("I found {} who I assume is {} at {}".format(
                found_person.id, person_label, found_person.pose.extractVectorStamped()))
            if self.speak_when_found:
                # BUG FIX: block=False belongs to speak(), not str.format()
                self._robot.speech.speak("I think I found {}.".format(person_label), block=False)
            self._robot.head.close()

            if self._found_entity_designator:
                self._found_entity_designator.write(found_person)

            return 'found'
        else:
            rospy.logwarn("Could not find {}".format(person_label))
            rate.sleep()

    self._robot.head.close()
    rospy.sleep(2.0)
    return 'failed'
def execute(self, userdata=None):
    """Scan for the face of self._person_label; when found, register the
    person's position as a waypoint entity in ED.

    :param userdata: smach userdata (unused)
    :return: 'found' when the person was located, 'failed' otherwise
    """
    rospy.loginfo("Trying to find {}".format(self._person_label))
    self._robot.head.look_at_standing_person()
    self._robot.speech.speak(
        "{}, please look at me while I am looking for you".format(self._person_label),
        block=False)
    start_time = rospy.Time.now()

    look_distance = 2.0  # magic number 4
    # Same values the original double comprehension produced: straight ahead
    # plus pi/6, pi/4 and pi/2.3 to either side (magic numbers)
    look_angles = [0.0, -math.pi / 6, -math.pi / 4, -math.pi / 2.3,
                   0.0, math.pi / 6, math.pi / 4, math.pi / 2.3]
    head_goals = [
        kdl_conversions.VectorStamped(x=look_distance * math.cos(angle),
                                      y=look_distance * math.sin(angle),
                                      z=1.7,
                                      frame_id="/%s/base_link" % self._robot.robot_name)
        for angle in look_angles
    ]

    goal_index = 0
    while (rospy.Time.now() - start_time).to_sec() < self._timeout:
        if self.preempt_requested():
            return 'failed'

        # Cycle through the head goals
        self._robot.head.look_at_point(head_goals[goal_index])
        goal_index = (goal_index + 1) % len(head_goals)
        self._robot.head.wait_for_motion_done()

        detections = self._robot.perception.detect_faces()
        best = self._robot.perception.get_best_face_recognition(
            detections, self._person_label,
            probability_threshold=self._probability_threshold)

        rospy.loginfo("best_detection = {}".format(best))
        if not best:
            continue

        try:
            person_pos_kdl = self._robot.perception.project_roi(roi=best.roi, frame_id="map")
        except Exception as e:
            rospy.logerr("head.project_roi failed: %s", e)
            return 'failed'

        self._face_pos_pub.publish(
            kdl_conversions.kdl_vector_stamped_to_point_stamped(person_pos_kdl))

        found_person = self._robot.ed.get_closest_laser_entity(
            radius=self._look_distance, center_point=person_pos_kdl)
        if not found_person:
            rospy.logwarn("Could not find {} in the {}".format(self._person_label, self.area))
            continue

        self._robot.speech.speak("I found {}".format(self._person_label), block=False)
        self._robot.head.close()

        # Register the person as a waypoint so we can navigate back to them
        self._robot.ed.update_entity(
            id=self._person_label,
            frame_stamped=kdl_conversions.FrameStamped(kdl.Frame(person_pos_kdl.vector), "/map"),
            type="waypoint")
        return 'found'

    self._robot.head.close()
    rospy.sleep(2.0)
    return 'failed'