Пример #1
0
    def execute(self, userdata=None):
        """Prepare the robot for grasping the designated entity.

        Segments the entity with the kinect, raises the torso, moves the arm
        to a pre-grasp pose, opens the gripper and keeps the head on target.

        :param userdata: unused, present for smach compatibility
        :return: 'succeeded' or 'failed'
        """
        entity = self.grab_entity_designator.resolve()
        if not entity:
            rospy.logerr("Could not resolve grab_entity")
            return "failed"

        # Look at the entity first so it is in view of the kinect
        self.robot.head.look_at_point(VectorStamped(vector=entity._pose.p,
                                                    frame_id="/map"),
                                      timeout=0.0)
        self.robot.head.wait_for_motion_done()
        # Update the world model; the result itself is not used here, so the
        # previously unused 'segm_res' binding has been dropped
        self.robot.ed.update_kinect("%s" % entity.id)

        arm = self.arm_designator.resolve()

        if not arm:
            rospy.logerr("Could not resolve arm")
            return "failed"

        # Torso up (non-blocking)
        self.robot.torso.reset()

        # Arm to position in a safe way
        arm.send_joint_trajectory('prepare_grasp', timeout=0)
        arm.wait_for_motion_done()

        # Open gripper
        arm.send_gripper_goal('open', timeout=0.0)
        arm.wait_for_motion_done()

        # Make sure the head looks at the entity
        self.robot.head.look_at_point(VectorStamped(vector=entity._pose.p,
                                                    frame_id="/map"),
                                      timeout=0.0)
        self.robot.head.wait_for_motion_done()
        return 'succeeded'
    def get_closest_entity(self, type="", center_point=None, radius=0):
        """Return the entity closest to center_point, or None.

        :param type: entity type to filter on (default: any type)
        :param center_point: VectorStamped around which to search; defaults
            to the robot's base_link origin
        :param radius: search radius in meters
        :return: closest Entity, or None if nothing found or sorting failed
        """
        if not center_point:
            center_point = VectorStamped(x=0,
                                         y=0,
                                         z=0,
                                         frame_id="/" + self.robot_name +
                                         "/base_link")

        entities = self.get_entities(type=type,
                                     center_point=center_point,
                                     radius=radius)

        # HACK: only keep entities with a shape and a known type
        entities = [
            e for e in entities if e.shape is not None and e.type != ""
        ]

        if len(entities) == 0:
            return None

        # Sort by distance
        try:
            center_in_map = center_point.projectToFrame(
                "/map", self._tf_listener)
            entities = sorted(
                entities,
                key=lambda entity: entity.distance_to_2d(center_in_map.vector))
        except Exception as e:
            # Narrowed from a bare 'except:' (which would also swallow
            # SystemExit/KeyboardInterrupt) and now logs the actual error
            rospy.logerr("Failed to sort entities: {}".format(e))
            return None

        return entities[0]
Пример #3
0
    def execute(self, userdata):
        """Resolve the grab entity and arm, store the arm side in userdata,
        and transform the entity pose to the robot's base link frame.

        :param userdata: smach userdata; .arm is set to the resolved arm side
        :return: 'failed' on any resolution or transformation error
        """
        grab_entity = self.grab_entity_designator.resolve()
        if not grab_entity:
            rospy.logerr("Could not resolve grab_entity")
            return "failed"

        arm = self.arm_designator.resolve()
        if not arm:
            rospy.logerr("Could not resolve arm")
            return "failed"
        userdata.arm = arm.side  # Using userdata makes sure we don't need to do any more arm entity magic

        goal_map = VectorStamped(0, 0, 0, frame_id=grab_entity.id)

        try:
            # Transform to base link frame
            goal_bl = goal_map.projectToFrame(
                self.robot.robot_name + '/base_link',
                tf_listener=self.robot.tf_listener)
            if goal_bl is None:
                rospy.logerr('Transformation of goal to base failed')
                return 'failed'
        except tf.Exception as tfe:
            # 'except X as e' replaces the 'except X, e' form removed in py3
            rospy.logerr(
                'Transformation of goal to base failed: {0}'.format(tfe))
            return 'failed'
Пример #4
0
    def execute(self, userdata=None):
        """Move to the HMI pose and look at a fixed person-height point.

        The target is first projected to /map because taking the hmi pose
        may move base_link.

        :param userdata: unused, present for smach compatibility
        :return: "succeeded"
        """
        base_frame = "/" + self._robot.robot_name + "/base_link"
        base_link_goal = VectorStamped(1.0, 0.0, 1.6, frame_id=base_frame)
        map_goal = base_link_goal.projectToFrame('/map',
                                                 self._robot.tf_listener)

        self._robot.move_to_hmi_pose()

        self._robot.head.look_at_point(map_goal)
        self._robot.head.wait_for_motion_done()
        return "succeeded"
Пример #5
0
 def _get_head_goal(self, spec):
     """Build a base_link-frame head target (z=0) from spec["x"]/spec["y"]."""
     base_frame = "/" + self._robot.robot_name + "/base_link"
     return VectorStamped(spec["x"], spec["y"], z=0, frame_id=base_frame)
Пример #6
0
    def _recognize(self):
        """Detect the crowd and try to identify the operator.

        First takes several local (non-API) shots to estimate the crowd size
        and collect operator guesses, then repeatedly queries the external
        API until a shot passes self._shot_valid, falling back to the best
        local result after max_tries attempts.

        :return: (detections, operator) tuple
        """
        z = 1.5
        # Look far ahead at roughly person height
        self.robot.head.look_at_point(
            VectorStamped(100, 0, z, self.robot.robot_name + "/base_link"))
        self.robot.speech.speak("I am looking for my operator", block=False)

        # 1) Check how many people in the crowd
        shots = 3

        number_of_people = 0
        operator_list = []

        sentences = [
            "You are all looking great today!            Keep looking in my camera!",
            "I like it when everybody is staring at me; being in the center of attention!"
        ]

        for i in range(0, shots):
            self.robot.speech.speak(sentences[i % (shots - 1)], block=False)
            detections, operator = self._get_detections(
                external_api_request=False)

            # Get number of people: keep the largest count over all shots
            number_of_people = max(len(detections), number_of_people)

            # Get operator guess
            if operator:
                operator_list.append(operator)

        # 2) Get all other information with use of the external api, make sure that we have all persons here
        max_tries = 5
        try_number = 0

        best_operator = None
        best_detections = None
        while True:
            try_number += 1

            self.robot.speech.speak(
                random.choice([
                    "Let's take a closer look",
                    "Let's see what we are dealing with",
                    "Let's get some more details"
                ]))
            detections, operator = self._get_detections(
                external_api_request=True)

            if self._shot_valid(number_of_people, operator_list, detections,
                                operator):
                return detections, operator

            # Remember the shot with the most detections as a fallback
            if not best_detections or len(detections) > len(best_detections):
                best_operator = operator
                best_detections = detections

            if try_number > max_tries:
                self.robot.speech.speak(
                    "I am having trouble with my external servers, I will use my local result"
                )  # we just didnt pass our criteria
                return best_detections, best_operator
Пример #7
0
    def get_closest_laser_entity(self, type="", center_point=VectorStamped(), radius=0):
        """
        Get the closest entity detected by the laser. The ID's of such entities are postfixed with '-laser'
        For the rest, this works exactly like get_closest_entity
        :param type: What type of entities to filter on (note: laser entities have an empty type)
        :param center_point: combined with radius. Around which point to search for entities
        :param radius: how far from the center_point to look (in meters)
        :return: closest Entity, or None
        """

        entities = self.get_entities(type="", center_point=center_point, radius=radius)

        # HACK: laser entities have no type and an id ending in '-laser'
        entities = [e for e in entities if e.shape and e.type == "" and e.id.endswith("-laser")]

        if len(entities) == 0:
            return None

        # Sort by distance in the base_link frame.
        # The TF projection is hoisted out of the sort key so it runs once
        # instead of once per entity.
        try:
            center_in_bl = center_point.projectToFrame(
                "/%s/base_link" % self.robot_name,
                self._tf_listener)  # TODO: adjust for robot
            entities = sorted(
                entities,
                key=lambda entity: entity.distance_to_2d(center_in_bl.vector))
        except Exception as e:
            # Was a bare 'except:' with a py2-only print statement
            rospy.logerr("Failed to sort entities: {}".format(e))
            return None

        return entities[0]
Пример #8
0
    def run(self, robot, entity, area, waittime):
        """Point the head (and, hardcoded for AMIGO, the torso) at the
        center of the given volume of the entity, then wait *waittime*.

        :return: 'succeeded' when the area was found, 'failed' otherwise
        """
        if not entity:
            return 'failed'

        # Entities define their own frame, so there is no need to transform the pose to /map.
        # That would be equivalent to defining coordinates 0,0,0 in its own frame, so that is what we do here.
        # The added benefit is that the entity's frame actually moves because the entity is tracked.
        # This makes the head track the entity
        frame_id = "/" + entity.id

        if area not in entity.volumes:
            rospy.logwarn("Cannot find {0} in {1}".format(area, entity.id))
            return "failed"

        center = entity.volumes[area].center_point
        target = VectorStamped(center.x(), center.y(), center.z(), frame_id)

        rospy.loginfo('Look at %s' % (repr(target)))

        # This is awefully hardcoded for AMIGO!!! (TODO)
        torso_height = min(0.4, max(0.1, target.vector.z() - 0.55))
        robot.torso._send_goal([torso_height], timeout=0)

        robot.head.look_at_point(target, timeout=0)

        robot.head.wait_for_motion_done(timeout=5)
        robot.torso.wait_for_motion_done(timeout=5)

        rospy.sleep(rospy.Duration(waittime))
        return "succeeded"
Пример #9
0
    def get_entities(self,
                     type="",
                     center_point=VectorStamped(),
                     radius=0,
                     id="",
                     ignore_z=False):
        """Filter the world-model entities on type, distance to center_point
        (2D when ignore_z is set) and id.

        :return: list of matching entities
        """
        center_in_map = center_point.projectToFrame(
            "/map", self.tf_listener)

        result = self._entities.values()
        if type:
            result = [e for e in result if e.is_a(type)]
        if radius:
            # Choose the 2D or 3D distance metric once, then filter
            if ignore_z:
                distance = lambda e: e.distance_to_2d(center_in_map.vector)
            else:
                distance = lambda e: e.distance_to_3d(center_in_map.vector)
            result = [e for e in result if distance(e) <= radius]
        if id:
            result = [e for e in result if e.id == id]

        return result
Пример #10
0
    def get_entities(self,
                     type="",
                     center_point=VectorStamped(),
                     radius=0,
                     id="",
                     parse=True):
        """Get entities via the ED simple query interface.

        :param type: type of entity to filter on
        :param center_point: VectorStamped from which radius is measured
        :param radius: max distance between center_point and the entity
        :param id: ID of the entity
        :param parse: unused here, kept for interface compatibility
        :return: list of Entity ([] on query failure)
        """
        self._publish_marker(center_point, radius)

        center_point_in_map = center_point.projectToFrame(
            "/map", self._tf_listener)
        query = SimpleQueryRequest(id=id,
                                   type=type,
                                   center_point=kdl_vector_to_point_msg(
                                       center_point_in_map.vector),
                                   radius=radius)

        try:
            entity_infos = self._ed_simple_query_srv(query).entities
            entities = map(from_entity_info, entity_infos)
        except Exception as e:  # was py2-only 'except Exception, e'
            rospy.logerr(
                "ERROR: robot.ed.get_entities(id=%s, type=%s, center_point=%s, radius=%s)"
                % (id, type, str(center_point), str(radius)))
            rospy.logerr("L____> [%s]" % e)
            return []

        # Bug fix: the computed entities were previously dropped (the method
        # fell off the end and implicitly returned None on success)
        return entities
Пример #11
0
    def _get_detections(self, external_api_request):
        """Look straight ahead at person height and run face detection.

        NOTE: the underlying ed.detect_persons() call disappeared, so this
        currently always produces an empty detection list.

        :param external_api_request: unused while detection is stubbed out
        :return: (detections, operator) where operator is the detection named
            'operator' with the highest name_score, or None
        """
        z = 1.5
        self.robot.head.look_at_point(
            VectorStamped(100, 0, z, self.robot.robot_name + "/base_link"))
        self.robot.head.wait_for_motion_done()
        time.sleep(1)

        rospy.logerr(
            "ed.detect _persons() method disappeared! This was only calling the face recognition module and we are using a new one now!"
        )
        rospy.logerr("I will return an empty detection list!")
        # The redundant 'if not detections: detections = []' re-assignment
        # that followed this line was dead code and has been removed
        detections = []

        operator_candidates = [
            candidate for candidate in detections
            if candidate.name == "operator"
        ]

        rospy.loginfo("Detections: %s", detections)
        rospy.loginfo("Operator candidates: %s", operator_candidates)

        operator = None
        if operator_candidates:
            operator = max(operator_candidates, key=lambda c: c.name_score)

        rospy.loginfo("Operator: %s", operator)

        return detections, operator
Пример #12
0
    def get_entities(self, type="", center_point=VectorStamped(), radius=float('inf'), id="", ignore_z=False):
        """
        Get entities via Simple Query interface

        :param type: Type of entity
        :param center_point: Point from which radius is measured
        :param radius: Distance between center_point and entity
        :param id: ID of entity
        :param ignore_z: Consider only the distance in the X,Y plane for the radius from center_point
        """
        self._publish_marker(center_point, radius)

        center_in_map = center_point.projectToFrame("/map", self.tf_listener)
        request = SimpleQueryRequest(
            id=id,
            type=type,
            center_point=kdl_vector_to_point_msg(center_in_map.vector),
            radius=radius,
            ignore_z=ignore_z)

        try:
            infos = self._ed_simple_query_srv(request).entities
            entities = map(from_entity_info, infos)
        except Exception as e:
            rospy.logerr("ERROR: robot.ed.get_entities(id={}, type={}, center_point={}, radius={}, ignore_z={})".format(
                id, type, str(center_point), str(radius), ignore_z))
            rospy.logerr("L____> [%s]" % e)
            return []

        return entities
Пример #13
0
    def get_closest_possible_person_entity(self, center_point=VectorStamped(), radius=float('inf')):
        """ Returns the 'possible_human' entity closest to a certain center point.

        :param center_point: (VectorStamped) indicating where the human should be close to
        :param radius: (float) radius to look for possible humans
        :return: (Entity) entity (if found), None otherwise
        """
        assert center_point.frame_id.endswith("map"), "Other frame ids not yet implemented"

        # Fetch everything around the center point, keep only possible humans
        candidates = [
            e for e in self.get_entities(type="", center_point=center_point, radius=radius)
            if e.is_a('possible_human')
        ]

        # If nothing found, return None
        if not candidates:
            return None

        # Sort by distance
        try:
            candidates.sort(key=lambda entity: entity.distance_to_2d(center_point.vector))
            rospy.logdebug("entities sorted closest to robot = {}".format(candidates))
        except Exception as e:
            rospy.logerr("Failed to sort entities: {}".format(e))
            return None

        return candidates[0]
Пример #14
0
    def get_closest_laser_entity(self, type="", center_point=VectorStamped(), radius=float('inf'), ignore_z=False):
        """
        Get the closest entity detected by the laser. The ID's of such entities are postfixed with '-laser'
        For the rest, this works exactly like get_closest_entity

        :param type: What type of entities to filter on (note: laser entities have an empty type)
        :param center_point: combined with radius. Around which point to search for entities
        :param radius: how far from the center_point to look (in meters)
        :param ignore_z: Consider only the distance in the X,Y plane for the radius from center_point criterium.
        :return: closest Entity, or None
        """

        entities = self.get_entities(type="", center_point=center_point, radius=radius, ignore_z=ignore_z)

        # HACK: laser entities have no type and an id ending in '-laser'
        entities = [e for e in entities if e.shape and e.type == "" and e.id.endswith("-laser")]

        if len(entities) == 0:
            return None

        # Sort by distance in the base_link frame.
        # The TF projection is hoisted out of the sort key so it runs once
        # instead of once per entity.
        try:
            center_in_bl = center_point.projectToFrame(
                "/%s/base_link" % self.robot_name,
                self.tf_listener)  # TODO: adjust for robot
            entities = sorted(
                entities,
                key=lambda entity: entity.distance_to_2d(center_in_bl.vector))
        except Exception as e:
            rospy.logerr("Failed to sort entities: {}".format(e))
            return None

        return entities[0]
Пример #15
0
    def get_closest_possible_person_entity(self,
                                           type="",
                                           center_point=VectorStamped(),
                                           radius=0,
                                           room=""):
        """Return the 'possible_human' entity closest to center_point.

        :param type: unused, kept for interface compatibility
        :param center_point: VectorStamped to measure distances from
        :param radius: search radius in meters
        :param room: unused, kept for interface compatibility
        :return: closest Entity, or None
        """
        #if isinstance(center_point, PointStamped):
        #    center_point = self._transform_center_point_to_map(center_point)
        # ToDo: check frame ids

        entities = self.get_entities(type="",
                                     center_point=center_point,
                                     radius=radius)

        # HACK
        # entities = [ e for e in entities if e.convex_hull and e.type == "" and 'possible_human' in e.flags ]
        entities = [e for e in entities if e.is_a('possible_human')]

        if len(entities) == 0:
            return None

        # Sort by distance
        try:
            entities = sorted(
                entities,
                key=lambda entity: entity.distance_to_2d(center_point.vector))
            rospy.logdebug("entities sorted closest to robot = {}".format(entities))
        except Exception as e:
            # Was a bare 'except:' with py2-only print statements; now logs
            # via rospy like the other query helpers in this file
            rospy.logerr("Failed to sort entities: {}".format(e))
            return None

        return entities[0]
Пример #16
0
    def execute(self, userdata):
        """Prepare arm, torso and gripper for grasping and look at the entity.

        :param userdata: smach userdata; .arm is set to the resolved arm side
        :return: 'succeeded' or 'failed'
        """
        arm = self.arm_designator.resolve()
        if not arm:
            rospy.logerr("Could not resolve arm")
            return "failed"
        # Using userdata makes sure we don't need any more arm entity magic
        userdata.arm = arm.side

        entity = self.grab_entity_designator.resolve()
        if not entity:
            rospy.logerr("Could not resolve grab_entity")
            return "failed"

        # All motions below are started non-blocking, in this order:
        arm.send_gripper_goal('open', timeout=0)               # open gripper
        self.robot.torso.high()                                # torso up
        arm.send_joint_trajectory('prepare_grasp', timeout=0)  # arm to safe pose
        arm.send_gripper_goal('open', timeout=0.0)             # open gripper

        # Make sure the head looks at the entity
        self.robot.head.look_at_point(
            VectorStamped(vector=entity._pose.p, frame_id="/map"),
            timeout=0.0)

        return 'succeeded'
Пример #17
0
    def __init__(self,
                 robot,
                 type="",
                 center_point=None,
                 radius=float('inf'),
                 id="",
                 criteriafuncs=None,
                 weight_function=None,
                 type_designator=None,
                 center_point_designator=None,
                 id_designator=None,
                 debug=False,
                 name=None):
        """Designates an entity of some type, within a radius of some center_point, with some id,
        that match some given criteria functions.
        @param robot the robot to use for Ed queries
        @param type the type of the entity to resolve to (default: any type)
        @param center_point combined with radius: a sphere to search an entity in
        @param radius combined with center_point: a sphere to search an entity in
        @param id the ID of the object to get info about
        @param criteriafuncs a list of functions that take an entity and return a bool (True if criterium met)
        @param weight_function returns a weight for each entity, the one with the lowest weight will be selected
        (could be a distance calculation)
        @param type_designator same as type but dynamically resolved through a designator. Mutually exclusive with type
        @param center_point_designator same as center_point but dynamically resolved through a designator.
        Mutually exclusive with center_point
        @param id_designator same as id but dynamically resolved through a designator. Mutually exclusive with id"""
        super(EdEntityDesignator, self).__init__(resolve_type=Entity,
                                                 name=name)

        # Each static argument is mutually exclusive with its designator twin
        assert not type or type_designator is None, "Specify either type or type_designator, not both"
        assert center_point is None or center_point_designator is None, \
            "Specify either center_point or center_point_designator, not both"
        assert not id or id_designator is None, "Specify either id or id_designator, not both"

        self.robot = robot
        self.ed = robot.ed
        self.type = type
        # Fall back to a default (origin) center point if neither was given
        if center_point is None and center_point_designator is None:
            center_point = VectorStamped()
        self.center_point = center_point
        self.radius = radius
        self.id = id
        self.criteriafuncs = criteriafuncs or []
        # Default weight function treats all entities equally
        self.weight_function = weight_function or (lambda entity: 0)

        if type_designator:  # the resolve type of type_designator can be either str or list
            check_resolve_type(type_designator, str, list)
        self.type_designator = type_designator

        if center_point_designator:  # the resolve type of type_designator can be either VectorStamped
            check_resolve_type(center_point_designator, VectorStamped)
        self.center_point_designator = center_point_designator

        if id_designator:  # the resolve type of id_designator must be str
            check_resolve_type(id_designator, str)
        self.id_designator = id_designator

        self.debug = debug
Пример #18
0
    def execute(self, userdata=None):
        """ Wait for a waving person, store their position as the customer
        waypoint in ED, then ask the operator for confirmation.

        :param userdata: unused, present for smach compatibility
        :return: 'succeeded', 'rejected' or 'aborted'
        """

        self._robot.head.reset()
        rospy.sleep(1)

        self._robot.speech.speak("I'm waiting for a waving person")
        waving_persons = []
        # Poll the most recently received people message until someone waves
        while not rospy.is_shutdown() and not waving_persons:
            rospy.sleep(1 / self.rate)
            for person in self.people_received.people:
                if {'RWave', 'LWave'}.intersection(set(person.tags)):
                    waving_persons.append(person)

        # Only reachable when rospy was shut down during the wait loop
        if not waving_persons:
            return 'aborted'

        rospy.loginfo('waving persons: %s', waving_persons)
        if len(waving_persons) > 1:
            rospy.logwarn('using the first person')

        # Record the waving person's position as the customer waypoint in ED
        header = self.people_received.header
        point = waving_persons[0].position
        pose = frame_stamped(header.frame_id, point.x, point.y, point.z)
        rospy.loginfo('update customer position to %s', pose)
        self._robot.ed.update_entity(id=self._caller_id,
                                     frame_stamped=pose,
                                     type="waypoint")
        # customer_point_msg_stamped_camera = PointStamped()
        # customer_point_msg_stamped_camera = self.people_received.header
        # customer_point_msg_stamped_camera = waving_persons[0].position
        # self.robot.tf_listener.

        # look at the barman
        kitchen_entity = self._kitchen_designator.resolve()
        target_pose = kitchen_entity._pose
        head_target_kdl = target_pose * kdl.Vector(20.0, 0.0, 0.0)
        # NOTE(review): head_target is computed but currently unused because
        # the look_at_point call below is commented out
        head_target = VectorStamped(x=head_target_kdl.x(),
                                    y=head_target_kdl.y(),
                                    z=head_target_kdl.z(),
                                    frame_id="/map")
        # pose = kitchen_entity.pose.extractVectorStamped()
        # pose.vector[2] = 1.5

        # self._robot.head.look_at_point(head_target)

        self._robot.speech.speak(
            "I have seen a waving person, should I continue?")

        if self._confirm():
            self._robot.head.cancel_goal()
            return 'succeeded'
        else:
            self._robot.head.cancel_goal()
            return 'rejected'
Пример #19
0
    def execute(self):
        """Send the arm towards self.item in the base_link frame.

        NOTE(review): this method looks unfinished — see inline notes.
        """
        # convert item to pos and send goal
        goal_map = VectorStamped(0, 0, 0, frame_id=self.item.id)
        goal_bl = goal_map.projectToFrame(self.robot.robot_name + '/base_link',
                                          tf_listener=self.robot.tf_listener)
        # NOTE(review): both branches return 'failed', so everything below is
        # unreachable; presumably the else branch was meant to fall through
        if goal_bl is None:
            return 'failed'
        else:
            return 'failed'

        # Offset so goal_bl is in WS
        # tfToWSOffset = find ws from base_link and find the difference
        # NOTE(review): tfToWSOffset is never defined anywhere visible; this
        # line would raise NameError if it were ever reached
        goal_bl.vector[0] = goal_bl.vector.x() - tfToWSOffset

        if not self.arm.send_goal(goal_bl,
                                  timeout=0):  # maybe register callbacks when
            return ('failed')  # movement finished? not waiting
Пример #20
0
    def get_shot(self):
        """Look straight ahead at person height, then grab a camera image
        and return it as a BGR OpenCV image."""
        look_height = 1.5
        self.robot.head.look_at_point(
            VectorStamped(100, 0, look_height,
                          self.robot.robot_name + "/base_link"))
        self.robot.head.wait_for_motion_done()
        time.sleep(1)

        return self._bridge.imgmsg_to_cv2(self.robot.head.get_image(), 'bgr8')
Пример #21
0
    def reset(self, timeout=0):
        """
        Reset the head position by looking far ahead in the base_link frame.
        """
        base_frame = "/" + self.robot_name + "/base_link"
        return self.look_at_point(VectorStamped(x=10, frame_id=base_frame),
                                  timeout=timeout)
Пример #22
0
    def __init__(self,
                 robot,
                 type="",
                 center_point=None,
                 radius=0,
                 id="",
                 parse=True,
                 criteriafuncs=None,
                 type_designator=None,
                 center_point_designator=None,
                 id_designator=None,
                 debug=False,
                 name=None):
        """Designates a collection of entities of some type, within a radius of some center_point, with some id,
        that match some given criteria functions.
        @param robot the robot to use for Ed queries
        @param type the type of the entity to resolve to (default: any type)
        @param center_point combined with radius: a sphere to search an entity in
        @param radius combined with center_point: a sphere to search an entity in
        @param id the ID of the object to get info about
        @param parse whether to parse the data string associated with the object model or entity
        @param criteriafuncs a list of functions that take an entity and return a bool
        @param type_designator same as type but dynamically resolved through a designator. Mutually exclusive with type
        @param center_point_designator same as center_point but dynamically resolved through a designator. Mutually exclusive with center_point
        @param id_designator same as id but dynamically resolved through a designator. Mutually exclusive with id"""
        super(EdEntityCollectionDesignator,
              self).__init__(resolve_type=[Entity], name=name)
        self.ed = robot.ed
        # Validate the mutually exclusive static/designator argument pairs.
        # 'is (not) None' replaces the non-idiomatic '== None' comparisons.
        if type != "" and type_designator is not None:
            raise TypeError("Specify either type or type_designator, not both")
        if center_point is not None and center_point_designator is not None:
            raise TypeError(
                "Specify either center_point or center_point_designator, not both"
            )
        elif center_point is None and center_point_designator is None:
            # Fall back to a default (origin) center point
            center_point = VectorStamped()
        if id != "" and id_designator is not None:
            raise TypeError("Specify either id or id_designator, not both")

        self.type = type
        self.center_point = center_point
        self.radius = radius
        self.id = id
        self.parse = parse
        self.criteriafuncs = criteriafuncs or []

        if type_designator:
            check_resolve_type(type_designator, str)
        self.type_designator = type_designator

        if center_point_designator:
            check_resolve_type(center_point_designator, VectorStamped)
        self.center_point_designator = center_point_designator

        if id_designator:
            check_resolve_type(id_designator, str)
        self.id_designator = id_designator

        self.debug = debug
Пример #23
0
    def look_up(self, timeout=0):
        """
        Makes the robot look up: gives a target at z = 4.5 at 0.2 m in front
        of the robot (the old docstring incorrectly said z = 1.0 at 1 m).

        :param timeout: timeout passed on to look_at_point
        """
        goal = VectorStamped(0.2,
                             0.0,
                             4.5,
                             frame_id="/" + self.robot_name + "/base_link")

        return self.look_at_point(goal, timeout=timeout)
Пример #24
0
    def look_at_standing_person(self, timeout=0):
        """
        Gives a target at z = 1.6 at 1 m in front of the robot
        (the old docstring incorrectly said z = 1.75).

        :param timeout: timeout passed on to look_at_point
        """
        goal = VectorStamped(1.0,
                             0.0,
                             1.6,
                             frame_id="/" + self.robot_name + "/base_link")

        return self.look_at_point(goal, timeout=timeout)
Пример #25
0
 def __init__(self, robot_name, tf_listener, *args, **kwargs):
     """Mocked Perception: every capability is a MagicMock, except
     project_roi which returns a random map-frame VectorStamped."""
     super(Perception, self).__init__(robot_name, tf_listener)
     for capability in ('reset', 'close', 'learn_person', 'detect_faces',
                        'get_best_face_recognition',
                        'get_rgb_depth_caminfo'):
         setattr(self, capability, mock.MagicMock())
     self.project_roi = lambda *args, **kwargs: VectorStamped(
         random.random(), random.random(), random.random(), "/map")
Пример #26
0
    def test_look_at_enity_looks_at_correct_point(self):
        """Test that the robot looks at the center point of the named area, w.r.t. the frame of the entity"""
        state = states.LookAtEntity(self.robot,
                                    ds.Designator(self.entity),
                                    waittime=0)

        state.execute()

        expected_target = VectorStamped(0, 0, 0, "/12345")

        self.robot.head.look_at_point.assert_called_with(expected_target)
Пример #27
0
    def get_closest_room(self, center_point=None, radius=0):
        """Return the room entity closest to center_point (default: the
        robot's base_link origin)."""
        if not center_point:
            base_frame = "/" + self.robot_name + "/base_link"
            center_point = VectorStamped(x=0, y=0, z=0, frame_id=base_frame)

        return self.get_closest_entity(type="room",
                                       center_point=center_point,
                                       radius=radius)
Пример #28
0
    def project_roi(self, roi, frame_id=None):
        """ Projects a region of interest of a depth image to a 3D Point. Hereto, a service is used

        :param roi: sensor_msgs/RegionOfInterest
        :param frame_id: if specified, the result is transformed into this frame id
        :return: VectorStamped object
        """
        projected = self.project_rois(rois=[roi]).points[0]

        # Convert the service response to a VectorStamped
        vector = VectorStamped(x=projected.point.x,
                               y=projected.point.y,
                               z=projected.point.z,
                               frame_id=projected.header.frame_id)

        if frame_id is None:
            return vector

        # Transform the point into the requested frame
        rospy.loginfo("Transforming roi to {}".format(frame_id))
        return vector.projectToFrame(frame_id=frame_id,
                                     tf_listener=self.tf_listener)
Пример #29
0
def test_head(amigo):
    """Drive the head through a fixed series of torso-frame targets and
    verify the neck pan/tilt joints reach the expected positions."""
    global joint_positions

    max_err = 0.02

    # (label, look-at target kwargs, expected pan, expected tilt),
    # executed in order; 'straight' is repeated at the start and end
    cases = [
        ("head straight", dict(x=0.4, z=10), 0, 0),
        ("head up", dict(x=0.5, z=1), 0, -0.26),
        ("head down", dict(x=-0.4, z=1), 0, 0.54),
        ("head right", dict(x=0.4, y=100, z=1), -1.547, 0),
        ("head left", dict(x=0.4, y=-100, z=1), 1.547, 0),
        ("head straight", dict(x=0.4, z=10), 0, 0),
    ]

    for label, target, expected_pan, expected_tilt in cases:
        amigo.head.look_at_point(VectorStamped(frame_id='/amigo/torso',
                                               **target))
        # print str(joint_positions['neck_pan_joint']) + " " + str(joint_positions['neck_tilt_joint'])
        show_test(
            label,
            in_bounds(joint_positions['neck_pan_joint'], expected_pan,
                      max_err)
            and in_bounds(joint_positions['neck_tilt_joint'], expected_tilt,
                          max_err))
Пример #30
0
def look_at_segmentation_area(robot, entity, volume=None):
    """ Has a robot look at a certain object and possibly a volume

    Points the head at either the bottom of the requested volume of the
    entity or, if no (known) volume is given, at the top of the entity
    itself. For AMIGO, the torso is additionally moved so the head ends up
    at a suitable viewing height.

    :param robot: robot object
    :param entity: entity to look at
    :param volume: string indicating the specific volume to look at (e.g., 'on_top_on' or 'shelf3')
    """

    # Determine the height of the head target
    # Start with a default
    pos = entity.pose.frame.p
    look_at_point_z = 0.7

    # Check if we have areas: use these
    if volume in entity.volumes:
        search_volume = entity.volumes[volume]
        try:
            look_at_point_z = search_volume.min_corner.z()
        except Exception as e:
            # Fix: Python 3 exceptions have no `.message` attribute; format the
            # exception itself so the error handler cannot raise AttributeError.
            rospy.logerr(
                "Cannot get z_min of volume {} of entity {}: {}".format(
                    volume, entity.id, e))
    else:
        # Look at the top of the entity to inspect
        pos = entity.pose.frame.p
        try:
            # Fix: `pos` is a kdl Vector, so `z` is a method (cf. pos.x()/pos.y()
            # below); `pos.z + ...` was a method+float TypeError that silently
            # fell through to the default height via the except clause.
            look_at_point_z = pos.z() + entity.shape.z_max
        except Exception as e:
            rospy.logerr("Cannot get z_max of entity {}: {}".format(
                entity.id, e))

    # Point the head at the right direction
    robot.head.look_at_point(VectorStamped(pos.x(), pos.y(), look_at_point_z,
                                           "/map"),
                             timeout=0)

    # Make sure the spindle is at the appropriate height if we are AMIGO
    if robot.robot_name == "amigo":
        # Send spindle goal to bring head to a suitable location
        # Correction for standard height: with a table heigt of 0.76 a spindle position
        # of 0.35 is desired, hence offset = 0.76-0.35 = 0.41
        # Minimum: 0.15 (to avoid crushing the arms), maximum 0.4
        spindle_target = max(
            0.15, min(look_at_point_z - 0.41, robot.torso.upper_limit[0]))

        robot.torso._send_goal([spindle_target], timeout=0)
        robot.torso.wait_for_motion_done()

    robot.head.wait_for_motion_done()
Пример #31
0
    def execute(self, userdata=None):
        """Execute the grasp: resolve entity and arm, transform the grasp goal,
        drive the arm to the object, close the gripper, lift and retract.

        :param userdata: smach userdata (unused)
        :return: 'succeeded' or 'failed'
        """

        # Resolve the entity to grab; bail out early if the designator fails.
        grab_entity = self.grab_entity_designator.resolve()
        if not grab_entity:
            rospy.logerr("Could not resolve grab_entity")
            return "failed"

        # Resolve which arm to use.
        arm = self.arm_designator.resolve()
        if not arm:
            rospy.logerr("Could not resolve arm")
            return "failed"

        # Origin of the entity's own frame, used as a fallback grasp target.
        goal_map = VectorStamped(0, 0, 0, frame_id=grab_entity.id)

        try:
            # Transform to base link frame
            goal_bl = goal_map.projectToFrame(self.robot.robot_name+'/base_link', tf_listener=self.robot.tf_listener)
            if goal_bl is None:
                rospy.logerr('Transformation of goal to base failed')
                return 'failed'
        except tf2_ros.TransformException as tfe:
            rospy.logerr('Transformation of goal to base failed: {0}'.format(tfe))
            return 'failed'

        # Make sure the torso and the arm are done
        self.robot.torso.wait_for_motion_done(cancel=True)
        arm.wait_for_motion_done(cancel=True)

        # This is needed because the head is not entirely still when the
        # look_at_point function finishes
        rospy.sleep(rospy.Duration(0.5))

        # Resolve the entity again because we want the latest pose
        updated_grab_entity = self.grab_entity_designator.resolve()

        rospy.loginfo("ID to update: {0}".format(grab_entity.id))
        if not updated_grab_entity:
            rospy.logerr("Could not resolve the updated grab_entity, "
                         "this should not happen [CHECK WHY THIS IS HAPPENING]")
            # Fall back to re-associating the original entity with the world model.
            grab_entity = self.associate(original_entity=grab_entity)
        else:
            rospy.loginfo("Updated pose of entity (dx, dy, dz) : (%f, %f, %f)" %
                          (updated_grab_entity.pose.frame.p.x() - grab_entity.pose.frame.p.x(),
                           updated_grab_entity.pose.frame.p.y() - grab_entity.pose.frame.p.y(),
                           updated_grab_entity.pose.frame.p.z() - grab_entity.pose.frame.p.z()))
            grab_entity = updated_grab_entity

        # - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        # Grasp point determination
        grasp_framestamped = self._gpd.get_grasp_pose(grab_entity, arm)

        # - - - - - - - - - - - - - - - - - - - - - - - - - - - -

        # Rebuild the fallback target in case the entity (and hence its frame id) changed.
        goal_map = VectorStamped(0, 0, 0, frame_id=grab_entity.id)

        # In case grasp point determination didn't work
        if not grasp_framestamped:
            # NOTE(review): both branches below return 'failed', so the
            # projectToFrame result is computed but never used — possibly a
            # leftover from a removed fallback-grasp path; verify intent.
            goal_bl = goal_map.projectToFrame(self.robot.robot_name + '/base_link', tf_listener=self.robot.tf_listener)
            if goal_bl is None:
                return 'failed'
            else:
                return 'failed'
        else:
            # We do have a grasp pose, given as a kdl frame in map
            try:
                self.robot.tf_listener.waitForTransform("/map", self.robot.robot_name + "/base_link", rospy.Time(0),
                                                        rospy.Duration(10))
                # Transform to base link frame
                goal_bl = grasp_framestamped.projectToFrame(self.robot.robot_name + "/base_link",
                                                            tf_listener=self.robot.tf_listener)
                if goal_bl is None:
                    return 'failed'
            except tf2_ros.TransformException as tfe:
                rospy.logerr('Transformation of goal to base failed: {0}'.format(tfe))
                return 'failed'

        # Pre-grasp --> this is only necessary when using visual servoing
        # rospy.loginfo('Starting Pre-grasp')
        # if not arm.send_goal(goal_bl.x, goal_bl.y, goal_bl.z, 0, 0, 0,
        #                      frame_id='/'+self.robot.robot_name+'/base_link',
        #                      timeout=20, pre_grasp=True, first_joint_pos_only=True
        #                      ):
        #     rospy.logerr('Pre-grasp failed:')
        #     arm.reset()
        #     arm.send_gripper_goal('close', timeout=None)
        #     return 'failed'

        # Grasp
        rospy.loginfo('Start grasping')
        if not arm.send_goal(goal_bl, timeout=20, pre_grasp=True, allowed_touch_objects=[grab_entity.id]):
            self.robot.speech.speak('I am sorry but I cannot move my arm to the object position', block=False)
            rospy.logerr('Grasp failed')
            arm.reset()
            arm.send_gripper_goal('close', timeout=0.0)
            return 'failed'

        # Close gripper
        arm.send_gripper_goal('close')

        # Bookkeeping: mark the arm as holding this entity.
        arm.occupied_by = grab_entity

        # Lift
        # Re-project the grasp pose: the base may have shifted since the grasp.
        goal_bl = grasp_framestamped.projectToFrame(self.robot.robot_name + "/base_link",
                                                    tf_listener=self.robot.tf_listener)
        rospy.loginfo('Start lifting')
        roll = 0.0

        goal_bl.frame.p.z(goal_bl.frame.p.z() + 0.05)  # Add 5 cm
        goal_bl.frame.M = kdl.Rotation.RPY(roll, 0, 0)  # Update the roll
        rospy.loginfo("Start lift")
        if not arm.send_goal(goal_bl, timeout=20, allowed_touch_objects=[grab_entity.id]):
            rospy.logerr('Failed lift')

        # Retract
        goal_bl = grasp_framestamped.projectToFrame(self.robot.robot_name + '/base_link',
                                                    tf_listener=self.robot.tf_listener)
        rospy.loginfo('Start retracting')
        roll = 0.0

        goal_bl.frame.p.x(goal_bl.frame.p.x() - 0.1)  # Retract 10 cm
        goal_bl.frame.p.z(goal_bl.frame.p.z() + 0.05)  # Go 5 cm higher
        goal_bl.frame.M = kdl.Rotation.RPY(roll, 0.0, 0.0)  # Update the roll
        rospy.loginfo("Start retract")
        if not arm.send_goal(goal_bl, timeout=0.0, allowed_touch_objects=[grab_entity.id]):
            rospy.logerr('Failed retract')
        arm.wait_for_motion_done()
        # Back the base away slightly so the object clears the support surface.
        self.robot.base.force_drive(-0.125, 0, 0, 2.0)

        # Update Kinect once again to make sure the object disappears from ED
        segm_res = self.robot.ed.update_kinect("%s" % grab_entity.id)

        arm.wait_for_motion_done(cancel=True)

        # Carrying pose
        # rospy.loginfo('start moving to carrying pose')
        arm.send_joint_goal('carrying_pose', timeout=0.0)

        result = 'succeeded'
        if self._check_occupancy:
            # Check if the object is present in the gripper
            if arm.object_in_gripper_measurement.is_empty:
                # If state is empty, grasp has failed
                result = "failed"
                rospy.logerr("Gripper is not holding an object")
                self.robot.speech.speak("Whoops, something went terribly wrong")
                arm.occupied_by = None  # Set the object the arm is holding to None
            else:
                # State is holding, grasp succeeded.
                # If unknown: sensor not there, assume gripper is holding and hope for the best
                result = "succeeded"
                if arm.object_in_gripper_measurement.is_unknown:
                    rospy.logwarn("GripperMeasurement unknown")

        # Reset head
        self.robot.head.cancel_goal()

        return result