Example 1
    def _move_robot_to_post_place_pose(self, robot_pose):

        # Offset of post_place_dist along the suction frame's -z axis.
        post_place_mat_in_suction_frame = np.array([
            [1, 0, 0, 0],
            [0, 1, 0, 0],
            [0, 0, 1, -self._post_place_dist],
            [0, 0, 0, 1],
        ])

        pos = robot_pose.position
        ori = robot_pose.orientation
        robot_np_pose = np.array(
            [pos.x, pos.y, pos.z, ori.x, ori.y, ori.z, ori.w])
        robot_mat = gk.quaternion_pose_to_mat(np, robot_np_pose)

        # Express the post-place pose in the world (panda_link0) frame.
        post_place_mat_in_world_frame = np.matmul(
            robot_mat, post_place_mat_in_suction_frame)
        post_place_pose_in_world_frame = gk.mat_to_quaternion_pose(
            np, post_place_mat_in_world_frame)
        translation = post_place_pose_in_world_frame[0:3]
        rotation = post_place_pose_in_world_frame[3:]

        # Broadcast the post-place frame as a child of panda_link0.
        self._tf_broadcaster.sendTransform(
            translation,
            rotation,
            self._object_ros_time,
            "post_place",
            "panda_link0",
        )

        position = Point(translation[0], translation[1], translation[2])

        self.set_end_effector_position_linearly(position, 0.25, 0.25)
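Example 1 leans on gk.quaternion_pose_to_mat and gk.mat_to_quaternion_pose to move between the 7-element [x, y, z, qx, qy, qz, qw] pose vector and a 4x4 homogeneous matrix. A minimal sketch of that round trip and of the -z offset trick, written against ROS's tf.transformations rather than gk (an assumption about what those helpers compute, not the project's actual implementation):

import numpy as np
import tf.transformations as tft

def pose_to_mat(np_pose):
    # [x, y, z, qx, qy, qz, qw] -> 4x4 homogeneous matrix (sketch).
    mat = tft.quaternion_matrix(np_pose[3:7])  # rotation block from the quaternion
    mat[0:3, 3] = np_pose[0:3]                 # translation column
    return mat

def mat_to_pose(mat):
    # 4x4 homogeneous matrix -> [x, y, z, qx, qy, qz, qw] (sketch).
    return np.concatenate([mat[0:3, 3], tft.quaternion_from_matrix(mat)])

# Offsetting a pose along its own -z axis, as in _move_robot_to_post_place_pose
# (all numbers here are made up for illustration):
robot_mat = pose_to_mat(np.array([0.4, 0.0, 0.5, 0.0, 0.0, 0.0, 1.0]))
offset = tft.translation_matrix([0.0, 0.0, -0.1])  # plays the role of -post_place_dist
post_place_pose = mat_to_pose(np.matmul(robot_mat, offset))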
Example 2
    def _object_poses_callback(self, object_poses):
        # Cache each detected object's world-frame pose, keyed by class id.
        for object_pose in object_poses.poses:
            object_pose_in_world_frame = object_pose.pose

            pos = object_pose_in_world_frame.position
            ori = object_pose_in_world_frame.orientation
            object_np_pose_in_world_frame = np.array(
                [pos.x, pos.y, pos.z, ori.x, ori.y, ori.z, ori.w])

            object_mat_in_world_frame = gk.quaternion_pose_to_mat(
                np, object_np_pose_in_world_frame)
            self._object_mats_in_world_frame[
                object_pose.class_id] = object_mat_in_world_frame

            self._object_pose_msgs_in_world_frame[
                object_pose.class_id] = object_pose_in_world_frame
Example 3
    def _pick_poses_callback(self, pick_point_poses):

        if len(pick_point_poses.poses) == 0:
            return

        self._object_ros_time = pick_point_poses.header.stamp
        frame_id = pick_point_poses.header.frame_id

        # FIXME: the current code assumes at most one instance of each class.
        self._class_id_to_instance_id = {}
        for object_pose in pick_point_poses.poses:

            pose_stamped = PoseStamped()
            pose_stamped.header.frame_id = frame_id
            pose_stamped.header.stamp = self._object_ros_time
            pose_stamped.pose = object_pose.pose

            pick_point_pose_in_world_frame = self._tf_listener.transformPose(
                "panda_link0", pose_stamped).pose

            pos = pick_point_pose_in_world_frame.position
            ori = pick_point_pose_in_world_frame.orientation
            pick_point_np_pose_in_world_frame = np.array(
                [pos.x, pos.y, pos.z, ori.x, ori.y, ori.z, ori.w])
            self._pick_point_np_poses_in_world_frame[
                object_pose.class_id] = pick_point_np_pose_in_world_frame

            pick_point_mat_in_world_frame = gk.quaternion_pose_to_mat(
                np, pick_point_np_pose_in_world_frame)
            self._pick_point_mats_in_world_frame[
                object_pose.class_id] = pick_point_mat_in_world_frame

            self._class_id_to_instance_id[
                object_pose.class_id] = object_pose.instance_id

        # Grasp order follows the order of the poses in the message.
        self._ordered_object_ids_to_grasp = [
            object_pose.class_id for object_pose in pick_point_poses.poses
        ]

        ordered_class_names = (ycb_video_dataset.class_names[i]
                               for i in self._ordered_object_ids_to_grasp)
        morefusion.ros.loginfo_blue(
            f"Object tree: {' -> '.join(ordered_class_names)}")
Example 4
    def _move_robot_to_place_pose(self, pre_pose_reached, object_to_robot_mat):
        # Lower the gripper by the pre-placement offset and move there linearly.
        place_position = pre_pose_reached.position
        place_position.z -= self._pre_placement_z_dist
        _, robot_pose = self.set_end_effector_position_linearly(
            place_position, 0.1, 0.1)

        # Recover the object pose from the new gripper pose: the object is
        # assumed to move rigidly with the gripper, so
        # object = robot @ inv(object_to_robot).
        pos = robot_pose.position
        ori = robot_pose.orientation
        robot_np_pose = np.array(
            [pos.x, pos.y, pos.z, ori.x, ori.y, ori.z, ori.w])
        object_mat = np.matmul(
            gk.quaternion_pose_to_mat(np, robot_np_pose),
            np.linalg.inv(object_to_robot_mat),
        )
        object_np_pose = gk.mat_to_quaternion_pose(np, object_mat)

        object_pose = Pose()
        object_pose.position.x = object_np_pose[0]
        object_pose.position.y = object_np_pose[1]
        object_pose.position.z = object_np_pose[2]
        object_pose.orientation.x = object_np_pose[3]
        object_pose.orientation.y = object_np_pose[4]
        object_pose.orientation.z = object_np_pose[5]
        object_pose.orientation.w = object_np_pose[6]
        return robot_pose, object_pose
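Why robot_mat @ inv(object_to_robot_mat) recovers the object pose: object_to_robot_mat is built in Example 5 as inv(object_mat) @ robot_mat at grasp time, so the product collapses to robot_new @ inv(robot_old) @ object_old, i.e. the object is treated as rigidly attached to the gripper. A small numpy check of that identity (all poses made up):

import numpy as np
import tf.transformations as tft

# Hypothetical grasp-time poses of the object and the gripper.
object_mat_at_grasp = tft.translation_matrix([0.5, 0.1, 0.05])
robot_mat_at_grasp = tft.translation_matrix([0.5, 0.1, 0.25])

# As computed in pick_and_place (Example 5).
object_to_robot_mat = np.matmul(
    np.linalg.inv(object_mat_at_grasp), robot_mat_at_grasp)

# Apply some rigid motion to the gripper while the object is held.
motion = tft.translation_matrix([0.0, 0.3, 0.1])
robot_mat_at_place = np.matmul(motion, robot_mat_at_grasp)

# As computed in _move_robot_to_place_pose: the object follows the gripper.
object_mat_at_place = np.matmul(
    robot_mat_at_place, np.linalg.inv(object_to_robot_mat))
assert np.allclose(object_mat_at_place, np.matmul(motion, object_mat_at_grasp))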
Example 5
    def pick_and_place(self):
        while not self._all_objects_removed:

            # object selection #
            # -----------------#

            self._object_id_to_grasp = self._choose_next_object_to_grasp()
            class_name = ycb_video_dataset.class_names[
                self._object_id_to_grasp]
            morefusion.ros.loginfo_blue(f"Picking up {class_name}")

            self._broadcast_object_pose()

            # pose calculation #
            # -----------------#

            pre_grasp_pose = self._get_pre_grasp_pose()
            grasp_pose = self._get_grasp_pose()
            post_grasp_pose = self._get_post_grasp_pose()
            inv_object_mat = np.linalg.inv(
                self._object_mats_in_world_frame[self._object_id_to_grasp])

            # Distractors #
            # ------------#

            self._all_distractors_removed = (
                self._check_if_all_distractors_removed())

            # move to object #
            # ---------------#

            self._move_robot_over_table()
            self._move_robot_to_pre_grasp_pose(pre_grasp_pose)
            robot_pose = self._move_robot_to_grasp_pose(grasp_pose)

            # update robot pose #
            # ------------------#

            pos = robot_pose.position
            ori = robot_pose.orientation
            robot_np_pose = np.array(
                [pos.x, pos.y, pos.z, ori.x, ori.y, ori.z, ori.w])
            object_to_robot_mat = np.matmul(
                inv_object_mat, gk.quaternion_pose_to_mat(np, robot_np_pose))

            # move object #
            # ------------#

            self._suction_grip_object()

            self._update_scene_with_grasp()
            self._publish_moved()

            self._move_robot_to_post_grasp_pose(post_grasp_pose)

            self._move_robot_over_table()
            if self._all_distractors_removed:
                self._move_robot_over_target_box()

                # get possible robot poses
                robot_poses = self._object_pose_interface.get_robot_poses(
                    self._object_id_to_grasp,
                    object_to_robot_mat,
                    self._in_target_box_position,
                )

                # filter poses based on face down threshold
                filtered_robot_poses = self._filter_robot_poses(robot_poses)

                if len(filtered_robot_poses) == 0:
                    raise Exception(
                        "The object's orientation in the gripper is too difficult for placement in the box.\n"
                        "Consider increasing the angle_from_vertical_limit threshold."
                    )

                # make motion
                robot_pose = self._move_robot_to_pre_place_pose(
                    filtered_robot_poses)
                robot_pose, obj_pose = self._move_robot_to_place_pose(
                    robot_pose, object_to_robot_mat)
                self._update_scene_with_placement(obj_pose)
                self._release_suction_grip()
                self._move_robot_to_post_place_pose(robot_pose)
            else:
                self._move_robot_over_distractor_box()
                self._move_robot_to_drop_pose()
                self._update_scene_with_drop()
                self._release_suction_grip()

            # object logging #
            # ---------------#

            self._picked_objects.append(self._object_id_to_grasp)
            self._all_objects_removed = self._check_if_all_objects_removed()

            # reset robot #
            # ------------#

            self.move_to_overlook_pose()
            morefusion.ros.loginfo_blue(f"Completed moving {class_name}")

        morefusion.ros.loginfo_blue("Demo completed")
        self.move_to_reset_pose()
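_filter_robot_poses is referenced in Example 5 but not shown; the surrounding comment and the error message suggest it drops candidate placements whose gripper axis strays too far from vertical. One plausible sketch of such a filter, offered purely as an assumption about its behaviour (the function name, the axis convention, and the use of the tool z-axis are all hypothetical):

import numpy as np
import tf.transformations as tft

def filter_robot_poses_sketch(robot_poses, angle_from_vertical_limit):
    # Keep poses whose tool z-axis stays within the given angle (radians)
    # of pointing straight down in panda_link0. Hypothetical sketch only;
    # robot_poses is an iterable of geometry_msgs/Pose-like objects.
    down = np.array([0.0, 0.0, -1.0])
    kept = []
    for pose in robot_poses:
        o = pose.orientation
        rot = tft.quaternion_matrix([o.x, o.y, o.z, o.w])
        tool_z_in_world = rot[0:3, 2]  # third column: tool z-axis in the world frame
        angle = np.arccos(np.clip(np.dot(tool_z_in_world, down), -1.0, 1.0))
        if angle <= angle_from_vertical_limit:
            kept.append(pose)
    return kept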