def transform_grasp_to_world(self, grasp_pose, camera_viewpoint):
        """Refer the grasp pose to 6D, if the camera viewpoint is given

        Parameters
        ---------
        grasp_pose: geometry_msgs/PoseStamped
            The candidate to transform
        camera_viewpoint: geometry_msgs/PoseStamped
            The camera viewpoint wrt world reference frame

        Returns
        -------
            geometry_msgs/PoseStamped
            The candidate in the world reference frame
        """
        # Need to transform the grasp pose from camera frame to world frame
        # w_T_cam : camera pose in world ref frame
        # cam_T_grasp : grasp pose in camera ref frame
        # w_T_grasp = w_T_cam * cam_T_grasp

        # Construct the 4x4 affine transf matrices from ROS poses
        grasp_quat = grasp_pose.pose.orientation
        grasp_pos = grasp_pose.pose.position
        cam_T_grasp = np.eye(4)
        cam_T_grasp[:3, :3] = quaternion_to_matrix(
            [grasp_quat.x, grasp_quat.y, grasp_quat.z, grasp_quat.w])
        cam_T_grasp[:3, 3] = np.array([grasp_pos.x, grasp_pos.y, grasp_pos.z])

        cam_quat = camera_viewpoint.pose.orientation
        cam_pos = camera_viewpoint.pose.position
        w_T_cam = np.eye(4)
        w_T_cam[:3, :3] = quaternion_to_matrix(
            [cam_quat.x, cam_quat.y, cam_quat.z, cam_quat.w])
        w_T_cam[:3, 3] = np.array([cam_pos.x, cam_pos.y, cam_pos.z])

        # Obtain the w_T_grasp affine transformation
        w_T_grasp = np.matmul(w_T_cam, cam_T_grasp)

        if DEBUG:
            print("[DEBUG] Grasp pose reference system transform")
            print("w_T_cam\n ", w_T_cam)
            print("cam_T_grasp\n ", cam_T_grasp)
            print("w_T_grasp\n ", w_T_grasp)

        # Create and return a PoseStamped object
        w_T_grasp_quat = matrix_to_quaternion(w_T_grasp[:3, :3])
        result_pose = PoseStamped()
        result_pose.pose.orientation.x = w_T_grasp_quat[0]
        result_pose.pose.orientation.y = w_T_grasp_quat[1]
        result_pose.pose.orientation.z = w_T_grasp_quat[2]
        result_pose.pose.orientation.w = w_T_grasp_quat[3]
        result_pose.pose.position.x = w_T_grasp[0, 3]
        result_pose.pose.position.y = w_T_grasp[1, 3]
        result_pose.pose.position.z = w_T_grasp[2, 3]
        result_pose.header = camera_viewpoint.header

        return result_pose
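    # quaternion_to_matrix() and matrix_to_quaternion() are helper functions
    # that are not shown in this snippet. A minimal equivalent sketch, assuming
    # scipy is available and quaternions are in (x, y, z, w) order as above
    # (the method name is illustrative, not part of the original class):
    def _example_quaternion_round_trip(self, quat_xyzw):
        """Convert an (x, y, z, w) quaternion to a 3x3 rotation matrix and back."""
        from scipy.spatial.transform import Rotation
        rotation_matrix = Rotation.from_quat(quat_xyzw).as_matrix()
        quat_back = Rotation.from_matrix(rotation_matrix).as_quat()
        return rotation_matrix, quat_back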
    def _transform_grasp(self, gp: sb.GraspPoses):
        # --- transform grasp pose from icub to robot base coordinates --- #
        # robot_T_gp = robot_T_icub * icub_T_gp

        robot_base_T_icub_base = np.linalg.inv(self._icub_base_T_robot_base)

        icub_gp_ax = [
            gp.axisangle[0][0], gp.axisangle[1][0], gp.axisangle[2][0],
            gp.axisangle[3][0]
        ]
        icub_gp_pos = [gp.position[0][0], gp.position[1][0], gp.position[2][0]]

        icub_base_T_icub_gp = np.eye(4)
        icub_base_R_icub_gp = tr.quaternion_to_matrix(
            tr.axis_angle_to_quaternion(icub_gp_ax))
        icub_base_T_icub_gp[:3] = np.append(icub_base_R_icub_gp,
                                            np.array([icub_gp_pos]).T,
                                            axis=1)

        robot_base_T_icub_gp = np.matmul(robot_base_T_icub_base,
                                         icub_base_T_icub_gp)
        # Offset between the icub grasp target frame and the panda grasp frame
        icub_gp_T_panda_gp = np.eye(4)
        # icub_gp_T_panda_gp[2,3] = -0.10
        icub_gp_T_panda_gp[:3, 3] = self._grasp_offset

        # --- transform grasp pose from icub hand ref frame to robot hand ref frame --- #
        robot_base_T_robot_gp = np.matmul(robot_base_T_icub_gp,
                                          self._icub_hand_T_robot_hand)
        robot_base_T_robot_gp1 = np.matmul(robot_base_T_robot_gp,
                                           icub_gp_T_panda_gp)

        return robot_base_T_robot_gp1
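    # The planner expresses orientations as [axis_x, axis_y, axis_z, angle]
    # (see gp.axisangle above). A minimal sketch of that conversion, assuming
    # scipy is available; the method name is illustrative, not part of the
    # planner API:
    def _example_axis_angle_to_matrix(self, axis_angle):
        """Convert [ax, ay, az, angle] (angle in radians) to a 3x3 rotation matrix."""
        from scipy.spatial.transform import Rotation
        axis = np.asarray(axis_angle[:3], dtype=float)
        axis = axis / np.linalg.norm(axis)
        return Rotation.from_rotvec(axis * float(axis_angle[3])).as_matrix()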
    def _compute_icub_base_T_robot_base(self):
        """ Compute the transformation from the robot base to the icub base

         We suppose the robot base frame has x pointing forward, y to the left, z upward.
         The superquadric planner considers as reference the icub base frame, which has x pointing backward and y to the right.
         Thus we need to transform pointcloud and grasp pose to/from the icub base frame.

        Returns
        -------
        icub_base_T_robot_base (np.ndarray): 4x4 homogeneous transform expressing the robot base frame wrt the icub base frame

        """

        if self._robot.lower().startswith('icub'):
            self._icub_base_T_robot_base = np.eye(4)

        else:
            # rotation of pi around z
            icub_quat_robot = np.array([0., 0., 1., 0.])

            self._icub_base_T_robot_base = np.eye(4)
            icub_R_robot = tr.quaternion_to_matrix(icub_quat_robot)
            # The two base frames share the same origin, so only the rotation
            # block of the homogeneous transform needs to be filled in
            self._icub_base_T_robot_base[:3, :3] = icub_R_robot

        return self._icub_base_T_robot_base
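    # Quick numeric check of the convention above (illustrative only): the
    # quaternion (0., 0., 1., 0.) in (x, y, z, w) order is a rotation of pi
    # about z, whose matrix is diag(-1, -1, 1), so the robot's x-forward and
    # y-left axes map onto the icub's x-backward and y-right axes:
    def _example_check_icub_base_rotation(self):
        icub_R_robot = tr.quaternion_to_matrix(np.array([0., 0., 1., 0.]))
        assert np.allclose(icub_R_robot, np.diag([-1., -1., 1.]))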
    def superqs(self):
        """Return the estimated superquadrics with their pose expressed in the robot base frame"""
        sq_out = sb.vector_superquadric(np.size(self._superqs, 0))
        for i, sq in enumerate(self._superqs):
            sq_out[i] = sq

            # Express the sq pose wrt the robot base, instead of the icub base
            quat_sq = tr.axis_angle_to_quaternion(
                (sq.axisangle[0][0], sq.axisangle[1][0], sq.axisangle[2][0],
                 sq.axisangle[3][0]))
            pos_sq = [sq.center[0][0], sq.center[1][0], sq.center[2][0]]

            robot_base_T_icub_base = np.linalg.inv(
                self._icub_base_T_robot_base)

            icub_base_T_icub_sq = np.eye(4)
            icub_base_R_icub_sq = tr.quaternion_to_matrix(quat_sq)
            icub_base_T_icub_sq[:3] = np.append(icub_base_R_icub_sq,
                                                np.array([pos_sq]).T,
                                                axis=1)

            robot_base_T_icub_sq = np.matmul(robot_base_T_icub_base,
                                             icub_base_T_icub_sq)

            vec_aa_sq = tr.quaternion_to_axis_angle(
                tr.matrix_to_quaternion(robot_base_T_icub_sq[:3, :3]))

            sq_out[i].setSuperqOrientation(vec_aa_sq)
            sq_out[i].setSuperqCenter(robot_base_T_icub_sq[:3, 3])

        return sq_out
    def quaternion(self, quat):
        # Convert quaternions
        if len(quat) != 4 or np.abs(np.linalg.norm(quat) - 1.0) > 1e-3:
            raise ValueError('Invalid quaternion')

        self._quaternion = np.array(quat)
        rotation = transformations.quaternion_to_matrix(self._quaternion)

        self._check_valid_rotation(rotation)
        self._rotation = rotation * 1.
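    # _check_valid_rotation() is not shown in this snippet. A minimal sketch
    # of such a check, assuming it should reject non-orthonormal matrices and
    # reflections (the name mirrors the call above, the body is illustrative):
    def _example_check_valid_rotation(self, rotation):
        if rotation.shape != (3, 3):
            raise ValueError('Rotation matrix must be 3x3')
        if not np.allclose(rotation.dot(rotation.T), np.eye(3), atol=1e-6):
            raise ValueError('Rotation matrix must be orthonormal')
        if not np.isclose(np.linalg.det(rotation), 1.0, atol=1e-6):
            raise ValueError('Rotation matrix must have determinant +1')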
    def dump_grasps(self, grasps: list):
        """Write the grasp candidate poses and scores to text files in a new dump directory"""

        # Find a suitable dir to store candidates each time this function is called
        import os
        dump_dir_base = '/workspace/dump_'
        dump_dir_idx = 0
        while os.path.exists(dump_dir_base + str(dump_dir_idx)):
            dump_dir_idx += 1
        dump_dir = dump_dir_base + str(dump_dir_idx)
        os.mkdir(dump_dir)
        poses_filename = os.path.join(dump_dir, 'grasp_candidates.txt')
        scores_filename = os.path.join(dump_dir, 'grasp_candidates_scores.txt')
        width_filename = os.path.join(dump_dir, 'grasp_candidates_width.txt')

        with open(poses_filename, 'w') as poses_file, \
                open(scores_filename, 'w') as scores_file:
            # For each candidate, get the 4x4 affine matrix first
            for candidate in grasps:
                candidate_pose_orientation = candidate.pose.pose.orientation
                candidate_pose_position = candidate.pose.pose.position
                candidate_pose_score = candidate.score.data
                candidate_pose_affine = np.eye(4)
                candidate_pose_affine[:3, :3] = quaternion_to_matrix([
                    candidate_pose_orientation.x, candidate_pose_orientation.y,
                    candidate_pose_orientation.z, candidate_pose_orientation.w
                ])
                candidate_pose_affine[:3, 3] = np.array([
                    candidate_pose_position.x, candidate_pose_position.y,
                    candidate_pose_position.z
                ])
                # Format expected by the downstream application: one candidate
                # per line, written as four bracketed, comma-separated rows
                for idx in range(candidate_pose_affine.shape[0]):
                    terminator = '\n' if idx == candidate_pose_affine.shape[0] - 1 else ','
                    row_string = '[{}, {}, {}, {}]{}'.format(
                        candidate_pose_affine[idx, 0],
                        candidate_pose_affine[idx, 1],
                        candidate_pose_affine[idx, 2],
                        candidate_pose_affine[idx, 3], terminator)
                    poses_file.write(row_string)

                score_string = '{}\n'.format(str(candidate_pose_score))
                scores_file.write(score_string)

        return Bool(True)
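    # dump_grasps() writes one candidate per line as four bracketed rows.
    # A minimal sketch of how such a file could be parsed back into 4x4
    # matrices (the method name and parsing assumptions are illustrative):
    def _example_load_dumped_candidates(self, poses_filename):
        candidates = []
        with open(poses_filename, 'r') as poses_file:
            for line in poses_file:
                # Each line looks like "[r00, r01, r02, r03],[r10, ...],..."
                segments = [seg for seg in line.strip().split('],') if seg]
                matrix = [[float(value) for value in seg.strip('[]').split(',')]
                          for seg in segments]
                candidates.append(np.array(matrix))
        return candidates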
    def create_camera_data(self,
                           pointcloud: np.ndarray,
                           cam_pos: np.ndarray,
                           cam_quat: np.ndarray,
                           colors=np.ndarray(shape=(0, )),
                           cam_frame="camera_link"):
        """ Create the CameraData object in the right format expected by the superquadric-based grasp planner

        Parameters
        ---------
            pointcloud (np.ndarray): array of 3D points of shape (n,3)
            cam_pos (np.ndarray): 3D position of the camera expressed wrt the robot base
            cam_quat (np.ndarray): (x, y, z, w) quaternion of the camera orientation expressed wrt the robot base
            colors (np.ndarray): array of RGB colors of shape (n,3), optional
            cam_frame (str): name of the camera reference frame

        Returns:
            CameraData: object that stores the input data required by plan_grasp()

        """

        self._camera_data = CameraData()
        self._pointcloud.deletePoints()

        # ----------------------------- #
        # --- Create the pointcloud --- #
        # ----------------------------- #

        # We need to express the pointcloud wrt the icub base frame:
        #     icub_base_T_cloud = icub_base_T_robot_base * robot_base_T_cam * cam_points

        # create robot_base_T_cloud
        robot_T_cam = np.eye(4)
        robot_R_cam = tr.quaternion_to_matrix(cam_quat)
        robot_T_cam[:3] = np.append(robot_R_cam, np.array([cam_pos]).T, axis=1)

        # create icub_base_T_cam
        icub_T_cam = np.matmul(self._icub_base_T_robot_base, robot_T_cam)

        # fill pointcloud
        sb_points = sb.deque_Vector3d()
        sb_colors = sb.vector_vector_uchar()

        for i in range(0, pointcloud.shape[0]):

            pt = np.array(
                [pointcloud[i, 0], pointcloud[i, 1], pointcloud[i, 2]])

            icub_pt = icub_T_cam.dot(np.append(pt, 1))
            # Skip points with any NaN coordinate
            if np.isnan(icub_pt[:3]).any():
                continue

            if colors.size != 0:
                col = colors[i][:3]
                if type(col) is np.ndarray:
                    col = col.astype(int)
                    col = col.tolist()
                else:
                    col = [int(c) for c in col]
            else:
                col = [255, 255, 0]

            sb_points.push_back(icub_pt[:3])
            sb_colors.push_back(col)

        if sb_points.size() >= self.cfg['sq_model']['minimum_points']:
            self._pointcloud.setPoints(sb_points)
            self._pointcloud.setColors(sb_colors)
        else:
            warnings.warn("Not enough points in the pointcloud")

        self._camera_data.pc_img = self._pointcloud

        # ------------------- #
        # --- camera info --- #
        # ------------------- #
        self._camera_data.extrinsic_params['position'] = cam_pos
        self._camera_data.extrinsic_params['rotation'] = robot_R_cam

        self._camera_data.intrinsic_params['cam_frame'] = cam_frame

        return self._camera_data
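    # The loop above transforms one point at a time. An equivalent vectorized
    # sketch of the same frame change (illustrative only, not part of the
    # planner API): append a homogeneous coordinate, apply icub_T_cam once,
    # then drop rows containing NaNs.
    def _example_transform_points(self, pointcloud, icub_T_cam):
        ones = np.ones((pointcloud.shape[0], 1))
        points_h = np.hstack((pointcloud[:, :3], ones))        # (n, 4)
        icub_points = (icub_T_cam @ points_h.T).T[:, :3]       # (n, 3)
        return icub_points[~np.isnan(icub_points).any(axis=1)]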
    def user_cmd(self, req):
        """New grasp request handler

        Parameters
        ---------
        req: :obj:`ROS ServiceRequest`
            ROS `ServiceRequest` for grasp planner service.
        """
        if self._verbose:
            print("Received new command from user...")

        available_commands_dict = {}
        available_commands_dict['help'] = "display available commands"
        available_commands_dict[
            'grasp'] = "compute a new grasp and send it to the robot for execution"
        available_commands_dict[
            'get_candidates [n]'] = "computes n candidates and saves them to file"
        available_commands_dict[
            'abort'] = "interrupt grasp computation / do not send computed pose to the robot"

        available_commands_string = ''.join([
            "{}: {}\n".format(cmd, available_commands_dict[cmd])
            for cmd in available_commands_dict.keys()
        ])

        valid_command = False
        for cmd_string in available_commands_dict.keys():
            if cmd_string.split()[0] in req.cmd.data:
                valid_command = True
        if not valid_command:
            rospy.logerr("Invalid command")
            rospy.loginfo(
                "Available commands are: {}".format(available_commands_string))
            return Bool(False)

        self._abort = False

        if req.cmd.data == "help":
            print("Available commands are:\n", available_commands_string)
            return Bool(True)

        elif req.cmd.data == "grasp" or (req.cmd.data.split()[0]
                                         == "get_candidates"
                                         and len(req.cmd.data.split()) == 2):

            # --- get images --- #
            if self._verbose:
                print("... waiting for images ...")

            count = 0
            # Poll (up to ~10 s) for a new camera frame before planning
            while not self._new_camera_data and count < 1000:
                count += 1
                rospy.sleep(0.01)

            if count >= 1000:
                print("...no images received")
                return Bool(False)

            self._new_camera_data = False

            # Create the service request
            planner_req = GraspPlannerRequest()

            # Fill in the 2D camera data
            planner_req.color_image = self._rgb_msg
            planner_req.depth_image = self._depth_msg
            planner_req.camera_info = self._cam_info_msg

            # Fill in the camera viewpoint field
            planner_req.view_point.header = self._camera_pose.header
            planner_req.view_point.pose.orientation = self._camera_pose.transform.rotation
            planner_req.view_point.pose.position = self._camera_pose.transform.translation

            # Transform the point cloud in the root reference frame and include it
            transform = self._camera_pose.transform
            pc_in = self._pc_msg
            tr_matrix = np.identity(4)
            tr_matrix[:3, :3] = quaternion_to_matrix([
                transform.rotation.x, transform.rotation.y,
                transform.rotation.z, transform.rotation.w
            ])
            tr_matrix[:3, 3] = [
                transform.translation.x, transform.translation.y,
                transform.translation.z
            ]
            points = []
            for p_in in read_points(pc_in,
                                    skip_nans=False,
                                    field_names=("x", "y", "z", "rgb")):
                p_transformed = np.dot(
                    tr_matrix, np.array([p_in[0], p_in[1], p_in[2], 1.0]))
                p_out = []
                p_out.append(p_transformed[0])
                p_out.append(p_transformed[1])
                p_out.append(p_transformed[2])
                p_out.append(p_in[3])
                points.append(p_out)

            header = pc_in.header
            header.frame_id = self._camera_pose.header.frame_id
            pc_out = sensor_msgs.point_cloud2.create_cloud(header=header,
                                                           fields=pc_in.fields,
                                                           points=points)

            planner_req.cloud = pc_out

            # Set number of candidates
            planner_req.n_of_candidates = NUMBER_OF_CANDIDATES if req.cmd.data == "grasp" else int(
                req.cmd.data.split()[1])

            if self._verbose:
                print("... send request to server ...")

            # Fill in the aruco board pose wrt the world reference frame

            planner_req.aruco_board.position = self._aruco_board_pose.transform.translation
            planner_req.aruco_board.orientation = self._aruco_board_pose.transform.rotation
            planner_req.grasp_filter_flag = self._enable_grasp_filter

            # Plan for grasps
            try:
                reply = self._grasp_planner(planner_req)
                if self._verbose:
                    print("Service {} reply is: \n{}".format(
                        self._grasp_planner.resolved_name, reply))
            except rospy.ServiceException as e:
                print("Service {} call failed: {}".format(
                    self._grasp_planner.resolved_name, e))
                return Bool(False)

            # Handle abort command
            if self._abort:
                rospy.loginfo("grasp execution was aborted by the user")
                return Bool(False)

            # If we are required to grasp, test the feasibility of the candidates first
            # and send the best grasp between the feasible candidates
            if req.cmd.data == "grasp":
                feasible_candidates = self.get_feasible_grasps(
                    reply.grasp_candidates)
                rospy.loginfo(
                    f"Feasible candidates: {len(feasible_candidates)}/{len(reply.grasp_candidates)}"
                )
                if not len(feasible_candidates):
                    return Bool(False)
                else:
                    return self.execute_grasp(
                        self.get_best_grasp(feasible_candidates))
            else:
                return self.dump_grasps(reply.grasp_candidates)

        elif req.cmd.data == "abort":
            self._abort = True
            return Bool(True)
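    # get_feasible_grasps(), get_best_grasp() and execute_grasp() are not
    # shown in this snippet. A minimal sketch of a score-based selection,
    # assuming each candidate carries its score in candidate.score.data as in
    # dump_grasps() (illustrative only):
    def _example_get_best_grasp(self, candidates):
        return max(candidates, key=lambda candidate: candidate.score.data)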
    def read_images(self, req : GraspPlannerRequest):
        """Read images as a CameraData class from a service request

        Parameters
        ----------
        req : GraspPlannerRequest
            ROS service request for the grasp planning service

        Returns
        -------
        CameraData
            Object containing the images, camera parameters and object point cloud
        """

        # Get color, depth and camera parameters from request
        camera_info = req.camera_info
        viewpoint = req.view_point
        try:
            cv2_color = self.cv_bridge.imgmsg_to_cv2(req.color_image, desired_encoding='rgb8')

            raw_depth = self.cv_bridge.imgmsg_to_cv2(req.depth_image, desired_encoding='passthrough')
            cv2_depth = np.array(raw_depth, dtype=np.float32)

            # Scale depth values from millimeters to meters
            cv2_depth *= 0.001

            # Replace NaN readings with a large (far) depth value
            nans_idx = np.isnan(cv2_depth)
            cv2_depth[nans_idx] = 1000.0

        except CvBridgeError as cv_bridge_exception:
            rospy.logerr(cv_bridge_exception)
            raise rospy.ServiceException(str(cv_bridge_exception))

        # Check for image and depth size
        if (cv2_color.shape[0] != cv2_depth.shape[0]) or (cv2_color.shape[1] != cv2_depth.shape[1]):
            msg = "Mismatch between depth shape {}x{} and color shape {}x{}".format(cv2_depth.shape[0],
                                                                                    cv2_depth.shape[1],
                                                                                    cv2_color.shape[0],
                                                                                    cv2_color.shape[1])
            rospy.logerr(msg)
            raise rospy.ServiceException(msg)

        # Create CameraData structure
        # CameraInfo intrinsic camera matrix for the raw (distorted) images:
        #     [fx  0 cx]
        # K = [ 0 fy cy]
        #     [ 0  0  1]
        camera_position = np.array([viewpoint.pose.position.x,
                                    viewpoint.pose.position.y,
                                    viewpoint.pose.position.z])
        camera_orientation = quaternion_to_matrix([viewpoint.pose.orientation.x,
                                           viewpoint.pose.orientation.y,
                                           viewpoint.pose.orientation.z,
                                           viewpoint.pose.orientation.w])
        camera_extrinsic_matrix = np.eye(4)
        camera_extrinsic_matrix[:3,:3] = camera_orientation
        camera_extrinsic_matrix[:3,3] = camera_position

        #  If available, get the object point cloud and transform it in the
        #  camera ref frame
        obj_cloud = self.npy_from_pc2(req.cloud)[0]
        obj_cloud = self.transform_pc_to_camera_frame(obj_cloud, camera_extrinsic_matrix) if obj_cloud is not None else None

        camera_data = self.create_camera_data(rgb_image=cv2_color,
                                              depth_image=cv2_depth,
                                              cam_intrinsic_frame=camera_info.header.frame_id,
                                              cam_extrinsic_matrix=camera_extrinsic_matrix,
                                              fx=camera_info.K[0],
                                              fy=camera_info.K[4],
                                              cx=camera_info.K[2],
                                              cy=camera_info.K[5],
                                              skew=0.0,
                                              w=camera_info.width,
                                              h=camera_info.height,
                                              obj_cloud=obj_cloud)

        return camera_data
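    # transform_pc_to_camera_frame() is not shown in this snippet. A minimal
    # sketch of the expected behavior, assuming the cloud is an (n, 3) array
    # expressed in the same root frame as the camera extrinsic matrix
    # (the name mirrors the call above, the body is illustrative):
    def _example_transform_pc_to_camera_frame(self, cloud, camera_extrinsic_matrix):
        cam_T_root = np.linalg.inv(camera_extrinsic_matrix)
        ones = np.ones((cloud.shape[0], 1))
        cloud_h = np.hstack((cloud[:, :3], ones))
        return (cam_T_root @ cloud_h.T).T[:, :3]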