def execute_grasp(self, grasp, cam_pose):
    """
    Parameters:
        - grasp (obj: BenchmarkGrasp msg)
        - cam_pose (obj: geometry_msgs.msg.Transform)
    """

    # Transform the grasp pose from the camera frame to the world frame:
    # w_T_grasp = w_T_cam * cam_T_grasp
    gp_quat = grasp.pose.pose.orientation
    gp_pose = grasp.pose.pose.position
    cam_T_grasp = np.eye(4)
    cam_T_grasp[:3, :3] = quaternion_to_matrix([gp_quat.x, gp_quat.y, gp_quat.z, gp_quat.w])
    cam_T_grasp[:3, 3] = np.array([gp_pose.x, gp_pose.y, gp_pose.z])

    cam_quat = cam_pose.rotation
    cam_pos = cam_pose.translation
    w_T_cam = np.eye(4)
    w_T_cam[:3, :3] = quaternion_to_matrix([cam_quat.x, cam_quat.y, cam_quat.z, cam_quat.w])
    w_T_cam[:3, 3] = np.array([cam_pos.x, cam_pos.y, cam_pos.z])

    w_T_grasp = np.matmul(w_T_cam, cam_T_grasp)

    print("w_T_cam\n", w_T_cam)
    print("cam_T_grasp\n", cam_T_grasp)
    print("w_T_grasp\n", w_T_grasp)

    # Create the ROS pose message to send to the robot
    grasp_pose_msg = geometry_msgs.msg.PoseStamped()
    grasp_pose_msg.header.frame_id = 'robot_link0'

    grasp_pose_msg.pose.position.x = w_T_grasp[0, 3]
    grasp_pose_msg.pose.position.y = w_T_grasp[1, 3]
    grasp_pose_msg.pose.position.z = w_T_grasp[2, 3]

    q = matrix_to_quaternion(w_T_grasp[:3, :3])
    grasp_pose_msg.pose.orientation.x = q[0]
    grasp_pose_msg.pose.orientation.y = q[1]
    grasp_pose_msg.pose.orientation.z = q[2]
    grasp_pose_msg.pose.orientation.w = q[3]

    robot_req = RobotGraspRequest()
    robot_req.grasp = grasp_pose_msg
    robot_req.width = grasp.width

    print("request to robot is: \n{}".format(robot_req))

    try:
        reply = self._robot(robot_req)
        print("Service {} reply is: \n{}".format(self._robot.resolved_name, reply))
        return Bool(True)
    except rospy.ServiceException as e:
        print("Service {} call failed: {}".format(self._robot.resolved_name, e))
        return Bool(False)
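# Worked sketch (illustrative only, plain numpy): composing w_T_grasp by hand for
# a camera 1 m above the world origin with identity orientation. The numbers are
# made up; only the matrix composition mirrors execute_grasp() above.
#
#   w_T_cam = np.eye(4)
#   w_T_cam[:3, 3] = [0.0, 0.0, 1.0]        # camera pose in the world frame
#   cam_T_grasp = np.eye(4)
#   cam_T_grasp[:3, 3] = [0.1, 0.0, 0.5]    # grasp pose in the camera frame
#   w_T_grasp = np.matmul(w_T_cam, cam_T_grasp)
#   # w_T_grasp[:3, 3] -> [0.1, 0.0, 1.5]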
def _transform_grasp(self, gp: sb.GraspPoses):
    # --- Transform the grasp pose from icub to robot base coordinates --- #
    # robot_T_gp = robot_T_icub * icub_T_gp
    robot_base_T_icub_base = np.linalg.inv(self._icub_base_T_robot_base)

    icub_gp_ax = [gp.axisangle[0][0], gp.axisangle[1][0],
                  gp.axisangle[2][0], gp.axisangle[3][0]]
    icub_gp_pos = [gp.position[0][0], gp.position[1][0], gp.position[2][0]]

    icub_base_T_icub_gp = np.eye(4)
    icub_base_R_icub_gp = tr.quaternion_to_matrix(
        tr.axis_angle_to_quaternion(icub_gp_ax))
    icub_base_T_icub_gp[:3] = np.append(icub_base_R_icub_gp,
                                        np.array([icub_gp_pos]).T, axis=1)

    robot_base_T_icub_gp = np.matmul(robot_base_T_icub_base, icub_base_T_icub_gp)

    # Fixed offset (-0.10 m along the grasp z axis) between the icub and panda
    # grasp frames
    icub_gp_T_panda_gp = np.eye(4)
    icub_gp_T_panda_gp[2, 3] = -0.10

    # --- Transform the grasp pose from the icub hand ref frame to the robot hand ref frame --- #
    robot_base_T_robot_gp = np.matmul(robot_base_T_icub_gp,
                                      self._icub_hand_T_robot_hand)
    robot_base_T_robot_gp = np.matmul(robot_base_T_robot_gp, icub_gp_T_panda_gp)

    return robot_base_T_robot_gp
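# For reference, the chain computed above collapses to (same names as in
# _transform_grasp(); inv() converts icub-base back to robot-base coordinates):
#
#   robot_base_T_panda_gp = inv(icub_base_T_robot_base)
#                           @ icub_base_T_icub_gp
#                           @ icub_hand_T_robot_hand
#                           @ icub_gp_T_panda_gp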
def _compute_icub_base_T_robot_base(self):
    """
    Compute the transformation from the robot base frame to the icub base frame.

    We assume the robot base frame has x pointing forward, y to the left and
    z upward. The superquadric planner instead takes as reference the icub base
    frame, which has x pointing backward and y to the right. Thus we need to
    transform the pointcloud and the grasp pose to/from the icub base frame.

    Returns
    -------
    icub_base_T_robot_base (np.ndarray): 4x4 homogeneous matrix
    """

    if self._robot.lower().startswith('icub'):
        self._icub_base_T_robot_base = np.eye(4)
    else:
        # rotation of pi around z (flips both x and y)
        icub_quat_robot = np.array([0., 0., 1., 0.])
        self._icub_base_T_robot_base = np.eye(4)
        icub_R_robot = tr.quaternion_to_matrix(icub_quat_robot)
        self._icub_base_T_robot_base[:3] = np.append(icub_R_robot,
                                                     np.array([[0., 0., 0.]]).T,
                                                     axis=1)

    return self._icub_base_T_robot_base
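# Quick sanity check (illustrative; assumes tr.quaternion_to_matrix() takes the
# (x, y, z, w) order used throughout this module): the quaternion (0, 0, 1, 0)
# encodes a rotation of pi about z, which flips x and y as described above:
#
#   >>> tr.quaternion_to_matrix(np.array([0., 0., 1., 0.]))
#   array([[-1.,  0.,  0.],
#          [ 0., -1.,  0.],
#          [ 0.,  0.,  1.]])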
def superqs(self):
    sq_out = sb.vector_superquadric(np.size(self._superqs, 0))
    for i, sq in enumerate(self._superqs):
        sq_out[i] = sq

        # Express the sq pose wrt the robot base, instead of the icub base
        quat_sq = tr.axis_angle_to_quaternion(
            (sq.axisangle[0][0], sq.axisangle[1][0],
             sq.axisangle[2][0], sq.axisangle[3][0]))
        pos_sq = [sq.center[0][0], sq.center[1][0], sq.center[2][0]]

        robot_base_T_icub_base = np.linalg.inv(self._icub_base_T_robot_base)

        icub_base_T_icub_sq = np.eye(4)
        icub_base_R_icub_sq = tr.quaternion_to_matrix(quat_sq)
        icub_base_T_icub_sq[:3] = np.append(icub_base_R_icub_sq,
                                            np.array([pos_sq]).T, axis=1)

        robot_base_T_icub_sq = np.matmul(robot_base_T_icub_base,
                                         icub_base_T_icub_sq)

        vec_aa_sq = tr.quaternion_to_axis_angle(
            tr.matrix_to_quaternion(robot_base_T_icub_sq[:3, :3]))

        sq_out[i].setSuperqOrientation(vec_aa_sq)
        sq_out[i].setSuperqCenter(robot_base_T_icub_sq[:3, 3])

    return sq_out
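# The np.append() pattern used above packs a 3x3 rotation and a 3-vector into
# the top three rows of a 4x4 homogeneous transform. A minimal equivalent
# helper (illustrative, not part of this module) makes the intent explicit:
#
#   def make_transform(R, t):
#       T = np.eye(4)
#       T[:3, :3] = R      # rotation block
#       T[:3, 3] = t       # translation column
#       return T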
def quaternion(self, quat):
    # Validate and store the quaternion, then cache the equivalent rotation matrix
    if len(quat) != 4 or np.abs(np.linalg.norm(quat) - 1.0) > 1e-3:
        raise ValueError('Invalid quaternion')

    self._quaternion = np.array(quat)
    rotation = transformations.quaternion_to_matrix(self._quaternion)
    self._check_valid_rotation(rotation)
    self._rotation = rotation * 1.
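# Usage sketch (illustrative; assumes this method is exposed as a
# @quaternion.setter on a pose/rotation class, quaternions in (x, y, z, w)):
#
#   pose.quaternion = [0., 0., 0., 1.]   # identity -> self._rotation == np.eye(3)
#   pose.quaternion = [0., 0., 1., 0.]   # pi about z
#   pose.quaternion = [1., 1., 0., 0.]   # raises ValueError (norm != 1)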
def create_camera_data(self, pointcloud: np.ndarray, cam_pos: np.ndarray,
                       cam_quat: np.ndarray, colors=np.ndarray(shape=(0, )),
                       cam_frame="camera_link"):
    """
    Create the CameraData object in the format expected by the
    superquadric-based grasp planner.

    Parameters
    ----------
    pointcloud (np.ndarray): organized cloud of 3D points, shape (3, H, W)
    colors (np.ndarray): array of RGB colors
    cam_pos (np.ndarray): 3D position of the camera expressed wrt the robot base
    cam_quat (np.ndarray): quaternion (x, y, z, w) of the camera orientation
        expressed wrt the robot base
    cam_frame (str): name of the camera frame

    Returns
    -------
    CameraData: object that stores the input data required by plan_grasp()
    """

    self._camera_data = CameraData()
    self._pointcloud.deletePoints()

    # ----------------------------- #
    # --- Create the pointcloud --- #
    # ----------------------------- #
    # We need to express the pointcloud wrt the icub base frame:
    # icub_base_T_cloud = icub_base_T_robot_base * robot_base_T_cam * cam_points

    # create robot_base_T_cam
    robot_T_cam = np.eye(4)
    robot_R_cam = tr.quaternion_to_matrix(cam_quat)
    robot_T_cam[:3] = np.append(robot_R_cam, np.array([cam_pos]).T, axis=1)

    # create icub_base_T_cam
    icub_T_cam = np.matmul(self._icub_base_T_robot_base, robot_T_cam)

    # fill pointcloud
    sb_points = sb.deque_Vector3d()
    sb_colors = sb.vector_vector_uchar()

    for i in range(0, pointcloud.shape[1]):
        for j in range(0, pointcloud.shape[2]):
            pt = np.array([pointcloud[0][i][j],
                           pointcloud[1][i][j],
                           pointcloud[2][i][j]])
            icub_pt = icub_T_cam.dot(np.append(pt, 1))

            # skip invalid depth readings
            if np.isnan(icub_pt[0]):
                continue

            if colors.size != 0:
                col = colors[i][:3]
                if type(col) is np.ndarray:
                    col = col.astype(int)
                    col = col.tolist()
                else:
                    col = [int(c) for c in col]
            else:
                col = [255, 255, 0]

            sb_points.push_back(icub_pt[:3])
            sb_colors.push_back(col)

    if sb_points.size() >= self.cfg['sq_model']['minimum_points']:
        self._pointcloud.setPoints(sb_points)
        self._pointcloud.setColors(sb_colors)
    else:
        warnings.warn("Not enough points in the pointcloud")

    self._camera_data.pc_img = self._pointcloud

    # ------------------- #
    # --- camera info --- #
    # ------------------- #
    self._camera_data.extrinsic_params['position'] = cam_pos
    self._camera_data.extrinsic_params['rotation'] = robot_R_cam
    self._camera_data.intrinsic_params['cam_frame'] = cam_frame

    return self._camera_data
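# Usage sketch (illustrative values; `planner` is a hypothetical instance of the
# class owning this method, and the cloud is an organized (3, H, W) array of
# points expressed in the camera frame):
#
#   cloud = np.random.rand(3, 480, 640)
#   cam_pos = np.array([0.5, 0.0, 0.4])       # camera wrt the robot base
#   cam_quat = np.array([0., 0., 0., 1.])     # (x, y, z, w), identity rotation
#   camera_data = planner.create_camera_data(cloud, cam_pos, cam_quat)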