def execute_policy(self, rgbd_image_state, grasping_policy,
                   grasp_pose_publisher, pose_frame):
    """Executes a grasping policy on an RgbdImageState.

    Parameters
    ----------
    rgbd_image_state: :obj:`RgbdImageState`
        RgbdImageState from the perception module encapsulating the depth
        and color images along with the camera intrinsics
    grasping_policy: :obj:`GraspingPolicy`
        grasping policy to use
    grasp_pose_publisher: :obj:`Publisher`
        ROS publisher to publish the planned grasp's ROS Pose, for
        visualization only
    pose_frame: :obj:`str`
        frame of reference in which to publish the pose
    """
    # execute the policy's action
    rospy.loginfo('Planning Grasp')
    grasp_planning_start_time = time.time()
    grasp = grasping_policy(rgbd_image_state)

    # create the GQCNNGrasp return msg and populate it
    gqcnn_grasp = GQCNNGrasp()
    gqcnn_grasp.grasp_success_prob = grasp.q_value
    gqcnn_grasp.pose = grasp.grasp.pose().pose_msg

    # create and publish the pose alone for easy visualization of the
    # grasp pose in rviz
    pose_stamped = PoseStamped()
    pose_stamped.pose = grasp.grasp.pose().pose_msg
    header = Header()
    header.stamp = rospy.Time.now()
    header.frame_id = pose_frame
    pose_stamped.header = header
    grasp_pose_publisher.publish(pose_stamped)

    # log planning time, optionally visualize, and return the GQCNNGrasp msg
    rospy.loginfo('Total grasp planning time: ' +
                  str(time.time() - grasp_planning_start_time) + ' secs.')

    if self.cfg['vis']['vis_final_grasp']:
        vis.imshow(rgbd_image_state.rgbd_im)
        vis.grasp(grasp.grasp, scale=1.5, show_center=False,
                  show_axis=True)
        vis.show()

    return gqcnn_grasp
def execute_policy(self, rgbd_image_state, grasping_policy,
                   grasp_pose_publisher, pose_frame):
    """Executes a grasping policy on an `RgbdImageState`.

    Parameters
    ----------
    rgbd_image_state: :obj:`RgbdImageState`
        `RgbdImageState` from BerkeleyAutomation/perception encapsulating
        the depth and color images along with the camera intrinsics.
    grasping_policy: :obj:`GraspingPolicy`
        Grasping policy to use.
    grasp_pose_publisher: :obj:`Publisher`
        ROS publisher to publish the pose of the planned grasp for
        visualization.
    pose_frame: :obj:`str`
        Frame of reference to publish the pose in.
    """
    # Execute the policy's action.
    grasp_planning_start_time = time.time()
    grasp = grasping_policy(rgbd_image_state)

    # Create `GQCNNGrasp` return msg and populate it.
    gqcnn_grasp = GQCNNGrasp()
    gqcnn_grasp.q_value = grasp.q_value
    gqcnn_grasp.pose = grasp.grasp.pose().pose_msg
    if isinstance(grasp.grasp, Grasp2D):
        gqcnn_grasp.grasp_type = GQCNNGrasp.PARALLEL_JAW
    elif isinstance(grasp.grasp, SuctionPoint2D):
        gqcnn_grasp.grasp_type = GQCNNGrasp.SUCTION
    else:
        rospy.logerr("Grasp type not supported!")
        raise rospy.ServiceException("Grasp type not supported!")

    # Store grasp representation in image space.
    gqcnn_grasp.center_px[0] = grasp.grasp.center[0]
    gqcnn_grasp.center_px[1] = grasp.grasp.center[1]
    gqcnn_grasp.angle = grasp.grasp.angle
    gqcnn_grasp.depth = grasp.grasp.depth
    gqcnn_grasp.thumbnail = grasp.image.rosmsg

    # Create and publish the pose alone for easy visualization of the
    # grasp pose in Rviz.
    pose_stamped = PoseStamped()
    pose_stamped.pose = grasp.grasp.pose().pose_msg
    header = Header()
    header.stamp = rospy.Time.now()
    header.frame_id = pose_frame
    pose_stamped.header = header
    grasp_pose_publisher.publish(pose_stamped)

    # Log planning time and return the `GQCNNGrasp` msg.
    rospy.loginfo("Total grasp planning time: " +
                  str(time.time() - grasp_planning_start_time) + " secs.")

    return gqcnn_grasp
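# --- Hypothetical consumer sketch (not part of the original source). ---
# Illustrates how a caller might branch on the `grasp_type` field of the
# returned `GQCNNGrasp` message using the PARALLEL_JAW/SUCTION constants
# referenced above. The import path `gqcnn.msg` and the
# `execute_gripper`/`execute_suction` callables are assumptions made for
# illustration only.
from gqcnn.msg import GQCNNGrasp


def dispatch_grasp(gqcnn_grasp, execute_gripper, execute_suction):
    """Route a planned grasp to the matching end-effector routine."""
    if gqcnn_grasp.grasp_type == GQCNNGrasp.PARALLEL_JAW:
        # Antipodal grasp planned for a parallel-jaw gripper.
        return execute_gripper(gqcnn_grasp.pose, gqcnn_grasp.q_value)
    elif gqcnn_grasp.grasp_type == GQCNNGrasp.SUCTION:
        # Grasp planned for a suction-cup end-effector.
        return execute_suction(gqcnn_grasp.pose, gqcnn_grasp.q_value)
    raise ValueError('Unknown grasp type: %d' % gqcnn_grasp.grasp_type)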
def execute_policy(self, rgbd_image_state, grasping_policy,
                   grasp_pose_publisher, pose_frame):
    """Executes a grasping policy on an RgbdImageState.

    Parameters
    ----------
    rgbd_image_state: :obj:`RgbdImageState`
        RgbdImageState from the perception module encapsulating the depth
        and color images along with the camera intrinsics
    grasping_policy: :obj:`GraspingPolicy`
        grasping policy to use
    grasp_pose_publisher: :obj:`Publisher`
        ROS publisher to publish the planned grasp's ROS Pose, for
        visualization only
    pose_frame: :obj:`str`
        frame of reference in which to publish the pose
    """
    # execute the policy's action
    grasp_planning_start_time = time.time()
    grasp = grasping_policy(rgbd_image_state)

    # create the GQCNNGrasp return msg and populate it
    gqcnn_grasp = GQCNNGrasp()
    gqcnn_grasp.q_value = grasp.q_value
    gqcnn_grasp.pose = grasp.grasp.pose().pose_msg
    if isinstance(grasp.grasp, Grasp2D):
        gqcnn_grasp.grasp_type = GQCNNGrasp.PARALLEL_JAW
    elif isinstance(grasp.grasp, SuctionPoint2D):
        gqcnn_grasp.grasp_type = GQCNNGrasp.SUCTION
    else:
        rospy.logerr('Grasp type not supported!')
        raise rospy.ServiceException('Grasp type not supported!')

    # store grasp representation in image space
    gqcnn_grasp.center_px[0] = grasp.grasp.center[0]
    gqcnn_grasp.center_px[1] = grasp.grasp.center[1]
    gqcnn_grasp.angle = grasp.grasp.angle
    gqcnn_grasp.depth = grasp.grasp.depth
    gqcnn_grasp.thumbnail = grasp.image.rosmsg

    # create and publish the pose alone for easy visualization of the
    # grasp pose in rviz
    pose_stamped = PoseStamped()
    pose_stamped.pose = grasp.grasp.pose().pose_msg
    header = Header()
    header.stamp = rospy.Time.now()
    header.frame_id = pose_frame
    pose_stamped.header = header
    grasp_pose_publisher.publish(pose_stamped)

    # log planning time and return the GQCNNGrasp msg
    rospy.loginfo('Total grasp planning time: ' +
                  str(time.time() - grasp_planning_start_time) + ' secs.')

    return gqcnn_grasp
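# --- Hypothetical usage sketch (not part of the original source). ---
# Shows how `execute_policy` might be wired into a node: build the
# visualization publisher, run the policy once, and log the result.
# `planner` (an object exposing `execute_policy`), `rgbd_state`
# (an RgbdImageState), `policy` (a GraspingPolicy), and the topic and
# frame names are all illustrative assumptions.
import rospy
from geometry_msgs.msg import PoseStamped


def plan_grasp_once(planner, rgbd_state, policy,
                    pose_frame='camera_color_optical_frame'):
    """Run the grasping policy once and return the GQCNNGrasp msg."""
    # Publisher used only to visualize the planned grasp pose in Rviz.
    grasp_pose_publisher = rospy.Publisher('/gqcnn_grasp/pose',
                                           PoseStamped,
                                           queue_size=10)
    grasp_msg = planner.execute_policy(rgbd_state, policy,
                                       grasp_pose_publisher, pose_frame)
    rospy.loginfo('Planned grasp with q-value %.3f' % grasp_msg.q_value)
    return grasp_msg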