# Resolve the config path relative to this file's parent directory.
config_filename = os.path.join(
    os.path.dirname(os.path.realpath(__file__)), '..', config_filename)

# Seed both numpy and the stdlib RNG for reproducible rollouts.
if seed is not None:
    np.random.seed(seed)
    random.seed(seed)

# Read the YAML configuration and pull out the policy section.
config = YamlConfig(config_filename)
policy_config = config['policy']

# Load the saved test case: an RGB-D image state plus the recorded grasp.
state_path = os.path.join(test_case_path, 'state')
action_path = os.path.join(test_case_path, 'action')
state = RgbdImageState.load(state_path)
original_action = ParallelJawGrasp.load(action_path)

# Initialize the grasping policy from config.
policy = CrossEntropyRobustGraspingPolicy(policy_config)

# Optionally visualize the input color and depth images.
# NOTE(review): the nesting below is reconstructed from mangled formatting;
# when a segmask is present the original presumably used a different layout.
if policy_config['vis']['input_images']:
    vis2d.figure()
    if state.segmask is None:
        vis2d.subplot(1, 2, 1)
        vis2d.imshow(state.rgbd_im.color)
        vis2d.title('COLOR')
        vis2d.subplot(1, 2, 2)
        vis2d.imshow(state.rgbd_im.depth,
                     vmin=policy_config['vis']['vmin'],
                     vmax=policy_config['vis']['vmax'])
# Read the camera calibration: transform from the sensor frame to world.
tf_filename = '%s_to_world.tf' % (sensor_frame)
T_camera_world = RigidTransform.load(
    os.path.join(config['calib_dir'], sensor_frame, tf_filename))

# Set up and start the RGB-D sensor, then grab its IR intrinsics.
sensor = RgbdSensorFactory.sensor(sensor_type, config['sensor'])
sensor.start()
camera_intr = sensor.ir_intrinsics

# Capture one frame and inpaint holes in both color and depth.
color_im, depth_im, _ = sensor.frames()
color_im = color_im.inpaint(rescale_factor=inpaint_rescale_factor)
depth_im = depth_im.inpaint(rescale_factor=inpaint_rescale_factor)

# Bundle into the RGB-D image state consumed by the policy.
rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im)
state = RgbdImageState(rgbd_im, camera_intr)

# Initialize the policy and plan a grasp, timing the query.
policy = CrossEntropyAntipodalGraspingPolicy(policy_config)
policy_start = time.time()
action = policy(state)
logging.info('Planning took %.3f sec' % (time.time() - policy_start))

# Optionally visualize the planned grasp on the captured images.
if policy_config['vis']['final_grasp']:
    vis.figure(size=(10, 10))
    vis.subplot(1, 2, 1)
    vis.imshow(rgbd_im.color)
    vis.grasp(action.grasp, scale=1.5, show_center=False, show_axis=True)
    vis.title('Planned grasp on color (Q=%.3f)' % (action.q_value))
    vis.subplot(1, 2, 2)
def plan_grasp(self, req):
    """ Grasp planner request handler

    Parameters
    ---------
    req: :obj:`ROS ServiceRequest`
        ROS ServiceRequest for grasp planner service

    Returns
    -------
    The response produced by `execute_policy` for the planned grasp.

    Raises
    ------
    rospy.ServiceException
        If the images cannot be converted, or if the policy fails to find
        a valid grasp.
    """
    rospy.loginfo('Planning Grasp')

    # Raw ROS Image messages for the color and depth streams.
    raw_color = req.color_image
    raw_depth = req.depth_image

    # Raw ROS CameraInfo message.
    raw_camera_info = req.camera_info

    # Custom ROS BoundingBox message delimiting the region of interest.
    bounding_box = req.bounding_box

    # Wrap the camera info in a perception CameraIntrinsics object.
    # CameraInfo.K is a row-major 3x3 matrix: fx=K[0], fy=K[4], cx=K[2],
    # cy=K[5], skew=K[1].
    camera_intrinsics = perception.CameraIntrinsics(
        raw_camera_info.header.frame_id, raw_camera_info.K[0],
        raw_camera_info.K[4], raw_camera_info.K[2], raw_camera_info.K[5],
        raw_camera_info.K[1], raw_camera_info.height, raw_camera_info.width)

    # Unpack the ROS Images into wrapped perception images via CvBridge.
    # BUGFIX: the original handler logged the CvBridgeError and fell
    # through, which then crashed with a NameError on the unbound
    # color_image/depth_image; abort the service call explicitly instead.
    try:
        color_image = perception.ColorImage(
            self.cv_bridge.imgmsg_to_cv2(raw_color, "rgb8"),
            frame=camera_intrinsics.frame)
        depth_image = perception.DepthImage(
            self.cv_bridge.imgmsg_to_cv2(raw_depth,
                                         desired_encoding="passthrough"),
            frame=camera_intrinsics.frame)
    except CvBridgeError as cv_bridge_exception:
        rospy.logerr(cv_bridge_exception)
        raise rospy.ServiceException(str(cv_bridge_exception))

    # Optional visualization of the uncropped inputs.
    if self.cfg['vis']['vis_uncropped_color_image']:
        vis.imshow(color_image)
        vis.show()
    if self.cfg['vis']['vis_uncropped_depth_image']:
        vis.imshow(depth_image)
        vis.show()

    # Aggregate color and depth into a single perception RGB-D image.
    rgbd_image = perception.RgbdImage.from_color_and_depth(
        color_image, depth_image)

    # Bounding-box corners for the crop.
    minX = bounding_box.minX
    minY = bounding_box.minY
    maxX = bounding_box.maxX
    maxY = bounding_box.maxY

    # Clamp the box to the image bounds; if any side had to be clamped,
    # skip the padding below so the padded crop cannot exceed the image.
    no_pad = False
    if minX < 0:
        minX = 0
        no_pad = True
    if minY < 0:
        minY = 0
        no_pad = True
    if maxX > rgbd_image.width:
        maxX = rgbd_image.width
        no_pad = True
    if maxY > rgbd_image.height:
        maxY = rgbd_image.height
        no_pad = True

    centroidX = (maxX + minX) / 2
    centroidY = (maxY + minY) / 2

    # Pad the bounding box to prevent empty pixel regions when the image
    # is rotated during grasp planning.
    if not no_pad:
        width = (maxX - minX) + self.cfg['width_pad']
        height = (maxY - minY) + self.cfg['height_pad']
    else:
        width = (maxX - minX)
        height = (maxY - minY)

    # Crop camera intrinsics and RGB-D image to the (padded) box.
    cropped_camera_intrinsics = camera_intrinsics.crop(
        height, width, centroidY, centroidX)
    cropped_rgbd_image = rgbd_image.crop(height, width, centroidY, centroidX)

    # Optional visualization of the cropped RGB-D image.
    if self.cfg['vis']['vis_cropped_rgbd_image']:
        vis.imshow(cropped_rgbd_image)
        vis.show()

    # Build the RGB-D image state consumed by the grasping policy.
    image_state = RgbdImageState(cropped_rgbd_image,
                                 cropped_camera_intrinsics)

    # Execute the policy, translating planner failures into service errors.
    try:
        return self.execute_policy(image_state, self.grasping_policy,
                                   self.grasp_pose_publisher,
                                   cropped_camera_intrinsics.frame)
    except NoValidGraspsException:
        rospy.logerr('While executing policy found no valid grasps from sampled antipodal point pairs. Aborting Policy!')
        raise rospy.ServiceException('While executing policy found no valid grasps from sampled antipodal point pairs. Aborting Policy!')
    except NoAntipodalPairsFoundException:
        rospy.logerr('While executing policy could not sample any antipodal point pairs from input image. Aborting Policy! Please check if there is an object in the workspace or if the output of the object detector is reasonable.')
        raise rospy.ServiceException('While executing policy could not sample any antipodal point pairs from input image. Aborting Policy! Please check if there is an object in the workspace or if the output of the object detector is reasonable.')
# Capture one frame and inpaint holes in both color and depth.
color_im, depth_im, _ = sensor.frames()
color_im = color_im.inpaint(rescale_factor=inpaint_rescale_factor)
depth_im = depth_im.inpaint(rescale_factor=inpaint_rescale_factor)

# Detect objects in the scene and take the first detection.
detector = PointCloudBoxDetector()
detections = detector.detect(color_im, depth_im, detection_config,
                             camera_intr=camera_intr,
                             T_camera_world=T_camera_world)
detection = detections[0]

# Form the RGB-D image state from the detection thumbnails.
rgbd_im = RgbdImage.from_color_and_depth(detection.color_thumbnail,
                                         detection.depth_thumbnail)
state = RgbdImageState(rgbd_im, detection.camera_intr,
                       detection.binary_thumbnail)

# Initialize the policy and plan a grasp, timing the query.
policy = CrossEntropyRobustGraspingPolicy(policy_config)
policy_start = time.time()
action = policy(state)
logging.info('Planning took %.3f sec' % (time.time() - policy_start))

# Optionally visualize the planned grasp on the thumbnails.
if policy_config['vis']['final_grasp']:
    vis.figure(size=(10, 10))
    vis.subplot(1, 2, 1)
    vis.imshow(rgbd_im.color)
    vis.grasp(action.grasp, scale=1.5, show_center=False, show_axis=True)
    vis.title('Planned grasp on color (Q=%.3f)' % (action.q_value))
    vis.subplot(1, 2, 2)
from visualization import Visualizer3D as vis3d point_cloud = camera_intr.deproject(depth_im) vis3d.figure() vis3d.points(point_cloud, subsample=3, random=True, color=(0, 0, 1), scale=0.001) vis3d.pose(RigidTransform()) vis3d.pose(T_camera_world.inverse()) vis3d.show() # Create state. rgbd_im = RgbdImage.from_color_and_depth(color_im, depth_im) state = RgbdImageState(rgbd_im, camera_intr, segmask=segmask) # Init policy. policy_type = "cem" if "type" in policy_config: policy_type = policy_config["type"] if policy_type == "ranking": policy = RobustGraspingPolicy(policy_config) else: policy = CrossEntropyRobustGraspingPolicy(policy_config) policy_start = time.time() action = policy(state) logger.info("Planning took %.3f sec" % (time.time() - policy_start)) # Vis final grasp. if policy_config["vis"]["final_grasp"]:
def plan_grasp(self, req):
    """ Grasp planner request handler.

    Parameters
    ---------
    req: :obj:`ROS ServiceRequest`
        ROS ServiceRequest for grasp planner service

    Returns
    -------
    The response produced by `execute_policy` for the planned grasp.

    Raises
    ------
    rospy.ServiceException
        If the images cannot be converted, have mismatched or too-small
        shapes, or if the policy fails to find a valid grasp.
    """
    rospy.loginfo('Planning Grasp')

    # Minimum input dimensions: the metric crop window must still fit
    # after an arbitrary in-plane rotation, hence the sqrt(2) padding on
    # each side of the crop.
    pad = max(
        math.ceil(
            np.sqrt(2) *
            (float(self.cfg['policy']['metric']['crop_width']) / 2)),
        math.ceil(
            np.sqrt(2) *
            (float(self.cfg['policy']['metric']['crop_height']) / 2)))
    min_width = 2 * pad + self.cfg['policy']['metric']['crop_width']
    min_height = 2 * pad + self.cfg['policy']['metric']['crop_height']

    # Raw ROS messages from the request.
    raw_color = req.color_image
    raw_depth = req.depth_image
    segmask = None
    raw_segmask = req.segmask

    # Raw ROS CameraInfo message.
    raw_camera_info = req.camera_info

    # Custom ROS BoundingBox message delimiting the region of interest.
    bounding_box = req.bounding_box

    # Wrap the camera info in a perception CameraIntrinsics object.
    # CameraInfo.K is a row-major 3x3 matrix: fx=K[0], fy=K[4], cx=K[2],
    # cy=K[5], skew=K[1].
    camera_intrinsics = CameraIntrinsics(
        raw_camera_info.header.frame_id, raw_camera_info.K[0],
        raw_camera_info.K[4], raw_camera_info.K[2], raw_camera_info.K[5],
        raw_camera_info.K[1], raw_camera_info.height,
        raw_camera_info.width)

    # Unpack the ROS Images into wrapped perception images via CvBridge.
    # BUGFIX: the original handler logged the CvBridgeError and fell
    # through, which then crashed with a NameError on the unbound images;
    # abort the service call explicitly instead.
    try:
        color_image = ColorImage(self.cv_bridge.imgmsg_to_cv2(
            raw_color, "rgb8"),
                                 frame=camera_intrinsics.frame)
        depth_image = DepthImage(self.cv_bridge.imgmsg_to_cv2(
            raw_depth, desired_encoding="passthrough"),
                                 frame=camera_intrinsics.frame)
        segmask = BinaryImage(self.cv_bridge.imgmsg_to_cv2(
            raw_segmask, desired_encoding="passthrough"),
                              frame=camera_intrinsics.frame)
    except CvBridgeError as cv_bridge_exception:
        rospy.logerr(cv_bridge_exception)
        raise rospy.ServiceException(str(cv_bridge_exception))

    # Check that the color and depth images agree in shape.
    if color_image.height != depth_image.height or \
       color_image.width != depth_image.width:
        rospy.logerr(
            'Color image and depth image must be the same shape! Color is %d x %d but depth is %d x %d'
            % (color_image.height, color_image.width, depth_image.height,
               depth_image.width))
        raise rospy.ServiceException(
            'Color image and depth image must be the same shape! Color is %d x %d but depth is %d x %d'
            % (color_image.height, color_image.width, depth_image.height,
               depth_image.width))

    # Reject images smaller than the minimum rotatable crop window.
    if color_image.height < min_height or color_image.width < min_width:
        rospy.logerr(
            'Color image is too small! Must be at least %d x %d resolution but the requested image is only %d x %d'
            % (min_height, min_width, color_image.height,
               color_image.width))
        raise rospy.ServiceException(
            'Color image is too small! Must be at least %d x %d resolution but the requested image is only %d x %d'
            % (min_height, min_width, color_image.height,
               color_image.width))

    # Inpaint holes in both images.
    color_image = color_image.inpaint(
        rescale_factor=self.cfg['inpaint_rescale_factor'])
    depth_image = depth_image.inpaint(
        rescale_factor=self.cfg['inpaint_rescale_factor'])

    # Optional visualization of the inputs.
    if self.cfg['vis']['color_image']:
        vis.imshow(color_image)
        vis.show()
    if self.cfg['vis']['depth_image']:
        vis.imshow(depth_image)
        vis.show()
    if self.cfg['vis']['segmask'] and segmask is not None:
        vis.imshow(segmask)
        vis.show()

    # Aggregate color and depth into a single perception RGB-D image.
    rgbd_image = RgbdImage.from_color_and_depth(color_image, depth_image)

    # Pad the bounding box by the rotation-safe margin computed above.
    minX = bounding_box.minX - pad
    minY = bounding_box.minY - pad
    maxX = bounding_box.maxX + pad
    maxY = bounding_box.maxY + pad

    # Clamp the padded box to the image bounds.
    if minX < 0:
        minX = 0
    if minY < 0:
        minY = 0
    if maxX > rgbd_image.width:
        maxX = rgbd_image.width
    if maxY > rgbd_image.height:
        maxY = rgbd_image.height

    centroidX = (maxX + minX) / 2
    centroidY = (maxY + minY) / 2

    # Compute the crop width and height.
    width = maxX - minX
    height = maxY - minY

    # Crop intrinsics, RGB-D image, and segmask (if given) to the box.
    cropped_camera_intrinsics = camera_intrinsics.crop(
        height, width, centroidY, centroidX)
    cropped_rgbd_image = rgbd_image.crop(height, width, centroidY,
                                         centroidX)
    cropped_segmask = None
    if segmask is not None:
        cropped_segmask = segmask.crop(height, width, centroidY, centroidX)

    # Optional visualization of the cropped RGB-D image.
    if self.cfg['vis']['cropped_rgbd_image']:
        vis.imshow(cropped_rgbd_image)
        vis.show()

    # Build the RGB-D image state consumed by the grasping policy.
    image_state = RgbdImageState(cropped_rgbd_image,
                                 cropped_camera_intrinsics,
                                 segmask=cropped_segmask)

    # Execute the policy, translating planner failures into service errors.
    try:
        return self.execute_policy(image_state, self.grasping_policy,
                                   self.grasp_pose_publisher,
                                   cropped_camera_intrinsics.frame)
    except NoValidGraspsException:
        rospy.logerr(
            'While executing policy found no valid grasps from sampled antipodal point pairs. Aborting Policy!'
        )
        raise rospy.ServiceException(
            'While executing policy found no valid grasps from sampled antipodal point pairs. Aborting Policy!'
        )