def get_cam_intrinsic(self):
    # lazily fetch the CameraInfo message the first time it is needed
    if self.cam_info is None:
        rospy.loginfo('Waiting for cam_info')
        self.cam_info = rospy.wait_for_message(self.CAMINFO_TOPIC,
                                               CameraInfo)
    raw_camera_info = self.cam_info

    # wrap the ROS CameraInfo in a perception CameraIntrinsics object;
    # the row-major K matrix stores fx, fy, cx, cy and skew at indices
    # 0, 4, 2, 5 and 1
    camera_intrinsics = perception.CameraIntrinsics(
        raw_camera_info.header.frame_id, raw_camera_info.K[0],
        raw_camera_info.K[4], raw_camera_info.K[2], raw_camera_info.K[5],
        raw_camera_info.K[1], raw_camera_info.height,
        raw_camera_info.width)
    return camera_intrinsics
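
The constructor arguments mirror the row-major 3x3 K matrix of sensor_msgs/CameraInfo: fx = K[0], fy = K[4], cx = K[2], cy = K[5], and skew = K[1]. A minimal standalone sketch of the same wrapping, assuming the perception.CameraIntrinsics constructor used above; the frame name and numeric values are illustrative only:

import perception

# Row-major 3x3 intrinsics matrix as published in sensor_msgs/CameraInfo:
#     [fx  s  cx]
# K = [ 0 fy  cy]
#     [ 0  0   1]
# Illustrative values for a 640x480 Kinect-style RGB camera.
K = [525.0, 0.0, 319.5,
     0.0, 525.0, 239.5,
     0.0, 0.0, 1.0]

intr = perception.CameraIntrinsics('camera_rgb_optical_frame',
                                   fx=K[0], fy=K[4], cx=K[2], cy=K[5],
                                   skew=K[1], height=480, width=640)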
Example 2
    def plan_grasp(self, req):
        """ Grasp planner request handler 
        
        Parameters
        ---------
        req: :obj:`ROS ServiceRequest`
            ROS ServiceRequest for grasp planner service
        """
        rospy.loginfo('Planning Grasp')
        
        # get the raw depth and color images as ROS Image objects
        raw_color = req.color_image
        raw_depth = req.depth_image

        # get the raw camera info as ROS CameraInfo object
        raw_camera_info = req.camera_info
        
        # get the bounding box as a custom ROS BoundingBox msg 
        bounding_box = req.bounding_box

        # wrap the camera info in a perception CameraIntrinsics object
        camera_intrinsics = perception.CameraIntrinsics(
            raw_camera_info.header.frame_id, raw_camera_info.K[0],
            raw_camera_info.K[4], raw_camera_info.K[2], raw_camera_info.K[5],
            raw_camera_info.K[1], raw_camera_info.height,
            raw_camera_info.width)

        # create wrapped perception RGB and depth images by unpacking the
        # ROS images using CvBridge
        try:
            color_image = perception.ColorImage(
                self.cv_bridge.imgmsg_to_cv2(raw_color, "rgb8"),
                frame=camera_intrinsics.frame)
            depth_image = perception.DepthImage(
                self.cv_bridge.imgmsg_to_cv2(raw_depth,
                                             desired_encoding="passthrough"),
                frame=camera_intrinsics.frame)
        except CvBridgeError as cv_bridge_exception:
            # abort instead of continuing with unbound image variables
            rospy.logerr(cv_bridge_exception)
            raise rospy.ServiceException(str(cv_bridge_exception))

        # visualize
        if self.cfg['vis']['vis_uncropped_color_image']:
            vis.imshow(color_image)
            vis.show()
        if self.cfg['vis']['vis_uncropped_depth_image']:
            vis.imshow(depth_image)
            vis.show()

        # aggregate color and depth images into a single perception rgbdimage
        rgbd_image = perception.RgbdImage.from_color_and_depth(color_image, depth_image)
        
        # calc crop parameters
        minX = bounding_box.minX
        minY = bounding_box.minY
        maxX = bounding_box.maxX
        maxY = bounding_box.maxY

        # clamp the box to the image so it cannot exceed the image bounds
        no_pad = False
        if minX < 0:
            minX = 0
            no_pad = True
        if minY < 0:
            minY = 0
            no_pad = True
        if maxX > rgbd_image.width:
            maxX = rgbd_image.width
            no_pad = True
        if maxY > rgbd_image.height:
            maxY = rgbd_image.height
            no_pad = True

        centroidX = (maxX + minX) / 2
        centroidY = (maxY + minY) / 2

        # add some padding to the bounding box to prevent empty pixel regions
        # when the image is rotated during grasp planning
        if not no_pad:
            width = (maxX - minX) + self.cfg['width_pad']
            height = (maxY - minY) + self.cfg['height_pad']
        else:
            width = (maxX - minX)
            height = (maxY - minY)
  
        # crop camera intrinsics and rgbd image
        cropped_camera_intrinsics = camera_intrinsics.crop(height, width, centroidY, centroidX)
        cropped_rgbd_image = rgbd_image.crop(height, width, centroidY, centroidX)
        
        # visualize  
        if self.cfg['vis']['vis_cropped_rgbd_image']:
            vis.imshow(cropped_rgbd_image)
            vis.show()

        # create an RGBDImageState with the cropped RGBDImage and CameraIntrinsics
        image_state = RgbdImageState(cropped_rgbd_image, cropped_camera_intrinsics)
  
        # execute policy
        try:
            return self.execute_policy(image_state, self.grasping_policy,
                                       self.grasp_pose_publisher,
                                       cropped_camera_intrinsics.frame)
        except NoValidGraspsException:
            message = ('While executing policy found no valid grasps from '
                       'sampled antipodal point pairs. Aborting policy!')
            rospy.logerr(message)
            raise rospy.ServiceException(message)
        except NoAntipodalPairsFoundException:
            message = ('While executing policy could not sample any antipodal '
                       'point pairs from the input image. Aborting policy! '
                       'Please check that there is an object in the workspace '
                       'and that the output of the object detector is '
                       'reasonable.')
            rospy.logerr(message)
            raise rospy.ServiceException(message)
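
Note that camera_intrinsics.crop above only re-expresses the principal point in the cropped window's pixel coordinates; the focal lengths are unchanged. A minimal sketch of that shift, assuming crop semantics like those of perception.CameraIntrinsics.crop (crop_principal_point is an illustrative helper, not part of the library):

def crop_principal_point(cx, cy, crop_center_x, crop_center_y,
                         crop_width, crop_height):
    # shift the principal point so that pixel (0, 0) of the crop maps to
    # the crop's top-left corner in the original image
    new_cx = cx + (crop_width - 1) / 2.0 - crop_center_x
    new_cy = cy + (crop_height - 1) / 2.0 - crop_center_y
    return new_cx, new_cy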
    def service_predict(self, req):
        # the GQ-CNN code does not support uint depth images, so convert to float
        converted_depth = self.cv_bridge.imgmsg_to_cv2(
            self.depth_image, desired_encoding="passthrough")
        converted_depth = img_as_float(converted_depth)

        converted_image = self.cv_bridge.imgmsg_to_cv2(self.color_image,
                                                       "rgb8")

        # wrap the camera info in a perception CameraIntrinsics object
        # (assumes the CameraInfo message was cached in self.cam_info by a
        # subscriber, as in get_cam_intrinsic above)
        raw_camera_info = self.cam_info
        camera_intrinsics = perception.CameraIntrinsics(
            raw_camera_info.header.frame_id, raw_camera_info.K[0],
            raw_camera_info.K[4], raw_camera_info.K[2], raw_camera_info.K[5],
            raw_camera_info.K[1], raw_camera_info.height,
            raw_camera_info.width)

        # create wrapped perception RGB and depth images by unpacking the
        # ROS images using CvBridge
        color_image = perception.ColorImage(converted_image,
                                            frame=camera_intrinsics.frame)
        depth_image = perception.DepthImage(converted_depth,
                                            frame=camera_intrinsics.frame)
        # Hongzhuo: inpaint missing pixel regions in both images
        color_image = color_image.inpaint(
            rescale_factor=self.cfg['inpaint_rescale_factor'])
        depth_image = depth_image.inpaint(
            rescale_factor=self.cfg['inpaint_rescale_factor'])

        # assumes the service request carries the bounding box, as in
        # plan_grasp above
        bounding_box = req.bounding_box
        min_x = bounding_box.minX
        min_y = bounding_box.minY
        max_x = bounding_box.maxX
        max_y = bounding_box.maxY

        # clamp the box to the image so it cannot exceed the image bounds
        no_pad = False
        if min_x < 0:
            min_x = 0
            no_pad = True
        if min_y < 0:
            min_y = 0
            no_pad = True
        # no RgbdImage is built in this handler, so clamp against the
        # depth image dimensions instead
        if max_x > depth_image.width:
            max_x = depth_image.width
            no_pad = True
        if max_y > depth_image.height:
            max_y = depth_image.height
            no_pad = True

        centroid_x = (max_x + min_x) / 2
        centroid_y = (max_y + min_y) / 2

        # add some padding to bounding box to prevent empty pixel regions when the image is
        # rotated during grasp planning
        if not no_pad:
            width = (max_x - min_x) + self.cfg['width_pad']
            height = (max_y - min_y) + self.cfg['height_pad']
        else:
            width = (max_x - min_x)
            height = (max_y - min_y)
        # crop the camera intrinsics and the depth image
        cropped_camera_intrinsics = camera_intrinsics.crop(
            height, width, centroid_y, centroid_x)
        depth_im = depth_image.crop(height, width, centroid_y, centroid_x)
        # use the distance from the camera to the gripper position as the
        # grasp depth
        grasp_position = np.array([
            req.gripper_position.x, req.gripper_position.y,
            req.gripper_position.z
        ])
        grasp_depth = np.sqrt(np.sum(np.square(grasp_position)))

        # allocate the GQ-CNN input tensors
        pose_tensor = np.zeros([1, self.gqcnn_pose_dim])
        pose_tensor[0, 0] = grasp_depth
        image_tensor = np.zeros([
            1, self.gqcnn_im_height, self.gqcnn_im_width,
            self.gqcnn_num_channels
        ])

        # rescale and center-crop the depth image to the network input size
        scale = float(self.gqcnn_im_height) / self._crop_height
        depth_im_scaled = depth_im.resize(scale)
        im_tf = depth_im_scaled.crop(self.gqcnn_im_height, self.gqcnn_im_width)
        image_tensor[0, ...] = im_tf.raw_data

        # wrap a Grasp2D at the crop center for visualization
        # (assumes autolab_core.Point is imported)
        grasp_center = Point(np.array([im_tf.center[1], im_tf.center[0]]),
                             frame=cropped_camera_intrinsics.frame)
        grasp = Grasp2D(grasp_center, 0.0, grasp_depth,
                        camera_intr=cropped_camera_intrinsics)

        # query the GQ-CNN; the last output column is the predicted
        # probability of grasp success
        output_arr = self.gqcnn.predict(image_tensor, pose_tensor)
        q_values = output_arr[:, -1]

        if self.config['vis']['grasp_candidates']:
            # display the grasp on the cropped depth image, colored by
            # predicted success
            vis.figure(size=(FIGSIZE, FIGSIZE))
            vis.imshow(depth_im)
            vis.grasp(grasp,
                      scale=1.5,
                      show_center=False,
                      show_axis=True,
                      color=plt.cm.RdYlBu(q_values[0]))
            vis.title('Sampled grasp')
            self.show('grasp_candidates.png')

        # report success if the single predicted quality exceeds the threshold
        return bool(q_values[0] > 0.9)
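
The handler above scores a single candidate, but gqcnn.predict is batched: row i of the image tensor pairs with row i of the pose tensor, and the last output column is the predicted success probability (as used for q_values above). A minimal shape sketch for scoring several candidate depths of one crop; the 32x32 input size, pose dimension, and depth values are illustrative assumptions:

import numpy as np

N, H, W, C, POSE_DIM = 4, 32, 32, 1, 1             # assumed network input size
image_tensor = np.zeros((N, H, W, C), np.float32)  # one depth crop per row
pose_tensor = np.zeros((N, POSE_DIM), np.float32)  # one grasp depth per row
pose_tensor[:, 0] = np.linspace(0.5, 0.7, N)       # candidate depths in meters

# output_arr = gqcnn.predict(image_tensor, pose_tensor)  # shape (N, 2)
# q_values = output_arr[:, -1]                           # per-candidate P(success)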
Example 4
    # initialize the ROS node
    rospy.init_node('Baxter_Control_Node')

    # load detector config
    detector_cfg = YamlConfig(CFG_PATH +
                              'baxter_control_node.yaml')['detector']

    # load camera tf and intrinsics
    rospy.loginfo('Loading T_camera_world')
    T_camera_world = RigidTransform.load(CFG_PATH + 'kinect_to_world.tf')
    rospy.loginfo("Loading camera intrinsics")
    raw_camera_info = rospy.wait_for_message('/camera/rgb/camera_info',
                                             CameraInfo)
    camera_intrinsics = perception.CameraIntrinsics(
        raw_camera_info.header.frame_id, raw_camera_info.K[0],
        raw_camera_info.K[4], raw_camera_info.K[2], raw_camera_info.K[5],
        raw_camera_info.K[1], raw_camera_info.height, raw_camera_info.width)

    # initialize image processing objects
    cv_bridge = CvBridge()
    boundingBox = BoundingBox(BOUNDBOX_MIN_X, BOUNDBOX_MIN_Y, BOUNDBOX_MAX_X,
                              BOUNDBOX_MAX_Y)

    # wait for Grasp Planning Service and create Service Proxy
    rospy.loginfo("Waiting for planner node")
    rospy.wait_for_service('plan_gqcnn_grasp')
    plan_grasp = rospy.ServiceProxy('plan_gqcnn_grasp', GQCNNGraspPlanner)
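
    # A call to the proxy mirrors the request fields consumed by the
    # plan_grasp handler above (color_image, depth_image, camera_info,
    # bounding_box). A hedged sketch, assuming cached sensor messages and
    # that the positional order below matches the GQCNNGraspPlanner.srv
    # definition:
    #
    #     grasp_response = plan_grasp(raw_color_msg, raw_depth_msg,
    #                                 raw_camera_info, boundingBox)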

    # wait for Grasp Execution Service and create Service Proxy
    if not DEBUG:
        rospy.loginfo("Waiting for execution node")