def get_color_im(self):
     # block until a single color frame arrives
     print "Waiting for color"
     self.rgb = rospy.wait_for_message(self.COLOR_TOPIC, Image)
     if self.cam_info is None:
         self.get_cam_intrinsic()
     # no encoding given, so imgmsg_to_cv2 defaults to passthrough (often bgr8)
     color_image = perception.ColorImage(
         self.bridge.imgmsg_to_cv2(self.rgb),
         frame=self.cam_info.header.frame_id)
     color_image.save('test.jpg')  # debug: dump the captured frame to disk
     return color_image
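
A minimal sketch of the get_cam_intrinsic helper the method above calls; the CAM_INFO_TOPIC attribute is hypothetical, and the constructor arguments mirror the CameraIntrinsics construction in the next example:

    from sensor_msgs.msg import CameraInfo

    def get_cam_intrinsic(self):
        # CAM_INFO_TOPIC is hypothetical; use your driver's camera_info topic
        self.cam_info = rospy.wait_for_message(self.CAM_INFO_TOPIC, CameraInfo)
        k = self.cam_info.K  # row-major 3x3 intrinsic matrix
        self.camera_intrinsics = perception.CameraIntrinsics(
            self.cam_info.header.frame_id,
            fx=k[0], fy=k[4], cx=k[2], cy=k[5], skew=k[1],
            height=self.cam_info.height, width=self.cam_info.width)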
Example #2
    def plan_grasp(self, req):
        """ Grasp planner request handler 
        
        Parameters
        ---------
        req: :obj:`ROS ServiceRequest`
            ROS ServiceRequest for grasp planner service
        """
        rospy.loginfo('Planning Grasp')
        
        # get the raw depth and color images as ROS Image objects
        raw_color = req.color_image
        raw_depth = req.depth_image

        # get the raw camera info as ROS CameraInfo object
        raw_camera_info = req.camera_info
        
        # get the bounding box as a custom ROS BoundingBox msg 
        bounding_box = req.bounding_box

        # wrap the camera info in a perception CameraIntrinsics object
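        # sensor_msgs/CameraInfo stores the 3x3 intrinsic matrix K row-major:
        #     [fx  s cx]
        #     [ 0 fy cy]
        #     [ 0  0  1]
        # hence fx = K[0], fy = K[4], cx = K[2], cy = K[5], skew = K[1]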
        camera_intrinsics = perception.CameraIntrinsics(
            raw_camera_info.header.frame_id,
            raw_camera_info.K[0], raw_camera_info.K[4],
            raw_camera_info.K[2], raw_camera_info.K[5],
            raw_camera_info.K[1],
            raw_camera_info.height, raw_camera_info.width)

        ### Create wrapped Perception RGB and Depth Images by unpacking the ROS Images using CVBridge ###
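        # "rgb8" forces a 3-channel RGB conversion, while "passthrough" keeps
        # the depth image's native encoding (typically 32FC1 meters or 16UC1
        # millimeters)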
        try:
            color_image = perception.ColorImage(
                self.cv_bridge.imgmsg_to_cv2(raw_color, "rgb8"),
                frame=camera_intrinsics.frame)
            depth_image = perception.DepthImage(
                self.cv_bridge.imgmsg_to_cv2(raw_depth,
                                             desired_encoding="passthrough"),
                frame=camera_intrinsics.frame)
        except CvBridgeError as cv_bridge_exception:
            rospy.logerr(cv_bridge_exception)
            # abort the request rather than continuing with undefined images
            raise rospy.ServiceException(cv_bridge_exception)

        # visualize
        if self.cfg['vis']['vis_uncropped_color_image']:
            vis.imshow(color_image)
            vis.show()
        if self.cfg['vis']['vis_uncropped_depth_image']:
            vis.imshow(depth_image)
            vis.show()

        # aggregate color and depth images into a single perception rgbdimage
        rgbd_image = perception.RgbdImage.from_color_and_depth(color_image, depth_image)
        
        # calc crop parameters
        minX = bounding_box.minX
        minY = bounding_box.minY
        maxX = bounding_box.maxX
        maxY = bounding_box.maxY

        # clamp the box to the image so it doesn't exceed the height/width bounds
        no_pad = False
        if minX < 0:
            minX = 0
            no_pad = True
        if minY < 0:
            minY = 0
            no_pad = True
        if maxX > rgbd_image.width:
            maxX = rgbd_image.width
            no_pad = True
        if maxY > rgbd_image.height:
            maxY = rgbd_image.height
            no_pad = True

        centroidX = (maxX + minX) / 2
        centroidY = (maxY + minY) / 2

        # add some padding to bounding box to prevent empty pixel regions when the image is rotated during grasp planning
        if not no_pad:
            width = (maxX - minX) + self.cfg['width_pad']
            height = (maxY - minY) + self.cfg['height_pad']
        else:
            width = (maxX - minX)
            height = (maxY - minY)
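        # e.g. a box with (minX, minY) = (100, 50) and (maxX, maxY) = (300, 250)
        # and width_pad = height_pad = 40 gives width = height = 240 around
        # centroid (200, 150)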
  
        # crop camera intrinsics and rgbd image
        cropped_camera_intrinsics = camera_intrinsics.crop(height, width, centroidY, centroidX)
        cropped_rgbd_image = rgbd_image.crop(height, width, centroidY, centroidX)
        
        # visualize  
        if self.cfg['vis']['vis_cropped_rgbd_image']:
            vis.imshow(cropped_rgbd_image)
            vis.show()

        # create an RGBDImageState with the cropped RGBDImage and CameraIntrinsics
        image_state = RgbdImageState(cropped_rgbd_image, cropped_camera_intrinsics)
  
        # execute policy
        try:
            return self.execute_policy(image_state, self.grasping_policy, self.grasp_pose_publisher, cropped_camera_intrinsics.frame)
        except NoValidGraspsException:
            rospy.logerr('While executing policy found no valid grasps from sampled antipodal point pairs. Aborting Policy!')
            raise rospy.ServiceException('While executing policy found no valid grasps from sampled antipodal point pairs. Aborting Policy!')
        except NoAntipodalPairsFoundException:
            rospy.logerr('While executing policy could not sample any antipodal point pairs from input image. Aborting Policy! Please check if there is an object in the workspace or if the output of the object detector is reasonable.')
            raise rospy.ServiceException('While executing policy could not sample any antipodal point pairs from input image. Aborting Policy! Please check if there is an object in the workspace or if the output of the object detector is reasonable.')
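
A minimal sketch of how a handler like plan_grasp is typically exposed, assuming the GQCNNGraspPlanner service type from the gqcnn ROS package; the node and class wiring here are hypothetical:

    import rospy
    from gqcnn.srv import GQCNNGraspPlanner

    rospy.init_node('grasp_planner')
    planner = GraspPlanner()  # hypothetical class providing plan_grasp
    grasp_service = rospy.Service('plan_gqcnn_grasp', GQCNNGraspPlanner,
                                  planner.plan_grasp)
    rospy.spin()

Example #3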
    def service_predict(self, req):
        # the network does not support uint depth images, so convert to float
        converted_depth = self.cv_bridge.imgmsg_to_cv2(
            self.depth_image, desired_encoding="passthrough")
        converted_depth = img_as_float(converted_depth)
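        # img_as_float rescales integer images to [0, 1], so uint16 depth in
        # millimeters loses its metric scale here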

        converted_image = self.cv_bridge.imgmsg_to_cv2(self.color_image,
                                                       "rgb8")

        # NOTE: raw_camera_info and bounding_box are not defined in this
        # handler; assume they are cached on the node like self.color_image
        # and self.depth_image above
        raw_camera_info = self.camera_info
        bounding_box = self.bounding_box

        # wrap the camera info in a perception CameraIntrinsics object
        camera_intrinsics = perception.CameraIntrinsics(
            raw_camera_info.header.frame_id, raw_camera_info.K[0],
            raw_camera_info.K[4], raw_camera_info.K[2], raw_camera_info.K[5],
            raw_camera_info.K[1], raw_camera_info.height,
            raw_camera_info.width)

        ### Create wrapped Perception RGB and Depth Images by unpacking the ROS Images using CVBridge ###
        color_image = perception.ColorImage(converted_image,
                                            frame=camera_intrinsics.frame)
        depth_image = perception.DepthImage(converted_depth,
                                            frame=camera_intrinsics.frame)
        # Hongzhuo: inpaint the color and depth images to fill holes
        color_image = color_image.inpaint(
            rescale_factor=self.cfg['inpaint_rescale_factor'])
        depth_image = depth_image.inpaint(
            rescale_factor=self.cfg['inpaint_rescale_factor'])

        min_x = bounding_box.minX
        min_y = bounding_box.minY
        max_x = bounding_box.maxX
        max_y = bounding_box.maxY

        # clamp the box to the image so it doesn't exceed the height/width bounds
        no_pad = False
        if min_x < 0:
            min_x = 0
            no_pad = True
        if min_y < 0:
            min_y = 0
            no_pad = True
        # no RgbdImage is built in this handler, so clamp against the depth image
        if max_x > depth_image.width:
            max_x = depth_image.width
            no_pad = True
        if max_y > depth_image.height:
            max_y = depth_image.height
            no_pad = True

        centroid_x = (max_x + min_x) / 2
        centroid_y = (max_y + min_y) / 2

        # add some padding to bounding box to prevent empty pixel regions when the image is
        # rotated during grasp planning
        if not no_pad:
            width = (max_x - min_x) + self.cfg['width_pad']
            height = (max_y - min_y) + self.cfg['height_pad']
        else:
            width = (max_x - min_x)
            height = (max_y - min_y)
        cropped_camera_intrinsics = camera_intrinsics.crop(
            height, width, centroid_y, centroid_x)
        depth_im = depth_image.crop(height,
                                    width,
                                    centroid_y,
                                    center_j=centroid_x)

        # the RgbdImageState construction from the previous example is skipped
        # here; only the cropped depth image is needed for prediction
        grasp_position = np.array([
            req.gripper_position.x, req.gripper_position.y,
            req.gripper_position.z
        ])
        grasp_depth = np.sqrt(np.sum(np.square(grasp_position)))
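        # the grasp depth is the Euclidean distance from the camera origin to
        # the gripper position (assumes the position is in the camera frame)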
        pose_tensor = np.zeros([1, self.gqcnn_pose_dim])
        image_tensor = np.zeros([
            1, self.gqcnn_im_height, self.gqcnn_im_width,
            self.gqcnn_num_channels
        ])
        pose_tensor[0, 0] = grasp_depth  # fill the tensor; don't overwrite it

        scale = float(self.gqcnn_im_height) / self._crop_height
        depth_im_scaled = depth_im.resize(scale)
        im_tf = depth_im_scaled.crop(self.gqcnn_im_height, self.gqcnn_im_width)
        image_tensor[0, ...] = im_tf.raw_data
        # center and angle are placeholders for visualization only; Point
        # comes from autolab_core
        grasp = Grasp2D(Point(np.zeros(2), frame=camera_intrinsics.frame), 0.0,
                        grasp_depth, camera_intr=cropped_camera_intrinsics)
        output_arr = self.gqcnn.predict(image_tensor, pose_tensor)
        # GQ-CNN outputs [p(failure), p(success)]; keep the success column
        q_values = output_arr[:, -1]

        if self.cfg['vis']['grasp_candidates']:
            # display the grasp on the cropped depth image, colored by
            # predicted success probability
            vis.figure(size=(FIGSIZE, FIGSIZE))
            vis.imshow(depth_im)
            vis.grasp(grasp,
                      scale=1.5,
                      show_center=False,
                      show_axis=True,
                      color=plt.cm.RdYlBu(q_values))
            vis.title('Sampled grasps')
            self.show('grasp_candidates.png')

        # a NumPy boolean array: True where predicted success exceeds 0.9
        return q_values > 0.9
Example #4
def run_experiment():
    """ Run the experiment """
    # get the images from the sensor
    previous_grasp = None
    while True:
        rospy.loginfo("Waiting for images")
        start_time = time.time()
        raw_color = rospy.wait_for_message("/camera/rgb/image_color", Image)
        raw_depth = rospy.wait_for_message("/camera/depth_registered/image",
                                           Image)
        image_load_time = time.time() - start_time
        rospy.loginfo('Images loaded in: ' + str(image_load_time) + ' secs.')

        ### Create wrapped Perception RGB and Depth Images by unpacking the ROS Images using CVBridge ###
        try:
            color_image = perception.ColorImage(
                cv_bridge.imgmsg_to_cv2(raw_color, "rgb8"),
                frame=T_camera_world.from_frame)
            depth_image = perception.DepthImage(
                cv_bridge.imgmsg_to_cv2(raw_depth,
                                        desired_encoding="passthrough"),
                frame=T_camera_world.from_frame)
        except CvBridgeError as cv_bridge_exception:
            rospy.logerr(cv_bridge_exception)
            continue  # the images are undefined on failure; skip this frame

        # inpaint to remove holes
        inpainted_color_image = color_image.inpaint(
            rescale_factor=INPAINT_RESCALE_FACTOR)
        inpainted_depth_image = depth_image.inpaint(
            rescale_factor=INPAINT_RESCALE_FACTOR)

        if DETECT_OBJECTS:
            detector = RgbdDetectorFactory.detector('point_cloud_box')
            detections = detector.detect(inpainted_color_image,
                                         inpainted_depth_image,
                                         detector_cfg,
                                         camera_intrinsics,
                                         T_camera_world,
                                         vis_foreground=False,
                                         vis_segmentation=False)

        detected_obj = None
        if previous_grasp is not None:
            position = previous_grasp.pose.position
            position = np.array([position.x, position.y, position.z])
            center_point = Point(position, camera_intrinsics.frame)
            center_pixel = camera_intrinsics.project(center_point,
                                                     camera_intrinsics.frame)
            i, j = center_pixel.y, center_pixel.x
            if DETECT_OBJECTS:
                for detection in detections:
                    binaryIm = detection.binary_im
                    if binaryIm[i, j]:
                        segmask = binaryIm
                        detected_obj = detection
                        break
            else:
                # generate a circular inverse segmask centered on the previous
                # grasp so new samples avoid the spot that was just grasped
                y, x = np.ogrid[-i:IMAGE_HEIGHT - i, -j:IMAGE_WIDTH - j]
                circlemask = x * x + y * y <= CIRCLE_RAD * CIRCLE_RAD
                segmask_data = np.ones(
                    (IMAGE_HEIGHT, IMAGE_WIDTH), dtype=np.uint8) * 255
                segmask_data[circlemask] = 0
                segmask = BinaryImage(segmask_data, camera_intrinsics.frame)
        else:
            segmask = BinaryImage(
                np.ones((IMAGE_HEIGHT, IMAGE_WIDTH), dtype=np.uint8) * 255,
                camera_intrinsics.frame)
        segmask._encoding = 'mono8'  # force the encoding used by rosmsg conversion

        if VISUALIZE_DETECTOR_OUTPUT and detected_obj is not None:
            vis.figure()
            vis.subplot(1, 2, 1)
            vis.imshow(detected_obj.color_thumbnail)
            vis.subplot(1, 2, 2)
            vis.imshow(detected_obj.depth_thumbnail)
            vis.show()

        try:
            rospy.loginfo('Planning Grasp')
            start_time = time.time()
            planned_grasp_data = plan_grasp(inpainted_color_image.rosmsg,
                                            inpainted_depth_image.rosmsg,
                                            segmask.rosmsg, raw_camera_info,
                                            boundingBox)
            grasp_plan_time = time.time() - start_time
            rospy.loginfo('Total grasp planning time: ' +
                          str(grasp_plan_time) + ' secs.')

            rospy.loginfo('Queueing Grasp')
            previous_grasp = planned_grasp_data.grasp
            execute_grasp(previous_grasp)
            # raw_input("Press ENTER to resume")
        except rospy.ServiceException as e:
            rospy.logerr(e)
            previous_grasp = None
            raw_input("Press ENTER to resume")
import logging

import cv2
import numpy as np
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge

import perception

if __name__ == '__main__':
    # ros
    rospy.init_node("gqcnn_policy")
    color_img = rospy.wait_for_message("/camera/rgb/image_raw",
                                       Image,
                                       timeout=2)
    depth_img = rospy.wait_for_message("/camera/depth_registered/image_raw",
                                       Image,
                                       timeout=3)
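    # wait_for_message raises rospy.exceptions.ROSException if no message
    # arrives before the timeout expires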
    cv_bridge = CvBridge()

    # note: the image stays in BGR order; cv2.imwrite below also expects BGR,
    # so the saved PNG keeps correct colors
    color_img = cv_bridge.imgmsg_to_cv2(color_img, "bgr8")
    color_img = perception.ColorImage(color_img)
    # multiplying by 1.0 casts integer depth to float for DepthImage
    depth_img = cv_bridge.imgmsg_to_cv2(depth_img, "passthrough") * 1.0
    depth_img = perception.DepthImage(depth_img)
    color_img = color_img.inpaint()
    depth_img = depth_img.inpaint()
    np.save(
        "/home/ros/ur10_catkin_ws/src/gqcnn/data/rgbd/multiple_objects/depth_0.npy",
        depth_img.data)
    cv2.imwrite(
        "/home/ros/ur10_catkin_ws/src/gqcnn/data/rgbd/multiple_objects/color_0.png",
        color_img.data)

    # set up logger
    logging.getLogger().setLevel(logging.DEBUG)

    # parse args