Example #1
0
    def test_io(self, height=50, width=100):
        """Round-trip a random ColorImage through every supported file format.

        Saves and reloads the image as png, jpg, npy, and npz; the lossless
        formats (png, npy, npz) must reproduce the pixel data exactly.
        """
        pixels = (255 * np.random.rand(height, width, 3)).astype(np.uint8)
        image = ColorImage(pixels, 'a')
        file_root = 'test/data/test_color'

        def roundtrip(extension):
            # Save to file_root + extension and read the image back.
            path = file_root + extension
            image.save(path)
            return ColorImage.open(path)

        # png is lossless: data must survive the round trip unchanged
        reloaded = roundtrip('.png')
        self.assertTrue(np.sum(np.abs(reloaded.data - image.data)) < 1e-5,
                        msg='ColorImage data changed after load png')

        # jpg is lossy, so only exercise save/load without comparing data
        roundtrip('.jpg')

        # npy round trip must be exact
        reloaded = roundtrip('.npy')
        self.assertTrue(np.sum(np.abs(reloaded.data - image.data)) < 1e-5,
                        msg='ColorImage data changed after load npy')

        # npz round trip must be exact
        reloaded = roundtrip('.npz')
        self.assertTrue(np.sum(np.abs(reloaded.data - image.data)) < 1e-5,
                        msg='ColorImage data changed after load npz')
Example #2
0
    def frames(self):
        """Retrieve the next frame from the image directory.

        Reads ``color_%d.png`` and ``depth_%d.npy`` for the current image
        index from the image directory, advances the index, and returns the
        loaded images. No IR image is produced by this sensor.

        Returns
        -------
        :obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, None
            The ColorImage and DepthImage of the current frame; the third
            element (the IrImage slot) is always None.

        Raises
        ------
        RuntimeError
            If the sensor is not running or if all images in the
            directory have been used.
        """
        if not self._running:
            raise RuntimeError('Primesense device pointing to %s not running. Cannot read frames' %(self._path_to_images))

        # NOTE(review): if _im_index is 0-based and _num_images is a count,
        # this bound looks off by one (index == _num_images passes the check
        # and would then fail to open a file) -- confirm against the writer.
        if self._im_index > self._num_images:
            raise RuntimeError('Primesense device is out of images')

        # read the color/depth pair for the current index, then advance it
        color_filename = os.path.join(self._path_to_images, 'color_%d.png' %(self._im_index))
        color_im = ColorImage.open(color_filename, frame=self._frame)
        depth_filename = os.path.join(self._path_to_images, 'depth_%d.npy' %(self._im_index))
        depth_im = DepthImage.open(depth_filename, frame=self._frame)
        self._im_index += 1
        return color_im, depth_im, None
Example #3
0
 def load(save_dir):
     """Load a saved RgbdImageState from a directory.

     Reads the color/depth images, camera intrinsics, and (if present)
     segmentation masks and a pickled fully-observed state that were
     previously written to `save_dir`.

     Parameters
     ----------
     save_dir : str
         Directory containing the saved state files.

     Returns
     -------
     :obj:`RgbdImageState`
         The reconstructed image state.

     Raises
     ------
     ValueError
         If `save_dir` does not exist.
     """
     if not os.path.exists(save_dir):
         raise ValueError('Directory %s does not exist!' % (save_dir))
     color_image_filename = os.path.join(save_dir, 'color.png')
     depth_image_filename = os.path.join(save_dir, 'depth.npy')
     camera_intr_filename = os.path.join(save_dir, 'camera.intr')
     segmask_filename = os.path.join(save_dir, 'segmask.npy')
     obj_segmask_filename = os.path.join(save_dir, 'obj_segmask.npy')
     state_filename = os.path.join(save_dir, 'state.pkl')
     camera_intr = CameraIntrinsics.load(camera_intr_filename)
     color = ColorImage.open(color_image_filename, frame=camera_intr.frame)
     depth = DepthImage.open(depth_image_filename, frame=camera_intr.frame)
     # masks and state are optional; fall back to None when absent
     segmask = None
     if os.path.exists(segmask_filename):
         segmask = BinaryImage.open(segmask_filename,
                                    frame=camera_intr.frame)
     obj_segmask = None
     if os.path.exists(obj_segmask_filename):
         obj_segmask = SegmentationImage.open(obj_segmask_filename,
                                              frame=camera_intr.frame)
     fully_observed = None
     if os.path.exists(state_filename):
         # use a context manager so the file handle is closed
         # (the previous version leaked the open file object)
         with open(state_filename, 'rb') as state_file:
             fully_observed = pkl.load(state_file)
     return RgbdImageState(RgbdImage.from_color_and_depth(color, depth),
                           camera_intr,
                           segmask=segmask,
                           obj_segmask=obj_segmask,
                           fully_observed=fully_observed)
Example #4
0
    def __init__(self, screen):
        """Initialize the ROS GUI.

        Sets up counters, builds the Qt UI, wires signals to slots, loads
        the grasp-overlay images, starts the ROS subscribers, and shows the
        window full-screen on the requested display.

        Parameters
        ----------
        screen : int
            Index of the desktop screen to display the window on; the
            default screen geometry is used when the index is out of range.
        """
        super(RosGUI, self).__init__()

        # Set constants
        self._total_time = 0
        self._n_attempts = 0
        self._n_picks = 0
        self._state = 'pause'
        self._img_dir = os.path.join(os.getcwd(), 'images')

        # Initialize the UI
        self._init_UI()
        self._current_image = None

        # Connect the signals to slots
        self.weight_signal.connect(self.set_weight)
        self.conf_signal.connect(self.set_confidence)
        self.pph_signal.connect(self.set_pph)
        self.state_signal.connect(self.set_state)
        self.image_signal.connect(self.update_central_image)
        self.success_signal.connect(self.set_success)

        # Load grasp images
        # NOTE(review): suction resizes to an absolute (40, 40) size while the
        # other images use scale factors -- confirm the asymmetry is intended.
        self._suction_image = ColorImage.open(self._image_fn('suction.png'))
        self._suction_image = self._suction_image.resize((40, 40))
        self._jaw_image = ColorImage.open(self._image_fn('gripper.png'))
        self._jaw_image = self._jaw_image.resize(0.06)
        self._push_image = ColorImage.open(self._image_fn('probe.png'))
        self._push_image = self._push_image.resize(0.2)
        self._push_end_image = ColorImage.open(self._image_fn('probe_end.png'))
        self._push_end_image = self._push_end_image.resize(0.06)

        # Initialize ROS subscribers
        self._init_subscribers()

        # Place the window on the requested screen; warn and let Qt fall
        # back to the default geometry when the index is out of range.
        desktop = QApplication.desktop()
        if screen > desktop.screenCount():
            print 'Screen index is greater than number of available screens; using default screen'
        screenRect = desktop.screenGeometry(screen)
        self.move(screenRect.topLeft())
        # Bypass the window manager so the GUI covers the whole screen.
        self.setWindowFlags(Qt.X11BypassWindowManagerHint)
        self.setFixedSize(screenRect.size())
        self.show()
import IPython
import matplotlib.pyplot as plt
import numpy as np
import os
import sys

from perception import ColorImage
from perception.models import ClassificationCNN

if __name__ == '__main__':
    # Validate usage up front instead of crashing with an IndexError.
    if len(sys.argv) != 4:
        print('Usage: python %s <model_dir> <model_type> <image_filename>' % sys.argv[0])
        sys.exit(1)
    model_dir = sys.argv[1]
    model_type = sys.argv[2]
    image_filename = sys.argv[3]

    # Load the image and the classification CNN, then predict a label.
    im = ColorImage.open(image_filename)
    cnn = ClassificationCNN.open(model_dir, model_typename=model_type)
    label = cnn.top_prediction(im)

    # Display the image (converted to RGB for matplotlib) with the label.
    plt.figure()
    plt.imshow(im.bgr2rgb().data)
    plt.title('Pred: %d' % (label))
    plt.axis('off')
    plt.show()
Example #6
0
            os.path.dirname(os.path.realpath(__file__)), '..',
            'cfg/examples/grasp_planning_service.yaml')

    # read config
    config = YamlConfig(config_filename)

    # wait for Grasp Planning Service and create Service Proxy
    rospy.wait_for_service('grasping_policy')
    plan_grasp = rospy.ServiceProxy('grasping_policy', GQCNNGraspPlanner)
    cv_bridge = CvBridge()

    # setup sensor
    camera_intr = CameraIntrinsics.load(camera_intr_filename)

    # read the color/depth pair in the camera's frame
    color_im = ColorImage.open(color_im_filename, frame=camera_intr.frame)
    depth_im = DepthImage.open(depth_im_filename, frame=camera_intr.frame)

    # optionally read a segmask; default is an all-255 mask covering the
    # whole image (i.e. nothing is masked out)
    segmask = BinaryImage(255 * np.ones(depth_im.shape).astype(np.uint8),
                          frame=camera_intr.frame)
    if segmask_filename is not None:
        # NOTE(review): opened without frame=camera_intr.frame, unlike the
        # other images above -- confirm the default frame is acceptable here.
        segmask = BinaryImage.open(segmask_filename)

    # optionally set a bounding box covering the full color image extent
    bounding_box = BoundingBox()
    bounding_box.minY = 0
    bounding_box.minX = 0
    bounding_box.maxY = color_im.height
    bounding_box.maxX = color_im.width