Example 1
def test_uw_rgbd_scene(version='v1'):
    import os

    import cv2
    import numpy as np

    from pybot.vision.image_utils import to_color
    from pybot.vision.imshow_utils import imshow_cv

    v1_directory = '/media/spillai/MRG-HD1/data/rgbd-scenes-v1/'
    v2_directory = '/media/spillai/MRG-HD1/data/rgbd-scenes-v2/rgbd-scenes-v2/'

    if version == 'v1':
        rgbd_data_uw = UWRGBDSceneDataset(
            version='v1',
            directory=os.path.join(v1_directory, 'rgbd-scenes'),
            aligned_directory=os.path.join(v1_directory,
                                           'rgbd-scenes-aligned'))
    elif version == 'v2':
        rgbd_data_uw = UWRGBDSceneDataset(version='v2', directory=v2_directory)
    else:
        raise RuntimeError('Version %s not supported. '
                           'Check dataset and choose v1/v2 scene dataset' %
                           version)

    for f in rgbd_data_uw.iteritems(every_k_frames=5, with_ground_truth=True):
        vis = rgbd_data_uw.annotate(f)
        imshow_cv('frame', np.hstack([f.img, vis]), text='Image')
        imshow_cv('depth', (f.depth / 16).astype(np.uint8), text='Depth')
        cv2.waitKey(100)

    return rgbd_data_uw
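
The (f.depth / 16).astype(np.uint8) cast above compresses raw 16-bit depth into a displayable 8-bit image by a fixed scale. A minimal alternative that normalizes by the actual depth range instead (plain numpy, not part of pybot):

import numpy as np

def depth_to_gray(depth):
    # Scale a 16-bit depth map into a displayable 8-bit image,
    # normalizing by the maximum observed depth
    d = depth.astype(np.float32)
    dmax = d.max() if d.max() > 0 else 1.0
    return (255.0 * d / dmax).astype(np.uint8)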
Example 2
def on_depth_cb(t, im):
    global rgb
    if rgb is None:
        return
    # Back-project the depth image into a 3D point cloud, subsampled 4x
    X = dcam.reconstruct(im)[::4, ::4, :]
    publish_cloud('depth', X, c=rgb, frame_id='camera', flip_rb=True)
    imshow_cv('d', im / 10.0)
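
dcam.reconstruct above is pybot's DepthCamera back-projection; for reference, a generic pinhole back-projection sketch with the same effect, given intrinsics K (an illustrative stand-in, not pybot's implementation):

import numpy as np

def backproject(depth, K):
    # Back-project an (H, W) metric depth map into an (H, W, 3) point cloud
    # using the pinhole model: X = (u - cx) Z / fx, Y = (v - cy) Z / fy
    fx, fy, cx, cy = K[0, 0], K[1, 1], K[0, 2], K[1, 2]
    H, W = depth.shape
    us, vs = np.meshgrid(np.arange(W), np.arange(H))
    return np.dstack([(us - cx) * depth / fx, (vs - cy) * depth / fy, depth])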
Example 3
    def visualize(self, im, ids, pts, colored=False):
        # Overlay the detector output and KLT tracks on a color copy of the frame
        vis = to_color(im)
        dt_vis = self.dt_.visualize(vis, pts)

        OpenCVKLT.draw_tracks(self, vis, colored=colored, max_track_length=10)
        out = np.vstack([vis, dt_vis])
        imshow_cv('dt_vis', out, wait=1)

        return out
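
OpenCVKLT.draw_tracks is pybot's track renderer; a rough stand-in showing how short track tails can be drawn with plain OpenCV (the tracks dict layout here is an assumption for illustration):

import cv2

def draw_tracks(vis, tracks, max_track_length=10):
    # tracks: dict mapping track id -> list of (x, y) positions, oldest first
    for tid, pts in tracks.items():
        tail = pts[-max_track_length:]
        for p, q in zip(tail[:-1], tail[1:]):
            cv2.line(vis, tuple(map(int, p)), tuple(map(int, q)), (0, 255, 0), 1)
        cv2.circle(vis, tuple(map(int, tail[-1])), 2, (0, 0, 255), -1)
    return vis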
Example 4
def test_nyu_rgbd(version='v2'):
    import os

    import cv2
    import numpy as np

    from pybot.vision.image_utils import to_color
    from pybot.vision.imshow_utils import imshow_cv

    v2_directory = '/media/spillai/MRG-HD1/data/nyu_rgbd/'
    v2_gt = os.path.join(v2_directory, 'nyu_depth_v2_labeled.mat')

    if version == 'v2':
        dataset = NYURGBDDataset(version='v2', directory=v2_directory,
                                 ground_truth=v2_gt)
    else:
        raise RuntimeError('Version %s not supported. '
                           'Only the v2 NYU dataset is available' % version)

    for f in dataset.iteritems(every_k_frames=5):
        imshow_cv('frame', f.img, text='Image')
        imshow_cv('depth', (f.depth / 16).astype(np.uint8), text='Depth')
        imshow_cv('instance', f.instance.astype(np.uint8), text='Instance')
        imshow_cv('label', f.label.astype(np.uint8), text='Label')
        cv2.waitKey(100)

    return dataset
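
Note that casting label/instance maps straight to uint8 wraps any id above 255, and NYU v2 has far more label classes than that. A small palette-based colorizer makes the maps easier to inspect (plain numpy, not part of the dataset API):

import numpy as np

def colorize_labels(label, num_classes=1024, seed=0):
    # Map integer label ids to stable pseudo-random BGR colors
    palette = np.random.RandomState(seed).randint(
        0, 255, size=(num_classes, 3)).astype(np.uint8)
    return palette[label % num_classes]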
Example 5
def test_uw_rgbd_object():
    import os

    import numpy as np

    from pybot.vision.image_utils import to_color
    from pybot.vision.imshow_utils import imshow_cv

    # Expand '~' explicitly in case the reader does not
    object_directory = os.path.expanduser(
        '~/data/rgbd_datasets/udub/rgbd-object-crop/rgbd-dataset')
    rgbd_data_uw = UWRGBDObjectDataset(directory=object_directory)

    for f in rgbd_data_uw.iteritems(every_k_frames=5):
        bbox = f.bbox
        imshow_cv(
            'frame',
            np.hstack([f.img, np.bitwise_and(f.img, to_color(f.mask))]),
            text='Image + Mask [Category: [%i] %s, Instance: %i]' %
            (bbox['category'], rgbd_data_uw.get_category_name(
                bbox['category']), bbox['instance']))
        imshow_cv('depth', (f.depth / 16).astype(np.uint8), text='Depth')
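
The np.bitwise_and(f.img, to_color(f.mask)) expression above blanks out pixels outside the object mask. A self-contained equivalent, assuming an 8-bit BGR image and a 0/255 single-channel mask (generic OpenCV/numpy, not the dataset's own helper):

import cv2
import numpy as np

def apply_mask(img, mask):
    # Zero out pixels where the single-channel mask is 0
    mask3 = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
    return np.bitwise_and(img, mask3)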
Example 6
    def visualize(self, root=0):
        if len(self.frames_) < 2:
            return

        if self.fixed_reference_:
            ref = self.ref_frame_
            root = -1
        else:
            assert 0 <= root < len(self.frames_)
            try:
                ref = self.frames_[root]
            except IndexError:
                raise RuntimeError("Unable to index to root")

        vis = {}

        # Use the provided reference features if available,
        # otherwise detect them in the reference image
        if ref.points is not None:
            pts = ref.points
        else:
            try:
                pts = self.fdet_.process(to_gray(ref.im))
            except Exception:
                return

        if not len(pts):
            return

        print(pts.shape, pts.dtype)

        # Draw epipoles across all other images/poses
        for idx, f in enumerate(self.frames_):
            F_10 = ref.camera.F(f.camera)
            vis[idx] = plot_epipolar_line(f.im,
                                          F_10,
                                          pts,
                                          im_0=ref.im if idx == 0 else None)

        if len(vis):
            imshow_cv('epi_out',
                      im_resize(np.vstack(list(vis.values())), scale=0.5))
Example 7
    def visualize(self, root=0):
        if len(self.frames_) < 2:
            return

        if self.fixed_reference_:
            ref_im = self.ref_frame_.im
            ref_camera = self.ref_frame_.camera
            root = -1
        else:
            assert 0 <= root < len(self.frames_)
            try:
                ref_im = self.frames_[root].im
                ref_camera = self.frames_[root].camera
            except IndexError:
                raise RuntimeError("Unable to index to root")

        vis = {}

        # Detect features in the reference image
        try:
            pts = self.fdet_.process(to_gray(ref_im))
        except Exception:
            return

        if not len(pts):
            return

        # Draw epipoles across all other images/poses
        for idx, f in enumerate(self.frames_):
            F_10 = ref_camera.F(f.camera)
            vis[idx] = plot_epipolar_line(f.im,
                                          F_10,
                                          pts,
                                          im_0=ref_im if idx == 0 else None)

        if len(vis):
            imshow_cv('epi_out',
                      im_resize(np.vstack(list(vis.values())), scale=0.5))
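
plot_epipolar_line above is pybot's helper; a generic sketch of the same idea with OpenCV, mapping points from the reference image to epipolar lines l' = F x and drawing them across the target image (an illustrative stand-in, not pybot's implementation):

import cv2
import numpy as np

def draw_epipolar_lines(im, F, pts):
    # Draw in im the epipolar lines induced by points pts (Nx2, float32)
    # detected in the reference image
    vis = im.copy()
    W = im.shape[1]
    lines = cv2.computeCorrespondEpilines(pts.reshape(-1, 1, 2), 1, F).reshape(-1, 3)
    for a, b, c in lines:
        if abs(b) < 1e-9:
            continue  # skip (near-)vertical lines in this simple sketch
        # Intersect ax + by + c = 0 with the left and right image borders
        p0 = (0, int(round(-c / b)))
        p1 = (W, int(round(-(c + a * W) / b)))
        cv2.line(vis, p0, p1, (0, 255, 0), 1)
    return vis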
Example 8
    def iter_gt_frames(self, *args, **kwargs):
        # Zip stereo pairs with their ground-truth poses and depth maps
        for (left, right), pose, depth in zip(self.iter_stereo_frames(*args, **kwargs),
                                              self.poses.iteritems(*args, **kwargs),
                                              self.gt.iteritems(*args, **kwargs)):
            yield AttrDict(left=left, right=right, pose=pose, depth=depth)

    def iter_stereo_frames(self, *args, **kwargs): 
        return self.stereo.iteritems(*args, **kwargs)

    @property
    def stereo_frames(self): 
        return self.iter_stereo_frames()

    def viz_gt_poses(self): 
        draw_utils.publish_pose_list('POSES', self.poses.items, frame_id='camera')

def tsukuba_stereo_dataset(directory='~/HD1/data/NewTsukubaStereoDataset/',
                           scale=1.0, grayscale=False, start_idx=1):
    return TsukubaStereo2012Reader(directory=directory, scale=scale,
                                   grayscale=grayscale, start_idx=start_idx)

if __name__ == "__main__": 
    from pybot.vision.imshow_utils import imshow_cv
    from pybot.vision.image_utils import to_gray

    dataset = tsukuba_stereo_dataset()
    for f in dataset.iterframes():
        lim, rim = to_gray(f.left), to_gray(f.right)
        out = np.dstack([np.zeros_like(lim), lim, rim])
        imshow_cv('left/right', out)
        imshow_cv('disp', f.depth)
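
iter_gt_frames above zips three parallel per-frame streams into one AttrDict record. The same pattern with only the standard library, using types.SimpleNamespace as a stand-in for pybot's AttrDict:

from types import SimpleNamespace

def iter_records(names, *streams):
    # Zip parallel iterables into per-frame records with named fields,
    # e.g. iter_records(('left', 'right', 'pose'), lefts, rights, poses)
    for values in zip(*streams):
        yield SimpleNamespace(**dict(zip(names, values)))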
Example 9
def on_image_cb(t, im):
    global rgb
    vis = to_color(im)
    # Keep a 4x-subsampled color copy for the depth/point-cloud callback
    rgb = to_color(im)[::4, ::4]
    vis = print_status(vis, text='Pose: {}'.format(odom_str))
    imshow_cv('im', vis)
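
print_status is pybot's status-overlay helper; a minimal stand-in with cv2.putText (the layout and colors here are arbitrary choices for illustration):

import cv2

def print_status(vis, text=''):
    # Draw a status line along the bottom edge of the image, in place
    cv2.putText(vis, text, (10, vis.shape[0] - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
    return vis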
Example 10
                           # (snippet begins mid-call: the tail of the rosbag
                           # dataset-reader construction, closing its channel list)
                           ],
                           every_k_frames=1,
                           start_idx=0,
                           index=False)

    # Iterate through rosbag reader
    odom = deque(maxlen=100)
    for idx, (t, ch, data) in enumerate(take(dataset.iterframes(), 10)):
        if ch == args.odom_channel:
            odom.append(data)
            publish_pose_list('robot_poses',
                              odom,
                              frame_id='origin',
                              reset=(idx == 0))
            continue
        imshow_cv('left', data)
    print('Read {} frames'.format(idx + 1))

    # Camera calibration
    cam, = dataset.calib(['/camera/depth/camera_info'])

    global dcam
    dcam = DepthCamera(K=cam.K)

    # Define callbacks
    global odom_str, rgb
    rgb = None
    odom_str = ''

    def on_image_cb(t, im):
        global rgb
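
The deque(maxlen=100) above keeps a rolling window of the most recent poses, so republishing it draws a bounded trail. A minimal illustration of the eviction behavior (standard library only):

from collections import deque

recent = deque(maxlen=3)
for i in range(5):
    recent.append(i)
print(list(recent))  # [2, 3, 4] -- oldest entries are evicted automatically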
Example 11
            # (snippet begins mid-function: tail of a stereo-frame helper)
            r = to_color(r) if color else to_gray(r)
            return l, r

def test_video(color=True, stereo=False, **kwargs):
    for l, r in test_dataset(**kwargs).iter_stereo_frames():
        l = to_color(l) if color else to_gray(l)
        if not stereo:
            yield l
        else:
            r = to_color(r) if color else to_gray(r)
            yield l, r

if __name__ == "__main__": 
    from pybot.vision.imshow_utils import imshow_cv

    # Test dataset
    dataset = test_dataset()
    for l, r in test_dataset(scale=0.5).iter_stereo_frames():
        imshow_cv('left/right', np.vstack([l, r]))

    # Test image
    im = test_image(color=True)
    imshow_cv('image', im)

    # Test video
    for im in test_video(color=True):
        print(im.shape, im.dtype)
        imshow_cv('video', im)

    cv2.waitKey(0)
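
test_video above is a generator over dataset frames; the same generator pattern applied to an ordinary video file with OpenCV (a generic sketch, independent of pybot's readers):

import cv2

def video_frames(filename, color=True):
    # Yield frames from a video file until it is exhausted
    cap = cv2.VideoCapture(filename)
    try:
        while True:
            ok, im = cap.read()
            if not ok:
                break
            yield im if color else cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    finally:
        cap.release()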