Example #1
def test_video(color=True, stereo=False, **kwargs):
    # Stream frames from the test dataset, converting each to color or
    # grayscale as requested; extra kwargs are forwarded to test_dataset().
    for l, r in test_dataset(**kwargs).iter_stereo_frames():
        l = to_color(l) if color else to_gray(l)
        if not stereo:
            yield l
        else:
            r = to_color(r) if color else to_gray(r)
            yield l, r
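
A minimal usage sketch (it assumes the dataset behind test_dataset() is available locally):

for lim, rim in test_video(color=False, stereo=True):
    print(lim.shape, rim.shape)
    break  # inspect only the first stereo pair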
Example #2
def test_image(color=True, scale=1.0, stereo=False):
    # Return the first frame (or stereo pair) from the test dataset;
    # the requested scale is forwarded to the dataset reader, which
    # otherwise went unused in the original signature.
    for l, r in test_dataset(scale=scale).iter_stereo_frames():
        l = to_color(l) if color else to_gray(l)
        if not stereo:
            return l
        else:
            r = to_color(r) if color else to_gray(r)
            return l, r
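
A similar sketch for grabbing single frames (again assuming the test dataset is available):

im = test_image(color=False)        # first frame, grayscale
lim, rim = test_image(stereo=True)  # first stereo pair, color
print(im.shape)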
Example #3
def dense_optical_flow(im1,
                       im2,
                       pyr_scale=0.5,
                       levels=3,
                       winsize=5,
                       iterations=3,
                       poly_n=5,
                       poly_sigma=1.2,
                       fb_threshold=-1,
                       mask1=None,
                       mask2=None,
                       flow1=None,
                       flow2=None):

    # Farneback dense flow. In OpenCV 3+ the (optional) initial flow is the
    # third positional argument; pass the OPTFLOW_USE_INITIAL_FLOW flag so
    # that a provided flow1 is actually used as the initial estimate.
    if flow1 is None:
        fflow = cv2.calcOpticalFlowFarneback(to_gray(im1), to_gray(im2), None,
                                             pyr_scale, levels, winsize,
                                             iterations, poly_n, poly_sigma, 0)
    else:
        fflow = cv2.calcOpticalFlowFarneback(to_gray(im1), to_gray(im2),
                                             flow1.copy(),
                                             pyr_scale, levels, winsize,
                                             iterations, poly_n, poly_sigma,
                                             cv2.OPTFLOW_USE_INITIAL_FLOW)

    # Invalidate flow outside the mask (np.bool was removed in NumPy 1.24;
    # the builtin bool works everywhere).
    if mask1 is not None:
        fflow[~mask1.astype(bool)] = np.nan

    if fb_threshold > 0:
        # Forward-backward consistency check: warp each pixel forward with
        # fflow, back with rflow, and reject pixels that do not land close
        # to where they started.
        H, W = im1.shape[:2]
        xs, ys = np.meshgrid(np.arange(W), np.arange(H))
        xys1 = np.dstack([xs, ys])
        xys2 = xys1 + fflow
        rflow = dense_optical_flow(im2,
                                   im1,
                                   pyr_scale=pyr_scale,
                                   levels=levels,
                                   winsize=winsize,
                                   iterations=iterations,
                                   poly_n=poly_n,
                                   poly_sigma=poly_sigma,
                                   fb_threshold=-1)
        if mask2 is not None:
            rflow[~mask2.astype(bool)] = np.nan

        xys1r = xys2 + rflow
        # A pixel fails the check only when both the x and y components of
        # the round-trip error exceed the threshold.
        fb_bad = (np.fabs(xys1r - xys1) > fb_threshold).all(axis=2)
        fflow[fb_bad] = np.nan

    return fflow
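
A hedged usage sketch on synthetic frames; it assumes to_gray passes already-grayscale inputs through unchanged, as pybot's image utilities do:

import numpy as np

# Two frames with a bright square shifted 2 px to the right.
im1 = np.zeros((120, 160), dtype=np.uint8)
im2 = np.zeros((120, 160), dtype=np.uint8)
im1[40:80, 40:80] = 255
im2[40:80, 42:82] = 255

flow = dense_optical_flow(im1, im2, fb_threshold=1.0)
valid = np.isfinite(flow).all(axis=2)  # pixels that survive the FB check
print('mean flow over consistent pixels:', flow[valid].mean(axis=0))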
Example #4
    def visualize(self, root=0):
        if len(self.frames_) < 2:
            return

        if self.fixed_reference_:
            ref = self.ref_frame_
            root = -1
        else:
            assert 0 <= root < len(self.frames_)
            try:
                ref = self.frames_[root]
            except IndexError:
                raise RuntimeError("Unable to index to root")

        vis = {}

        # Use the features provided with the reference frame if available,
        # otherwise detect them here.
        if ref.points is not None:
            pts = ref.points
        else:
            try:
                pts = self.fdet_.process(to_gray(ref.im))
            except Exception:
                return

        if not len(pts):
            return

        print(pts.shape, pts.dtype)  # debug: shape/dtype of reference features

        # Draw epipoles across all other images/poses
        for idx, f in enumerate(self.frames_):
            F_10 = ref.camera.F(f.camera)
            vis[idx] = plot_epipolar_line(f.im,
                                          F_10,
                                          pts,
                                          im_0=ref.im if idx == 0 else None)

            # print 'F b/w ref and idx={:}, \ncurr={:}\n\nF={:}\n'.format(idx, f.camera, F_10)

        if len(vis):
            imshow_cv('epi_out',
                      im_resize(np.vstack(list(vis.values())), scale=0.5))
Example #5
    def process(self, im, detected_pts=None):

        # Preprocess
        self.ims_.append(gaussian_blur(to_gray(im)))

        # Track object
        pids, ppts = self.tm_.ids, self.tm_.pts
        if ppts is not None and len(ppts) and len(self.ims_) == 2: 
            pts = self.tracker_.track(self.ims_[-2], self.ims_[-1], ppts)

            # Check bounds
            valid = finite_and_within_bounds(pts, im.shape[:2])
            
            # Add pts and prune afterwards
            self.tm_.add(pts[valid], ids=pids[valid], prune=True)

        # Request more features if there are none yet or too few tracks remain
        # (short-circuiting on ppts is None makes the extra guard redundant).
        self.add_features_ = (self.add_features_ or ppts is None
                              or len(ppts) < self.min_tracks_)

        # Initialize or add more features
        if self.add_features_: 
            # Extract features
            mask = self.create_mask(im.shape[:2], ppts)            
            # imshow_cv('mask', mask)

            if detected_pts is None: 

                # Detect features
                new_kpts = self.detector_.process(self.ims_[-1], mask=mask, return_keypoints=True)
                # Guard against ppts being None on the very first frame.
                newlen = max(0, self.min_tracks_ - (len(ppts) if ppts is not None else 0))
                new_pts = to_pts(sorted(new_kpts, key=lambda kpt: kpt.response, reverse=True)[:newlen])
            else: 
                xy = detected_pts.astype(np.int32)
                valid = mask[xy[:,1], xy[:,0]] > 0
                new_pts = detected_pts[valid]

            # Add detected features with new ids, and prevent pruning 
            self.tm_.add(new_pts, ids=None, prune=False)
            self.add_features_ = False

        # Returns only tracks that have a minimum trajectory length
        # This is different from self.tm_.ids, self.tm_.pts
        return self.latest_ids, self.latest_pts
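
A hypothetical driver loop, reusing test_video from Example #1; the constructor of this tracker class is not shown here, so `tracker` stands in for a configured instance:

for im in test_video(color=False):
    ids, pts = tracker.process(im)
    print('{} active tracks'.format(len(ids)))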
Example #6
    def visualize(self, root=0):
        if len(self.frames_) < 2:
            return

        if self.fixed_reference_:
            ref_im = self.ref_frame_.im
            ref_camera = self.ref_frame_.camera
            root = -1
        else:
            assert 0 <= root < len(self.frames_)
            try:
                ref_im = self.frames_[root].im
                ref_camera = self.frames_[root].camera
            except IndexError:
                raise RuntimeError("Unable to index to root")

        vis = {}

        # Detect features in the reference image
        try:
            pts = self.fdet_.process(to_gray(ref_im))
            # pts = pts.reshape(len(pts)/4,-1,2).mean(axis=1)
        except Exception:
            return

        if not len(pts):
            return

        # Draw epipoles across all other images/poses
        for idx, f in enumerate(self.frames_):
            F_10 = ref_camera.F(f.camera)
            vis[idx] = plot_epipolar_line(f.im,
                                          F_10,
                                          pts,
                                          im_0=ref_im if idx == 0 else None)

            # print 'F b/w ref and idx={:}, \ncurr={:}\n\nF={:}\n'.format(idx, f.camera, F_10)

        if len(vis):
            imshow_cv('epi_out',
                      im_resize(np.vstack(list(vis.values())), scale=0.5))
Example #7
File: tsukuba.py  Project: subokita/pybot
    def iter_gt_frames(self, *args, **kwargs):
        # Zip synchronized stereo pairs with their ground-truth pose and depth
        # (zip replaces the Python 2-only itertools.izip).
        for (left, right), pose, depth in zip(self.iter_stereo_frames(*args, **kwargs),
                                              self.poses.iteritems(*args, **kwargs),
                                              self.gt.iteritems(*args, **kwargs)):
            yield AttrDict(left=left, right=right, pose=pose, depth=depth)

    def iter_stereo_frames(self, *args, **kwargs): 
        return self.stereo.iteritems(*args, **kwargs)

    @property
    def stereo_frames(self): 
        return self.iter_stereo_frames()

    def viz_gt_poses(self): 
        draw_utils.publish_pose_list('POSES', self.poses.items, frame_id='camera')

def tsukuba_stereo_dataset(directory='~/HD1/data/NewTsukubaStereoDataset/', scale=1.0, grayscale=False, start_idx=1): 
    return TsukubaStereo2012Reader(directory=directory, scale=scale, grayscale=grayscale, start_idx=start_idx)

if __name__ == "__main__": 
    import numpy as np
    from pybot.vision.imshow_utils import imshow_cv
    from pybot.vision.image_utils import to_gray

    dataset = tsukuba_stereo_dataset()
    for f in dataset.iter_gt_frames():
        lim, rim = to_gray(f.left), to_gray(f.right)
        out = np.dstack([np.zeros_like(lim), lim, rim])
        imshow_cv('left/right', out)
        imshow_cv('disp', f.depth)