Example no. 1
class OpticalFlowTracker(object): 
    """
    General-purpose optical flow tracker class that allows for fast switching between
    sparse/dense tracking. 

    You can also request a variable number of pyramid levels for tracking, 
    and perform subpixel refinement on the tracked keypoints.
    """

    lk_params = AttrDict(winSize=(5,5), maxLevel=4)
    farneback_params = AttrDict(pyr_scale=0.5, levels=3, winsize=15, 
                                iterations=3, poly_n=7, poly_sigma=1.5, flags=0)

    def __init__(self, fb_check=True): 
        self.fb_check_ = fb_check

    @staticmethod
    def create(method='lk', fb_check=True, params=lk_params): 
        trackers = { 'lk': LKTracker, 'dense': FarnebackTracker }
        try: 
            # Look up the tracker type that implements track()
            klass = trackers[method]
        except KeyError: 
            raise RuntimeError('Unknown tracker type: {:}! Use one of {:}'.format(method, trackers.keys()))
        return klass(fb_check=fb_check, **params)

    def track(self, im0, im1, p0):
        raise NotImplementedError()
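
A minimal usage sketch of the factory above (assuming the LKTracker and FarnebackTracker subclasses shown in later examples, consecutive grayscale frames im0 and im1, and an Nx2 float32 keypoint array p0; none of these are defined in this snippet):

# Sparse LK tracking with a forward-backward consistency check
lk = OpticalFlowTracker.create(method='lk', fb_check=True,
                               params=OpticalFlowTracker.lk_params)
p1 = lk.track(im0, im1, p0)

# Dense Farneback tracking with the matching parameter set
dense = OpticalFlowTracker.create(method='dense', fb_check=False,
                                  params=OpticalFlowTracker.farneback_params)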
Example no. 2
 def calib_read(fn, scale): 
     db = AttrDict.load_yaml(fn)
     # P0/P1 are KITTI-style flattened 3x4 projection matrices
     P0 = np.float32(db['calib_left'].split(' '))
     P1 = np.float32(db['calib_right'].split(' '))
     fx, cx, cy = P0[0], P0[2], P0[6]
     baseline_px = np.fabs(P1[3])
     return StereoCamera.from_calib_params(fx, fx, cx, cy, baseline_px=baseline_px)
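
For reference, a calibration YAML that calib_read would accept might look like the following (a hypothetical KITTI-style sketch; the keys and values are assumptions, not from the original source):

# Expected YAML content (flattened 3x4 projection matrices as strings):
#   calib_left:  '718.856 0.0 607.19 0.0 0.0 718.856 185.21 0.0 0.0 0.0 1.0 0.0'
#   calib_right: '718.856 0.0 607.19 -386.14 0.0 718.856 185.21 0.0 0.0 0.0 1.0 0.0'
# P1[3] = -fx * baseline, so baseline_px = 386.14 pixels here.
cam = calib_read('calib.yaml', scale=1.0)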
Example no. 3
 def iterframes(self, *args, **kwargs):
     for im, pose in izip(self.iter_rgb_frames(*args, **kwargs),
                          self.iter_poses(*args, **kwargs)):
         yield AttrDict(img=im[:, :, :3],
                        mask=im[:, :, -1],
                        velodyne=None,
                        pose=pose)
Example no. 4
 def iterframes(self, *args, **kwargs):
     for (left, right), oxts, velodyne in izip(
             self.iter_stereo_frames(*args, **kwargs),
             self.iter_oxts_frames(*args, **kwargs),
             self.iter_velodyne_frames(*args, **kwargs)):
         yield AttrDict(left=left, right=right, velodyne=velodyne,
                        pose=oxts.pose, oxts=oxts.packet)
Example no. 5
        def _process_items(self, index, rgb_im, depth_im, bbox, pose):
            def _process_bbox(bbox):
                return AttrDict(category=bbox['category'],
                                target=UWRGBDDataset.target_hash[str(
                                    bbox['category'])],
                                coords=np.int64([
                                    bbox['left'], bbox['top'], bbox['right'],
                                    bbox['bottom']
                                ]))

            # v1: filter the provided bboxes to targets in the training set
            if self.version == 'v1':
                if bbox is not None:
                    bbox = [_process_bbox(bb) for bb in bbox]
                    bbox = filter(
                        lambda bb: bb.target in UWRGBDDataset.train_ids_set,
                        bbox)

            # v2: compute bboxes from the pose and the object map
            if self.version == 'v2':
                if bbox is None and hasattr(self, 'map_info'):
                    bbox = self.get_bboxes(pose)

            return AttrDict(index=index,
                            img=rgb_im,
                            depth=depth_im,
                            bbox=bbox if bbox is not None else [],
                            pose=pose)
Example no. 6
        def get_bboxes(self, pose):
            """
            Support for occlusion handling is incomplete/bug-ridden
            """

            # 1. Get pose for a particular frame,
            # and set camera extrinsic
            try:
                self.map_info.camera.set_pose(pose.inverse())
            except Exception:
                # Bail out of the detection loop if the pose is unavailable
                print('Failed to find pose')
                return None

            # 2. Determine bounding boxes for visible clusters
            object_centers = np.vstack(
                [obj.center for obj in self.map_info.objects])
            visible_inds, = np.where(
                check_visibility(self.map_info.camera, object_centers))

            object_candidates = []
            for ind in visible_inds:
                obj = self.map_info.objects[ind]
                pts2d, coords, depth = get_object_bbox(self.map_info.camera,
                                                       obj.points,
                                                       subsample=3,
                                                       scale=1)
                if coords is not None:
                    object_candidates.append(
                        AttrDict(
                            target=obj.label,
                            category=UWRGBDDataset.target_unhash[obj.label],
                            coords=coords,
                            depth=depth,
                            uid=obj.uid))

            # # 3. Ensure occlusions are handled, sort by increasing depth, and filter
            # # based on overlapping threshold
            # sorted_object_candidates = sorted(object_candidates, key=lambda obj: obj.depth)

            # # Occlusion mask
            # im_sz = self.map_info.camera.shape[:2]
            # occ_mask = np.zeros(shape=im_sz, dtype=np.uint8)

            # # Non-occluded object_candidates
            # nonocc_object_candidates = []
            # for obj in sorted_object_candidates:
            #     x0, y0, x1, y1 = obj.coords
            #     xc, yc = (x0 + x1) / 2, (y0 + y1) / 2

            #     # If the bbox center is previously occupied, skip
            #     if occ_mask[yc,xc]:
            #         continue

            #     # Set as occupied
            #     occ_mask[y0:y1,x0:x1] = 1
            #     nonocc_object_candidates.append(obj)

            return object_candidates
Example no. 7
 def iter_gt_frames(*args, **kwargs):
     # Use SGBM disparity as a stand-in ground truth for both the
     # non-occluded (noc) and occluded (occ) fields
     gt = StereoSGBM()
     for (t, ch, (l, r)) in dataset.iteritems():
         disp = gt.process(l, r)
         yield AttrDict(left=l, right=r, noc=disp, occ=disp)
Example no. 8
    def _process_items(self, index, rgb_im, depth_im, instance, label, bbox, pose): 
        # Swap axes into row-major (H, W, C) layout and convert RGB -> BGR
        # for OpenCV consumption
        rgb_im = np.swapaxes(rgb_im, 0, 2)
        rgb_im = cv2.cvtColor(rgb_im, cv2.COLOR_RGB2BGR)

        # Transpose to (H, W) and scale depth by 1000 (meters to millimeters)
        depth_im = np.swapaxes(depth_im, 0, 1) * 1000
        instance = np.swapaxes(instance, 0, 1)
        label = np.swapaxes(label, 0, 1)

        return AttrDict(index=index, img=rgb_im, depth=depth_im, instance=instance, 
                        label=label, bbox=bbox if bbox is not None else [], pose=pose)
Example no. 9
    def process(self, im, bboxes=None): 
        """
        Propagate bounding boxes based on feature tracks
        ccw: a b c d
        """
        OpenCVKLT.process(self, im, detected_pts=None)

        # Degenerate case where no points are available to propagate
        ids, pts, flow = self.latest_ids, self.latest_pts, self.latest_flow

        # 1. Update hulls based on the newly tracked locations and 
        for hid in self.hulls_.keys(): 

            # Find the intersection of the previously tracked ids and the
            # currently tracked ids
            tids = self.hulls_[hid].ids
            common_inds, = np.where(np.in1d(ids, tids))

            # Delete the hull if no common tracked ids
            if not len(common_inds): 
                self.hulls_.pop(hid)
                continue

            # Update the hull with the latest points, augmenting its ids with
            # any new tracks that fall inside the updated bbox so that the
            # propagation is more prolonged
            vpts = pts[common_inds]
            vbox = get_bbox(vpts)
            valid_pts = inside_bboxes(pts, [vbox])
            vinds, = np.where(valid_pts.ravel())
            vids = ids[np.r_[common_inds, vinds]]

            self.hulls_[hid].pts = vpts
            self.hulls_[hid].bbox = vbox
            self.hulls_[hid].ids = vids

        # 2. Add new hulls that are provided, and keep old tracked ones
        max_id = len(self.hulls_)
        
        # # Find the bounding boxes that are relatively new (IoU < 0.3)
        # if len(self.hulls_) and bboxes is not None: 
        #     hbboxes = np.vstack([hull.bbox for hull in self.hulls_.itervalues()])
        #     HB = brute_force_match(hbboxes, bboxes, 
        #                            match_func=lambda x,y: intersection_over_union(x, y),
        #                            dtype=np.float32)
        #     newinds, = np.where(HB.max(axis=0) < 0.3)
        #     bboxes = bboxes[newinds]

        if bboxes is not None:
            valid_mask = inside_bboxes(pts, bboxes)
            for bidx, valid in enumerate(valid_mask):
                vids, vpts = ids[valid], pts[valid]
                if len(vpts): 
                    self.hulls_[max_id + bidx] = AttrDict(ids=vids, pts=vpts, bbox=get_bbox(vpts))

        return self.ids, self.bboxes
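
A hypothetical usage sketch (the owning class is not shown in this snippet, so `tracker` stands in for an instance of it, and `detections` for an Nx4 array of (x0, y0, x1, y1) boxes):

# Seed with detections once, then propagate on subsequent frames
ids, bboxes = tracker.process(im, bboxes=detections)
for hid, hull in tracker.hulls_.items():
    x0, y0, x1, y1 = hull.bbox   # propagated box for hull `hid`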
Example no. 10
 def _process_bbox(bbox):
     return AttrDict(category=bbox['category'],
                     target=UWRGBDDataset.target_hash[str(
                         bbox['category'])],
                     coords=np.int64([
                         bbox['left'], bbox['top'], bbox['right'],
                         bbox['bottom']
                     ]))
Example no. 11
 def __init__(self,
              fb_check=True,
              winSize=(5, 5),
              maxLevel=4,
              criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 30,
                        0.01)):
     OpticalFlowTracker.__init__(self, fb_check=fb_check)
     self.lk_params_ = AttrDict(winSize=winSize,
                                maxLevel=maxLevel,
                                criteria=criteria)
Example no. 12
 def iteritems(self, every_k_frames=1):
     for rgb_im, depth_im, mask_im in izip(
             self.rgb.iteritems(every_k_frames=every_k_frames),
             self.depth.iteritems(every_k_frames=every_k_frames),
             self.mask.iteritems(every_k_frames=every_k_frames)):
         yield AttrDict(target=self.target,
                        instance=self.instance,
                        img=rgb_im,
                        depth=depth_im,
                        mask=mask_im)
Example no. 13
def setup_bb(scale=1.0):
    # Setup one-time calibration
    calib_path = '/home/spillai/perceptual-learning/software/python/bot_vision/calib/bb/calib'
    calibration = StereoCalibration(input_folder=calib_path)
    calib_params = AttrDict(
        get_stereo_calibration_params(input_folder=calib_path))
    return StereoCamera.from_calib_params(calib_params.fx,
                                          calib_params.fy,
                                          calib_params.cx,
                                          calib_params.cy,
                                          baseline=calib_params.baseline)
Example no. 14
 def process_gt_cb(fn): 
     data = AttrDict()
     with open(fn) as f: 
         for l in f: 
             # Key is everything before '=' (whitespace stripped)
             key = l[0:l.find('=')].replace(' ', '')
             if l.find('[') < 0: 
                 # Scalar entry: "key = 1.0;"
                 val = float(l[l.find('=')+1:l.find(';')].replace(' ', ''))
             else: 
                 # Array entry: "key = [1.0, 2.0, ...]"
                 val = l[l.find('[')+1:l.find(']')]
                 val = np.array([float(v) for v in val.split(',')])
             data[key] = val
     return data
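
A hypothetical input file that process_gt_cb would accept (the format is inferred from the parsing logic above; the names and values are made up):

# calib.txt:
#   fx = 615.0;
#   baseline = 0.1;
#   D = [0.0, 0.0, 0.0, 0.0, 0.0]
data = process_gt_cb('calib.txt')
print(data.fx, data.baseline, data.D)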
Example no. 15
 def iter_gt_frames(self, *args, **kwargs):
     """
     Iterate over all the ground-truth data
        - For noc, occ disparity conversion, see devkit_stereo_flow/matlab/disp_read.m
     """
     for (left, right), noc, occ, calib in izip(self.iter_stereo_frames(*args, **kwargs), 
                                                      self.noc.iteritems(*args, **kwargs), 
                                                      self.occ.iteritems(*args, **kwargs), 
                                                      self.calib.iteritems(*args, **kwargs)):
         yield AttrDict(left=left, right=right, 
                        depth=(occ / 256.0).astype(np.float32),
                        noc=(noc / 256.0).astype(np.float32), 
                        occ=(occ / 256.0).astype(np.float32), 
                        calib=calib, pose=None)
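
The conversion the docstring points to can be sketched directly: the KITTI stereo/flow devkit stores disparities as 16-bit PNGs scaled by 256, with a value of 0 marking invalid pixels (see disp_read.m). A minimal Python equivalent, assuming such a PNG on disk:

I = cv2.imread('disp_noc/000000_10.png', cv2.IMREAD_UNCHANGED)  # uint16
disp = I.astype(np.float32) / 256.0
disp[I == 0] = -1  # 0 encodes 'invalid' in the devkit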
Example no. 16
        def iteritems(self, every_k_frames=1):
            for rgb_im, depth_im, mask_im, loc in \
                izip(self.rgb.iteritems(every_k_frames=every_k_frames),
                     self.depth.iteritems(every_k_frames=every_k_frames),
                     self.mask.iteritems(every_k_frames=every_k_frames),
                     self.locations[::every_k_frames]):

                # Allocate full-frame canvases and paste the crops at their
                # recorded location (loc = top-left x, y)
                rgb = np.zeros(shape=UWRGBDObjectDataset.default_rgb_shape,
                               dtype=np.uint8)
                depth = np.zeros(shape=UWRGBDObjectDataset.default_depth_shape,
                                 dtype=np.uint16)
                mask = np.zeros(shape=UWRGBDObjectDataset.default_depth_shape,
                                dtype=np.uint8)

                rgb[loc[1]:loc[1] + rgb_im.shape[0],
                    loc[0]:loc[0] + rgb_im.shape[1]] = rgb_im
                depth[loc[1]:loc[1] + depth_im.shape[0],
                      loc[0]:loc[0] + depth_im.shape[1]] = depth_im
                mask[loc[1]:loc[1] + mask_im.shape[0],
                     loc[0]:loc[0] + mask_im.shape[1]] = mask_im

                # Only a single bbox per image
                yield AttrDict(
                    img=rgb, depth=depth, mask=mask,
                    bbox=[AttrDict(
                        coords=np.float32([loc[0], loc[1],
                                           loc[0] + mask_im.shape[1],
                                           loc[1] + mask_im.shape[0]]),
                        target=self.target,
                        category=UWRGBDDataset.get_category_name(self.target),
                        instance=self.instance)])
Example no. 17
class AprilTagFeatureDetector(object): 
    """
    AprilTag Feature Detector (only detect 4 corner points)
    """
    default_detector_params = AttrDict(tag_size=0.1, fx=576.09, fy=576.09, cx=319.5, cy=239.5)
    def __init__(self, tag_size=0.1, fx=576.09, fy=576.09, cx=319.5, cy=239.5): 
        self.detector = AprilTagsWrapper()
        self.detector.set_calib(tag_size=tag_size, fx=fx, fy=fy, cx=cx, cy=cy)
    
    def detect(self, im, mask=None): 
        tags = self.detector.process(im, return_poses=False)
        kpts = []
        for tag in tags: 
            kpts.extend([cv2.KeyPoint(pt[0], pt[1], 1) for pt in tag.getFeatures()])
        return kpts
Example no. 18
 def load(cls, filename):
     db = AttrDict.load_yaml(filename)
     shape = np.int32([
         db.width, db.height
     ]) if hasattr(db, 'width') and hasattr(db, 'height') else None
     return cls.from_calib_params(db.fx,
                                  db.fy,
                                  db.cx,
                                  db.cy,
                                  k1=db.k1,
                                  k2=db.k2,
                                  k3=db.k3,
                                  p1=db.p1,
                                  p2=db.p2,
                                  shape=shape)
Example no. 19
    def oxts_process_cb(self, fn):
        X = np.fromfile(fn, dtype=np.float64, sep=' ')
        packet = AttrDict({fmt: x
                           for (fmt, x) in zip(self.oxts_formats, X)})

        # compute scale from first lat value
        if self.scale_ is None:
            self.scale_ = np.cos(packet.lat * np.pi / 180.)

        # Use a Mercator projection to get the translation vector
        tx = self.scale_ * packet.lon * np.pi * EARTH_RADIUS / 180.
        ty = self.scale_ * EARTH_RADIUS * \
             np.log(np.tan((90. + packet.lat) * np.pi / 360.))
        tz = packet.alt
        t = np.array([tx, ty, tz])

        # Use the Euler angles (roll, pitch, yaw) to build the rotation matrix
        rx, ry, rz = packet.roll, packet.pitch, packet.yaw
        Rx = np.float32([1, 0, 0,
                         0, np.cos(rx), -np.sin(rx),
                         0, np.sin(rx), np.cos(rx)]).reshape(3,3)
        Ry = np.float32([np.cos(ry), 0, np.sin(ry),
                         0, 1, 0,
                         -np.sin(ry), 0, np.cos(ry)]).reshape(3,3)
        Rz = np.float32([np.cos(rz), -np.sin(rz), 0,
                         np.sin(rz), np.cos(rz), 0,
                         0, 0, 1]).reshape(3,3)
        R = np.dot(Rz, Ry.dot(Rx))
        pose = RigidTransform.from_Rt(R, t)

        # We want the initial position to be the origin, but keep the ENU
        # coordinate system
        if self.p_init_ is None:
            self.p_init_ = pose.inverse()

        return AttrDict(packet=packet, pose=self.p_init_ * pose)
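
A worked example of the Mercator projection above (the coordinates are made up; EARTH_RADIUS is assumed to be in meters):

lat, lon = 49.011, 8.416
scale = np.cos(lat * np.pi / 180.)                  # ~0.656
tx = scale * lon * np.pi * EARTH_RADIUS / 180.      # easting (m)
ty = scale * EARTH_RADIUS * np.log(
    np.tan((90. + lat) * np.pi / 360.))             # northing (m)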
Example no. 20
 def save(self, filename):
     try:
         height, width = self.shape[:2]
     except (AttributeError, TypeError):
         # Shape unavailable; int('') below would raise, so default to 0
         height, width = 0, 0
     AttrDict(fx=float(self.fx),
              fy=float(self.fy),
              cx=float(self.cx),
              cy=float(self.cy),
              k1=float(self.D[0]),
              k2=float(self.D[1]),
              k3=float(self.D[4]),
              p1=float(self.D[2]),
              p2=float(self.D[3]),
              width=int(width),
              height=int(height)).save_yaml(filename)
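
A hypothetical roundtrip sketch (the owning camera class is not named in the snippet, so `Camera` stands in for it, with the companion load() shown in Example no. 18):

cam.save('camera.yaml')
cam2 = Camera.load('camera.yaml')   # restores fx, fy, cx, cy, k1..k3, p1, p2, shape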
Example no. 21
 def __init__(self,
              fb_check=True,
              pyr_scale=0.5,
              levels=3,
              winsize=15,
              iterations=3,
              poly_n=7,
              poly_sigma=1.5,
              flags=0):
     OpticalFlowTracker.__init__(self, fb_check=fb_check)
     self.farneback_params_ = AttrDict(pyr_scale=pyr_scale,
                                       levels=levels,
                                       winsize=winsize,
                                       iterations=iterations,
                                       poly_n=poly_n,
                                       poly_sigma=poly_sigma,
                                       flags=flags)
Example no. 22
class AprilTagFeatureDetector(object): 
    """
    AprilTag Feature Detector (only detect 4 corner points)
    """
    default_params = AttrDict(tag_size=0.1, fx=576.09, fy=576.09, cx=319.5, cy=239.5)
    def __init__(self, tag_size=0.1, fx=576.09, fy=576.09, cx=319.5, cy=239.5): 
        try: 
            from pybot_apriltags import AprilTagsWrapper
            self.detector_ = AprilTagsWrapper(tag_size=tag_size, fx=fx, fy=fy, cx=cx, cy=cy)
        except ImportError: 
            raise ImportError('Apriltags (pybot_apriltags) is not available')

    def detect(self, im, mask=None): 
        tags = self.detector_.process(im, return_poses=False)
        kpts = []
        for tag in tags: 
            kpts.extend([cv2.KeyPoint(pt[0], pt[1], 1) for pt in tag.getFeatures()])
        return kpts
Example no. 23
    def setup_all_datasets(cls,
                           object_dir=None,
                           scene_dir=None,
                           targets=train_names,
                           version='v1'):
        return AttrDict(
            # Main object dataset (single object instance per image)
            objects=UWRGBDObjectDataset(directory=object_dir, targets=targets)
            if object_dir is not None else None,

            # Scene dataset for evaluation
            scene=UWRGBDSceneDataset(version=version, directory=scene_dir)
            if scene_dir is not None else None,

            # Background dataset for hard-negative training
            background=UWRGBDSceneDataset(version=version,
                                          directory=os.path.join(scene_dir, 'background'))
            if scene_dir is not None else None)
Example no. 24
def get_calib_params(fx, fy, cx, cy, baseline=None, baseline_px=None):
    raise RuntimeError('Deprecated, see camera_utils.StereoCamera')

    # NOTE: everything below the raise is unreachable; kept for reference
    baseline, baseline_px = get_baseline(fx,
                                         baseline=baseline,
                                         baseline_px=baseline_px)
    q43 = -1 / baseline
    P0 = np.float32([fx, 0.0, cx, 0.0, 0.0, fy, cy, 0.0, 0.0, 0.0, 1.0,
                     0.0]).reshape((3, 4))
    P1 = np.float32(
        [fx, 0.0, cx, -baseline_px, 0.0, fy, cy, 0.0, 0.0, 0.0, 1.0,
         0.0]).reshape((3, 4))

    K0, K1 = P0[:3, :3], P1[:3, :3]

    R0, R1 = np.eye(3), np.eye(3)
    T0, T1 = np.zeros(3), np.float32([baseline, 0, 0])
    # T0, T1 = np.zeros(3), np.float32([-baseline_px, 0, 0])

    Q = np.float32([[-1, 0, 0, cx], [0, -1, 0, cy], [0, 0, 0, -fx],
                    [0, 0, q43, 0]])

    D0, D1 = np.zeros(5), np.zeros(5)
    return AttrDict(R0=R0,
                    R1=R1,
                    K0=K0,
                    K1=K1,
                    P0=P0,
                    P1=P1,
                    Q=Q,
                    T0=T0,
                    T1=T1,
                    D0=D0,
                    D1=D1,
                    fx=fx,
                    fy=fy,
                    cx=cx,
                    cy=cy,
                    baseline=baseline,
                    baseline_px=baseline_px)
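
The Q built above maps (x, y, disparity) homogeneously to 3D, with Z = fx * baseline / disparity. A sketch of how such a reprojection matrix would be used (not from the original source, and note the function above raises as deprecated; `disp` is an assumed disparity image):

xyz = cv2.reprojectImageTo3D(disp.astype(np.float32), Q)  # HxWx3 point image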
Example no. 25
class SemiDenseFeatureDetector(object): 
    """
    Semi-Dense Feature Detector
    """
    default_params = AttrDict(grid=(20,20), threshold=20, max_corners=10000)
    def __init__(self, grid=(20,20), threshold=20, max_corners=10000): 
        try: 
            from pybot_vision import fast_proposals
            self.detector_ = cv2.GridAdaptedFeatureDetector(
                get_dense_detector(step=2, levels=1), max_corners, grid[0], grid[1])
            self.detect_edges_ = lambda im: fast_proposals(im, threshold)
        except (ImportError, AttributeError): 
            raise ImportError('fast_proposals (pybot_vision) is not available')

    def detect(self, im, mask=None): 
        # Restrict dense detection to the semi-dense edge proposals
        edges = self.detect_edges_(im)
        if mask is not None: 
            edges = np.bitwise_and(edges, mask)
        kpts = self.detector_.detect(im, mask=edges)
        return kpts
Example no. 26
        def cluster_ply_labels(ply_xyz, ply_rgb, ply_label):
            """
            Separate multiple object instances cleanly, otherwise, 
            candidate projection becomes inconsistent
            """
            from pybot_pcl import euclidean_clustering

            object_info = []
            unique_labels = np.unique(ply_label)

            for l in unique_labels:

                # Only add clusters that are in target/train and not background
                if l not in UWRGBDDataset.train_ids or l == UWRGBDDataset.target_hash[
                        'background']:
                    continue

                l_xyz = ply_xyz[ply_label == l]
                l_rgb = ply_rgb[ply_label == l]

                # Cluster each labeled point set into spatially-separated instances
                linds = euclidean_clustering(l_xyz.astype(np.float32),
                                             tolerance=0.1,
                                             scale=1.0,
                                             min_cluster_size=10)
                unique_linds = np.unique(linds)
                for lind in unique_linds:
                    object_info.append(
                        AttrDict(label=l,
                                 uid=len(object_info),
                                 points=l_xyz[linds == lind],
                                 colors=l_rgb[linds == lind],
                                 center=np.mean(l_xyz[linds == lind], axis=0)))

            print('Total unique objects in dataset: {:}'.format(len(object_info)))

            return object_info
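
If pybot_pcl is unavailable, the Euclidean clustering step can be approximated with sklearn's DBSCAN (a swapped-in stand-in, not the author's implementation; eps mirrors tolerance and min_samples mirrors min_cluster_size):

from sklearn.cluster import DBSCAN
# Note: DBSCAN labels noise points as -1, which euclidean_clustering may not
linds = DBSCAN(eps=0.1, min_samples=10).fit_predict(l_xyz.astype(np.float32))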
Example no. 27
 def iter_gt_frames(self, *args, **kwargs): 
     for (left, right), pose, depth in izip(self.iter_stereo_frames(*args, **kwargs), 
                                            self.poses.iteritems(*args, **kwargs), 
                                            self.gt.iteritems(*args, **kwargs)): 
         yield AttrDict(left=left, right=right, pose=pose, depth=depth)
Example no. 28
 def iterframes(*args, **kwargs):
     for (t, ch, (l, r)) in dataset.iteritems(*args, **kwargs):
         yield AttrDict(left=l, right=r)
Example no. 29
 def iterframes(self, *args, **kwargs):
     for (left, right), pose in izip(
             self.iter_stereo_frames(*args, **kwargs),
             self.poses.iteritems(*args, **kwargs)):
         yield AttrDict(left=left, right=right,
                        velodyne=None, pose=pose)
Example no. 30
class BaseKLT(object): 
    """
    General-purpose KLT tracker that combines the use of FeatureDetector and 
    OpticalFlowTracker class. 

        min_track_length:   Defines the minimum length of the track that returned. 
                            Other shorter tracks are still considered while tracking
        
        max_track_length:   Maximum deque length for track history lookup

    """
    default_detector_params = AttrDict(method='fast', grid=(12,10), max_corners=1200, 
                                       max_levels=4, subpixel=False, params=FeatureDetector.fast_params)
    default_tracker_params = AttrDict(method='lk', fb_check=True, 
                                      params=OpticalFlowTracker.lk_params)

    default_detector = FeatureDetector(**default_detector_params)
    default_tracker = OpticalFlowTracker.create(**default_tracker_params)

    def __init__(self, 
                 detector=default_detector, 
                 tracker=default_tracker,  
                 min_track_length=2, max_track_length=4, min_tracks=1200, mask_size=9): 
        
        # BaseKLT Params
        self.detector_ = detector
        self.tracker_ = tracker

        # Track Manager
        self.tm_ = TrackManager(maxlen=max_track_length)
        self.min_track_length_ = min_track_length
        self.min_tracks_ = min_tracks
        self.mask_size_ = mask_size

    @classmethod
    def from_params(cls, detector_params=default_detector_params, 
                    tracker_params=default_tracker_params,
                    min_track_length=2, max_track_length=4, min_tracks=1200, mask_size=9): 

        # Setup detector and tracker
        detector = FeatureDetector(**detector_params)
        tracker = OpticalFlowTracker.create(**tracker_params)
        return cls(detector, tracker, 
                   min_track_length=min_track_length, max_track_length=max_track_length, 
                   min_tracks=min_tracks, mask_size=mask_size)

    def register_on_track_delete_callback(self, cb): 
        self.tm_.register_on_track_delete_callback(cb)

    def create_mask(self, shape, pts): 
        """
        Create a mask image to prevent feature extraction around regions
        that already have features detected. i.e prevent feature crowding
        """

        mask = np.ones(shape=shape, dtype=np.uint8) * 255
        all_pts = np.vstack([self.aug_pts_, pts]) if hasattr(self, 'aug_pts_') \
                  else pts
        for pt in all_pts: 
            try: 
                cv2.circle(mask, tuple(map(int, pt)), self.mask_size_, 0, -1, lineType=cv2.CV_AA)
            except (OverflowError, ValueError):
                # Skip non-finite or out-of-range points
                continue
        return mask

    def augment_mask(self, pts): 
        """
        Augment the mask of tracked features with additional features. 
        Usually, this is called when features are propagated from map points
        """
        self.aug_pts_ = pts

    def draw_tracks(self, out, colored=False, color_type='unique', min_track_length=4, max_track_length=4):
        """
        color_type: {age, unique}
        """

        ids, pts, lengths = self.latest_ids, self.latest_pts, self.tm_.lengths

        if color_type == 'unique': 
            N = 20
            cwheel = colormap(np.linspace(0, 1, N))
            cols = np.vstack([cwheel[tid % N] for tid in ids])
        elif color_type == 'age': 
            cols = colormap(lengths)
        else: 
            raise ValueError('Color type {:} undefined, use age or unique'.format(color_type))

        if not colored: 
            cols = np.tile([0,240,0], [len(self.tm_.tracks), 1])

        for col, track in izip(cols.astype(np.int64), self.tm_.tracks.itervalues()): 
            cv2.polylines(out, [np.vstack(track.items).astype(np.int32)[-max_track_length:]], False, 
                          tuple(col), thickness=1)
            tl, br = np.int32(track.latest_item)-2, np.int32(track.latest_item)+2
            cv2.rectangle(out, (tl[0], tl[1]), (br[0], br[1]), tuple(col), -1)

    def visualize(self, out, colored=False): 
        if not len(self.latest_pts):
            return out

        N = 20
        cols = colormap(np.linspace(0, 1, N))
        valid = finite_and_within_bounds(self.latest_pts, out.shape)

        for tid, pt in izip(self.latest_ids[valid], self.latest_pts[valid]): 
            cv2.rectangle(out, tuple(map(int, pt-2)), tuple(map(int, pt+2)), 
                          tuple(map(int, cols[tid % N])) if colored else (0,240,0), -1)

        return out

    def matches(self, index1=-2, index2=-1): 
        tids, p1, p2 = [], [], []
        for tid, pts in self.tm_.tracks.iteritems(): 
            if len(pts) > abs(index1) and len(pts) > abs(index2): 
                tids.append(tid)
                p1.append(pts.items[index1])
                p2.append(pts.items[index2]) 
        try: 
            return tids, np.vstack(p1), np.vstack(p2)
        except ValueError: 
            # No tracks long enough to match
            return np.array([]), np.array([]), np.array([])

    def process(self, im, detected_pts=None):
        raise NotImplementedError()

    @property
    def latest_ids(self): 
        return self.tm_.ids

    @property
    def latest_pts(self): 
        return self.tm_.pts

    @property
    def latest_flow(self): 
        return self.tm_.flow

    def confident_tracks(self, min_length=4): 
        return self.tm_.confident_tracks(min_length=min_length)
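
A hypothetical end-to-end sketch (OpenCVKLT, referenced in Example no. 9, is assumed to be the concrete subclass implementing process(); `frames` stands in for any grayscale image iterable, and to_color for a grayscale-to-BGR helper as used in Example no. 9's comments):

klt = OpenCVKLT.from_params(min_track_length=2, max_track_length=4)
for im in frames:
    klt.process(im)
    ids, pts = klt.latest_ids, klt.latest_pts   # currently tracked features
    vis = klt.visualize(to_color(im), colored=True)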