Example #1
 def locally_optimize(self,
                      kf_ref,
                      verbose=False,
                      rounds=10,
                      abort_flag=g2o.Flag()):
     keyframes, points, ref_keyframes = self.local_map.update(kf_ref)
     print('local optimization window: ',
           sorted([kf.id for kf in keyframes]))
     print('                     refs: ',
           sorted([kf.id for kf in ref_keyframes]))
     print('                   #points: ', len(points))
     #print('                   points: ', sorted([p.id for p in points]))
     #err = optimizer_g2o.optimize(frames, points, None, False, verbose, rounds)
     err, ratio_bad_observations = optimizer_g2o.local_bundle_adjustment(
         keyframes,
         points,
         ref_keyframes,
         False,
         verbose,
         rounds,
         abort_flag=abort_flag,
         map_lock=self.update_lock)
     Printer.green('local optimization - perc bad observations: %.2f %%' %
                   (ratio_bad_observations * 100))
     return err
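
Note the default argument `abort_flag=g2o.Flag()` above: Python evaluates default values once, at function definition time, so every call that omits `abort_flag` shares one and the same flag object. A minimal sketch of the usual defensive idiom (assuming only that `g2o.Flag` is the mutable flag type used above):

import g2o

def make_abort_flag(abort_flag=None):
    # a default of g2o.Flag() would be created once and shared by all calls;
    # creating the flag inside the body gives each call its own independent flag
    if abort_flag is None:
        abort_flag = g2o.Flag()
    return abort_flag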
Example #2
 def compute(self, frame, kps=None, mask=None):  # kps and mask are ignored (kept only for interface compatibility)
     with self.lock:         
         if self.frame is not frame:
             Printer.orange('WARNING: DELF is recomputing both kps and des on last input frame', frame.shape)            
             self.detectAndCompute(frame)
         return self.kps, self.des                
                
Example #3
 def remove_points_with_big_reproj_err(self, points):
     with self._lock:
         with self.update_lock:
             #print('map points: ', sorted([p.id for p in self.points]))
             #print('points: ', sorted([p.id for p in points]))
             culled_pt_count = 0
             for p in points:
                 # compute reprojection error
                 chi2s = []
                 for f, idx in p.observations():
                     uv = f.kpsu[idx]
                     proj, _ = f.project_map_point(p)
                     invSigma2 = Frame.feature_manager.inv_level_sigmas2[
                         f.octaves[idx]]
                     err = (proj - uv)
                     chi2s.append(np.inner(err, err) * invSigma2)
                 # cull
                 mean_chi2 = np.mean(chi2s)
                 if mean_chi2 > Parameters.kChi2Mono:  # chi-square 2 DOFs  (Hartley Zisserman pg 119)
                     culled_pt_count += 1
                     #print('removing point: ',p.id, 'from frames: ', [f.id for f in p.keyframes])
                     self.remove_point(p)
             Printer.blue("# culled map points: ", culled_pt_count)
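
The threshold `Parameters.kChi2Mono` used above is 5.991, the 95% quantile of a chi-square distribution with 2 degrees of freedom (one per image coordinate of the reprojection error). A quick sanity check, assuming scipy is available:

from scipy.stats import chi2

print(chi2.ppf(0.95, df=2))  # ~5.991: points whose mean normalized squared error exceeds this are culled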
Example #4
    def track_reference_frame(self, f_ref, f_cur, name=''):
        if f_ref is None:
            return
        print('>>>> tracking reference %d ...' % (f_ref.id))
        # find keypoint matches between f_cur and kf_ref
        print('matching keypoints with ', Frame.feature_matcher.type.name)
        self.timer_match.start()
        idxs_cur, idxs_ref = match_frames(f_cur, f_ref)
        self.timer_match.refresh()
        self.num_matched_kps = idxs_cur.shape[0]
        print("# keypoints matched: %d " % self.num_matched_kps)
        if kUseEssentialMatrixFitting:
            # estimate camera orientation and inlier matches by fitting an essential matrix (see the limitations above)
            idxs_ref, idxs_cur = self.estimate_pose_by_fitting_ess_mat(
                f_ref, f_cur, idxs_ref, idxs_cur)

        if kUseDynamicDesDistanceTh:
            self.descriptor_distance_sigma = self.dyn_config.update_descriptor_stat(
                f_ref, f_cur, idxs_ref, idxs_cur)

        # propagate map point matches from kf_ref to f_cur  (do not override idxs_ref, idxs_cur)
        num_found_map_pts_inter_frame, idx_ref_prop, idx_cur_prop = propagate_map_point_matches(
            f_ref,
            f_cur,
            idxs_ref,
            idxs_cur,
            max_descriptor_distance=self.descriptor_distance_sigma)
        print("# matched map points in prev frame: %d " %
              num_found_map_pts_inter_frame)

        if kDebugDrawMatches:
            img_matches = draw_feature_matches(f_ref.img,
                                               f_cur.img,
                                               f_ref.kps[idx_ref_prop],
                                               f_cur.kps[idx_cur_prop],
                                               f_ref.sizes[idx_ref_prop],
                                               f_cur.sizes[idx_cur_prop],
                                               horizontal=False)
            cv2.imshow('tracking frame (no projection) - matches', img_matches)
            cv2.waitKey(1)

        # store tracking info (for possible reuse)
        self.idxs_ref = idxs_ref
        self.idxs_cur = idxs_cur

        # f_cur pose optimization using last matches with kf_ref:
        # here, we use the first guess of f_cur pose and the propagated map point matches from f_ref (matched keypoints)
        self.pose_optimization(f_cur, name)
        # update matched map points; discard outliers detected in last pose optimization
        num_matched_points = f_cur.clean_outlier_map_points()
        print('      # num_matched_map_points: %d' %
              (self.num_matched_map_points))
        #print('     # matched points: %d' % (num_matched_points) )
        if not self.pose_is_ok or self.num_matched_map_points < kNumMinInliersPoseOptimizationTrackFrame:
            f_cur.remove_frame_views(idxs_cur)
            f_cur.reset_points()
            Printer.red(
                'failure in tracking reference %d, # matched map points: %d' %
                (f_ref.id, self.num_matched_map_points))
            self.pose_is_ok = False
Example #5
 def __init__(
         self,
         num_features=kMinNumFeatureDefault,
         num_levels=3,  # number of pyramid levels for detector  
         scale_factor=1.2,  # detection scale factor (if it can be set, otherwise it is automatically computed) 
         detector_type=FeatureDetectorTypes.FAST,
         descriptor_type=FeatureDescriptorTypes.NONE,
         match_ratio_test=kRatioTest,
         tracker_type=FeatureTrackerTypes.LK):
     super().__init__(num_features=num_features,
                      num_levels=num_levels,
                      scale_factor=scale_factor,
                      detector_type=detector_type,
                      descriptor_type=descriptor_type,
                      tracker_type=tracker_type)
     self.feature_manager = feature_manager_factory(
         num_features=num_features,
         num_levels=num_levels,
         scale_factor=scale_factor,
         detector_type=detector_type,
         descriptor_type=descriptor_type)
     #if num_levels < 3:
     #    Printer.green('LkFeatureTracker: forcing at least 3 levels on LK pyr optic flow')
     #    num_levels = 3
     optic_flow_num_levels = max(kLkPyrOpticFlowNumLevelsMin, num_levels)
     Printer.green('LkFeatureTracker: num levels on LK pyr optic flow: ',
                   optic_flow_num_levels)
     # we use LK pyr optic flow for matching
     self.lk_params = dict(winSize=(21, 21),
                           maxLevel=optic_flow_num_levels,
                           criteria=(cv2.TERM_CRITERIA_EPS
                                     | cv2.TERM_CRITERIA_COUNT, 30, 0.01))
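
For context, `self.lk_params` is meant to be unpacked into `cv2.calcOpticalFlowPyrLK()` at matching time; a minimal sketch, where `img_prev`/`img_cur` are two hypothetical grayscale frames and `pts_prev` is an Nx1x2 float32 array of keypoints detected in `img_prev`:

import cv2

lk_params = dict(winSize=(21, 21),
                 maxLevel=3,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 30, 0.01))

# track pts_prev from img_prev into img_cur; status[i] == 1 marks a successful track
pts_cur, status, err = cv2.calcOpticalFlowPyrLK(img_prev, img_cur, pts_prev, None, **lk_params)
good_cur = pts_cur[status.ravel() == 1]
good_prev = pts_prev[status.ravel() == 1]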
Example #6
    def get_frame_covisibles(self, frame):
        points = frame.get_matched_good_points()
        #keyframes = self.get_local_keyframes()
        #assert len(points) > 0
        if len(points) == 0:
            Printer.red('get_frame_covisibles - frame with no points')

        # for each map point in the frame, check which other keyframes see it
        # increase counter for those keyframes
        viewing_keyframes = [
            kf for p in points for kf in p.keyframes() if not kf.is_bad
        ]  # if kf in keyframes]
        viewing_keyframes = Counter(viewing_keyframes)
        kf_ref = viewing_keyframes.most_common(1)[0][0]
        #local_keyframes = viewing_keyframes.keys()

        # include also some not-already-included keyframes that are neighbors to already-included keyframes
        for kf in list(viewing_keyframes.keys()):
            second_neighbors = kf.get_best_covisible_keyframes(
                Parameters.kNumBestCovisibilityKeyFrames)
            viewing_keyframes.update(second_neighbors)
            children = kf.get_children()
            viewing_keyframes.update(children)
            if len(viewing_keyframes) >= Parameters.kMaxNumOfKeyframesInLocalMap:
                break

        local_keyframes_counts = viewing_keyframes.most_common(
            Parameters.kMaxNumOfKeyframesInLocalMap)
        local_points = set()
        local_keyframes = []
        for kf, c in local_keyframes_counts:
            local_points.update(kf.get_matched_points())
            local_keyframes.append(kf)
        return kf_ref, local_keyframes, local_points
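
The counting trick above builds one list entry per (map point, keyframe) observation, so `collections.Counter` treats it as a multiset and `most_common(1)` directly yields the keyframe that shares the most map points with the frame. A toy sketch of the same pattern:

from collections import Counter

# one entry per observation: kf1 sees 3 of the frame's points, kf2 sees 2, kf3 sees 1
viewing_keyframes = Counter(['kf1', 'kf2', 'kf1', 'kf3', 'kf1', 'kf2'])
print(viewing_keyframes.most_common(1)[0][0])  # 'kf1' -> becomes kf_ref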
Example #7
 def compute(self, img, kps, mask=None):
     Printer.orange(
         'WARNING: you are supposed to call detectAndCompute() for ORB2 instead of compute()'
     )
     Printer.orange(
         'WARNING: ORB2 is recomputing both kps and des on input frame',
         img.shape)
     return self.detectAndCompute(img)
Example #8
 def init_feature_tracker(self, tracker):
     Frame.set_tracker(tracker)  # set the static field of the class
     if kUseEssentialMatrixFitting:
         Printer.orange('forcing feature matcher ratio_test to 0.8')
         tracker.matcher.ratio_test = 0.8
     if tracker.tracker_type == FeatureTrackerTypes.LK:
         raise ValueError(
             "You cannot use the Lucas-Kanade tracker in this SLAM approach!")
Example #9
    def need_new_keyframe(self, f_cur):
        num_keyframes = self.map.num_keyframes()
        nMinObs = kNumMinObsForKeyFrameDefault
        if num_keyframes <= 2:
            nMinObs = 2  # if just two keyframes then we can have just two observations
        num_kf_ref_tracked_points = self.kf_ref.num_tracked_points(nMinObs)  # number of tracked points in kf_ref
        num_f_cur_tracked_points = f_cur.num_matched_inlier_map_points()  # number of inliers in f_cur
        Printer.purple('F(%d) #points: %d, KF(%d) #points: %d ' %
                       (f_cur.id, num_f_cur_tracked_points, self.kf_ref.id,
                        num_kf_ref_tracked_points))

        if kLogKFinfoToFile:
            self.kf_info_logger.info(
                'F(%d) #points: %d, KF(%d) #points: %d ' %
                (f_cur.id, num_f_cur_tracked_points, self.kf_ref.id,
                 num_kf_ref_tracked_points))

        self.num_kf_ref_tracked_points = num_kf_ref_tracked_points

        is_local_mapping_idle = self.local_mapping.is_idle()
        local_mapping_queue_size = self.local_mapping.queue_size()
        print('is_local_mapping_idle: ', is_local_mapping_idle,
              ', local_mapping_queue_size: ', local_mapping_queue_size)

        # condition 1: more than "max_frames_between_kfs" have passed from last keyframe insertion
        cond1 = f_cur.id >= (self.kf_last.id + self.max_frames_between_kfs)

        # condition 2: more than "min_frames_between_kfs" have passed and local mapping is idle
        cond2 = (f_cur.id >= (self.kf_last.id + self.min_frames_between_kfs)) and is_local_mapping_idle
        #cond2 = (f_cur.id >= (self.kf_last.id + self.min_frames_between_kfs))

        # condition 3: few tracked features compared to reference keyframe
        cond3 = (num_f_cur_tracked_points <
                 num_kf_ref_tracked_points * Parameters.kThNewKfRefRatio) and (
                     num_f_cur_tracked_points >
                     Parameters.kNumMinPointsForNewKf)

        #print('KF conditions: %d %d %d' % (cond1, cond2, cond3) )
        ret = (cond1 or cond2) and cond3

        if ret:
            if is_local_mapping_idle:
                return True
            else:
                self.local_mapping.interrupt_optimization()
                return local_mapping_queue_size <= 3
        else:
            return False
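
A worked trace of the three conditions with made-up numbers (the parameter values below are for illustration only, not the library defaults):

# hypothetical values, for illustration
f_cur_id, kf_last_id = 18, 10
max_frames_between_kfs, min_frames_between_kfs = 30, 3
num_f_cur_tracked, num_kf_ref_tracked = 80, 100
kThNewKfRefRatio, kNumMinPointsForNewKf = 0.9, 15
is_local_mapping_idle = True

cond1 = f_cur_id >= kf_last_id + max_frames_between_kfs  # False: only 8 frames since the last KF
cond2 = (f_cur_id >= kf_last_id + min_frames_between_kfs) and is_local_mapping_idle  # True
cond3 = (num_f_cur_tracked < num_kf_ref_tracked * kThNewKfRefRatio) and \
        (num_f_cur_tracked > kNumMinPointsForNewKf)  # True: 80 < 90 and 80 > 15
print((cond1 or cond2) and cond3)  # True -> a new keyframe is requested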
Example #10
 def set_parent(self, keyframe):
     with self._lock_connections:
         if self == keyframe:
             if __debug__:
                 Printer.orange(
                     'KeyFrameGraph.set_parent - trying to set self as parent'
                 )
             return
         self.parent = keyframe
         keyframe.add_child(self)
Example #11
 def compute(self, frame):
     if self.first_level == -1:
         frame = self.createBaseImg(frame)  # replace the image with the new level -1 (up-resized image)
     if self.pyramid_type == PyramidType.RESIZE:
         return self.computeResize(frame)
     elif self.pyramid_type == PyramidType.RESIZE_AND_FILTER:          
         return self.computeResizeAndFilter(frame)              
     elif self.pyramid_type == PyramidType.GAUSS_PYRAMID:
         return self.computeGauss(frame) 
     else: 
         Printer.orange('Pyramid - unknown type')    
         return self.computeResizePyramid(frame)                    
Example #12
 def getImageColor(self, frame_id):
     try: 
         img = self.getImage(frame_id)
         if img.ndim == 2:
             return cv2.cvtColor(img,cv2.COLOR_GRAY2RGB)     
         else:
             return img             
     except Exception:
         #raise IOError('Cannot open dataset: ', self.name, ', path: ', self.path)
         Printer.red('Cannot open dataset: ', self.name, ', path: ', self.path)
         return None
Example #13
    def __init__(self, frame, img=None):
        KeyFrameGraph.__init__(self)
        Frame.__init__(
            self,
            img=None,
            camera=frame.camera,
            pose=frame.pose,
            id=frame.id,
            timestamp=frame.timestamp
        )  # here we MUST have img=None in order to avoid recomputing keypoint info

        if frame.img is not None:
            self.img = frame.img  # this is already a copy of an image
        else:
            if img is not None:
                self.img = img.copy()

        self.map = None

        self.is_keyframe = True
        self.kid = None  # keyframe id

        self._is_bad = False
        self.to_be_erased = False

        # pose relative to parent (this is computed when bad flag is activated)
        self._pose_Tcp = CameraPose()

        # share keypoint info with the frame (computed once at frame initialization and never changed afterwards)
        self.kps = frame.kps  # keypoint coordinates                  [Nx2]
        self.kpsu = frame.kpsu  # [u]ndistorted keypoint coordinates    [Nx2]
        self.kpsn = frame.kpsn  # [n]ormalized keypoint coordinates     [Nx2] (Kinv * [kp,1])
        self.octaves = frame.octaves  # keypoint octaves                      [Nx1]
        self.sizes = frame.sizes  # keypoint sizes                        [Nx1]
        self.angles = frame.angles  # keypoint angles                       [Nx1]
        self.des = frame.des  # keypoint descriptors                  [NxD] where D is the descriptor length

        if hasattr(frame, '_kd'):
            self._kd = frame._kd
        else:
            Printer.orange('KeyFrame %d computing kdtree for input frame %d' %
                           (self.id, frame.id))
            self._kd = cKDTree(self.kpsu)

        # map points information arrays (copy points coming from frame)
        self.points = frame.get_points()  # map points => self.points[idx] is the map point matched with self.kps[idx] (if not None)
        self.outliers = np.full(self.kpsu.shape[0], False, dtype=bool)  # used just in propagate_map_point_matches()
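
The `cKDTree` built (or reused) on `self.kpsu` above is what later enables fast fixed-radius searches over the keyframe's keypoints, e.g. in `search_and_fuse()` (Example #22). A minimal sketch of that query pattern with made-up coordinates:

import numpy as np
from scipy.spatial import cKDTree

kpsu = np.array([[10., 10.], [12., 11.], [100., 50.]])  # made-up undistorted keypoints
kd = cKDTree(kpsu)
# indices of keypoints within a 5-pixel radius of a projected map point
print(kd.query_ball_point([11., 10.], r=5.0))  # [0, 1]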
Example #14
 def compute_points_median_depth(self, points3d=None):
     with self._lock_pose:
         Rcw2 = self._pose.Rcw[2, :3]  # third row of Rcw (0-based index 2)
         tcw2 = self._pose.tcw[2]  # third component of tcw
     if points3d is None:
         with self._lock_features:
             points3d = np.array(
                 [p.pt for p in self.points if p is not None])
     if len(points3d) > 0:
         z = np.dot(Rcw2, points3d[:, :3].T) + tcw2
         z = sorted(z)
         return z[(len(z) - 1) // 2]
     else:
         Printer.red('frame.compute_points_median_depth() with no points')
         return -1
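
Note that `z = sorted(z); z[(len(z) - 1) // 2]` selects the lower median of the camera-frame depths; an equivalent numpy sketch that avoids the full sort:

import numpy as np

z = np.array([3.0, 1.0, 2.0, 5.0])
k = (len(z) - 1) // 2
print(np.partition(z, k)[k])  # 2.0, same as sorted(z)[k]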
Example #15
def main(argv=None):  # pylint: disable=unused-argument
    """Program entrance."""
    # create sift detector.
    sift_wrapper = SiftWrapper(n_sample=FLAGS.max_kpt_num)
    sift_wrapper.half_sigma = FLAGS.half_sigma
    sift_wrapper.pyr_off = FLAGS.pyr_off
    sift_wrapper.ori_off = FLAGS.ori_off
    sift_wrapper.create()
    # create deep feature extractor.
    Printer.yellow('loading model:',FLAGS.model_path,'...')
    graph = load_frozen_model(FLAGS.model_path, print_nodes=False)
    #sess = tf.Session(graph=graph)
    Printer.yellow('...done')    

    with tf.Session(graph=graph, config=config) as sess:  # config (a tf.ConfigProto) is assumed to be defined elsewhere in the module
        # extract deep feature from images.
        deep_feat1, cv_kpts1, img1 = extract_deep_features(
            sift_wrapper, sess, FLAGS.img1_path, qtz=False)
        deep_feat2, cv_kpts2, img2 = extract_deep_features(
            sift_wrapper, sess, FLAGS.img2_path, qtz=False)
    # match features by OpenCV brute-force matcher (CPU).
    matcher_wrapper = MatcherWrapper()
    # the ratio criterion is set to 0.89 for GeoDesc as described in the paper.
    deep_good_matches, deep_mask = matcher_wrapper.get_matches(
        deep_feat1, deep_feat2, cv_kpts1, cv_kpts2, ratio=0.89, cross_check=True, info='deep')

    deep_display = matcher_wrapper.draw_matches(
        img1, cv_kpts1, img2, cv_kpts2, deep_good_matches, deep_mask)
    # compare with SIFT.
    if FLAGS.cf_sift:
        sift_feat1 = sift_wrapper.compute(img1, cv_kpts1)
        sift_feat2 = sift_wrapper.compute(img2, cv_kpts2)
        sift_good_matches, sift_mask = matcher_wrapper.get_matches(
            sift_feat1, sift_feat2, cv_kpts1, cv_kpts2, ratio=0.80, cross_check=True, info='sift')
        sift_display = matcher_wrapper.draw_matches(
            img1, cv_kpts1, img2, cv_kpts2, sift_good_matches, sift_mask)
        display = np.concatenate((sift_display, deep_display), axis=0)
    else:
        display = deep_display

    cv2.imshow('display', display)
    cv2.waitKey()
    # no explicit sess.close() needed: the with-statement above already closed the session
Example #16
    def track_local_map(self, f_cur):
        if self.map.local_map.is_empty():
            return
        print('>>>> tracking local map...')
        self.timer_seach_map.start()

        self.update_local_map()

        num_found_map_pts, reproj_err_frame_map_sigma, matched_points_frame_idxs = search_map_by_projection(
            self.local_points,
            f_cur,
            max_reproj_distance=self.reproj_err_frame_map_sigma,  # Parameters.kMaxReprojectionDistanceMap
            max_descriptor_distance=self.descriptor_distance_sigma,
            ratio_test=Parameters.kMatchRatioTestMap)  # use the updated local map
        self.timer_seach_map.refresh()
        #print('reproj_err_sigma: ', reproj_err_frame_map_sigma, ' used: ', self.reproj_err_frame_map_sigma)
        print("# new matched map points in local map: %d " % num_found_map_pts)
        print("# local map points ", self.map.local_map.num_points())

        if kDebugDrawMatches:
            img_matched_trails = f_cur.draw_feature_trails(
                f_cur.img.copy(),
                matched_points_frame_idxs,
                trail_max_length=3)
            cv2.imshow('tracking local map - matched trails',
                       img_matched_trails)
            cv2.waitKey(1)

        # f_cur pose optimization 2 with all the matched local map points
        self.pose_optimization(f_cur, 'proj-map-frame')
        f_cur.update_map_points_statistics()  # here we do not reset outliers; we let them reach the keyframe generation
                                              # step, where bundle adjustment will finally decide whether to remove them;
                                              # outliers are cleaned only after keyframe generation!
        print('     # num_matched_map_points: %d' %
              (self.num_matched_map_points))
        if not self.pose_is_ok or self.num_matched_map_points < kNumMinInliersPoseOptimizationTrackLocalMap:
            Printer.red(
                'failure in tracking local map, # matched map points: ',
                self.num_matched_map_points)
            self.pose_is_ok = False
Example #17
    def estimate_pose_by_fitting_ess_mat(self, f_ref, f_cur, idxs_ref,
                                         idxs_cur):
        # N.B.: in order to understand the limitations of fitting an essential mat, read the comments of the method self.estimate_pose_ess_mat()
        self.timer_pose_est.start()
        # estimate inter frame camera motion by using found keypoint matches
        # output of the following function is:  Trc = [Rrc, trc] with ||trc||=1  where c=cur, r=ref  and  pr = Trc * pc
        Mrc, self.mask_match = estimate_pose_ess_mat(
            f_ref.kpsn[idxs_ref],
            f_cur.kpsn[idxs_cur],
            method=cv2.RANSAC,
            prob=kRansacProb,
            threshold=kRansacThresholdNormalized)
        #Mcr = np.linalg.inv(poseRt(Mrc[:3, :3], Mrc[:3, 3]))
        Mcr = inv_T(Mrc)
        estimated_Tcw = np.dot(Mcr, f_ref.pose)
        self.timer_pose_est.refresh()

        # remove outliers from keypoint matches by using the mask computed with inter frame pose estimation
        mask_idxs = (self.mask_match.ravel() == 1)
        self.num_inliers = sum(mask_idxs)
        print('# inliers: ', self.num_inliers)
        idxs_ref = idxs_ref[mask_idxs]
        idxs_cur = idxs_cur[mask_idxs]

        # if there are not enough inliers do not use the estimated pose
        if self.num_inliers < kNumMinInliersEssentialMat:
            #f_cur.update_pose(f_ref.pose) # reset estimated pose to previous frame
            Printer.red('Essential mat: not enough inliers!')
        else:
            # use the estimated pose as an initial guess for the subsequent pose optimization
            # set only the estimated rotation (essential mat computation does not provide a scale for the translation, see above)
            #f_cur.pose[:3,:3] = estimated_Tcw[:3,:3] # copy only the rotation
            #f_cur.pose[:,3] = f_ref.pose[:,3].copy() # override translation with ref frame translation
            Rcw = estimated_Tcw[:3, :3]  # copy only the rotation
            tcw = f_ref.pose[:3, 3]  # override translation with ref frame translation
            f_cur.update_rotation_and_translation(Rcw, tcw)
        return idxs_ref, idxs_cur
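
`estimate_pose_ess_mat()` itself is not shown in this collection. A hedged sketch of what such a helper typically looks like on normalized coordinates (standard OpenCV pipeline; the conventions of the real helper may differ):

import cv2
import numpy as np

def estimate_pose_ess_mat_sketch(kpn_ref, kpn_cur, method=cv2.RANSAC, prob=0.999, threshold=0.0003):
    # kpn_*: Nx2 normalized image coordinates (Kinv * [u, v, 1]), hence focal=1 and pp=(0, 0)
    E, mask = cv2.findEssentialMat(kpn_cur, kpn_ref, focal=1.0, pp=(0., 0.),
                                   method=method, prob=prob, threshold=threshold)
    _, R, t, mask = cv2.recoverPose(E, kpn_cur, kpn_ref, focal=1.0, pp=(0., 0.), mask=mask)
    Mrc = np.eye(4)
    Mrc[:3, :3], Mrc[:3, 3] = R, t.ravel()  # ||trc|| = 1: the translation scale is unobservable
    return Mrc, mask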
Example #18
def bundle_adjustment(keyframes, points, local_window, fixed_points=False, verbose=False, rounds=10, use_robust_kernel=False, abort_flag=g2o.Flag()):
    if local_window is None:
        local_frames = keyframes
    else:
        local_frames = keyframes[-local_window:]

    # create g2o optimizer
    opt = g2o.SparseOptimizer()
    block_solver = g2o.BlockSolverSE3(g2o.LinearSolverCSparseSE3())
    #block_solver = g2o.BlockSolverSE3(g2o.LinearSolverCholmodSE3())        
    solver = g2o.OptimizationAlgorithmLevenberg(block_solver)
    opt.set_algorithm(solver)
    opt.set_force_stop_flag(abort_flag)

    thHuberMono = math.sqrt(5.991)  # chi-square 2 DOFs

    graph_keyframes, graph_points = {}, {}

    # add frame vertices to graph
    for kf in (local_frames if fixed_points else keyframes):    # if points are fixed then consider just the local frames, otherwise we need all frames or at least two frames for each point
        if kf.is_bad:
            continue 
        #print('adding vertex frame ', f.id, ' to graph')
        se3 = g2o.SE3Quat(kf.Rcw, kf.tcw)
        v_se3 = g2o.VertexSE3Expmap()
        v_se3.set_estimate(se3)
        v_se3.set_id(kf.kid * 2)  # even ids  (use f.kid here!)
        v_se3.set_fixed(kf.kid==0 or kf not in local_frames) #(use f.kid here!)
        opt.add_vertex(v_se3)

        # confirm pose correctness
        #est = v_se3.estimate()
        #assert np.allclose(pose[0:3, 0:3], est.rotation().matrix())
        #assert np.allclose(pose[0:3, 3], est.translation())

        graph_keyframes[kf] = v_se3

    num_edges = 0
    
    # add point vertices to graph 
    for p in points:
        assert(p is not None)        
        if p.is_bad:  # do not consider bad points   
            continue
        if __debug__:        
            if not any([f in keyframes for f in p.keyframes()]):  
                Printer.red('point without a viewing frame!!')
                continue        
        #print('adding vertex point ', p.id,' to graph')
        v_p = g2o.VertexSBAPointXYZ()    
        v_p.set_id(p.id * 2 + 1)  # odd ids
        v_p.set_estimate(p.pt[0:3])
        v_p.set_marginalized(True)
        v_p.set_fixed(fixed_points)
        opt.add_vertex(v_p)
        graph_points[p] = v_p

        # add edges
        for kf, idx in p.observations():
            if kf.is_bad:
                continue 
            if kf not in graph_keyframes:
                continue
            #print('adding edge between point ', p.id,' and frame ', f.id)
            edge = g2o.EdgeSE3ProjectXYZ()
            edge.set_vertex(0, v_p)
            edge.set_vertex(1, graph_keyframes[kf])
            edge.set_measurement(kf.kpsu[idx])
            invSigma2 = Frame.feature_manager.inv_level_sigmas2[kf.octaves[idx]]
            edge.set_information(np.eye(2)*invSigma2)
            if use_robust_kernel:
                edge.set_robust_kernel(g2o.RobustKernelHuber(thHuberMono))

            edge.fx = kf.camera.fx 
            edge.fy = kf.camera.fy
            edge.cx = kf.camera.cx
            edge.cy = kf.camera.cy

            opt.add_edge(edge)
            num_edges += 1

    if verbose:
        opt.set_verbose(True)
    opt.initialize_optimization()
    opt.optimize(rounds)

    # put frames back
    for kf in graph_keyframes:
        est = graph_keyframes[kf].estimate()
        #R = est.rotation().matrix()
        #t = est.translation()
        #f.update_pose(poseRt(R, t))
        kf.update_pose(g2o.Isometry3d(est.orientation(), est.position()))

    # put points back
    if not fixed_points:
        for p in graph_points:
            p.update_position(np.array(graph_points[p].estimate()))
            p.update_normal_and_depth(force=True)
            
    mean_squared_error = opt.active_chi2()/max(num_edges,1)

    return mean_squared_error
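
The vertex-id scheme above (kf.kid * 2 for keyframes, p.id * 2 + 1 for points) keeps the two id spaces disjoint inside a single g2o graph. A hedged usage sketch, assuming a hypothetical `slam_map` object whose `keyframes` and `points` collections follow the interfaces used above:

# full BA over the whole map: all poses and points free (only the keyframe with kid == 0 stays fixed)
mse = bundle_adjustment(slam_map.keyframes, slam_map.points, local_window=None,
                        fixed_points=False, verbose=False, rounds=10,
                        use_robust_kernel=True)
print('BA mean squared error: ', mse)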
Example #19
def local_bundle_adjustment(keyframes, points, keyframes_ref=[], fixed_points=False, verbose=False, rounds=10, abort_flag=g2o.Flag(), map_lock=None):

    # create g2o optimizer
    opt = g2o.SparseOptimizer()
    block_solver = g2o.BlockSolverSE3(g2o.LinearSolverCSparseSE3())
    #block_solver = g2o.BlockSolverSE3(g2o.LinearSolverEigenSE3())  
    #block_solver = g2o.BlockSolverSE3(g2o.LinearSolverCholmodSE3())          
    solver = g2o.OptimizationAlgorithmLevenberg(block_solver)
    opt.set_algorithm(solver)
    opt.set_force_stop_flag(abort_flag)

    #robust_kernel = g2o.RobustKernelHuber(np.sqrt(5.991))  # chi-square 2 DOFs
    thHuberMono = math.sqrt(5.991)  # chi-square 2 DOFs

    graph_keyframes, graph_points = {}, {}

    # add frame vertices to graph
    for kf in keyframes:    
        if kf.is_bad:
            continue 
        #print('adding vertex frame ', f.id, ' to graph')
        se3 = g2o.SE3Quat(kf.Rcw, kf.tcw)
        v_se3 = g2o.VertexSE3Expmap()
        v_se3.set_estimate(se3)
        v_se3.set_id(kf.kid * 2)  # even ids  (use f.kid here!)
        v_se3.set_fixed(kf.kid==0)  # (use f.kid here!)
        opt.add_vertex(v_se3)
        graph_keyframes[kf] = v_se3     
           
        # confirm pose correctness
        #est = v_se3.estimate()
        #assert np.allclose(pose[0:3, 0:3], est.rotation().matrix())
        #assert np.allclose(pose[0:3, 3], est.translation())
        
    # add reference frame vertices to graph
    for kf in keyframes_ref:    
        if kf.is_bad:
            continue 
        #print('adding vertex frame ', f.id, ' to graph')
        se3 = g2o.SE3Quat(kf.Rcw, kf.tcw)
        v_se3 = g2o.VertexSE3Expmap()
        v_se3.set_estimate(se3)
        v_se3.set_id(kf.kid * 2)  # even ids  (use f.kid here!)
        v_se3.set_fixed(True)
        opt.add_vertex(v_se3)
        graph_keyframes[kf] = v_se3             

    graph_edges = {}
    num_edges = 0
    num_bad_edges = 0

    # add point vertices to graph 
    for p in points:
        assert(p is not None)
        if p.is_bad:  # do not consider bad points             
            continue  
        if not any([f in keyframes for f in p.keyframes()]):  
            Printer.orange('point %d without a viewing keyframe in input keyframes!!' %(p.id))
            #Printer.orange('         keyframes: ',p.observations_string())
            continue
        #print('adding vertex point ', p.id,' to graph')
        v_p = g2o.VertexSBAPointXYZ()    
        v_p.set_id(p.id * 2 + 1)  # odd ids
        v_p.set_estimate(p.pt[0:3])
        v_p.set_marginalized(True)
        v_p.set_fixed(fixed_points)
        opt.add_vertex(v_p)
        graph_points[p] = v_p

        # add edges
        for kf, p_idx in p.observations():
            if kf.is_bad:
                continue 
            if kf not in graph_keyframes:
                continue
            if __debug__:      
                p_f = kf.get_point_match(p_idx)
                if p_f != p:
                    print('frame: ', kf.id, ' missing point ', p.id, ' at index p_idx: ', p_idx)                    
                    if p_f is not None:
                        print('p_f:', p_f)
                    print('p:',p)
            assert(kf.get_point_match(p_idx) is p)            
            #print('adding edge between point ', p.id,' and frame ', f.id)
            edge = g2o.EdgeSE3ProjectXYZ()
            edge.set_vertex(0, v_p)
            edge.set_vertex(1, graph_keyframes[kf])
            edge.set_measurement(kf.kpsu[p_idx])
            invSigma2 = Frame.feature_manager.inv_level_sigmas2[kf.octaves[p_idx]]
            edge.set_information(np.eye(2)*invSigma2)
            edge.set_robust_kernel(g2o.RobustKernelHuber(thHuberMono))

            edge.fx = kf.camera.fx 
            edge.fy = kf.camera.fy
            edge.cx = kf.camera.cx
            edge.cy = kf.camera.cy

            opt.add_edge(edge)

            graph_edges[edge] = (p,kf,p_idx) # one has kf.points[p_idx] == p
            num_edges += 1            

    if verbose:
        opt.set_verbose(True)

    if abort_flag.value:
        return -1, 0

    # initial optimization 
    opt.initialize_optimization()
    opt.optimize(5)
    
    chi2Mono = 5.991  # chi-square 2 DOFs; also used in the final outlier search below, so it must be defined even if the abort flag is set

    if not abort_flag.value:

        # check inliers observation 
        for edge, edge_data in graph_edges.items(): 
            p = edge_data[0]
            if p.is_bad:
                continue 
            if edge.chi2() > chi2Mono or not edge.is_depth_positive():
                edge.set_level(1)
                num_bad_edges += 1
            edge.set_robust_kernel(None)

        # optimize again without outliers 
        opt.initialize_optimization()
        opt.optimize(rounds)

    # search for final outlier observations and clean map  
    num_bad_observations = 0  # final bad observations
    outliers_data = []
    for edge, edge_data in graph_edges.items(): 
        p, kf, p_idx = edge_data
        if p.is_bad:
            continue         
        assert(kf.get_point_match(p_idx) is p) 
        if edge.chi2() > chi2Mono or not edge.is_depth_positive():         
            num_bad_observations += 1
            outliers_data.append(edge_data)       

    if map_lock is None: 
        map_lock = RLock() # put a fake lock 
        
    with map_lock:      
        # remove outlier observations 
        for d in outliers_data:
            p, kf, p_idx = d
            p_f = kf.get_point_match(p_idx)
            if p_f is not None:
                assert(p_f is p)
                p.remove_observation(kf,p_idx)
                # the following instruction is now included in p.remove_observation()
                #f.remove_point(p)   # it removes multiple point instances (if these are present)   
                #f.remove_point_match(p_idx) # this does not remove multiple point instances, but now there cannot be multiple instances any more

        # put frames back
        for kf in graph_keyframes:
            est = graph_keyframes[kf].estimate()
            #R = est.rotation().matrix()
            #t = est.translation()
            #f.update_pose(poseRt(R, t))
            kf.update_pose(g2o.Isometry3d(est.orientation(), est.position()))

        # put points back
        if not fixed_points:
            for p in graph_points:
                p.update_position(np.array(graph_points[p].estimate()))
                p.update_normal_and_depth(force=True)

    active_edges = num_edges - num_bad_edges
    mean_squared_error = opt.active_chi2()/max(active_edges, 1)  # guard against division by zero when all edges are bad

    return mean_squared_error, num_bad_observations/max(num_edges,1)
Example #20
def pose_optimization(frame, verbose=False, rounds=10):

    is_ok = True 

    # create g2o optimizer
    opt = g2o.SparseOptimizer()
    #block_solver = g2o.BlockSolverSE3(g2o.LinearSolverCSparseSE3())
    #block_solver = g2o.BlockSolverSE3(g2o.LinearSolverDenseSE3())    
    block_solver = g2o.BlockSolverSE3(g2o.LinearSolverEigenSE3())       
    solver = g2o.OptimizationAlgorithmLevenberg(block_solver)
    opt.set_algorithm(solver)

    #robust_kernel = g2o.RobustKernelHuber(np.sqrt(5.991))  # chi-square 2 DOFs
    thHuberMono = math.sqrt(5.991)  # chi-square 2 DOFs

    point_edge_pairs = {}
    num_point_edges = 0

    v_se3 = g2o.VertexSE3Expmap()
    v_se3.set_estimate(g2o.SE3Quat(frame.Rcw, frame.tcw))
    v_se3.set_id(0)  
    v_se3.set_fixed(False)
    opt.add_vertex(v_se3)

    with MapPoint.global_lock:
        # add point vertices to graph 
        for idx, p in enumerate(frame.points):
            if p is None:  
                continue

            # reset outlier flag 
            frame.outliers[idx] = False 

            # add edge
            #print('adding edge between point ', p.id,' and frame ', frame.id)
            edge = g2o.EdgeSE3ProjectXYZOnlyPose()

            edge.set_vertex(0, opt.vertex(0))
            edge.set_measurement(frame.kpsu[idx])
            invSigma2 = Frame.feature_manager.inv_level_sigmas2[frame.octaves[idx]]
            edge.set_information(np.eye(2)*invSigma2)
            edge.set_robust_kernel(g2o.RobustKernelHuber(thHuberMono))

            edge.fx = frame.camera.fx 
            edge.fy = frame.camera.fy
            edge.cx = frame.camera.cx
            edge.cy = frame.camera.cy
            edge.Xw = p.pt[0:3]
            
            opt.add_edge(edge)

            point_edge_pairs[p] = (edge, idx) # one edge per point 
            num_point_edges += 1

    if num_point_edges < 3:
        Printer.red('pose_optimization: not enough correspondences!') 
        is_ok = False 
        return 0, is_ok, 0

    if verbose:
        opt.set_verbose(True)

    # perform 4 optimizations: 
    # after each optimization we classify observations as inliers/outliers;
    # at the next optimization, outliers are excluded, but at the end they can be classified as inliers again
    chi2Mono = 5.991 # chi-square 2 DOFs
    num_bad_point_edges = 0

    for it in range(4):
        v_se3.set_estimate(g2o.SE3Quat(frame.Rcw, frame.tcw))
        opt.initialize_optimization()        
        opt.optimize(rounds)

        num_bad_point_edges = 0

        for p, edge_pair in point_edge_pairs.items(): 
            edge, idx = edge_pair
            if frame.outliers[idx]:
                edge.compute_error()  # edges demoted to level 1 are skipped by optimize(), so their error must be recomputed before reading chi2()

            chi2 = edge.chi2()
            
            if chi2 > chi2Mono:
                frame.outliers[idx] = True 
                edge.set_level(1)
                num_bad_point_edges +=1
            else:
                frame.outliers[idx] = False
                edge.set_level(0)                                

            if it == 2:
                edge.set_robust_kernel(None)

        if len(opt.edges()) < 10:
            Printer.red('pose_optimization: stopped - not enough edges!')   
            is_ok = False           
            break                 
    
    print('pose optimization: available ', num_point_edges, ' points, found ', num_bad_point_edges, ' bad points')     
    if num_point_edges == num_bad_point_edges:
        Printer.red('pose_optimization: all the available correspondences are bad!')           
        is_ok = False      

    # update pose estimation
    if is_ok: 
        est = v_se3.estimate()
        # R = est.rotation().matrix()
        # t = est.translation()
        # frame.update_pose(poseRt(R, t))
        frame.update_pose(g2o.Isometry3d(est.orientation(), est.position()))

    # since we have only one frame here, each edge corresponds to a single distinct point
    num_valid_points = num_point_edges - num_bad_point_edges   
    
    mean_squared_error = opt.active_chi2()/max(num_valid_points,1)

    return mean_squared_error, is_ok, num_valid_points
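
A minimal call sketch, assuming `f_cur` is a `Frame` whose `points` array has already been filled by one of the matching steps shown in the other examples:

mse, is_ok, num_valid_points = pose_optimization(f_cur, verbose=False, rounds=10)
if is_ok:
    print('pose optimized: %d valid points, MSE %.3f' % (num_valid_points, mse))
else:
    print('pose optimization failed')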
Example #21
    def track(self, img, frame_id, timestamp=None):
        Printer.cyan('@tracking')
        time_start = time.time()

        # check image size is coherent with camera params
        print("img.shape: ", img.shape)
        print("camera ", self.camera.height, " x ", self.camera.width)
        assert img.shape[0:2] == (self.camera.height, self.camera.width)
        if timestamp is not None:
            print('timestamp: ', timestamp)

        self.timer_main_track.start()

        # build current frame
        self.timer_frame.start()
        f_cur = Frame(img, self.camera, timestamp=timestamp)
        self.f_cur = f_cur
        print("frame: ", f_cur.id)
        self.timer_frame.refresh()

        # reset indexes of matches
        self.idxs_ref = []
        self.idxs_cur = []

        if self.state == SlamState.NO_IMAGES_YET:
            # push the first frame into the initializer
            self.intializer.init(f_cur)
            self.state = SlamState.NOT_INITIALIZED
            return  # EXIT (jump to second frame)

        if self.state == SlamState.NOT_INITIALIZED:
            # try to initialize
            initializer_output, intializer_is_ok = self.intializer.initialize(
                f_cur, img)
            if intializer_is_ok:
                kf_ref = initializer_output.kf_ref
                kf_cur = initializer_output.kf_cur
                # add the two initialized frames in the map
                self.map.add_frame(kf_ref)  # add first frame in map and update its frame id
                self.map.add_frame(kf_cur)  # add second frame in map and update its frame id
                # add the two initialized frames as keyframes in the map
                self.map.add_keyframe(kf_ref)  # add first keyframe in map and update its kid
                self.map.add_keyframe(kf_cur)  # add second keyframe in map and update its kid
                kf_ref.init_observations()
                kf_cur.init_observations()
                # add points in map
                new_pts_count, _, _ = self.map.add_points(
                    initializer_output.pts,
                    None,
                    kf_cur,
                    kf_ref,
                    initializer_output.idxs_cur,
                    initializer_output.idxs_ref,
                    img,
                    do_check=False)
                Printer.green("map: initialized %d new points" %
                              (new_pts_count))
                # update covisibility graph connections
                kf_ref.update_connections()
                kf_cur.update_connections()

                # update tracking info
                self.f_cur = kf_cur
                self.f_cur.kf_ref = kf_ref
                self.kf_ref = kf_cur  # set reference keyframe
                self.kf_last = kf_cur  # set last added keyframe
                self.map.local_map.update(self.kf_ref)
                self.state = SlamState.OK

                self.update_tracking_history()
                self.motion_model.update_pose(kf_cur.timestamp,
                                              kf_cur.position,
                                              kf_cur.quaternion)
                self.motion_model.is_ok = False  # after initialization you cannot use motion model for next frame pose prediction (time ids of initialized poses may not be consecutive)

                self.intializer.reset()

                if kUseDynamicDesDistanceTh:
                    self.descriptor_distance_sigma = self.dyn_config.update_descriptor_stat(
                        kf_ref, kf_cur, initializer_output.idxs_ref,
                        initializer_output.idxs_cur)
            return  # EXIT (jump to next frame)

        # get previous frame in map as reference
        f_ref = self.map.get_frame(-1)
        #f_ref_2 = self.map.get_frame(-2)
        self.f_ref = f_ref

        # add current frame f_cur to map
        self.map.add_frame(f_cur)
        self.f_cur.kf_ref = self.kf_ref

        # reset pose state flag
        self.pose_is_ok = False

        with self.map.update_lock:
            # check for map point replacements in previous frame f_ref (some points might have been replaced by local mapping during point fusion)
            self.f_ref.check_replaced_map_points()

            if kUseDynamicDesDistanceTh:
                print('descriptor_distance_sigma: ',
                      self.descriptor_distance_sigma)
                self.local_mapping.descriptor_distance_sigma = self.descriptor_distance_sigma

            # update (velocity) old motion model                                             # c1=ref_ref, c2=ref, c3=cur;  c=cur, r=ref
            #self.velocity = np.dot(f_ref.pose, inv_T(f_ref_2.pose))                          # Tc2c1 = Tc2w * Twc1   (predicted Tcr)
            #self.predicted_pose = g2o.Isometry3d(np.dot(self.velocity, f_ref.pose))          # Tc3w = Tc2c1 * Tc2w   (predicted Tcw)

            # set initial guess for current pose optimization
            if kUseMotionModel and self.motion_model.is_ok:
                print('using motion model for next pose prediction')
                # update f_ref pose according to its reference keyframe (the pose of the reference keyframe could be updated by local mapping)
                self.f_ref.update_pose(
                    self.tracking_history.relative_frame_poses[-1] *
                    self.f_ref.kf_ref.isometry3d)
                # predict pose by using motion model
                self.predicted_pose, _ = self.motion_model.predict_pose(
                    timestamp, self.f_ref.position, self.f_ref.orientation)
                f_cur.update_pose(self.predicted_pose)
            else:
                print('setting f_cur.pose <-- f_ref.pose')
                # use reference frame pose as initial guess
                f_cur.update_pose(f_ref.pose)

            # track camera motion from f_ref to f_cur
            self.track_previous_frame(f_ref, f_cur)

            if not self.pose_is_ok:
                # if previous track didn't go well then track the camera motion from kf_ref to f_cur
                self.track_keyframe(self.kf_ref, f_cur)

            # now, having a better estimate of f_cur pose, we can find more map point matches:
            # find matches between {local map points} (points in the local map) and {unmatched keypoints of f_cur}
            if self.pose_is_ok:
                self.track_local_map(f_cur)

        # end block {with self.map.update_lock:}

        # TODO: add relocalization

        # HACK: since local mapping is not fast enough in python (and tracking is not in real-time) => give local mapping more time to process stuff
        self.wait_for_local_mapping()  # N.B.: this must be outside the `with self.map.update_lock:` block

        with self.map.update_lock:

            # update slam state
            if self.pose_is_ok:
                self.state = SlamState.OK
            else:
                self.state = SlamState.LOST
                Printer.red('tracking failure')

            # update motion model state
            self.motion_model.is_ok = self.pose_is_ok

            if self.pose_is_ok:  # if tracking was successful

                # update motion model
                self.motion_model.update_pose(timestamp, f_cur.position,
                                              f_cur.quaternion)

                f_cur.clean_vo_map_points()

                # do we need a new KeyFrame?
                need_new_kf = self.need_new_keyframe(f_cur)

                if need_new_kf:
                    Printer.green('adding new KF with frame id %d' % (f_cur.id))
                    if kLogKFinfoToFile:
                        self.kf_info_logger.info('adding new KF with frame id %d' % (f_cur.id))
                    kf_new = KeyFrame(f_cur, img)
                    self.kf_last = kf_new
                    self.kf_ref = kf_new
                    f_cur.kf_ref = kf_new

                    self.local_mapping.push_keyframe(kf_new)
                    if not kLocalMappingOnSeparateThread:
                        self.local_mapping.do_local_mapping()
                else:
                    Printer.yellow('NOT KF')

                # From ORBSLAM2:
                # Clean outliers once keyframe generation has been managed:
                # we allow points with high innovation (considered outliers by the Huber Function)
                # pass to the new keyframe, so that bundle adjustment will finally decide
                # if they are outliers or not. We don't want next frame to estimate its position
                # with those points so we discard them in the frame.
                f_cur.clean_outlier_map_points()

            if self.f_cur.kf_ref is None:
                self.f_cur.kf_ref = self.kf_ref

            self.update_tracking_history()  # must stay after having updated the slam state (self.state)

            Printer.green("map: %d points, %d keyframes" %
                          (self.map.num_points(), self.map.num_keyframes()))
            #self.update_history()

            self.timer_main_track.refresh()

            duration = time.time() - time_start
            print('tracking duration: ', duration)
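
A hedged sketch of the driver loop this method expects, with `Slam` standing in for the owning class and `dataset` a hypothetical iterable of (image, timestamp) pairs:

slam = Slam(camera, feature_tracker)  # hypothetical construction of the owning class
for frame_id, (img, timestamp) in enumerate(dataset):
    slam.track(img, frame_id, timestamp=timestamp)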
Example #22
def search_and_fuse(points, keyframe, 
                    max_reproj_distance=Parameters.kMaxReprojectionDistanceFuse,
                    max_descriptor_distance = 0.5*Parameters.kMaxDescriptorDistance,
                    ratio_test=Parameters.kMatchRatioTestMap):
    #max_descriptor_distance = 0.5 * Parameters.kMaxDescriptorDistance 
    
    fused_pts_count = 0
    Ow = keyframe.Ow 
    if len(points) == 0:
        Printer.red('search_and_fuse - no points')
        return 0
        
    # get all matched points of keyframe 
    good_pts_idxs = np.flatnonzero(points != None)  # element-wise comparison over an object array (cannot use 'is not None' here)
    good_pts = points[good_pts_idxs]

    if len(good_pts_idxs) == 0:
        Printer.red('search_and_fuse - no matched points')
        return 0
    
    # check if points are visible 
    good_pts_visible, good_projs, good_depths, good_dists = keyframe.are_visible(good_pts)
    
    if len(good_pts_visible) == 0:
        Printer.red('search_and_fuse - no visible points')
        return 0
    
    predicted_levels = predict_detection_levels(good_pts, good_dists) 
    kp_scale_factors = Frame.feature_manager.scale_factors[predicted_levels]              
    radiuses = max_reproj_distance * kp_scale_factors     
    kd_idxs = keyframe.kd.query_ball_point(good_projs, radiuses)    

    #for i, p in enumerate(points):
    for i,p,j in zip(good_pts_idxs,good_pts,range(len(good_pts))):            
                
        if not good_pts_visible[j] or p.is_bad:     # point not visible in frame or point is bad 
            #print('p[%d] visible: %d, bad: %d' % (i, int(good_pts_visible[j]), int(p.is_bad))) 
            continue  
                  
        if p.is_in_keyframe(keyframe):    # we already matched this map point to this keyframe
            #print('p[%d] already in keyframe' % (i)) 
            continue
                
        # predicted_level = p.predict_detection_level(good_dists[j])         
        # kp_scale_factor = Frame.feature_manager.scale_factors[predicted_level]              
        # radius = max_reproj_distance * kp_scale_factor     
        predicted_level = predicted_levels[j]
        
        #print('p[%d] radius: %f' % (i,radius))         
            
        best_dist = math.inf 
        best_dist2 = math.inf
        best_level = -1 
        best_level2 = -1               
        best_kd_idx = -1        
            
        # find closest keypoints of frame        
        proj = good_projs[j]
        #for kd_idx in keyframe.kd.query_ball_point(proj, radius):  
        for kd_idx in kd_idxs[j]:             
                
            # check detection level     
            kp_level = keyframe.octaves[kd_idx]    
            if (kp_level<predicted_level-1) or (kp_level>predicted_level):   
                #print('p[%d] wrong predicted level **********************************' % (i))                       
                continue
        
            # check the reprojection error     
            kp = keyframe.kpsu[kd_idx]
            invSigma2 = Frame.feature_manager.inv_level_sigmas2[kp_level]                
            err = proj - kp       
            chi2 = np.inner(err,err)*invSigma2           
            if chi2 > Parameters.kChi2Mono: # chi-square 2 DOFs  (Hartley Zisserman pg 119)
                #print('p[%d] big reproj err %f **********************************' % (i,chi2))
                continue                  
                            
            descriptor_dist = p.min_des_distance(keyframe.des[kd_idx])
            #print('p[%d] descriptor_dist %f **********************************' % (i,descriptor_dist))            
            
            #if descriptor_dist < max_descriptor_distance and descriptor_dist < best_dist:     
            if descriptor_dist < best_dist:                                      
                best_dist2 = best_dist
                best_level2 = best_level
                best_dist = descriptor_dist
                best_level = kp_level
                best_kd_idx = kd_idx   
            else: 
                if descriptor_dist < best_dist2:  # N.O.
                    best_dist2 = descriptor_dist       
                    best_level2 = kp_level                                   
                                                            
        #if best_kd_idx > -1 and best_dist < max_descriptor_distance:
        if best_dist < max_descriptor_distance:         
            # apply match distance ratio test only if the best and second are in the same scale level 
            if (best_level2 == best_level) and (best_dist > best_dist2 * ratio_test):  # N.O.
                #print('p[%d] best_dist > best_dist2 * ratio_test **********************************' % (i))
                continue                
            p_keyframe = keyframe.get_point_match(best_kd_idx)
            # if there is already a map point replace it otherwise add a new point
            if p_keyframe is not None:
                if not p_keyframe.is_bad:
                    if p_keyframe.num_observations > p.num_observations:
                        p.replace_with(p_keyframe)
                    else:
                        p_keyframe.replace_with(p)                  
            else:
                p.add_observation(keyframe, best_kd_idx) 
                #p.update_info()    # done outside!
            fused_pts_count += 1                  
    return fused_pts_count     
Example #23
    def track_previous_frame(self, f_ref, f_cur):
        print('>>>> tracking previous frame ...')
        is_search_frame_by_projection_failure = False
        use_search_frame_by_projection = self.motion_model.is_ok and kUseSearchFrameByProjection and kUseMotionModel

        if use_search_frame_by_projection:
            # search frame by projection: match map points observed in f_ref with keypoints of f_cur
            print('search frame by projection')
            search_radius = Parameters.kMaxReprojectionDistanceFrame
            f_cur.reset_points()
            self.timer_seach_frame_proj.start()
            idxs_ref, idxs_cur, num_found_map_pts = search_frame_by_projection(
                f_ref,
                f_cur,
                max_reproj_distance=search_radius,
                max_descriptor_distance=self.descriptor_distance_sigma)
            self.timer_seach_frame_proj.refresh()
            self.num_matched_kps = len(idxs_cur)
            print("# matched map points in prev frame: %d " %
                  self.num_matched_kps)

            # if not enough map point matches consider a larger search radius
            if self.num_matched_kps < Parameters.kMinNumMatchedFeaturesSearchFrameByProjection:
                f_cur.remove_frame_views(idxs_cur)
                f_cur.reset_points()
                idxs_ref, idxs_cur, num_found_map_pts = search_frame_by_projection(
                    f_ref,
                    f_cur,
                    max_reproj_distance=2 * search_radius,
                    max_descriptor_distance=0.5 * self.descriptor_distance_sigma)
                self.num_matched_kps = len(idxs_cur)
                Printer.orange(
                    "# matched map points in prev frame (wider search): %d " %
                    self.num_matched_kps)

            if kDebugDrawMatches:
                img_matches = draw_feature_matches(f_ref.img,
                                                   f_cur.img,
                                                   f_ref.kps[idxs_ref],
                                                   f_cur.kps[idxs_cur],
                                                   f_ref.sizes[idxs_ref],
                                                   f_cur.sizes[idxs_cur],
                                                   horizontal=False)
                cv2.imshow('tracking frame by projection - matches',
                           img_matches)
                cv2.waitKey(1)

            if self.num_matched_kps < Parameters.kMinNumMatchedFeaturesSearchFrameByProjection:
                f_cur.remove_frame_views(idxs_cur)
                f_cur.reset_points()
                is_search_frame_by_projection_failure = True
                Printer.red(
                    'Not enough matches in search frame by projection: ',
                    self.num_matched_kps)
            else:
                # search frame by projection was successful
                if kUseDynamicDesDistanceTh:
                    self.descriptor_distance_sigma = self.dyn_config.update_descriptor_stat(
                        f_ref, f_cur, idxs_ref, idxs_cur)

                # store tracking info (for possible reuse)
                self.idxs_ref = idxs_ref
                self.idxs_cur = idxs_cur

                # f_cur pose optimization 1:
                # here, we use f_cur pose as first guess and exploit the matched map point of f_ref
                self.pose_optimization(f_cur, 'proj-frame-frame')
                # update matched map points; discard outliers detected in last pose optimization
                num_matched_points = f_cur.clean_outlier_map_points()
                print('     # num_matched_map_points: %d' %
                      (self.num_matched_map_points))
                #print('     # matched points: %d' % (num_matched_points) )

                if not self.pose_is_ok or self.num_matched_map_points < kNumMinInliersPoseOptimizationTrackFrame:
                    Printer.red(
                        'failure in tracking previous frame, # matched map points: ',
                        self.num_matched_map_points)
                    self.pose_is_ok = False

        if not use_search_frame_by_projection or is_search_frame_by_projection_failure:
            self.track_reference_frame(f_ref, f_cur, 'match-frame-frame')
Example #24
    def add_points(self,
                   points3d,
                   mask_pts3d,
                   kf1,
                   kf2,
                   idxs1,
                   idxs2,
                   img1,
                   do_check=True,
                   cos_max_parallax=Parameters.kCosMaxParallax):
        with self._lock:
            assert (kf1.is_keyframe
                    and kf2.is_keyframe)  # kf1 and kf2 must be keyframes
            assert (points3d.shape[0] == len(idxs1))
            assert (len(idxs2) == len(idxs1))

            added_points = []
            out_mask_pts3d = np.full(points3d.shape[0], False, dtype=bool)
            if mask_pts3d is None:
                mask_pts3d = np.full(points3d.shape[0], True, dtype=bool)

            ratio_scale_consistency = Parameters.kScaleConsistencyFactor * Frame.feature_manager.scale_factor

            if do_check:
                # project points
                uvs1, depths1 = kf1.project_points(points3d)
                bad_depths1 = depths1 <= 0
                uvs2, depths2 = kf2.project_points(points3d)
                bad_depths2 = depths2 <= 0

                # compute back-projected rays (unit vectors)
                rays1 = np.dot(kf1.Rwc, add_ones(kf1.kpsn[idxs1]).T).T
                norm_rays1 = np.linalg.norm(rays1, axis=-1, keepdims=True)
                rays1 /= norm_rays1
                rays2 = np.dot(kf2.Rwc, add_ones(kf2.kpsn[idxs2]).T).T
                norm_rays2 = np.linalg.norm(rays2, axis=-1, keepdims=True)
                rays2 /= norm_rays2

                # compute dot products of rays
                cos_parallaxs = np.sum(rays1 * rays2, axis=1)
                bad_cos_parallaxs = np.logical_or(
                    cos_parallaxs < 0, cos_parallaxs > cos_max_parallax)

                # compute reprojection errors
                errs1 = uvs1 - kf1.kpsu[idxs1]
                errs1_sqr = np.sum(errs1 * errs1,
                                   axis=1)  # squared reprojection errors
                kps1_levels = kf1.octaves[idxs1]
                invSigmas2_1 = Frame.feature_manager.inv_level_sigmas2[
                    kps1_levels]
                chis2_1 = errs1_sqr * invSigmas2_1  # chi square
                bad_chis2_1 = chis2_1 > Parameters.kChi2Mono  # chi-square 2 DOFs  (Hartley Zisserman pg 119)

                errs2 = uvs2 - kf2.kpsu[idxs2]
                errs2_sqr = np.sum(errs2 * errs2,
                                   axis=1)  # squared reprojection errors
                kps2_levels = kf2.octaves[idxs2]
                invSigmas2_2 = Frame.feature_manager.inv_level_sigmas2[
                    kps2_levels]
                chis2_2 = errs2_sqr * invSigmas2_2  # chi square
                bad_chis2_2 = chis2_2 > Parameters.kChi2Mono  # chi-square 2 DOFs  (Hartley Zisserman pg 119)

                # scale consistency
                scale_factors_x_depths1 = Frame.feature_manager.scale_factors[
                    kps1_levels] * depths1
                scale_factors_x_depths1_x_ratio_scale_consistency = scale_factors_x_depths1 * ratio_scale_consistency
                scale_factors_x_depths2 = Frame.feature_manager.scale_factors[
                    kps2_levels] * depths2
                scale_factors_x_depths2_x_ratio_scale_consistency = scale_factors_x_depths2 * ratio_scale_consistency
                bad_scale_consistency = np.logical_or(
                    (scale_factors_x_depths1 >
                     scale_factors_x_depths2_x_ratio_scale_consistency),
                    (scale_factors_x_depths2 >
                     scale_factors_x_depths1_x_ratio_scale_consistency))

                # combine all checks
                bad_points = bad_cos_parallaxs | bad_depths1 | bad_depths2 | bad_chis2_1 | bad_chis2_2 | bad_scale_consistency

            img_coords = np.rint(kf1.kps[idxs1]).astype(
                np.intp)  # image keypoints coordinates
            # build img patches coordinates
            delta = Parameters.kColorPatchDelta
            patch_extension = 1 + 2 * delta  # patch_extension x patch_extension
            img_pts_start = img_coords - delta
            img_pts_end = img_coords + delta
            img_ranges = np.linspace(img_pts_start,
                                     img_pts_end,
                                     patch_extension,
                                     dtype=np.intp)[:, :].T

            def img_range_elem(ranges, i):
                return ranges[:, i]

            for i, p in enumerate(points3d):
                if not mask_pts3d[i]:
                    #print('p[%d] not good' % i)
                    continue

                idx1_i = idxs1[i]
                idx2_i = idxs2[i]

                # perform the required checks before adding the point
                if do_check:
                    if bad_points[i]:
                        continue

                    # check parallax is large enough (this is going to filter out all points when the inter-frame motion is almost zero)
                    # ray1 = np.dot(kf1.Rwc, add_ones_1D(kf1.kpsn[idx1_i]))
                    # ray2 = np.dot(kf2.Rwc, add_ones_1D(kf2.kpsn[idx2_i]))
                    # cos_parallax = ray1.dot(ray2) / (np.linalg.norm(ray1) * np.linalg.norm(ray2))
                    # if cos_parallax < 0 or cos_parallax > cos_max_parallax:
                    #     #print('p[',i,']: ',p,' not enough parallax: ', cos_parallaxs[i])
                    #     continue

                    # check points are visible on f1
                    # uv1, depth1 = kf1.project_point(p)
                    # is_visible1  = kf1.is_in_image(uv1, depth1) # N.B.: is_in_image() check is redundant since we check the reproj error
                    # if not is_visible1:
                    #     continue

                    # check points are visible on f2
                    # uv2, depth2 = kf2.project_point(p)
                    # is_visible2  = kf2.is_in_image(uv2, depth2)  # N.B.: is_in_image() check is redundant since we check the reproj error
                    # if not is_visible2:
                    #     continue

                    # check reprojection error on f1
                    #kp1_level = kf1.octaves[idx1_i]
                    #invSigma2_1 = Frame.feature_manager.inv_level_sigmas2[kp1_level]
                    # err1 = uvs1[i] - kf1.kpsu[idx1_i]
                    # chi2_1 = np.inner(err1,err1)*invSigma2_1
                    # if chi2_1 > Parameters.kChi2Mono: # chi-square 2 DOFs  (Hartley Zisserman pg 119)
                    #     continue

                    # check reprojection error on f2
                    # kp2_level = kf2.octaves[idx2_i]
                    # invSigma2_2 = Frame.feature_manager.inv_level_sigmas2[kp2_level]
                    # err2 = uvs2[i] - kf2.kpsu[idx2_i]
                    # chi2_2 = np.inner(err2,err2)*invSigma2_2
                    # if chi2_2 > Parameters.kChi2Mono: # chi-square 2 DOFs  (Hartley Zisserman pg 119)
                    #     continue

                    #check scale consistency
                    # scale_factor_x_depth1 =  Frame.feature_manager.scale_factors[kps1_levels[i]] * depths1[i]
                    # scale_factor_x_depth2 =  Frame.feature_manager.scale_factors[kps2_levels[i]] * depths2[i]
                    # if (scale_factor_x_depth1 > scale_factor_x_depth2*ratio_scale_consistency) or \
                    #    (scale_factor_x_depth2 > scale_factor_x_depth1*ratio_scale_consistency):
                    #     continue

                # get the color of the point
                try:
                    #color = img1[int(round(kf1.kps[idx1_i, 1])), int(round(kf1.kps[idx1_i, 0]))]
                    #img_pt = np.rint(kf1.kps[idx1_i]).astype(np.int)
                    # color at the point
                    #color = img1[img_pt[1],img_pt[0]]
                    # build color patch
                    #color_patch = img1[img_pt[1]-delta:img_pt[1]+delta,img_pt[0]-delta:img_pt[0]+delta]
                    #color = color_patch.mean(axis=0).mean(axis=0)  # compute the mean color in the patch

                    # average color in a (1+2*delta) x (1+2*delta) patch
                    #pt_start = img_pts_start[i]
                    #pt_end   = img_pts_end[i]
                    #color_patch = img1[pt_start[1]:pt_end[1],pt_start[0]:pt_end[0]]

                    # average color in a (1+2*delta) x (1+2*delta) patch
                    img_range = img_range_elem(img_ranges, i)
                    color_patch = img1[img_range[1][:, np.newaxis],
                                       img_range[0]]
                    #print('color_patch.shape:',color_patch.shape)

                    color = cv2.mean(
                        color_patch)[:3]  # compute the mean color in the patch
                except IndexError:
                    Printer.orange('color out of range')
                    color = (255, 0, 0)

                # add the point to this map
                mp = MapPoint(p[0:3], color, kf2, idx2_i)
                self.add_point(mp)  # add point to this map
                mp.add_observation(kf1, idx1_i)
                mp.add_observation(kf2, idx2_i)
                mp.update_info()
                out_mask_pts3d[i] = True
                added_points.append(mp)
            return len(added_points), out_mask_pts3d, added_points
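
The vectorized parallax check in add_points() reduces to the following self-contained numpy sketch; the rotations and normalized keypoints below are made-up illustration values:

import numpy as np

def cos_parallax(Rwc1, kpn1, Rwc2, kpn2):
    # back-project the normalized image points to unit viewing rays in the world frame
    ray1 = Rwc1 @ np.array([kpn1[0], kpn1[1], 1.0])
    ray2 = Rwc2 @ np.array([kpn2[0], kpn2[1], 1.0])
    ray1 /= np.linalg.norm(ray1)
    ray2 /= np.linalg.norm(ray2)
    return float(np.dot(ray1, ray2))

# two slightly different bearings under identity rotations => cosine just below 1
c = cos_parallax(np.eye(3), (0.0, 0.0), np.eye(3), (0.05, 0.0))
# the point is rejected when c < 0 or c > cos_max_parallax (i.e. the parallax is too small)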
Example #25
def search_frame_for_triangulation_test(f1, f2, img2, img1 = None):

    idxs2_out = []
    idxs1_out = []
    lines_out = [] 
    num_found_matches = 0
    img2_epi = None     

    if __debug__:
        timer = Timer()
        timer.start()

    O1w = f1.Ow
    O2w = f2.Ow 
    # compute epipoles
    e1,_ = f1.project_point(O2w)  # in first frame  
    e2,_ = f2.project_point(O1w)  # in second frame  
    #print('e1: ', e1)
    #print('e2: ', e2)    
    
    baseline = np.linalg.norm(O1w-O2w) 
        
    # if the translation is too small we cannot triangulate 
    # if False: 
    #     if baseline < Parameters.kMinTraslation:  # we assume the Initializer has been used for building the first map 
    #         Printer.red("search for triangulation: impossible with almost zero translation!")
    #         return idxs1_out, idxs2_out, num_found_matches, img2_epi # EXIT
    # else:
    medianDepthF2 = f2.compute_points_median_depth()
    ratioBaselineDepth = baseline/medianDepthF2
    if ratioBaselineDepth < Parameters.kMinRatioBaselineDepth:  
        Printer.red("search for triangulation: impossible with too low ratioBaselineDepth!")
        return idxs1_out, idxs2_out, num_found_matches, img2_epi # EXIT           

    # compute the fundamental matrix between the two frames by using their estimated poses 
    F12, H21 = computeF12(f1, f2)

    idxs1 = []
    for i, p in enumerate(f1.get_points()): 
        if p is None:  # we consider just unmatched keypoints 
            kp = f1.kpsu[i]
            scale_factor = Frame.feature_manager.scale_factors[f1.octaves[i]]
            # discard points which are too close to the epipole 
            if np.linalg.norm(kp-e1) < Parameters.kMinDistanceFromEpipole * scale_factor:             
                continue    
            idxs1.append(i)      
    kpsu1 = f1.kpsu[idxs1]
    
    if __debug__:
        print('search_frame_for_triangulation - timer1: ', timer.elapsed())  
        timer.start()        

    # compute epipolar lines in second image 
    lines2 = cv2.computeCorrespondEpilines(kpsu1.reshape(-1,1,2), 2, F12)  
    lines2 = lines2.reshape(-1,3)
    
    xs2_inf = np.dot(H21, add_ones(kpsu1).T).T # x2inf = H21 * x1  where x2inf is the corresponding point according to the infinite homography [Hartley Zisserman pg 339]
    xs2_inf = xs2_inf[:, 0:2] / xs2_inf[:, 2:]  
    line_edges = [ epiline_to_end_points(line, e2, x2_inf, f2.width) for line, x2_inf in zip(lines2, xs2_inf) ] 
    #print("line_edges: ", line_edges)

    if __debug__:
        print('search_frame_for_triangulation - timer3: ', timer.elapsed())
        assert(len(line_edges) == len(idxs1))    
        timer.start()

    len_des2 = len(f2.des)
    flag_match = np.full(len_des2, False, dtype=bool)
    dist_match = np.zeros(len_des2)
    index_match = np.full(len_des2, 0, dtype=int)
    for i, idx in enumerate(idxs1):   # N.B.: a point in f1 can be matched to more than one point in f2, we avoid this by caching matches with f2 points
        f2_idx, dist = find_matches_along_line(f2, e2, line_edges[i], f1.des[idx])
        if f2_idx > -1:
            if not flag_match[f2_idx]: # new match
                flag_match[f2_idx] = True 
                dist_match[f2_idx] = dist 
                idxs2_out.append(f2_idx)
                idxs1_out.append(idx)
                index_match[f2_idx] = len(idxs2_out)-1                
                assert(f2.get_point_match(f2_idx) is None)            
                assert(f1.get_point_match(idx) is None)
                if __debug__:
                    lines_out.append(line_edges[i])
            else: # already matched
                if dist < dist_match[f2_idx]:  # update in case of a smaller distance 
                    dist_match[f2_idx] = dist 
                    index = index_match[f2_idx]  
                    idxs1_out[index] = idx    
                    if __debug__:
                        lines_out[index] = line_edges[i]                              
 
    num_found_matches = len(idxs1_out)
    assert(len(idxs1_out) == len(idxs2_out)) 

    if __debug__:   
        #print("num found matches: ", num_found_matches)
        if True: 
            kpsu2 = f2.kpsu[idxs2_out]
            img2_epi = draw_lines(img2.copy(), lines_out, kpsu2)
            #cv2.imshow("epipolar lines",img2_epi)
            #cv2.waitKey(0)

    if __debug__:
        print('search_frame_for_triangulation - timer4: ', timer.elapsed())

    return idxs1_out, idxs2_out, num_found_matches, img2_epi
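
For reference, a tiny self-contained demo of cv2.computeCorrespondEpilines, which produces the normalized epipolar lines used above; the fundamental matrix below is a synthetic pure-translation example, not one computed by computeF12:

import numpy as np
import cv2

# F for a pure horizontal translation with identity intrinsics: F = [t]_x with t = (1, 0, 0)
F = np.array([[0., 0.,  0.],
              [0., 0., -1.],
              [0., 1.,  0.]])
pts1 = np.float32([[100, 120], [200, 80]]).reshape(-1, 1, 2)
lines2 = cv2.computeCorrespondEpilines(pts1, 1, F).reshape(-1, 3)
# each row (a, b, c) satisfies a*x + b*y + c = 0 in the other image, with a^2 + b^2 = 1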
Example #26
            if display2d is not None:
                getchar()
            else:
                key_cv = cv2.waitKey(
                    0) & 0xFF  # useful when drawing stuff for debugging

        if do_step and img_id > 1:
            # stop at each frame
            if display2d is not None:
                getchar()
            else:
                key_cv = cv2.waitKey(0) & 0xFF

        if key == 'd' or (key_cv == ord('d')):
            do_step = not do_step
            Printer.green('do step: ', do_step)

        if key == 'q' or (key_cv == ord('q')):
            if display2d is not None:
                display2d.quit()
            if viewer3D is not None:
                viewer3D.quit()
            if matched_points_plt is not None:
                matched_points_plt.quit()
            break

        if viewer3D is not None:
            is_paused = not viewer3D.is_paused()

    slam.quit()
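
A short aside on the cv2.waitKey idiom used in this loop, as a minimal sketch:

import cv2

# waitKey returns an int (-1 on timeout); masking with 0xFF keeps the low byte
# so comparisons against ord('q') work even when the platform sets higher bits
key_cv = cv2.waitKey(1) & 0xFF  # non-blocking variant: wait at most 1 ms
if key_cv == ord('q'):
    print('quit requested')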
Example #27
    def initialize(self, f_cur, img_cur):

        if self.num_failures > kNumOfFailuresAfterWichNumMinTriangulatedPointsIsHalved:
            self.num_min_triangulated_points = 0.5 * Parameters.kInitializerNumMinTriangulatedPoints
            self.num_failures = 0
            Printer.orange(
                'Initializer: halved min num triangulated features to ',
                self.num_min_triangulated_points)

        # prepare the output
        out = InitializerOutput()
        is_ok = False

        #print('num frames: ', len(self.frames))

        # if too many frames have passed, move the current idx_f_ref forward
        # this is just one very simple policy that can be used
        if self.f_ref is not None:
            if f_cur.id - self.f_ref.id > kMaxIdDistBetweenIntializingFrames:
                self.f_ref = self.frames[-1]  # take last frame in the buffer
                #self.idx_f_ref = len(self.frames)-1  # take last frame in the buffer
                #self.idx_f_ref = self.frames.index(self.f_ref)  # since we are using a deque, the code of the previous commented line is not valid anymore
                #print('*** idx_f_ref:',self.idx_f_ref)
        #self.f_ref = self.frames[self.idx_f_ref]
        f_ref = self.f_ref
        #print('ref fid: ',self.f_ref.id,', curr fid: ', f_cur.id, ', idxs_ref: ', self.idxs_ref)

        # append current frame
        self.frames.append(f_cur)

        # if the current frames do not have enough features, exit
        if len(f_ref.kps) < self.num_min_features or len(
                f_cur.kps) < self.num_min_features:
            Printer.red('Initializer: ko - not enough features!')
            self.num_failures += 1
            return out, is_ok

        # find keypoint matches
        idxs_cur, idxs_ref = match_frames(f_cur, f_ref,
                                          kFeatureMatchRatioTestInitializer)

        print('|------------')
        #print('deque ids: ', [f.id for f in self.frames])
        print('initializing frames ', f_cur.id, ', ', f_ref.id)
        print("# keypoint matches: ", len(idxs_cur))

        Trc = self.estimatePose(f_ref.kpsn[idxs_ref], f_cur.kpsn[idxs_cur])
        Tcr = inv_T(Trc)  # Tcr w.r.t. ref frame
        f_ref.update_pose(np.eye(4))
        f_cur.update_pose(Tcr)

        # remove outliers from keypoint matches by using the mask computed with inter frame pose estimation
        mask_idxs = (self.mask_match.ravel() == 1)
        self.num_inliers = sum(mask_idxs)
        print('# keypoint inliers: ', self.num_inliers)
        idx_cur_inliers = idxs_cur[mask_idxs]
        idx_ref_inliers = idxs_ref[mask_idxs]

        # create a temp map for initializing
        map = Map()
        f_ref.reset_points()
        f_cur.reset_points()

        #map.add_frame(f_ref)
        #map.add_frame(f_cur)

        kf_ref = KeyFrame(f_ref)
        kf_cur = KeyFrame(f_cur, img_cur)
        map.add_keyframe(kf_ref)
        map.add_keyframe(kf_cur)

        pts3d, mask_pts3d = triangulate_normalized_points(
            kf_cur.Tcw, kf_ref.Tcw, kf_cur.kpsn[idx_cur_inliers],
            kf_ref.kpsn[idx_ref_inliers])

        new_pts_count, mask_points, _ = map.add_points(
            pts3d,
            mask_pts3d,
            kf_cur,
            kf_ref,
            idx_cur_inliers,
            idx_ref_inliers,
            img_cur,
            do_check=True,
            cos_max_parallax=Parameters.kCosMaxParallaxInitializer)
        print("# triangulated points: ", new_pts_count)

        if new_pts_count > self.num_min_triangulated_points:
            err = map.optimize(verbose=False,
                               rounds=20,
                               use_robust_kernel=True)
            print("init optimization error^2: %f" % err)

            num_map_points = len(map.points)
            print("# map points:   %d" % num_map_points)
            is_ok = num_map_points > self.num_min_triangulated_points

            out.pts = pts3d[mask_points]
            out.kf_cur = kf_cur
            out.idxs_cur = idx_cur_inliers[mask_points]
            out.kf_ref = kf_ref
            out.idxs_ref = idx_ref_inliers[mask_points]

            # set the scene median depth to equal desired_median_depth
            desired_median_depth = Parameters.kInitializerDesiredMedianDepth
            median_depth = kf_cur.compute_points_median_depth(out.pts)
            depth_scale = desired_median_depth / median_depth
            print('forcing current median depth ', median_depth, ' to ',
                  desired_median_depth)

            out.pts[:, :3] = out.pts[:, :3] * depth_scale  # scale points
            tcw = kf_cur.tcw * depth_scale  # scale initial baseline
            kf_cur.update_translation(tcw)

        map.delete()

        if is_ok:
            Printer.green('Initializer: ok!')
        else:
            self.num_failures += 1
            Printer.red('Initializer: ko!')
        print('|------------')
        return out, is_ok
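
The map rescaling at the end of initialize() boils down to this small numpy sketch; normalize_scale is an illustrative helper, not part of the codebase:

import numpy as np

def normalize_scale(pts_w, tcw, depths, desired_median_depth):
    # rescale points and camera translation so the median scene depth hits the target
    s = desired_median_depth / np.median(depths)
    return pts_w * s, tcw * s

pts_w = np.random.randn(100, 3) + np.array([0., 0., 5.])
depths = pts_w[:, 2]  # depths as seen by a camera at the origin looking along z
pts_w, tcw = normalize_scale(pts_w, np.array([0., 0., 1.]), depths, desired_median_depth=20)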
Example #28
from enum import Enum

import cv2

from scipy.spatial import cKDTree
#from pykdtree.kdtree import KDTree # slower!

from utils_sys import Printer, import_from
from utils_geom import add_ones, s1_diff_deg, s1_dist_deg, l2_distances

ORBextractor = import_from('orbslam2_features', 'ORBextractor')  
      
kPySlamUtilsAvailable= True      
try:
    import pyslam_utils
except ImportError:
    kPySlamUtilsAvailable = False 
    Printer.orange('WARNING: cannot import pyslam_utils')
      
from parameters import Parameters 



# convert matrix of pts into list of cv2 keypoints
def convert_pts_to_keypoints(pts, size=1): 
    kps = []
    if pts is not None: 
        if pts.ndim > 2:
            # convert matrix [Nx1x2] of pts into list of keypoints  
            kps = [ cv2.KeyPoint(p[0][0], p[0][1], _size=size) for p in pts ]          
        else: 
            # convert matrix [Nx2] of pts into list of keypoints  
            kps = [ cv2.KeyPoint(p[0], p[1], _size=size) for p in pts ]  # N.B.: newer OpenCV Python bindings use size= instead of _size=
    return kps
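
A plausible usage sketch, assuming a grayscale image is available; cv2.goodFeaturesToTrack returns the [Nx1x2] layout handled by the first branch:

import numpy as np
import cv2

gray = np.zeros((480, 640), dtype=np.uint8)  # placeholder image; use a real frame in practice
pts = cv2.goodFeaturesToTrack(gray, maxCorners=500, qualityLevel=0.01, minDistance=7)
kps = convert_pts_to_keypoints(pts, size=10)  # pts may be None if no corners were found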
Example #29
def extract_deep_features(sift_wrapper, sess, img_path, qtz=True):
    img = cv2.imread(img_path)
    if img is None:
        Printer.red('cannot find img: ', img_path)
        sys.exit(1)  # exit with a non-zero code on failure
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # cv2.imread returns BGR, not RGB
    # detect SIFT keypoints.
    start = time.time()
    _, cv_kpts = sift_wrapper.detect(gray_img)
    end = time.time()
    print('Time cost in keypoint detection', end - start)

    start = time.time()
    sift_wrapper.build_pyramid(gray_img)
    end = time.time()
    print('Time cost in scale space construction', end - start)

    start = time.time()
    all_patches = sift_wrapper.get_patches(cv_kpts)
    end = time.time()
    print('Time cost in patch cropping', end - start)

    num_patch = all_patches.shape[0]

    # the patches are fed in loop_num full batches plus one final (possibly partial) batch
    if num_patch % FLAGS.batch_size > 0:
        loop_num = int(np.floor(float(num_patch) / float(FLAGS.batch_size)))
    else:
        loop_num = int(num_patch / FLAGS.batch_size - 1)

    def _worker(patch_queue, sess, all_feat):
        """The worker thread."""
        while True:
            patch_data = patch_queue.get()
            if patch_data is None:
                return
            feat = sess.run("squeeze_1:0", feed_dict={"input:0": np.expand_dims(patch_data, -1)})
            all_feat.append(feat)
            #patch_queue.task_done()

    all_feat = []
    patch_queue = Queue()
    worker_thread = Thread(target=_worker, args=(patch_queue, sess, all_feat))
    worker_thread.daemon = True
    worker_thread.start()

    start = time.time()

    # enqueue
    for i in range(loop_num + 1):
        if i < loop_num:
            patch_queue.put(all_patches[i * FLAGS.batch_size: (i + 1) * FLAGS.batch_size])
        else:
            patch_queue.put(all_patches[i * FLAGS.batch_size:])
    # poison pill
    patch_queue.put(None)
    # wait for extraction.
    worker_thread.join()

    end = time.time()
    print('Time cost in feature extraction', end - start)

    all_feat = np.concatenate(all_feat, axis=0)
    # quantize output features.
    all_feat = (all_feat * 128 + 128).astype(np.uint8) if qtz else all_feat
    return all_feat, cv_kpts, img
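
The final quantization step follows the convention x_q = 128*x + 128; a minimal sketch together with its approximate inverse, assuming the float features lie roughly in [-1, 1]:

import numpy as np

def quantize_feat(feat):
    # float features -> uint8, as done above
    return (feat * 128 + 128).astype(np.uint8)

def dequantize_feat(feat_q):
    # approximate inverse of quantize_feat
    return (feat_q.astype(np.float32) - 128.0) / 128.0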
Example #30
def search_frame_for_triangulation(kf1, kf2, idxs1=None, idxs2=None, 
                                   max_descriptor_distance=0.5*Parameters.kMaxDescriptorDistance):   
    idxs2_out = []
    idxs1_out = []
    num_found_matches = 0
    img2_epi = None     

    if __debug__:
        timer = Timer()
        timer.start()

    O1w = kf1.Ow
    O2w = kf2.Ow
    # compute epipoles
    e1,_ = kf1.project_point(O2w)  # in first frame 
    e2,_ = kf2.project_point(O1w)  # in second frame  
    #print('e1: ', e1)
    #print('e2: ', e2)    
    
    baseline = np.linalg.norm(O1w-O2w) 

    # if the translation is too small we cannot triangulate 
    # if baseline < Parameters.kMinTraslation:  # we assume the Initializer has been used for building the first map 
    #     Printer.red("search for triangulation: impossible with almost zero translation!")
    #     return idxs1_out, idxs2_out, num_found_matches, img2_epi # EXIT
    # else:    
    medianDepth = kf2.compute_points_median_depth()
    if medianDepth == -1:
        Printer.orange("search for triangulation: f2 with no points")        
        medianDepth = kf1.compute_points_median_depth()        
    ratioBaselineDepth = baseline/medianDepth
    if ratioBaselineDepth < Parameters.kMinRatioBaselineDepth:  
        Printer.orange("search for triangulation: impossible with too low ratioBaselineDepth!")
        return idxs1_out, idxs2_out, num_found_matches, img2_epi # EXIT        

    # compute the fundamental matrix between the two frames by using their estimated poses 
    F12, H21 = computeF12(kf1, kf2)

    if idxs1 is None or idxs2 is None:
        timerMatch = Timer()
        timerMatch.start()        
        idxs1, idxs2 = Frame.feature_matcher.match(kf1.des, kf2.des)        
        print('search_frame_for_triangulation - matching - timer: ', timerMatch.elapsed())        
    
    rot_histo = RotationHistogram()
    check_orientation = kCheckFeaturesOrientation and Frame.oriented_features     
        
    # check epipolar constraints 
    for i1,i2 in zip(idxs1,idxs2):
        if kf1.get_point_match(i1) is not None or kf2.get_point_match(i2) is not None: # we are searching for keypoint matches where both keypoints do not have a corresponding map point 
            #print('existing point on match')
            continue 
        
        descriptor_dist = Frame.descriptor_distance(kf1.des[i1], kf2.des[i2])
        if descriptor_dist > max_descriptor_distance:
            continue     
        
        kp1 = kf1.kpsu[i1]
        #kp1_scale_factor = Frame.feature_manager.scale_factors[kf1.octaves[i1]]
        #kp1_size = f1.sizes[i1]
        # discard points which are too close to the epipole            
        #if np.linalg.norm(kp1-e1) < Parameters.kMinDistanceFromEpipole * kp1_scale_factor:                 
        #if np.linalg.norm(kp1-e1) - kp1_size < Parameters.kMinDistanceFromEpipole:  # N.B.: this is too conservative => it filters too much
        #    continue   
        
        kp2 = kf2.kpsu[i2]
        kp2_scale_factor = Frame.feature_manager.scale_factors[kf2.octaves[i2]]        
        # kp2_size = f2.sizes[i2]        
        # discard points which are too close to the epipole            
        delta = kp2-e2        
        #if np.linalg.norm(delta) < Parameters.kMinDistanceFromEpipole * kp2_scale_factor:
        if np.inner(delta,delta) < kMinDistanceFromEpipole2 * kp2_scale_factor:  # same check as above with squared distances
        #if np.linalg.norm(delta) - kp2_size < Parameters.kMinDistanceFromEpipole:  # N.B.: this is too conservative => it filters too much
            continue
        
        # check epipolar constraint         
        sigma2_kp2 = Frame.feature_manager.level_sigmas2[kf2.octaves[i2]]
        if check_dist_epipolar_line(kp1,kp2,F12,sigma2_kp2):
            idxs1_out.append(i1)
            idxs2_out.append(i2)
            
            if check_orientation:
                index_match = len(idxs1_out)-1
                rot = kf1.angles[i1]-kf2.angles[i2]
                rot_histo.push(rot,index_match)            
        #else:
        #    print('discarding point match non respecting epipolar constraint')
         
    if check_orientation:            
        valid_match_idxs = rot_histo.get_valid_idxs()     
        #print('checking orientation consistency - valid matches % :', len(valid_match_idxs)/max(1,len(idxs1_out))*100,'% of ', len(idxs1_out),'matches')
        #print('rotation histogram: ', rot_histo)
        idxs1_out = np.array(idxs1_out)[valid_match_idxs]
        idxs2_out = np.array(idxs2_out)[valid_match_idxs]
                 
    num_found_matches = len(idxs1_out)
             
    if __debug__:
        print('search_frame_for_triangulation - timer: ', timer.elapsed())

    return idxs1_out, idxs2_out, num_found_matches, img2_epi
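
check_dist_epipolar_line is not shown in this snippet; below is a minimal sketch of what such a point-to-epipolar-line test typically computes. The convention l2 = F12^T * p1 and the 3.84 one-DOF chi-square threshold are assumptions patterned on ORB-SLAM, not code taken from pySLAM:

import numpy as np

def check_dist_epipolar_line_sketch(kp1, kp2, F12, sigma2_kp2):
    # epipolar line in the second image associated with kp1 (assumed convention: l2 = F12^T * p1)
    l2 = F12.T @ np.array([kp1[0], kp1[1], 1.0])
    den = l2[0]*l2[0] + l2[1]*l2[1]
    if den == 0:
        return False
    num = l2[0]*kp2[0] + l2[1]*kp2[1] + l2[2]
    dist_sqr = num*num / den  # squared point-to-line distance
    return dist_sqr < 3.84 * sigma2_kp2  # chi-square threshold, 1 DOF at ~95%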