Code example #1
File: slam.py  Project: daoran/pyslam-features
    def track_reference_frame(self, f_ref, f_cur, name=''):
        if f_ref is None:
            return
        print('>>>> tracking reference %d ...' % (f_ref.id))
        # find keypoint matches between f_cur and f_ref
        print('matching keypoints with ', Frame.feature_matcher.type.name)
        self.timer_match.start()
        idxs_cur, idxs_ref = match_frames(f_cur, f_ref)
        self.timer_match.refresh()
        self.num_matched_kps = idxs_cur.shape[0]
        print("# keypoints matched: %d " % self.num_matched_kps)
        if kUseEssentialMatrixFitting:
            # estimate camera orientation and inlier matches by fitting an essential matrix (see the limitations above)
            idxs_ref, idxs_cur = self.estimate_pose_by_fitting_ess_mat(
                f_ref, f_cur, idxs_ref, idxs_cur)

        if kUseDynamicDesDistanceTh:
            self.descriptor_distance_sigma = self.dyn_config.update_descriptor_stat(
                f_ref, f_cur, idxs_ref, idxs_cur)

        # propagate map point matches from kf_ref to f_cur  (do not override idxs_ref, idxs_cur)
        num_found_map_pts_inter_frame, idx_ref_prop, idx_cur_prop = propagate_map_point_matches(
            f_ref,
            f_cur,
            idxs_ref,
            idxs_cur,
            max_descriptor_distance=self.descriptor_distance_sigma)
        print("# matched map points in prev frame: %d " %
              num_found_map_pts_inter_frame)

        if kDebugDrawMatches:
            img_matches = draw_feature_matches(f_ref.img,
                                               f_cur.img,
                                               f_ref.kps[idx_ref_prop],
                                               f_cur.kps[idx_cur_prop],
                                               f_ref.sizes[idx_ref_prop],
                                               f_cur.sizes[idx_cur_prop],
                                               horizontal=False)
            cv2.imshow('tracking frame (no projection) - matches', img_matches)
            cv2.waitKey(1)

        # store tracking info (for possible reuse)
        self.idxs_ref = idxs_ref
        self.idxs_cur = idxs_cur

        # f_cur pose optimization using last matches with kf_ref:
        # here, we use the first guess of the f_cur pose and the propagated map point matches from f_ref (matched keypoints)
        self.pose_optimization(f_cur, name)
        # update matched map points; discard outliers detected in last pose optimization
        num_matched_points = f_cur.clean_outlier_map_points()
        print('      # num_matched_map_points: %d' %
              (self.num_matched_map_points))
        #print('     # matched points: %d' % (num_matched_points) )
        if not self.pose_is_ok or self.num_matched_map_points < kNumMinInliersPoseOptimizationTrackFrame:
            f_cur.remove_frame_views(idxs_cur)
            f_cur.reset_points()
            Printer.red(
                'failure in tracking reference %d, # matched map points: %d' %
                (f_ref.id, self.num_matched_map_points))
            self.pose_is_ok = False
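
The matcher behind match_frames is not shown in this excerpt. A minimal sketch of what such a function typically does, assuming binary descriptors (e.g. ORB) stored in f.des and Lowe's ratio test; the helper name, the 0.7 ratio, and the NORM_HAMMING choice are assumptions, not taken from the project:

import numpy as np
import cv2

def match_frames_sketch(f_cur, f_ref, ratio_test=0.7):
    # brute-force matching with the two nearest neighbors per descriptor
    bf = cv2.BFMatcher(cv2.NORM_HAMMING)
    matches = bf.knnMatch(f_cur.des, f_ref.des, k=2)
    idxs_cur, idxs_ref = [], []
    for pair in matches:
        # keep a match only if it is clearly better than the runner-up
        if len(pair) == 2 and pair[0].distance < ratio_test * pair[1].distance:
            idxs_cur.append(pair[0].queryIdx)
            idxs_ref.append(pair[0].trainIdx)
    return np.array(idxs_cur), np.array(idxs_ref)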
Code example #2
    def get_frame_covisibles(self, frame):
        points = frame.get_matched_good_points()
        #keyframes = self.get_local_keyframes()
        #assert len(points) > 0
        if len(points) == 0:
            Printer.red('get_frame_covisibles - frame with no points')
            return None, [], []  # avoid crashing on most_common() below

        # for each map point in the frame, check in which other keyframes it is seen;
        # increase the counter for those keyframes
        viewing_keyframes = [
            kf for p in points for kf in p.keyframes() if not kf.is_bad
        ]  # if kf in keyframes]
        viewing_keyframes = Counter(viewing_keyframes)
        kf_ref = viewing_keyframes.most_common(1)[0][0]
        #local_keyframes = viewing_keyframes.keys()

        # include also some not-already-included keyframes that are neighbors to already-included keyframes
        for kf in list(viewing_keyframes.keys()):
            second_neighbors = kf.get_best_covisible_keyframes(
                Parameters.kNumBestCovisibilityKeyFrames)
            viewing_keyframes.update(second_neighbors)
            children = kf.get_children()
            viewing_keyframes.update(children)
            if len(viewing_keyframes) >= Parameters.kMaxNumOfKeyframesInLocalMap:
                break

        local_keyframes_counts = viewing_keyframes.most_common(
            Parameters.kMaxNumOfKeyframesInLocalMap)
        local_points = set()
        local_keyframes = []
        for kf, c in local_keyframes_counts:
            local_points.update(kf.get_matched_points())
            local_keyframes.append(kf)
        return kf_ref, local_keyframes, local_points
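
The counting idiom above leans on collections.Counter. A minimal self-contained sketch of the same pattern with stand-in data:

from collections import Counter

# one entry per (map point, viewing keyframe) pair
observations = ['kf1', 'kf2', 'kf1', 'kf3', 'kf1', 'kf2']
counts = Counter(observations)
kf_ref = counts.most_common(1)[0][0]               # keyframe sharing the most points -> 'kf1'
top_two = [kf for kf, _ in counts.most_common(2)]  # best-covisible subset, ordered by count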
Code example #3
File: dataset.py  Project: daoran/pyslam-features
    def getImageColor(self, frame_id):
        try:
            img = self.getImage(frame_id)
            if img.ndim == 2:
                return cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
            else:
                return img
        except Exception:
            img = None
            #raise IOError('Cannot open dataset: ', self.name, ', path: ', self.path)
            Printer.red('Cannot open dataset: ', self.name, ', path: ', self.path)
            return img
Code example #4
File: frame.py  Project: daoran/pyslam-features
    def compute_points_median_depth(self, points3d=None):
        with self._lock_pose:
            Rcw2 = self._pose.Rcw[2, :3]  # third row of Rcw (camera z axis)
            tcw2 = self._pose.tcw[2]      # z component of tcw
        if points3d is None:
            with self._lock_features:
                points3d = np.array(
                    [p.pt for p in self.points if p is not None])
        if len(points3d) > 0:
            z = np.dot(Rcw2, points3d[:, :3].T) + tcw2
            z = sorted(z)
            return z[(len(z) - 1) // 2]
        else:
            Printer.red('frame.compute_points_median_depth() with no points')
            return -1
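
The depth of a world point in the camera frame is the third row of the rigid transform Tcw applied to it, which is exactly what the z computation above does. A self-contained sketch with illustrative pose values:

import numpy as np

Rcw = np.eye(3)                   # illustrative rotation (world -> camera)
tcw = np.array([0.0, 0.0, 1.0])   # illustrative translation
points3d = np.random.rand(100, 3) * 10.0
z = np.dot(Rcw[2, :], points3d[:, :3].T) + tcw[2]   # per-point depth in the camera frame
z = np.sort(z)
median_depth = z[(len(z) - 1) // 2]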
Code example #5
File: slam.py  Project: daoran/pyslam-features
    def track_local_map(self, f_cur):
        if self.map.local_map.is_empty():
            return
        print('>>>> tracking local map...')
        self.timer_seach_map.start()

        self.update_local_map()

        num_found_map_pts, reproj_err_frame_map_sigma, matched_points_frame_idxs = search_map_by_projection(
            self.local_points,
            f_cur,
            max_reproj_distance=self.reproj_err_frame_map_sigma,  #Parameters.kMaxReprojectionDistanceMap,
            max_descriptor_distance=self.descriptor_distance_sigma,
            ratio_test=Parameters.kMatchRatioTestMap)  # use the updated local map
        self.timer_seach_map.refresh()
        #print('reproj_err_sigma: ', reproj_err_frame_map_sigma, ' used: ', self.reproj_err_frame_map_sigma)
        print("# new matched map points in local map: %d " % num_found_map_pts)
        print("# local map points ", self.map.local_map.num_points())

        if kDebugDrawMatches:
            img_matched_trails = f_cur.draw_feature_trails(
                f_cur.img.copy(),
                matched_points_frame_idxs,
                trail_max_length=3)
            cv2.imshow('tracking local map - matched trails',
                       img_matched_trails)
            cv2.waitKey(1)

        # f_cur pose optimization 2 with all the matched local map points
        self.pose_optimization(f_cur, 'proj-map-frame')
        f_cur.update_map_points_statistics()
        # here we do not reset outliers; we let them reach the keyframe generation
        # and then bundle adjustment will possibly decide whether to remove them;
        # only after keyframe generation are the outliers cleaned!
        print('     # num_matched_map_points: %d' %
              (self.num_matched_map_points))
        if not self.pose_is_ok or self.num_matched_map_points < kNumMinInliersPoseOptimizationTrackLocalMap:
            Printer.red(
                'failure in tracking local map, # matched map points: ',
                self.num_matched_map_points)
            self.pose_is_ok = False
Code example #6
File: slam.py  Project: daoran/pyslam-features
    def estimate_pose_by_fitting_ess_mat(self, f_ref, f_cur, idxs_ref,
                                         idxs_cur):
        # N.B.: in order to understand the limitations of fitting an essential mat, read the comments of the method self.estimate_pose_ess_mat()
        self.timer_pose_est.start()
        # estimate inter frame camera motion by using found keypoint matches
        # output of the following function is:  Trc = [Rrc, trc] with ||trc||=1  where c=cur, r=ref  and  pr = Trc * pc
        Mrc, self.mask_match = estimate_pose_ess_mat(
            f_ref.kpsn[idxs_ref],
            f_cur.kpsn[idxs_cur],
            method=cv2.RANSAC,
            prob=kRansacProb,
            threshold=kRansacThresholdNormalized)
        #Mcr = np.linalg.inv(poseRt(Mrc[:3, :3], Mrc[:3, 3]))
        Mcr = inv_T(Mrc)
        estimated_Tcw = np.dot(Mcr, f_ref.pose)
        self.timer_pose_est.refresh()

        # remove outliers from keypoint matches by using the mask computed with inter frame pose estimation
        mask_idxs = (self.mask_match.ravel() == 1)
        self.num_inliers = sum(mask_idxs)
        print('# inliers: ', self.num_inliers)
        idxs_ref = idxs_ref[mask_idxs]
        idxs_cur = idxs_cur[mask_idxs]

        # if there are not enough inliers do not use the estimated pose
        if self.num_inliers < kNumMinInliersEssentialMat:
            #f_cur.update_pose(f_ref.pose) # reset estimated pose to previous frame
            Printer.red('Essential mat: not enough inliers!')
        else:
            # use the estimated pose as an initial guess for the subsequent pose optimization
            # set only the estimated rotation (essential mat computation does not provide a scale for the translation, see above)
            #f_cur.pose[:3,:3] = estimated_Tcw[:3,:3] # copy only the rotation
            #f_cur.pose[:,3] = f_ref.pose[:,3].copy() # override translation with ref frame translation
            Rcw = estimated_Tcw[:3, :3]  # copy only the rotation
            tcw = f_ref.pose[:3, 3]      # override translation with ref frame translation
            f_cur.update_rotation_and_translation(Rcw, tcw)
        return idxs_ref, idxs_cur
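
estimate_pose_ess_mat is not shown in this excerpt. A plausible minimal version built on OpenCV, assuming kpn_ref/kpn_cur are Nx2 arrays of normalized image coordinates (K^-1 already applied, hence focal=1 and pp=(0, 0)); the real function also packs R, t into the 4x4 matrix Mrc seen above:

import cv2

def estimate_pose_ess_mat_sketch(kpn_ref, kpn_cur, prob=0.999, threshold=0.0003):
    # E encodes the epipolar constraint between the two views
    E, mask_match = cv2.findEssentialMat(kpn_cur, kpn_ref, focal=1.0, pp=(0., 0.),
                                         method=cv2.RANSAC, prob=prob, threshold=threshold)
    # recoverPose returns R, t with ||t|| = 1: the translation scale is
    # unobservable, which is why the caller above keeps only the rotation
    _, R, t, _ = cv2.recoverPose(E, kpn_cur, kpn_ref, focal=1.0, pp=(0., 0.))
    return R, t, mask_match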
Code example #7
def extract_deep_features(sift_wrapper, sess, img_path, qtz=True):
    img = cv2.imread(img_path)
    if img is None:
        Printer.red('cannot find img: ', img_path)
        sys.exit(1)  # exit with a non-zero code on failure
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # cv2.imread returns BGR images
    # detect SIFT keypoints.
    start = time.time()
    _, cv_kpts = sift_wrapper.detect(gray_img)
    end = time.time()
    print('Time cost in keypoint detection', end - start)

    start = time.time()
    sift_wrapper.build_pyramid(gray_img)
    end = time.time()
    print('Time cost in scale space construction', end - start)

    start = time.time()
    all_patches = sift_wrapper.get_patches(cv_kpts)
    end = time.time()
    print('Time cost in patch cropping', end - start)

    num_patch = all_patches.shape[0]

    # index of the last batch; the final (possibly partial) slice is enqueued separately below
    loop_num = int(np.ceil(float(num_patch) / float(FLAGS.batch_size))) - 1

    def _worker(patch_queue, sess, all_feat):
        """The worker thread."""
        while True:
            patch_data = patch_queue.get()
            if patch_data is None:
                return
            feat = sess.run("squeeze_1:0", feed_dict={"input:0": np.expand_dims(patch_data, -1)})
            all_feat.append(feat)
            #patch_queue.task_done()

    all_feat = []
    patch_queue = Queue()
    worker_thread = Thread(target=_worker, args=(patch_queue, sess, all_feat))
    worker_thread.daemon = True
    worker_thread.start()

    start = time.time()

    # enqueue
    for i in range(loop_num + 1):
        if i < loop_num:
            patch_queue.put(all_patches[i * FLAGS.batch_size: (i + 1) * FLAGS.batch_size])
        else:
            patch_queue.put(all_patches[i * FLAGS.batch_size:])
    # poison pill
    patch_queue.put(None)
    # wait for extraction.
    worker_thread.join()

    end = time.time()
    print('Time cost in feature extraction', end - start)

    all_feat = np.concatenate(all_feat, axis=0)
    # quantize output features.
    all_feat = (all_feat * 128 + 128).astype(np.uint8) if qtz else all_feat
    return all_feat, cv_kpts, img
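
The worker above follows the classic producer/consumer pattern with a None sentinel ("poison pill"). A minimal self-contained sketch of the same pattern:

from queue import Queue
from threading import Thread

results = []

def worker(q):
    while True:
        item = q.get()
        if item is None:   # poison pill: stop consuming
            return
        results.append(item * 2)

q = Queue()
t = Thread(target=worker, args=(q,), daemon=True)
t.start()
for i in range(5):
    q.put(i)       # enqueue work
q.put(None)        # signal completion
t.join()           # wait for the worker to drain the queue
print(results)     # [0, 2, 4, 6, 8]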
Code example #8
def bundle_adjustment(keyframes, points, local_window, fixed_points=False, verbose=False, rounds=10, use_robust_kernel=False, abort_flag=g2o.Flag()):
    if local_window is None:
        local_frames = keyframes
    else:
        local_frames = keyframes[-local_window:]

    # create g2o optimizer
    opt = g2o.SparseOptimizer()
    block_solver = g2o.BlockSolverSE3(g2o.LinearSolverCSparseSE3())
    #block_solver = g2o.BlockSolverSE3(g2o.LinearSolverCholmodSE3())        
    solver = g2o.OptimizationAlgorithmLevenberg(block_solver)
    opt.set_algorithm(solver)
    opt.set_force_stop_flag(abort_flag)

    thHuberMono = math.sqrt(5.991)  # chi-square 2 DOFs

    graph_keyframes, graph_points = {}, {}

    # add frame vertices to graph
    for kf in (local_frames if fixed_points else keyframes):    # if points are fixed then consider just the local frames, otherwise we need all frames or at least two frames for each point
        if kf.is_bad:
            continue 
        #print('adding vertex frame ', f.id, ' to graph')
        se3 = g2o.SE3Quat(kf.Rcw, kf.tcw)
        v_se3 = g2o.VertexSE3Expmap()
        v_se3.set_estimate(se3)
        v_se3.set_id(kf.kid * 2)  # even ids  (use f.kid here!)
        v_se3.set_fixed(kf.kid==0 or kf not in local_frames) #(use f.kid here!)
        opt.add_vertex(v_se3)

        # confirm pose correctness
        #est = v_se3.estimate()
        #assert np.allclose(pose[0:3, 0:3], est.rotation().matrix())
        #assert np.allclose(pose[0:3, 3], est.translation())

        graph_keyframes[kf] = v_se3

    num_edges = 0
    
    # add point vertices to graph 
    for p in points:
        assert(p is not None)        
        if p.is_bad:  # do not consider bad points   
            continue
        if __debug__:        
            if not any([f in keyframes for f in p.keyframes()]):  
                Printer.red('point without a viewing frame!!')
                continue        
        #print('adding vertex point ', p.id,' to graph')
        v_p = g2o.VertexSBAPointXYZ()    
        v_p.set_id(p.id * 2 + 1)  # odd ids
        v_p.set_estimate(p.pt[0:3])
        v_p.set_marginalized(True)
        v_p.set_fixed(fixed_points)
        opt.add_vertex(v_p)
        graph_points[p] = v_p

        # add edges
        for kf, idx in p.observations():
            if kf.is_bad:
                continue 
            if kf not in graph_keyframes:
                continue
            #print('adding edge between point ', p.id,' and frame ', f.id)
            edge = g2o.EdgeSE3ProjectXYZ()
            edge.set_vertex(0, v_p)
            edge.set_vertex(1, graph_keyframes[kf])
            edge.set_measurement(kf.kpsu[idx])
            invSigma2 = Frame.feature_manager.inv_level_sigmas2[kf.octaves[idx]]
            edge.set_information(np.eye(2)*invSigma2)
            if use_robust_kernel:
                edge.set_robust_kernel(g2o.RobustKernelHuber(thHuberMono))

            edge.fx = kf.camera.fx 
            edge.fy = kf.camera.fy
            edge.cx = kf.camera.cx
            edge.cy = kf.camera.cy

            opt.add_edge(edge)
            num_edges += 1

    if verbose:
        opt.set_verbose(True)
    opt.initialize_optimization()
    opt.optimize(rounds)

    # put frames back
    for kf in graph_keyframes:
        est = graph_keyframes[kf].estimate()
        #R = est.rotation().matrix()
        #t = est.translation()
        #f.update_pose(poseRt(R, t))
        kf.update_pose(g2o.Isometry3d(est.orientation(), est.position()))

    # put points back
    if not fixed_points:
        for p in graph_points:
            p.update_position(np.array(graph_points[p].estimate()))
            p.update_normal_and_depth(force=True)
            
    mean_squared_error = opt.active_chi2()/max(num_edges,1)

    return mean_squared_error
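
The Huber delta thHuberMono = sqrt(5.991) is the square root of the 95% quantile of a chi-square distribution with 2 degrees of freedom, matching the 2D reprojection error. A quick check (assumes scipy is available):

from scipy.stats import chi2
print(chi2.ppf(0.95, df=2))   # ~5.991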
Code example #9
def pose_optimization(frame, verbose=False, rounds=10):

    is_ok = True 

    # create g2o optimizer
    opt = g2o.SparseOptimizer()
    #block_solver = g2o.BlockSolverSE3(g2o.LinearSolverCSparseSE3())
    #block_solver = g2o.BlockSolverSE3(g2o.LinearSolverDenseSE3())    
    block_solver = g2o.BlockSolverSE3(g2o.LinearSolverEigenSE3())       
    solver = g2o.OptimizationAlgorithmLevenberg(block_solver)
    opt.set_algorithm(solver)

    #robust_kernel = g2o.RobustKernelHuber(np.sqrt(5.991))  # chi-square 2 DOFs
    thHuberMono = math.sqrt(5.991)  # chi-square 2 DOFs

    point_edge_pairs = {}
    num_point_edges = 0

    v_se3 = g2o.VertexSE3Expmap()
    v_se3.set_estimate(g2o.SE3Quat(frame.Rcw, frame.tcw))
    v_se3.set_id(0)  
    v_se3.set_fixed(False)
    opt.add_vertex(v_se3)

    with MapPoint.global_lock:
        # add point vertices to graph 
        for idx, p in enumerate(frame.points):
            if p is None:  
                continue

            # reset outlier flag 
            frame.outliers[idx] = False 

            # add edge
            #print('adding edge between point ', p.id,' and frame ', frame.id)
            edge = g2o.EdgeSE3ProjectXYZOnlyPose()

            edge.set_vertex(0, opt.vertex(0))
            edge.set_measurement(frame.kpsu[idx])
            invSigma2 = Frame.feature_manager.inv_level_sigmas2[frame.octaves[idx]]
            edge.set_information(np.eye(2)*invSigma2)
            edge.set_robust_kernel(g2o.RobustKernelHuber(thHuberMono))

            edge.fx = frame.camera.fx 
            edge.fy = frame.camera.fy
            edge.cx = frame.camera.cx
            edge.cy = frame.camera.cy
            edge.Xw = p.pt[0:3]
            
            opt.add_edge(edge)

            point_edge_pairs[p] = (edge, idx) # one edge per point 
            num_point_edges += 1

    if num_point_edges < 3:
        Printer.red('pose_optimization: not enough correspondences!') 
        is_ok = False 
        return 0, is_ok, 0

    if verbose:
        opt.set_verbose(True)

    # perform 4 optimizations:
    # after each optimization we classify the observations as inliers/outliers;
    # at the next optimization, outliers are excluded, but at the end they can be classified as inliers again
    chi2Mono = 5.991 # chi-square 2 DOFs
    num_bad_point_edges = 0

    for it in range(4):
        v_se3.set_estimate(g2o.SE3Quat(frame.Rcw, frame.tcw))
        opt.initialize_optimization()        
        opt.optimize(rounds)

        num_bad_point_edges = 0

        for p, edge_pair in point_edge_pairs.items():
            edge, idx = edge_pair
            if frame.outliers[idx]:
                # outlier edges were excluded from the last optimization (level 1),
                # so their error is stale and must be recomputed before testing chi2
                edge.compute_error()

            chi2 = edge.chi2()
            
            if chi2 > chi2Mono:
                frame.outliers[idx] = True
                edge.set_level(1)             # exclude this edge from the next optimization round
                num_bad_point_edges += 1
            else:
                frame.outliers[idx] = False
                edge.set_level(0)             # include this edge again

            if it == 2:
                edge.set_robust_kernel(None)  # drop the robust kernel for the final round

        if len(opt.edges()) < 10:
            Printer.red('pose_optimization: stopped - not enough edges!')   
            is_ok = False           
            break                 
    
    print('pose optimization: available ', num_point_edges, ' points, found ', num_bad_point_edges, ' bad points')     
    if num_point_edges == num_bad_point_edges:
        Printer.red('pose_optimization: all the available correspondences are bad!')           
        is_ok = False      

    # update pose estimation
    if is_ok: 
        est = v_se3.estimate()
        # R = est.rotation().matrix()
        # t = est.translation()
        # frame.update_pose(poseRt(R, t))
        frame.update_pose(g2o.Isometry3d(est.orientation(), est.position()))

    # since we have only one frame here, each edge corresponds to a single distinct point
    num_valid_points = num_point_edges - num_bad_point_edges   
    
    mean_squared_error = opt.active_chi2()/max(num_valid_points,1)

    return mean_squared_error, is_ok, num_valid_points
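
Both optimizers weight each residual with inv_level_sigmas2[octave]. That table is defined elsewhere in the project; a sketch of how such per-octave weights are typically derived (the scale factor and level count here are illustrative assumptions):

import numpy as np

# keypoints detected at higher pyramid levels are less precisely localized,
# so their residuals are down-weighted by 1/sigma^2, with sigma growing by
# the pyramid scale factor per octave
scale_factor = 1.2    # assumed pyramid scale factor
num_levels = 8        # assumed number of pyramid levels
level_sigmas2 = np.array([(scale_factor ** i) ** 2 for i in range(num_levels)])
inv_level_sigmas2 = 1.0 / level_sigmas2
information = np.eye(2) * inv_level_sigmas2[3]   # information matrix for an octave-3 keypoint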
Code example #10
File: slam.py  Project: daoran/pyslam-features
    def track(self, img, frame_id, timestamp=None):
        Printer.cyan('@tracking')
        time_start = time.time()

        # check image size is coherent with camera params
        print("img.shape: ", img.shape)
        print("camera ", self.camera.height, " x ", self.camera.width)
        assert img.shape[0:2] == (self.camera.height, self.camera.width)
        if timestamp is not None:
            print('timestamp: ', timestamp)

        self.timer_main_track.start()

        # build current frame
        self.timer_frame.start()
        f_cur = Frame(img, self.camera, timestamp=timestamp)
        self.f_cur = f_cur
        print("frame: ", f_cur.id)
        self.timer_frame.refresh()

        # reset indexes of matches
        self.idxs_ref = []
        self.idxs_cur = []

        if self.state == SlamState.NO_IMAGES_YET:
            # push the first frame into the initializer
            self.intializer.init(f_cur)
            self.state = SlamState.NOT_INITIALIZED
            return  # EXIT (jump to second frame)

        if self.state == SlamState.NOT_INITIALIZED:
            # try to initialize
            initializer_output, intializer_is_ok = self.intializer.initialize(
                f_cur, img)
            if intializer_is_ok:
                kf_ref = initializer_output.kf_ref
                kf_cur = initializer_output.kf_cur
                # add the two initialized frames in the map
                self.map.add_frame(kf_ref)     # add first frame in map and update its frame id
                self.map.add_frame(kf_cur)     # add second frame in map and update its frame id
                # add the two initialized frames as keyframes in the map
                self.map.add_keyframe(kf_ref)  # add first keyframe in map and update its kid
                self.map.add_keyframe(kf_cur)  # add second keyframe in map and update its kid
                kf_ref.init_observations()
                kf_cur.init_observations()
                # add points in map
                new_pts_count, _, _ = self.map.add_points(
                    initializer_output.pts,
                    None,
                    kf_cur,
                    kf_ref,
                    initializer_output.idxs_cur,
                    initializer_output.idxs_ref,
                    img,
                    do_check=False)
                Printer.green("map: initialized %d new points" %
                              (new_pts_count))
                # update covisibility graph connections
                kf_ref.update_connections()
                kf_cur.update_connections()

                # update tracking info
                self.f_cur = kf_cur
                self.f_cur.kf_ref = kf_ref
                self.kf_ref = kf_cur  # set reference keyframe
                self.kf_last = kf_cur  # set last added keyframe
                self.map.local_map.update(self.kf_ref)
                self.state = SlamState.OK

                self.update_tracking_history()
                self.motion_model.update_pose(kf_cur.timestamp,
                                              kf_cur.position,
                                              kf_cur.quaternion)
                self.motion_model.is_ok = False  # after initialization you cannot use motion model for next frame pose prediction (time ids of initialized poses may not be consecutive)

                self.intializer.reset()

                if kUseDynamicDesDistanceTh:
                    self.descriptor_distance_sigma = self.dyn_config.update_descriptor_stat(
                        kf_ref, kf_cur, initializer_output.idxs_ref,
                        initializer_output.idxs_cur)
            return  # EXIT (jump to next frame)

        # get previous frame in map as reference
        f_ref = self.map.get_frame(-1)
        #f_ref_2 = self.map.get_frame(-2)
        self.f_ref = f_ref

        # add current frame f_cur to map
        self.map.add_frame(f_cur)
        self.f_cur.kf_ref = self.kf_ref

        # reset pose state flag
        self.pose_is_ok = False

        with self.map.update_lock:
            # check for map point replacements in previous frame f_ref (some points might have been replaced by local mapping during point fusion)
            self.f_ref.check_replaced_map_points()

            if kUseDynamicDesDistanceTh:
                print('descriptor_distance_sigma: ',
                      self.descriptor_distance_sigma)
                self.local_mapping.descriptor_distance_sigma = self.descriptor_distance_sigma

            # update the old (velocity-based) motion model                                    # c1=ref_ref, c2=ref, c3=cur;  c=cur, r=ref
            #self.velocity = np.dot(f_ref.pose, inv_T(f_ref_2.pose))                          # Tc2c1 = Tc2w * Twc1   (predicted Tcr)
            #self.predicted_pose = g2o.Isometry3d(np.dot(self.velocity, f_ref.pose))          # Tc3w = Tc2c1 * Tc2w   (predicted Tcw)

            # set initial guess for current pose optimization
            if kUseMotionModel and self.motion_model.is_ok:
                print('using motion model for next pose prediction')
                # update f_ref pose according to its reference keyframe (the pose of the reference keyframe could be updated by local mapping)
                self.f_ref.update_pose(
                    self.tracking_history.relative_frame_poses[-1] *
                    self.f_ref.kf_ref.isometry3d)
                # predict pose by using motion model
                self.predicted_pose, _ = self.motion_model.predict_pose(
                    timestamp, self.f_ref.position, self.f_ref.orientation)
                f_cur.update_pose(self.predicted_pose)
            else:
                print('setting f_cur.pose <-- f_ref.pose')
                # use reference frame pose as initial guess
                f_cur.update_pose(f_ref.pose)

            # track camera motion from f_ref to f_cur
            self.track_previous_frame(f_ref, f_cur)

            if not self.pose_is_ok:
                # if previous track didn't go well then track the camera motion from kf_ref to f_cur
                self.track_keyframe(self.kf_ref, f_cur)

            # now, having a better estimate of f_cur pose, we can find more map point matches:
            # find matches between {local map points} (points in the local map) and {unmatched keypoints of f_cur}
            if self.pose_is_ok:
                self.track_local_map(f_cur)

        # end block {with self.map.update_lock:}

        # TODO: add relocalization

        # HACK: since local mapping is not fast enough in python (and tracking is not in real-time) => give local mapping more time to process stuff
        self.wait_for_local_mapping()  # N.B.: this must be outside the `with self.map.update_lock:` block

        with self.map.update_lock:

            # update slam state
            if self.pose_is_ok:
                self.state = SlamState.OK
            else:
                self.state = SlamState.LOST
                Printer.red('tracking failure')

            # update motion model state
            self.motion_model.is_ok = self.pose_is_ok

            if self.pose_is_ok:  # if tracking was successful

                # update motion model
                self.motion_model.update_pose(timestamp, f_cur.position,
                                              f_cur.quaternion)

                f_cur.clean_vo_map_points()

                # do we need a new KeyFrame?
                need_new_kf = self.need_new_keyframe(f_cur)

                if need_new_kf:
                    Printer.green('adding new KF with frame id %d' % (f_cur.id))
                    if kLogKFinfoToFile:
                        self.kf_info_logger.info('adding new KF with frame id %d' % (f_cur.id))
                    kf_new = KeyFrame(f_cur, img)
                    self.kf_last = kf_new
                    self.kf_ref = kf_new
                    f_cur.kf_ref = kf_new

                    self.local_mapping.push_keyframe(kf_new)
                    if not kLocalMappingOnSeparateThread:
                        self.local_mapping.do_local_mapping()
                else:
                    Printer.yellow('NOT KF')

                # From ORBSLAM2:
                # Clean outliers once keyframe generation has been managed:
                # we allow points with high innovation (considered outliers by the Huber Function)
                # pass to the new keyframe, so that bundle adjustment will finally decide
                # if they are outliers or not. We don't want next frame to estimate its position
                # with those points so we discard them in the frame.
                f_cur.clean_outlier_map_points()

            if self.f_cur.kf_ref is None:
                self.f_cur.kf_ref = self.kf_ref

            self.update_tracking_history()  # must stay after having updated the slam state (self.state)

            Printer.green("map: %d points, %d keyframes" %
                          (self.map.num_points(), self.map.num_keyframes()))
            #self.update_history()

            self.timer_main_track.refresh()

            duration = time.time() - time_start
            print('tracking duration: ', duration)
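
A hypothetical driver loop showing how track() is typically fed, reusing the dataset interface from the dataset.py excerpt above; the loop structure, the stop condition, and the dataset/slam objects are assumptions, not project code:

frame_id = 0
while True:
    img = dataset.getImageColor(frame_id)
    if img is None:   # getImageColor returns None on failure (see above)
        break
    slam.track(img, frame_id, timestamp=None)
    frame_id += 1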
Code example #11
File: slam.py  Project: daoran/pyslam-features
    def track_previous_frame(self, f_ref, f_cur):
        print('>>>> tracking previous frame ...')
        is_search_frame_by_projection_failure = False
        use_search_frame_by_projection = self.motion_model.is_ok and kUseSearchFrameByProjection and kUseMotionModel

        if use_search_frame_by_projection:
            # search frame by projection: match map points observed in f_ref with keypoints of f_cur
            print('search frame by projection')
            search_radius = Parameters.kMaxReprojectionDistanceFrame
            f_cur.reset_points()
            self.timer_seach_frame_proj.start()
            idxs_ref, idxs_cur, num_found_map_pts = search_frame_by_projection(
                f_ref,
                f_cur,
                max_reproj_distance=search_radius,
                max_descriptor_distance=self.descriptor_distance_sigma)
            self.timer_seach_frame_proj.refresh()
            self.num_matched_kps = len(idxs_cur)
            print("# matched map points in prev frame: %d " %
                  self.num_matched_kps)

            # if not enough map point matches consider a larger search radius
            if self.num_matched_kps < Parameters.kMinNumMatchedFeaturesSearchFrameByProjection:
                f_cur.remove_frame_views(idxs_cur)
                f_cur.reset_points()
                idxs_ref, idxs_cur, num_found_map_pts = search_frame_by_projection(
                    f_ref,
                    f_cur,
                    max_reproj_distance=2 * search_radius,
                    max_descriptor_distance=0.5 *
                    self.descriptor_distance_sigma)
                self.num_matched_kps = len(idxs_cur)
                Printer.orange(
                    "# matched map points in prev frame (wider search): %d " %
                    self.num_matched_kps)

            if kDebugDrawMatches:
                img_matches = draw_feature_matches(f_ref.img,
                                                   f_cur.img,
                                                   f_ref.kps[idxs_ref],
                                                   f_cur.kps[idxs_cur],
                                                   f_ref.sizes[idxs_ref],
                                                   f_cur.sizes[idxs_cur],
                                                   horizontal=False)
                cv2.imshow('tracking frame by projection - matches',
                           img_matches)
                cv2.waitKey(1)

            if self.num_matched_kps < Parameters.kMinNumMatchedFeaturesSearchFrameByProjection:
                f_cur.remove_frame_views(idxs_cur)
                f_cur.reset_points()
                is_search_frame_by_projection_failure = True
                Printer.red(
                    'Not enough matches in search frame by projection: ',
                    self.num_matched_kps)
            else:
                # search frame by projection was successful
                if kUseDynamicDesDistanceTh:
                    self.descriptor_distance_sigma = self.dyn_config.update_descriptor_stat(
                        f_ref, f_cur, idxs_ref, idxs_cur)

                # store tracking info (for possible reuse)
                self.idxs_ref = idxs_ref
                self.idxs_cur = idxs_cur

                # f_cur pose optimization 1:
                # here, we use the f_cur pose as a first guess and exploit the matched map points of f_ref
                self.pose_optimization(f_cur, 'proj-frame-frame')
                # update matched map points; discard outliers detected in last pose optimization
                num_matched_points = f_cur.clean_outlier_map_points()
                print('     # num_matched_map_points: %d' %
                      (self.num_matched_map_points))
                #print('     # matched points: %d' % (num_matched_points) )

                if not self.pose_is_ok or self.num_matched_map_points < kNumMinInliersPoseOptimizationTrackFrame:
                    Printer.red(
                        'failure in tracking previous frame, # matched map points: ',
                        self.num_matched_map_points)
                    self.pose_is_ok = False

        if not use_search_frame_by_projection or is_search_frame_by_projection_failure:
            self.track_reference_frame(f_ref, f_cur, 'match-frame-frame')
Code example #12
def search_frame_for_triangulation_test(f1, f2, img2, img1 = None):   

    idxs2_out = []
    idxs1_out = []
    lines_out = [] 
    num_found_matches = 0
    img2_epi = None     

    if __debug__:
        timer = Timer()
        timer.start()

    O1w = f1.Ow
    O2w = f2.Ow 
    # compute epipoles
    e1,_ = f1.project_point(O2w)  # in first frame  
    e2,_ = f2.project_point(O1w)  # in second frame  
    #print('e1: ', e1)
    #print('e2: ', e2)    
    
    baseline = np.linalg.norm(O1w-O2w) 
        
    # if the translation is too small we cannot triangulate 
    # if False: 
    #     if baseline < Parameters.kMinTraslation:  # we assume the Initializer has been used for building the first map 
    #         Printer.red("search for triangulation: impossible with almost zero translation!")
    #         return idxs1_out, idxs2_out, num_found_matches, img2_epi # EXIT
    # else:
    medianDepthF2 = f2.compute_points_median_depth()
    ratioBaselineDepth = baseline/medianDepthF2
    if ratioBaselineDepth < Parameters.kMinRatioBaselineDepth:  
        Printer.red("search for triangulation: impossible with too low ratioBaselineDepth!")
        return idxs1_out, idxs2_out, num_found_matches, img2_epi # EXIT           

    # compute the fundamental matrix between the two frames by using their estimated poses 
    F12, H21 = computeF12(f1, f2)

    idxs1 = []
    for i, p in enumerate(f1.get_points()): 
        if p is None:  # we consider just unmatched keypoints 
            kp = f1.kpsu[i]
            scale_factor = Frame.feature_manager.scale_factors[f1.octaves[i]]
            # discard points which are too close to the epipole 
            if np.linalg.norm(kp-e1) < Parameters.kMinDistanceFromEpipole * scale_factor:             
                continue    
            idxs1.append(i)      
    kpsu1 = f1.kpsu[idxs1]
    
    if __debug__:
        print('search_frame_for_triangulation - timer1: ', timer.elapsed())  
        timer.start()        

    # compute epipolar lines in second image 
    lines2 = cv2.computeCorrespondEpilines(kpsu1.reshape(-1,1,2), 2, F12)  
    lines2 = lines2.reshape(-1,3)
    
    xs2_inf = np.dot(H21, add_ones(kpsu1).T).T # x2inf = H21 * x1, where x2inf is the corresponding point according to the infinite homography [Hartley Zisserman, pg. 339]
    xs2_inf = xs2_inf[:, 0:2] / xs2_inf[:, 2:]  
    line_edges = [ epiline_to_end_points(line, e2, x2_inf, f2.width) for line, x2_inf in zip(lines2, xs2_inf) ] 
    #print("line_edges: ", line_edges)

    if __debug__:
        print('search_frame_for_triangulation - timer3: ', timer.elapsed())
        assert(len(line_edges) == len(idxs1))    
        timer.start()

    len_des2 = len(f2.des)
    flag_match = np.full(len_des2, False, dtype=bool)
    dist_match = np.zeros(len_des2)
    index_match = np.full(len_des2, 0, dtype=int)
    for i, idx in enumerate(idxs1):   # N.B.: a point in f1 can be matched to more than one point in f2, we avoid this by caching matches with f2 points
        f2_idx, dist = find_matches_along_line(f2, e2, line_edges[i], f1.des[idx])
        if f2_idx > -1:
            if not flag_match[f2_idx]: # new match
                flag_match[f2_idx] = True 
                dist_match[f2_idx] = dist 
                idxs2_out.append(f2_idx)
                idxs1_out.append(idx)
                index_match[f2_idx] = len(idxs2_out)-1                
                assert(f2.get_point_match(f2_idx) is None)            
                assert(f1.get_point_match(idx) is None)
                if __debug__:
                    lines_out.append(line_edges[i])
            else: # already matched
                if dist < dist_match[f2_idx]:  # update in case of a smaller distance 
                    dist_match[f2_idx] = dist 
                    index = index_match[f2_idx]  
                    idxs1_out[index] = idx    
                    if __debug__:
                        lines_out[index] = line_edges[i]                              
 
    num_found_matches = len(idxs1_out)
    assert(len(idxs1_out) == len(idxs2_out)) 

    if __debug__:   
        #print("num found matches: ", num_found_matches)
        if True: 
            kpsu2 = f2.kpsu[idxs2_out]
            img2_epi = draw_lines(img2.copy(), lines_out, kpsu2)
            #cv2.imshow("epipolar lines",img2_epi)
            #cv2.waitKey(0)

    if __debug__:
        print('search_frame_for_triangulation - timer4: ', timer.elapsed())

    return idxs1_out, idxs2_out, num_found_matches, img2_epi
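
epiline_to_end_points is not shown in this excerpt (the real helper also uses the epipole and the infinite-homography point to orient the segment). The core geometric step is intersecting the line a*x + b*y + c = 0 with the image borders; a minimal sketch, assuming a non-vertical line (b != 0):

def epiline_end_points_sketch(line, width):
    a, b, c = line
    y0 = -c / b                 # intersection with the left border x = 0
    y1 = -(c + a * width) / b   # intersection with the right border x = width
    return (0, int(round(y0))), (int(width), int(round(y1)))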
Code example #13
def search_and_fuse(points, keyframe, 
                    max_reproj_distance=Parameters.kMaxReprojectionDistanceFuse,
                    max_descriptor_distance = 0.5*Parameters.kMaxDescriptorDistance,
                    ratio_test=Parameters.kMatchRatioTestMap):
    #max_descriptor_distance = 0.5 * Parameters.kMaxDescriptorDistance 
    
    fused_pts_count = 0
    Ow = keyframe.Ow 
    if len(points) == 0:
        Printer.red('search_and_fuse - no points')        
        return fused_pts_count  # 0 here; keeps the return type consistent
        
    # get all matched points of keyframe 
    good_pts_idxs = np.flatnonzero(points!=None) 
    good_pts = points[good_pts_idxs] 
     
    if len(good_pts_idxs) == 0:
        Printer.red('search_and_fuse - no matched points')
        return fused_pts_count  # 0 here; keeps the return type consistent
    
    # check if points are visible 
    good_pts_visible, good_projs, good_depths, good_dists = keyframe.are_visible(good_pts)
    
    if len(good_pts_visible) == 0:
        Printer.red('search_and_fuse - no visible points')
        return fused_pts_count  # 0 here; keeps the return type consistent
    
    predicted_levels = predict_detection_levels(good_pts, good_dists) 
    kp_scale_factors = Frame.feature_manager.scale_factors[predicted_levels]              
    radiuses = max_reproj_distance * kp_scale_factors     
    kd_idxs = keyframe.kd.query_ball_point(good_projs, radiuses)    

    #for i, p in enumerate(points):
    for j, (i, p) in enumerate(zip(good_pts_idxs, good_pts)):
                
        if not good_pts_visible[j] or p.is_bad:     # point not visible in frame or point is bad 
            #print('p[%d] visible: %d, bad: %d' % (i, int(good_pts_visible[j]), int(p.is_bad))) 
            continue  
                  
        if p.is_in_keyframe(keyframe):    # we already matched this map point to this keyframe
            #print('p[%d] already in keyframe' % (i)) 
            continue
                
        # predicted_level = p.predict_detection_level(good_dists[j])         
        # kp_scale_factor = Frame.feature_manager.scale_factors[predicted_level]              
        # radius = max_reproj_distance * kp_scale_factor     
        predicted_level = predicted_levels[j]
        
        #print('p[%d] radius: %f' % (i,radius))         
            
        best_dist = math.inf 
        best_dist2 = math.inf
        best_level = -1 
        best_level2 = -1               
        best_kd_idx = -1        
            
        # find closest keypoints of frame        
        proj = good_projs[j]
        #for kd_idx in keyframe.kd.query_ball_point(proj, radius):  
        for kd_idx in kd_idxs[j]:             
                
            # check detection level     
            kp_level = keyframe.octaves[kd_idx]    
            if (kp_level<predicted_level-1) or (kp_level>predicted_level):   
                #print('p[%d] wrong predicted level **********************************' % (i))                       
                continue
        
            # check the reprojection error     
            kp = keyframe.kpsu[kd_idx]
            invSigma2 = Frame.feature_manager.inv_level_sigmas2[kp_level]                
            err = proj - kp       
            chi2 = np.inner(err,err)*invSigma2           
            if chi2 > Parameters.kChi2Mono: # chi-square 2 DOFs  (Hartley Zisserman pg 119)
                #print('p[%d] big reproj err %f **********************************' % (i,chi2))
                continue                  
                            
            descriptor_dist = p.min_des_distance(keyframe.des[kd_idx])
            #print('p[%d] descriptor_dist %f **********************************' % (i,descriptor_dist))            
            
            #if descriptor_dist < max_descriptor_distance and descriptor_dist < best_dist:     
            if descriptor_dist < best_dist:                                      
                best_dist2 = best_dist
                best_level2 = best_level
                best_dist = descriptor_dist
                best_level = kp_level
                best_kd_idx = kd_idx   
            else: 
                if descriptor_dist < best_dist2:  # N.O.
                    best_dist2 = descriptor_dist       
                    best_level2 = kp_level                                   
                                                            
        #if best_kd_idx > -1 and best_dist < max_descriptor_distance:
        if best_dist < max_descriptor_distance:         
            # apply match distance ratio test only if the best and second are in the same scale level 
            if (best_level2 == best_level) and (best_dist > best_dist2 * ratio_test):  # N.O.
                #print('p[%d] best_dist > best_dist2 * ratio_test **********************************' % (i))
                continue                
            p_keyframe = keyframe.get_point_match(best_kd_idx)
            # if there is already a map point replace it otherwise add a new point
            if p_keyframe is not None:
                if not p_keyframe.is_bad:
                    if p_keyframe.num_observations > p.num_observations:
                        p.replace_with(p_keyframe)
                    else:
                        p_keyframe.replace_with(p)                  
            else:
                p.add_observation(keyframe, best_kd_idx) 
                #p.update_info()    # done outside!
            fused_pts_count += 1                  
    return fused_pts_count     
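
keyframe.kd above is assumed to be a KD-tree (e.g. scipy.spatial.cKDTree) over the keyframe's undistorted keypoint coordinates; query_ball_point accepts one radius per query point (with a reasonably recent scipy), which is how the per-point radii are applied in a single call. A minimal sketch with stand-in data:

import numpy as np
from scipy.spatial import cKDTree

kps = np.random.rand(500, 2) * 640.0      # stand-in keypoint coordinates
kd = cKDTree(kps)
projs = np.random.rand(10, 2) * 640.0     # stand-in map-point projections
radiuses = np.full(len(projs), 15.0)      # one search radius per projection
neighbor_idxs = kd.query_ball_point(projs, radiuses)   # list of index lists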
Code example #14
    def initialize(self, f_cur, img_cur):

        if self.num_failures > kNumOfFailuresAfterWichNumMinTriangulatedPointsIsHalved:
            self.num_min_triangulated_points = 0.5 * Parameters.kInitializerNumMinTriangulatedPoints
            self.num_failures = 0
            Printer.orange(
                'Initializer: halved min num triangulated features to ',
                self.num_min_triangulated_points)

        # prepare the output
        out = InitializerOutput()
        is_ok = False

        #print('num frames: ', len(self.frames))

        # if too many frames have passed, move the current idx_f_ref forward
        # this is just one very simple policy that can be used
        if self.f_ref is not None:
            if f_cur.id - self.f_ref.id > kMaxIdDistBetweenIntializingFrames:
                self.f_ref = self.frames[-1]  # take last frame in the buffer
                #self.idx_f_ref = len(self.frames)-1  # take last frame in the buffer
                #self.idx_f_ref = self.frames.index(self.f_ref)  # since we are using a deque, the code of the previous commented line is not valid anymore
                #print('*** idx_f_ref:',self.idx_f_ref)
        #self.f_ref = self.frames[self.idx_f_ref]
        f_ref = self.f_ref
        #print('ref fid: ',self.f_ref.id,', curr fid: ', f_cur.id, ', idxs_ref: ', self.idxs_ref)

        # append current frame
        self.frames.append(f_cur)

        # if the current frames do not have enough features, exit
        if len(f_ref.kps) < self.num_min_features or len(
                f_cur.kps) < self.num_min_features:
            Printer.red('Initializer: ko - not enough features!')
            self.num_failures += 1
            return out, is_ok

        # find keypoint matches
        idxs_cur, idxs_ref = match_frames(f_cur, f_ref,
                                          kFeatureMatchRatioTestInitializer)

        print('|------------')
        #print('deque ids: ', [f.id for f in self.frames])
        print('initializing frames ', f_cur.id, ', ', f_ref.id)
        print("# keypoint matches: ", len(idxs_cur))

        Trc = self.estimatePose(f_ref.kpsn[idxs_ref], f_cur.kpsn[idxs_cur])
        Tcr = inv_T(Trc)  # Tcr w.r.t. ref frame
        f_ref.update_pose(np.eye(4))
        f_cur.update_pose(Tcr)

        # remove outliers from keypoint matches by using the mask computed with inter frame pose estimation
        mask_idxs = (self.mask_match.ravel() == 1)
        self.num_inliers = sum(mask_idxs)
        print('# keypoint inliers: ', self.num_inliers)
        idx_cur_inliers = idxs_cur[mask_idxs]
        idx_ref_inliers = idxs_ref[mask_idxs]

        # create a temp map for initializing
        map = Map()
        f_ref.reset_points()
        f_cur.reset_points()

        #map.add_frame(f_ref)
        #map.add_frame(f_cur)

        kf_ref = KeyFrame(f_ref)
        kf_cur = KeyFrame(f_cur, img_cur)
        map.add_keyframe(kf_ref)
        map.add_keyframe(kf_cur)

        pts3d, mask_pts3d = triangulate_normalized_points(
            kf_cur.Tcw, kf_ref.Tcw, kf_cur.kpsn[idx_cur_inliers],
            kf_ref.kpsn[idx_ref_inliers])

        new_pts_count, mask_points, _ = map.add_points(
            pts3d,
            mask_pts3d,
            kf_cur,
            kf_ref,
            idx_cur_inliers,
            idx_ref_inliers,
            img_cur,
            do_check=True,
            cos_max_parallax=Parameters.kCosMaxParallaxInitializer)
        print("# triangulated points: ", new_pts_count)

        if new_pts_count > self.num_min_triangulated_points:
            err = map.optimize(verbose=False,
                               rounds=20,
                               use_robust_kernel=True)
            print("init optimization error^2: %f" % err)

            num_map_points = len(map.points)
            print("# map points:   %d" % num_map_points)
            is_ok = num_map_points > self.num_min_triangulated_points

            out.pts = pts3d[mask_points]
            out.kf_cur = kf_cur
            out.idxs_cur = idx_cur_inliers[mask_points]
            out.kf_ref = kf_ref
            out.idxs_ref = idx_ref_inliers[mask_points]

            # set the scene median depth to equal desired_median_depth
            desired_median_depth = Parameters.kInitializerDesiredMedianDepth
            median_depth = kf_cur.compute_points_median_depth(out.pts)
            depth_scale = desired_median_depth / median_depth
            print('forcing current median depth ', median_depth, ' to ',
                  desired_median_depth)

            out.pts[:, :3] = out.pts[:, :3] * depth_scale  # scale points
            tcw = kf_cur.tcw * depth_scale  # scale initial baseline
            kf_cur.update_translation(tcw)

        map.delete()

        if is_ok:
            Printer.green('Initializer: ok!')
        else:
            self.num_failures += 1
            Printer.red('Initializer: ko!')
        print('|------------')
        return out, is_ok
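
A minimal sketch of the final depth-normalization step above: map points and camera translation must be rescaled together, otherwise the map and the trajectory would disagree on scale. All numbers here are illustrative:

import numpy as np

desired_median_depth = 20.0                       # illustrative target depth
median_depth = 5.0                                # e.g. from compute_points_median_depth()
depth_scale = desired_median_depth / median_depth
pts = np.random.rand(50, 3)                       # stand-in triangulated points
pts[:, :3] *= depth_scale                         # scale the points ...
tcw = np.array([0.1, 0.0, 0.05]) * depth_scale    # ... and the baseline consistently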