Example #1
    def compute(self, frame, kps=None, mask=None):  # kps and mask are unused placeholder inputs, kept for interface compatibility
        with self.lock:
            if self.frame is not frame:
                Printer.orange('WARNING: DELF is recomputing both kps and des on last input frame', frame.shape)
                self.detectAndCompute(frame)
            return self.kps, self.des
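The method above follows a simple cache-with-lock pattern: descriptors are recomputed only when a new frame object arrives, otherwise the cached results are returned. A minimal self-contained sketch of that pattern (the class and detector names here are illustrative, not pySLAM's API):

import threading
import cv2

class CachedDetector:
    def __init__(self, detector=None):
        self.detector = detector or cv2.ORB_create()
        self.lock = threading.Lock()
        self.frame = None
        self.kps, self.des = None, None

    def detectAndCompute(self, frame):
        self.frame = frame
        self.kps, self.des = self.detector.detectAndCompute(frame, None)
        return self.kps, self.des

    def compute(self, frame, kps=None, mask=None):
        with self.lock:
            if self.frame is not frame:  # identity check: recompute only for a new frame object
                self.detectAndCompute(frame)
            return self.kps, self.des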
                
Example #2
    def init_feature_tracker(self, tracker):
        Frame.set_tracker(tracker)  # set the static field of the class
        if kUseEssentialMatrixFitting:
            Printer.orange('forcing feature matcher ratio_test to 0.8')
            tracker.matcher.ratio_test = 0.8
        if tracker.tracker_type == FeatureTrackerTypes.LK:
            raise ValueError(
                "You cannot use the Lucas-Kanade tracker in this SLAM approach!")
Example #3
    def compute(self, img, kps, mask=None):
        Printer.orange(
            'WARNING: you are supposed to call detectAndCompute() for ORB2 instead of compute()'
        )
        Printer.orange(
            'WARNING: ORB2 is recomputing both kps and des on input frame',
            img.shape)
        return self.detectAndCompute(img)
Example #4
    def set_parent(self, keyframe):
        with self._lock_connections:
            if self == keyframe:
                if __debug__:
                    Printer.orange(
                        'KeyFrameGraph.set_parent - trying to set self as parent'
                    )
                return
            self.parent = keyframe
            keyframe.add_child(self)
Example #5
    def compute(self, frame):
        if self.first_level == -1:
            frame = self.createBaseImg(frame)  # replace the image with the new level -1 (up-resized image)
        if self.pyramid_type == PyramidType.RESIZE:
            return self.computeResize(frame)
        elif self.pyramid_type == PyramidType.RESIZE_AND_FILTER:
            return self.computeResizeAndFilter(frame)
        elif self.pyramid_type == PyramidType.GAUSS_PYRAMID:
            return self.computeGauss(frame)
        else:
            Printer.orange('Pyramid - unknown type')
            return self.computeResizePyramid(frame)
Example #6
    def __init__(self, frame, img=None):
        KeyFrameGraph.__init__(self)
        Frame.__init__(
            self,
            img=None,
            camera=frame.camera,
            pose=frame.pose,
            id=frame.id,
            timestamp=frame.timestamp
        )  # here we MUST have img=None in order to avoid recomputing keypoint info

        if frame.img is not None:
            self.img = frame.img  # this is already a copy of an image
        else:
            if img is not None:
                self.img = img.copy()

        self.map = None

        self.is_keyframe = True
        self.kid = None  # keyframe id

        self._is_bad = False
        self.to_be_erased = False

        # pose relative to parent (this is computed when bad flag is activated)
        self._pose_Tcp = CameraPose()

        # share keypoint info with the frame (computed once at frame initialization and never changed afterwards)
        self.kps = frame.kps  # keypoint coordinates                  [Nx2]
        self.kpsu = frame.kpsu  # [u]ndistorted keypoint coordinates    [Nx2]
        self.kpsn = frame.kpsn  # [n]ormalized keypoint coordinates     [Nx2] (Kinv * [kp,1])
        self.octaves = frame.octaves  # keypoint octaves                      [Nx1]
        self.sizes = frame.sizes  # keypoint sizes                        [Nx1]
        self.angles = frame.angles  # keypoint angles                       [Nx1]
        self.des = frame.des  # keypoint descriptors                  [NxD] where D is the descriptor length

        if hasattr(frame, '_kd'):
            self._kd = frame._kd
        else:
            Printer.orange('KeyFrame %d computing kdtree for input frame %d' %
                           (self.id, frame.id))
            self._kd = cKDTree(self.kpsu)

        # map points information arrays (copy points coming from frame)
        self.points = frame.get_points()  # map points => self.points[idx] is the map point matched with self.kps[idx] (if not None)
        self.outliers = np.full(self.kpsu.shape[0], False, dtype=bool)  # used just in propagate_map_point_matches()
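The _kd member above is a SciPy cKDTree built over the undistorted keypoint coordinates, so later stages can run fast radius queries in pixel space. A minimal sketch of that usage (the keypoint data below is made up for illustration):

import numpy as np
from scipy.spatial import cKDTree

kpsu = np.random.rand(100, 2) * np.array([640.0, 480.0])  # fake [Nx2] undistorted keypoint coordinates

kd = cKDTree(kpsu)

# indices of all keypoints within 20 pixels of the query point
idxs = kd.query_ball_point([320.0, 240.0], r=20.0)
print('neighbours within 20 px:', idxs)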
Example #7
def local_bundle_adjustment(keyframes, points, keyframes_ref=[], fixed_points=False, verbose=False, rounds=10, abort_flag=g2o.Flag(), map_lock=None):

    # create g2o optimizer
    opt = g2o.SparseOptimizer()
    block_solver = g2o.BlockSolverSE3(g2o.LinearSolverCSparseSE3())
    #block_solver = g2o.BlockSolverSE3(g2o.LinearSolverEigenSE3())  
    #block_solver = g2o.BlockSolverSE3(g2o.LinearSolverCholmodSE3())          
    solver = g2o.OptimizationAlgorithmLevenberg(block_solver)
    opt.set_algorithm(solver)
    opt.set_force_stop_flag(abort_flag)

    #robust_kernel = g2o.RobustKernelHuber(np.sqrt(5.991))  # chi-square 2 DOFs
    thHuberMono = math.sqrt(5.991)  # chi-square 2 DOFs

    graph_keyframes, graph_points = {}, {}

    # add frame vertices to graph
    for kf in keyframes:    
        if kf.is_bad:
            continue 
        #print('adding vertex frame ', f.id, ' to graph')
        se3 = g2o.SE3Quat(kf.Rcw, kf.tcw)
        v_se3 = g2o.VertexSE3Expmap()
        v_se3.set_estimate(se3)
        v_se3.set_id(kf.kid * 2)  # even ids  (use f.kid here!)
        v_se3.set_fixed(kf.kid==0)  # (use f.kid here!)
        opt.add_vertex(v_se3)
        graph_keyframes[kf] = v_se3     
           
        # confirm pose correctness
        #est = v_se3.estimate()
        #assert np.allclose(pose[0:3, 0:3], est.rotation().matrix())
        #assert np.allclose(pose[0:3, 3], est.translation())
        
    # add reference frame vertices to graph
    for kf in keyframes_ref:    
        if kf.is_bad:
            continue 
        #print('adding vertex frame ', f.id, ' to graph')
        se3 = g2o.SE3Quat(kf.Rcw, kf.tcw)
        v_se3 = g2o.VertexSE3Expmap()
        v_se3.set_estimate(se3)
        v_se3.set_id(kf.kid * 2)  # even ids  (use f.kid here!)
        v_se3.set_fixed(True)
        opt.add_vertex(v_se3)
        graph_keyframes[kf] = v_se3             

    graph_edges = {}
    num_edges = 0
    num_bad_edges = 0

    # add point vertices to graph 
    for p in points:
        assert(p is not None)
        if p.is_bad:  # do not consider bad points             
            continue  
        if not any([f in keyframes for f in p.keyframes()]):  
            Printer.orange('point %d without a viewing keyframe in input keyframes!!' %(p.id))
            #Printer.orange('         keyframes: ',p.observations_string())
            continue
        #print('adding vertex point ', p.id,' to graph')
        v_p = g2o.VertexSBAPointXYZ()    
        v_p.set_id(p.id * 2 + 1)  # odd ids
        v_p.set_estimate(p.pt[0:3])
        v_p.set_marginalized(True)
        v_p.set_fixed(fixed_points)
        opt.add_vertex(v_p)
        graph_points[p] = v_p

        # add edges
        for kf, p_idx in p.observations():
            if kf.is_bad:
                continue 
            if kf not in graph_keyframes:
                continue
            if __debug__:      
                p_f = kf.get_point_match(p_idx)
                if p_f != p:
                    print('frame: ', kf.id, ' missing point ', p.id, ' at index p_idx: ', p_idx)                    
                    if p_f is not None:
                        print('p_f:', p_f)
                    print('p:',p)
            assert(kf.get_point_match(p_idx) is p)            
            #print('adding edge between point ', p.id,' and frame ', f.id)
            edge = g2o.EdgeSE3ProjectXYZ()
            edge.set_vertex(0, v_p)
            edge.set_vertex(1, graph_keyframes[kf])
            edge.set_measurement(kf.kpsu[p_idx])
            invSigma2 = Frame.feature_manager.inv_level_sigmas2[kf.octaves[p_idx]]
            edge.set_information(np.eye(2)*invSigma2)
            edge.set_robust_kernel(g2o.RobustKernelHuber(thHuberMono))

            edge.fx = kf.camera.fx 
            edge.fy = kf.camera.fy
            edge.cx = kf.camera.cx
            edge.cy = kf.camera.cy

            opt.add_edge(edge)

            graph_edges[edge] = (p,kf,p_idx) # one has kf.points[p_idx] == p
            num_edges += 1            

    if verbose:
        opt.set_verbose(True)

    if abort_flag.value:
        return -1,0

    # initial optimization 
    opt.initialize_optimization()
    opt.optimize(5)
    
    chi2Mono = 5.991  # chi-square threshold, 2 DOFs (also used in the final outlier scan below)

    if not abort_flag.value:

        # check inliers observation 
        for edge, edge_data in graph_edges.items(): 
            p = edge_data[0]
            if p.is_bad:
                continue 
            if edge.chi2() > chi2Mono or not edge.is_depth_positive():
                edge.set_level(1)
                num_bad_edges += 1
            edge.set_robust_kernel(None)

        # optimize again without outliers 
        opt.initialize_optimization()
        opt.optimize(rounds)

    # search for final outlier observations and clean map  
    num_bad_observations = 0  # final bad observations
    outliers_data = []
    for edge, edge_data in graph_edges.items(): 
        p, kf, p_idx = edge_data
        if p.is_bad:
            continue         
        assert(kf.get_point_match(p_idx) is p) 
        if edge.chi2() > chi2Mono or not edge.is_depth_positive():         
            num_bad_observations += 1
            outliers_data.append(edge_data)       

    if map_lock is None: 
        map_lock = RLock() # put a fake lock 
        
    with map_lock:      
        # remove outlier observations 
        for d in outliers_data:
            p, kf, p_idx = d
            p_f = kf.get_point_match(p_idx)
            if p_f is not None:
                assert(p_f is p)
                p.remove_observation(kf,p_idx)
                # the following instruction is now included in p.remove_observation()
                #f.remove_point(p)   # it removes multiple point instances (if these are present)   
                #f.remove_point_match(p_idx) # this does not remove multiple point instances, but now there cannot be multiple instances any more

        # put frames back
        for kf in graph_keyframes:
            est = graph_keyframes[kf].estimate()
            #R = est.rotation().matrix()
            #t = est.translation()
            #f.update_pose(poseRt(R, t))
            kf.update_pose(g2o.Isometry3d(est.orientation(), est.position()))

        # put points back
        if not fixed_points:
            for p in graph_points:
                p.update_position(np.array(graph_points[p].estimate()))
                p.update_normal_and_depth(force=True)

    active_edges = num_edges - num_bad_edges
    mean_squared_error = opt.active_chi2() / max(active_edges, 1)

    return mean_squared_error, num_bad_observations/max(num_edges,1)
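Note how the graph above avoids vertex-id collisions by assigning even ids to keyframes (kid * 2) and odd ids to points (id * 2 + 1), while the first keyframe (kid == 0) is kept fixed as the gauge. A minimal sketch of that setup, restricted to the g2o binding calls already used above (the pose and point values are placeholders):

import numpy as np
import g2o

opt = g2o.SparseOptimizer()
block_solver = g2o.BlockSolverSE3(g2o.LinearSolverCSparseSE3())
opt.set_algorithm(g2o.OptimizationAlgorithmLevenberg(block_solver))

# keyframe vertex: even id, fixed to anchor the gauge freedom
v_kf = g2o.VertexSE3Expmap()
v_kf.set_estimate(g2o.SE3Quat(np.eye(3), np.zeros(3)))
v_kf.set_id(0 * 2)      # kid * 2
v_kf.set_fixed(True)    # kid == 0
opt.add_vertex(v_kf)

# map point vertex: odd id, marginalized out in the Schur complement
v_p = g2o.VertexSBAPointXYZ()
v_p.set_estimate(np.array([0.0, 0.0, 1.0]))
v_p.set_id(0 * 2 + 1)   # id * 2 + 1
v_p.set_marginalized(True)
opt.add_vertex(v_p)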
Example #8
    def track_previous_frame(self, f_ref, f_cur):
        print('>>>> tracking previous frame ...')
        is_search_frame_by_projection_failure = False
        use_search_frame_by_projection = self.motion_model.is_ok and kUseSearchFrameByProjection and kUseMotionModel

        if use_search_frame_by_projection:
            # search frame by projection: match map points observed in f_ref with keypoints of f_cur
            print('search frame by projection')
            search_radius = Parameters.kMaxReprojectionDistanceFrame
            f_cur.reset_points()
            self.timer_seach_frame_proj.start()
            idxs_ref, idxs_cur, num_found_map_pts = search_frame_by_projection(
                f_ref,
                f_cur,
                max_reproj_distance=search_radius,
                max_descriptor_distance=self.descriptor_distance_sigma)
            self.timer_seach_frame_proj.refresh()
            self.num_matched_kps = len(idxs_cur)
            print("# matched map points in prev frame: %d " %
                  self.num_matched_kps)

            # if not enough map point matches consider a larger search radius
            if self.num_matched_kps < Parameters.kMinNumMatchedFeaturesSearchFrameByProjection:
                f_cur.remove_frame_views(idxs_cur)
                f_cur.reset_points()
                idxs_ref, idxs_cur, num_found_map_pts = search_frame_by_projection(
                    f_ref,
                    f_cur,
                    max_reproj_distance=2 * search_radius,
                    max_descriptor_distance=0.5 *
                    self.descriptor_distance_sigma)
                self.num_matched_kps = len(idxs_cur)
                Printer.orange(
                    "# matched map points in prev frame (wider search): %d " %
                    self.num_matched_kps)

            if kDebugDrawMatches and True:
                img_matches = draw_feature_matches(f_ref.img,
                                                   f_cur.img,
                                                   f_ref.kps[idxs_ref],
                                                   f_cur.kps[idxs_cur],
                                                   f_ref.sizes[idxs_ref],
                                                   f_cur.sizes[idxs_cur],
                                                   horizontal=False)
                cv2.imshow('tracking frame by projection - matches',
                           img_matches)
                cv2.waitKey(1)

            if self.num_matched_kps < Parameters.kMinNumMatchedFeaturesSearchFrameByProjection:
                f_cur.remove_frame_views(idxs_cur)
                f_cur.reset_points()
                is_search_frame_by_projection_failure = True
                Printer.red(
                    'Not enough matches in search frame by projection: ',
                    self.num_matched_kps)
            else:
                # search frame by projection was successful
                if kUseDynamicDesDistanceTh:
                    self.descriptor_distance_sigma = self.dyn_config.update_descriptor_stat(
                        f_ref, f_cur, idxs_ref, idxs_cur)

                # store tracking info (for possible reuse)
                self.idxs_ref = idxs_ref
                self.idxs_cur = idxs_cur

                # f_cur pose optimization 1:
                # here, we use the f_cur pose as a first guess and exploit the map points matched in f_ref
                self.pose_optimization(f_cur, 'proj-frame-frame')
                # update matched map points; discard outliers detected in last pose optimization
                num_matched_points = f_cur.clean_outlier_map_points()
                print('     # num_matched_map_points: %d' %
                      (self.num_matched_map_points))
                #print('     # matched points: %d' % (num_matched_points) )

                if not self.pose_is_ok or self.num_matched_map_points < kNumMinInliersPoseOptimizationTrackFrame:
                    Printer.red(
                        'failure in tracking previous frame, # matched map points: ',
                        self.num_matched_map_points)
                    self.pose_is_ok = False

        if not use_search_frame_by_projection or is_search_frame_by_projection_failure:
            self.track_reference_frame(f_ref, f_cur, 'match-frame-frame')
Example #9
def search_frame_for_triangulation(kf1, kf2, idxs1=None, idxs2=None, 
                                   max_descriptor_distance=0.5*Parameters.kMaxDescriptorDistance):   
    idxs2_out = []
    idxs1_out = []
    num_found_matches = 0
    img2_epi = None     

    if __debug__:
        timer = Timer()
        timer.start()

    O1w = kf1.Ow
    O2w = kf2.Ow
    # compute epipoles
    e1,_ = kf1.project_point(O2w)  # in first frame 
    e2,_ = kf2.project_point(O1w)  # in second frame  
    #print('e1: ', e1)
    #print('e2: ', e2)    
    
    baseline = np.linalg.norm(O1w-O2w) 

    # if the translation is too small we cannot triangulate 
    # if baseline < Parameters.kMinTraslation:  # we assume the Initializer has been used for building the first map
    #     Printer.red("search for triangulation: impossible with almost zero translation!")
    #     return idxs1_out, idxs2_out, num_found_matches, img2_epi # EXIT
    # else:    
    medianDepth = kf2.compute_points_median_depth()
    if medianDepth == -1:
        Printer.orange("search for triangulation: f2 with no points")        
        medianDepth = kf1.compute_points_median_depth()        
    ratioBaselineDepth = baseline/medianDepth
    if ratioBaselineDepth < Parameters.kMinRatioBaselineDepth:  
        Printer.orange("search for triangulation: impossible with too low ratioBaselineDepth!")
        return idxs1_out, idxs2_out, num_found_matches, img2_epi # EXIT        

    # compute the fundamental matrix between the two frames by using their estimated poses 
    F12, H21 = computeF12(kf1, kf2)

    if idxs1 is None or idxs2 is None:
        timerMatch = Timer()
        timerMatch.start()        
        idxs1, idxs2 = Frame.feature_matcher.match(kf1.des, kf2.des)        
        print('search_frame_for_triangulation - matching - timer: ', timerMatch.elapsed())        
    
    rot_histo = RotationHistogram()
    check_orientation = kCheckFeaturesOrientation and Frame.oriented_features     
        
    # check epipolar constraints 
    for i1,i2 in zip(idxs1,idxs2):
        if kf1.get_point_match(i1) is not None or kf2.get_point_match(i2) is not None: # we are searching for keypoint matches where both keypoints do not have a corresponding map point 
            #print('existing point on match')
            continue 
        
        descriptor_dist = Frame.descriptor_distance(kf1.des[i1], kf2.des[i2])
        if descriptor_dist > max_descriptor_distance:
            continue     
        
        kp1 = kf1.kpsu[i1]
        #kp1_scale_factor = Frame.feature_manager.scale_factors[kf1.octaves[i1]]
        #kp1_size = f1.sizes[i1]
        # discard points which are too close to the epipole            
        #if np.linalg.norm(kp1-e1) < Parameters.kMinDistanceFromEpipole * kp1_scale_factor:                 
        #if np.linalg.norm(kp1-e1) - kp1_size < Parameters.kMinDistanceFromEpipole:  # N.B.: this is too conservative => it filters too much
        #    continue   
        
        kp2 = kf2.kpsu[i2]
        kp2_scale_factor = Frame.feature_manager.scale_factors[kf2.octaves[i2]]        
        # kp2_size = f2.sizes[i2]        
        # discard points which are too close to the epipole            
        delta = kp2 - e2
        #if np.linalg.norm(delta) < Parameters.kMinDistanceFromEpipole * kp2_scale_factor:
        #if np.linalg.norm(delta) - kp2_size < Parameters.kMinDistanceFromEpipole:  # N.B.: too conservative => it filters too much
        if np.inner(delta, delta) < kMinDistanceFromEpipole2 * kp2_scale_factor:
            continue
        
        # check epipolar constraint         
        sigma2_kp2 = Frame.feature_manager.level_sigmas2[kf2.octaves[i2]]
        if check_dist_epipolar_line(kp1,kp2,F12,sigma2_kp2):
            idxs1_out.append(i1)
            idxs2_out.append(i2)
            
            if check_orientation:
                index_match = len(idxs1_out)-1
                rot = kf1.angles[i1]-kf2.angles[i2]
                rot_histo.push(rot,index_match)            
        #else:
        #    print('discarding point match non respecting epipolar constraint')
         
    if check_orientation:            
        valid_match_idxs = rot_histo.get_valid_idxs()     
        #print('checking orientation consistency - valid matches % :', len(valid_match_idxs)/max(1,len(idxs1_out))*100,'% of ', len(idxs1_out),'matches')
        #print('rotation histogram: ', rot_histo)
        idxs1_out = np.array(idxs1_out)[valid_match_idxs]
        idxs2_out = np.array(idxs2_out)[valid_match_idxs]
                 
    num_found_matches = len(idxs1_out)
             
    if __debug__:
        print('search_frame_for_triangulation - timer: ', timer.elapsed())

    return idxs1_out, idxs2_out, num_found_matches, img2_epi
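The loop above defers the actual epipolar test to check_dist_epipolar_line, whose body is not shown here. A plausible self-contained sketch of such a test (this follows the usual ORB-SLAM-style chi-square check on the squared point-to-epipolar-line distance; the line convention and threshold are assumptions, and pySLAM's actual implementation may differ):

import numpy as np

def check_dist_epipolar_line(kp1, kp2, F12, sigma2_kp2):
    # epipolar line in image 2 associated with kp1 (assumed convention): l2 = F12^T [u1, v1, 1]^T
    l2 = F12.T @ np.array([kp1[0], kp1[1], 1.0])
    num = l2[0] * kp2[0] + l2[1] * kp2[1] + l2[2]  # signed distance numerator
    den = l2[0] ** 2 + l2[1] ** 2
    if den == 0:
        return False
    dist_sqr = (num * num) / den
    # one-DOF chi-square test at 95%: threshold 3.84, scaled by the keypoint's level sigma^2
    return dist_sqr < 3.84 * sigma2_kp2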
Example #10
    def add_points(self,
                   points3d,
                   mask_pts3d,
                   kf1,
                   kf2,
                   idxs1,
                   idxs2,
                   img1,
                   do_check=True,
                   cos_max_parallax=Parameters.kCosMaxParallax):
        with self._lock:
            assert (kf1.is_keyframe
                    and kf2.is_keyframe)  # kf1 and kf2 must be keyframes
            assert (points3d.shape[0] == len(idxs1))
            assert (len(idxs2) == len(idxs1))

            added_points = []
            out_mask_pts3d = np.full(points3d.shape[0], False, dtype=bool)
            if mask_pts3d is None:
                mask_pts3d = np.full(points3d.shape[0], True, dtype=bool)

            ratio_scale_consistency = Parameters.kScaleConsistencyFactor * Frame.feature_manager.scale_factor

            if do_check:
                # project points
                uvs1, depths1 = kf1.project_points(points3d)
                bad_depths1 = depths1 <= 0
                uvs2, depths2 = kf2.project_points(points3d)
                bad_depths2 = depths2 <= 0

                # compute back-projected rays (unit vectors)
                rays1 = np.dot(kf1.Rwc, add_ones(kf1.kpsn[idxs1]).T).T
                norm_rays1 = np.linalg.norm(rays1, axis=-1, keepdims=True)
                rays1 /= norm_rays1
                rays2 = np.dot(kf2.Rwc, add_ones(kf2.kpsn[idxs2]).T).T
                norm_rays2 = np.linalg.norm(rays2, axis=-1, keepdims=True)
                rays2 /= norm_rays2

                # compute dot products of rays
                cos_parallaxs = np.sum(rays1 * rays2, axis=1)
                bad_cos_parallaxs = np.logical_or(
                    cos_parallaxs < 0, cos_parallaxs > cos_max_parallax)

                # compute reprojection errors
                errs1 = uvs1 - kf1.kpsu[idxs1]
                errs1_sqr = np.sum(errs1 * errs1,
                                   axis=1)  # squared reprojection errors
                kps1_levels = kf1.octaves[idxs1]
                invSigmas2_1 = Frame.feature_manager.inv_level_sigmas2[
                    kps1_levels]
                chis2_1 = errs1_sqr * invSigmas2_1  # chi square
                bad_chis2_1 = chis2_1 > Parameters.kChi2Mono  # chi-square 2 DOFs  (Hartley Zisserman pg 119)

                errs2 = uvs2 - kf2.kpsu[idxs2]
                errs2_sqr = np.sum(errs2 * errs2,
                                   axis=1)  # squared reprojection errors
                kps2_levels = kf2.octaves[idxs2]
                invSigmas2_2 = Frame.feature_manager.inv_level_sigmas2[
                    kps2_levels]
                chis2_2 = errs2_sqr * invSigmas2_2  # chi square
                bad_chis2_2 = chis2_2 > Parameters.kChi2Mono  # chi-square 2 DOFs  (Hartley Zisserman pg 119)

                # scale consistency
                scale_factors_x_depths1 = Frame.feature_manager.scale_factors[
                    kps1_levels] * depths1
                scale_factors_x_depths1_x_ratio_scale_consistency = scale_factors_x_depths1 * ratio_scale_consistency
                scale_factors_x_depths2 = Frame.feature_manager.scale_factors[
                    kps2_levels] * depths2
                scale_factors_x_depths2_x_ratio_scale_consistency = scale_factors_x_depths2 * ratio_scale_consistency
                bad_scale_consistency = np.logical_or(
                    (scale_factors_x_depths1 >
                     scale_factors_x_depths2_x_ratio_scale_consistency),
                    (scale_factors_x_depths2 >
                     scale_factors_x_depths1_x_ratio_scale_consistency))

                # combine all checks
                bad_points = bad_cos_parallaxs | bad_depths1 | bad_depths2 | bad_chis2_1 | bad_chis2_2 | bad_scale_consistency

            img_coords = np.rint(kf1.kps[idxs1]).astype(
                np.intp)  # image keypoints coordinates
            # build img patches coordinates
            delta = Parameters.kColorPatchDelta
            patch_extension = 1 + 2 * delta  # patch_extension x patch_extension
            img_pts_start = img_coords - delta
            img_pts_end = img_coords + delta
            img_ranges = np.linspace(img_pts_start,
                                     img_pts_end,
                                     patch_extension,
                                     dtype=np.intp)[:, :].T

            def img_range_elem(ranges, i):
                return ranges[:, i]

            for i, p in enumerate(points3d):
                if not mask_pts3d[i]:
                    #print('p[%d] not good' % i)
                    continue

                idx1_i = idxs1[i]
                idx2_i = idxs2[i]

                # perform different required checks before adding the point
                if do_check:
                    if bad_points[i]:
                        continue

                    # check parallax is large enough (this is going to filter out all points when the inter-frame motion is almost zero)
                    # ray1 = np.dot(kf1.Rwc, add_ones_1D(kf1.kpsn[idx1_i]))
                    # ray2 = np.dot(kf2.Rwc, add_ones_1D(kf2.kpsn[idx2_i]))
                    # cos_parallax = ray1.dot(ray2) / (np.linalg.norm(ray1) * np.linalg.norm(ray2))
                    # if cos_parallax < 0 or cos_parallax > cos_max_parallax:
                    #     #print('p[',i,']: ',p,' not enough parallax: ', cos_parallaxs[i])
                    #     continue

                    # check points are visible on f1
                    # uv1, depth1 = kf1.project_point(p)
                    # is_visible1  = kf1.is_in_image(uv1, depth1) # N.B.: is_in_image() check is redundant since we check the reproj error
                    # if not is_visible1:
                    #     continue

                    # check points are visible on f2
                    # uv2, depth2 = kf2.project_point(p)
                    # is_visible2  = kf2.is_in_image(uv2, depth2)  # N.B.: is_in_image() check is redundant since we check the reproj error
                    # if not is_visible2:
                    #     continue

                    # check reprojection error on f1
                    #kp1_level = kf1.octaves[idx1_i]
                    #invSigma2_1 = Frame.feature_manager.inv_level_sigmas2[kp1_level]
                    # err1 = uvs1[i] - kf1.kpsu[idx1_i]
                    # chi2_1 = np.inner(err1,err1)*invSigma2_1
                    # if chi2_1 > Parameters.kChi2Mono: # chi-square 2 DOFs  (Hartley Zisserman pg 119)
                    #     continue

                    # check reprojection error on f2
                    # kp2_level = kf2.octaves[idx2_i]
                    # invSigma2_2 = Frame.feature_manager.inv_level_sigmas2[kp2_level]
                    # err2 = uvs2[i] - kf2.kpsu[idx2_i]
                    # chi2_2 = np.inner(err2,err2)*invSigma2_2
                    # if chi2_2 > Parameters.kChi2Mono: # chi-square 2 DOFs  (Hartley Zisserman pg 119)
                    #     continue

                    #check scale consistency
                    # scale_factor_x_depth1 =  Frame.feature_manager.scale_factors[kps1_levels[i]] * depths1[i]
                    # scale_factor_x_depth2 =  Frame.feature_manager.scale_factors[kps2_levels[i]] * depths2[i]
                    # if (scale_factor_x_depth1 > scale_factor_x_depth2*ratio_scale_consistency) or \
                    #    (scale_factor_x_depth2 > scale_factor_x_depth1*ratio_scale_consistency):
                    #     continue

                # get the color of the point
                try:
                    #color = img1[int(round(kf1.kps[idx1_i, 1])), int(round(kf1.kps[idx1_i, 0]))]
                    #img_pt = np.rint(kf1.kps[idx1_i]).astype(np.int)
                    # color at the point
                    #color = img1[img_pt[1],img_pt[0]]
                    # build color patch
                    #color_patch = img1[img_pt[1]-delta:img_pt[1]+delta,img_pt[0]-delta:img_pt[0]+delta]
                    #color = color_patch.mean(axis=0).mean(axis=0)  # compute the mean color in the patch

                    # average color in a (1+2*delta) x (1+2*delta) patch
                    #pt_start = img_pts_start[i]
                    #pt_end   = img_pts_end[i]
                    #color_patch = img1[pt_start[1]:pt_end[1],pt_start[0]:pt_end[0]]

                    # average color in a (1+2*delta) x (1+2*delta) patch
                    img_range = img_range_elem(img_ranges, i)
                    color_patch = img1[img_range[1][:, np.newaxis],
                                       img_range[0]]
                    #print('color_patch.shape:',color_patch.shape)

                    color = cv2.mean(
                        color_patch)[:3]  # compute the mean color in the patch
                except IndexError:
                    Printer.orange('color out of range')
                    color = (255, 0, 0)

                # add the point to this map
                mp = MapPoint(p[0:3], color, kf2, idx2_i)
                self.add_point(mp)  # add point to this map
                mp.add_observation(kf1, idx1_i)
                mp.add_observation(kf2, idx2_i)
                mp.update_info()
                out_mask_pts3d[i] = True
                added_points.append(mp)
            return len(added_points), out_mask_pts3d, added_points
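Instead of sampling a single pixel, the code above averages the color over a small square patch around each keypoint, which is more robust to noise. A self-contained sketch of that idea for one point (the image and coordinates are made up; delta plays the role of Parameters.kColorPatchDelta):

import numpy as np
import cv2

img = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # stand-in image
u, v = 320, 240  # keypoint image coordinates (x, y)
delta = 2        # half-size of the (1 + 2*delta) x (1 + 2*delta) patch

patch = img[v - delta:v + delta + 1, u - delta:u + delta + 1]
color = cv2.mean(patch)[:3]  # mean B, G, R over the patch
print('mean patch color:', color)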
Example #11
from enum import Enum

import cv2
from scipy.spatial import cKDTree
#from pykdtree.kdtree import KDTree # slower!

from utils_sys import Printer, import_from
from utils_geom import add_ones, s1_diff_deg, s1_dist_deg, l2_distances

ORBextractor = import_from('orbslam2_features', 'ORBextractor')  
      
kPySlamUtilsAvailable= True      
try:
    import pyslam_utils
except:
    kPySlamUtilsAvailable = False 
    Printer.orange('WARNING: cannot import pyslam_utils')
      
from parameters import Parameters 



# convert matrix of pts into list of cv2 keypoints
def convert_pts_to_keypoints(pts, size=1):
    kps = []
    if pts is not None:
        if pts.ndim > 2:
            # convert matrix [Nx1x2] of pts into list of keypoints
            kps = [cv2.KeyPoint(p[0][0], p[0][1], _size=size) for p in pts]
        else:
            # convert matrix [Nx2] of pts into list of keypoints
            kps = [cv2.KeyPoint(p[0], p[1], _size=size) for p in pts]
    return kps
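A quick usage sketch of convert_pts_to_keypoints (the input array is illustrative, shaped like the [Nx1x2] output of cv2.goodFeaturesToTrack; note that recent OpenCV releases renamed the _size keyword of cv2.KeyPoint to size, so the constructor call may need adjusting for your cv2 version):

import numpy as np

pts = np.array([[[10.0, 20.0]], [[30.5, 42.0]]], dtype=np.float32)

kps = convert_pts_to_keypoints(pts, size=3)
print([kp.pt for kp in kps])  # [(10.0, 20.0), (30.5, 42.0)]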
Example #12
    def initialize(self, f_cur, img_cur):

        if self.num_failures > kNumOfFailuresAfterWichNumMinTriangulatedPointsIsHalved:
            self.num_min_triangulated_points = 0.5 * Parameters.kInitializerNumMinTriangulatedPoints
            self.num_failures = 0
            Printer.orange(
                'Initializer: halved min num triangulated features to ',
                self.num_min_triangulated_points)

        # prepare the output
        out = InitializerOutput()
        is_ok = False

        #print('num frames: ', len(self.frames))

        # if too many frames have passed, move the current idx_f_ref forward
        # this is just one very simple policy that can be used
        if self.f_ref is not None:
            if f_cur.id - self.f_ref.id > kMaxIdDistBetweenIntializingFrames:
                self.f_ref = self.frames[-1]  # take last frame in the buffer
                #self.idx_f_ref = len(self.frames)-1  # take last frame in the buffer
                #self.idx_f_ref = self.frames.index(self.f_ref)  # since we are using a deque, the code of the previous commented line is not valid anymore
                #print('*** idx_f_ref:',self.idx_f_ref)
        #self.f_ref = self.frames[self.idx_f_ref]
        f_ref = self.f_ref
        #print('ref fid: ',self.f_ref.id,', curr fid: ', f_cur.id, ', idxs_ref: ', self.idxs_ref)

        # append current frame
        self.frames.append(f_cur)

        # if the current frames do not have enough features, exit
        if len(f_ref.kps) < self.num_min_features or len(
                f_cur.kps) < self.num_min_features:
            Printer.red('Initializer: ko - not enough features!')
            self.num_failures += 1
            return out, is_ok

        # find keypoint matches
        idxs_cur, idxs_ref = match_frames(f_cur, f_ref,
                                          kFeatureMatchRatioTestInitializer)

        print('|------------')
        #print('deque ids: ', [f.id for f in self.frames])
        print('initializing frames ', f_cur.id, ', ', f_ref.id)
        print("# keypoint matches: ", len(idxs_cur))

        Trc = self.estimatePose(f_ref.kpsn[idxs_ref], f_cur.kpsn[idxs_cur])
        Tcr = inv_T(Trc)  # Tcr w.r.t. ref frame
        f_ref.update_pose(np.eye(4))
        f_cur.update_pose(Tcr)

        # remove outliers from keypoint matches by using the mask computed with inter frame pose estimation
        mask_idxs = (self.mask_match.ravel() == 1)
        self.num_inliers = sum(mask_idxs)
        print('# keypoint inliers: ', self.num_inliers)
        idx_cur_inliers = idxs_cur[mask_idxs]
        idx_ref_inliers = idxs_ref[mask_idxs]

        # create a temp map for initializing
        map = Map()
        f_ref.reset_points()
        f_cur.reset_points()

        #map.add_frame(f_ref)
        #map.add_frame(f_cur)

        kf_ref = KeyFrame(f_ref)
        kf_cur = KeyFrame(f_cur, img_cur)
        map.add_keyframe(kf_ref)
        map.add_keyframe(kf_cur)

        pts3d, mask_pts3d = triangulate_normalized_points(
            kf_cur.Tcw, kf_ref.Tcw, kf_cur.kpsn[idx_cur_inliers],
            kf_ref.kpsn[idx_ref_inliers])

        new_pts_count, mask_points, _ = map.add_points(
            pts3d,
            mask_pts3d,
            kf_cur,
            kf_ref,
            idx_cur_inliers,
            idx_ref_inliers,
            img_cur,
            do_check=True,
            cos_max_parallax=Parameters.kCosMaxParallaxInitializer)
        print("# triangulated points: ", new_pts_count)

        if new_pts_count > self.num_min_triangulated_points:
            err = map.optimize(verbose=False,
                               rounds=20,
                               use_robust_kernel=True)
            print("init optimization error^2: %f" % err)

            num_map_points = len(map.points)
            print("# map points:   %d" % num_map_points)
            is_ok = num_map_points > self.num_min_triangulated_points

            out.pts = pts3d[mask_points]
            out.kf_cur = kf_cur
            out.idxs_cur = idx_cur_inliers[mask_points]
            out.kf_ref = kf_ref
            out.idxs_ref = idx_ref_inliers[mask_points]

            # set scene median depth to equal desired_median_depth
            desired_median_depth = Parameters.kInitializerDesiredMedianDepth
            median_depth = kf_cur.compute_points_median_depth(out.pts)
            depth_scale = desired_median_depth / median_depth
            print('forcing current median depth ', median_depth, ' to ',
                  desired_median_depth)

            out.pts[:, :3] = out.pts[:, :3] * depth_scale  # scale points
            tcw = kf_cur.tcw * depth_scale  # scale initial baseline
            kf_cur.update_translation(tcw)

        map.delete()

        if is_ok:
            Printer.green('Initializer: ok!')
        else:
            self.num_failures += 1
            Printer.red('Initializer: ko!')
        print('|------------')
        return out, is_ok
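The final rescaling step above removes the arbitrary monocular scale by forcing the median scene depth to a desired value: the points and the initial baseline are multiplied by the same factor. A small numpy sketch of that operation (values and names are illustrative; np.median over the z-coordinates stands in for kf_cur.compute_points_median_depth()):

import numpy as np

pts = np.random.rand(50, 3) * 10.0  # stand-in triangulated points
tcw = np.array([0.1, 0.0, 0.05])    # stand-in initial camera translation

desired_median_depth = 20.0
median_depth = np.median(pts[:, 2])
depth_scale = desired_median_depth / median_depth

pts[:, :3] *= depth_scale  # scale points
tcw *= depth_scale         # scale the initial baseline consistently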