import cv2
import numpy as np
import tensorflow as tf
# SiftWrapper, MatcherWrapper, load_frozen_model, Printer and FLAGS are
# provided by the repo's utility modules (see the hedged FLAGS sketch below).


def main(argv=None):  # pylint: disable=unused-argument
    """Program entry point."""
    # create the SIFT detector.
    sift_wrapper = SiftWrapper(n_sample=FLAGS.max_kpt_num)
    sift_wrapper.half_sigma = FLAGS.half_sigma
    sift_wrapper.pyr_off = FLAGS.pyr_off
    sift_wrapper.ori_off = FLAGS.ori_off
    sift_wrapper.create()
    # create the deep feature extractor.
    Printer.yellow('loading model:', FLAGS.model_path, '...')
    graph = load_frozen_model(FLAGS.model_path, print_nodes=False)
    Printer.yellow('...done')
    # `config` was undefined in the original snippet; allow GPU memory growth.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(graph=graph, config=config) as sess:
        # extract deep features from both images.
        deep_feat1, cv_kpts1, img1 = extract_deep_features(
            sift_wrapper, sess, FLAGS.img1_path, qtz=False)
        deep_feat2, cv_kpts2, img2 = extract_deep_features(
            sift_wrapper, sess, FLAGS.img2_path, qtz=False)
        # match features with the OpenCV brute-force matcher (CPU).
        matcher_wrapper = MatcherWrapper()
        # the ratio criterion is set to 0.89 for GeoDesc, as described in the paper.
        deep_good_matches, deep_mask = matcher_wrapper.get_matches(
            deep_feat1, deep_feat2, cv_kpts1, cv_kpts2,
            ratio=0.89, cross_check=True, info='deep')
        deep_display = matcher_wrapper.draw_matches(
            img1, cv_kpts1, img2, cv_kpts2, deep_good_matches, deep_mask)
        # compare with raw SIFT descriptors on the same keypoints.
        if FLAGS.cf_sift:
            sift_feat1 = sift_wrapper.compute(img1, cv_kpts1)
            sift_feat2 = sift_wrapper.compute(img2, cv_kpts2)
            sift_good_matches, sift_mask = matcher_wrapper.get_matches(
                sift_feat1, sift_feat2, cv_kpts1, cv_kpts2,
                ratio=0.80, cross_check=True, info='sift')
            sift_display = matcher_wrapper.draw_matches(
                img1, cv_kpts1, img2, cv_kpts2, sift_good_matches, sift_mask)
            display = np.concatenate((sift_display, deep_display), axis=0)
        else:
            display = deep_display
        cv2.imshow('display', display)
        cv2.waitKey()
        # no explicit sess.close() needed: the context manager closes the session.
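
# --- Hedged sketch (not from the original file): one plausible way the FLAGS
# consumed by main() could be declared with TF1's tf.app.flags. The flag names
# match the FLAGS.* attributes read above; the default values and help strings
# are illustrative assumptions, not the repo's actual settings.
tf.app.flags.DEFINE_string('model_path', None, 'path to the frozen model (.pb)')
tf.app.flags.DEFINE_string('img1_path', None, 'path to the first image')
tf.app.flags.DEFINE_string('img2_path', None, 'path to the second image')
tf.app.flags.DEFINE_integer('max_kpt_num', 8192, 'max number of SIFT keypoints (assumed default)')
tf.app.flags.DEFINE_boolean('half_sigma', True, 'halve the SIFT Gaussian sigma (assumed default)')
tf.app.flags.DEFINE_boolean('pyr_off', False, 'disable the SIFT scale pyramid (assumed default)')
tf.app.flags.DEFINE_boolean('ori_off', False, 'disable SIFT orientation estimation (assumed default)')
tf.app.flags.DEFINE_boolean('cf_sift', True, 'also match raw SIFT for comparison (assumed default)')
FLAGS = tf.app.flags.FLAGS

if __name__ == '__main__':
    tf.app.run()  # parses flags, then calls main()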
def track(self, img, frame_id, timestamp=None):
    Printer.cyan('@tracking')
    time_start = time.time()
    # check that the image size is coherent with the camera params
    print("img.shape: ", img.shape)
    print("camera ", self.camera.height, " x ", self.camera.width)
    assert img.shape[0:2] == (self.camera.height, self.camera.width)
    if timestamp is not None:
        print('timestamp: ', timestamp)
    self.timer_main_track.start()
    # build the current frame
    self.timer_frame.start()
    f_cur = Frame(img, self.camera, timestamp=timestamp)
    self.f_cur = f_cur
    print("frame: ", f_cur.id)
    self.timer_frame.refresh()
    # reset the indexes of matches
    self.idxs_ref = []
    self.idxs_cur = []
    if self.state == SlamState.NO_IMAGES_YET:
        # push the first frame into the initializer
        # (note: the 'intializer' spelling matches the attribute name defined elsewhere in the class)
        self.intializer.init(f_cur)
        self.state = SlamState.NOT_INITIALIZED
        return  # EXIT (jump to second frame)
    if self.state == SlamState.NOT_INITIALIZED:
        # try to initialize
        initializer_output, initializer_is_ok = self.intializer.initialize(f_cur, img)
        if initializer_is_ok:
            kf_ref = initializer_output.kf_ref
            kf_cur = initializer_output.kf_cur
            # add the two initialized frames to the map
            self.map.add_frame(kf_ref)  # add first frame to map and update its frame id
            self.map.add_frame(kf_cur)  # add second frame to map and update its frame id
            # add the two initialized frames as keyframes to the map
            self.map.add_keyframe(kf_ref)  # add first keyframe to map and update its kid
            self.map.add_keyframe(kf_cur)  # add second keyframe to map and update its kid
            kf_ref.init_observations()
            kf_cur.init_observations()
            # add points to the map
            new_pts_count, _, _ = self.map.add_points(
                initializer_output.pts, None, kf_cur, kf_ref,
                initializer_output.idxs_cur, initializer_output.idxs_ref,
                img, do_check=False)
            Printer.green("map: initialized %d new points" % (new_pts_count))
            # update covisibility graph connections
            kf_ref.update_connections()
            kf_cur.update_connections()
            # update tracking info
            self.f_cur = kf_cur
            self.f_cur.kf_ref = kf_ref
            self.kf_ref = kf_cur   # set reference keyframe
            self.kf_last = kf_cur  # set last added keyframe
            self.map.local_map.update(self.kf_ref)
            self.state = SlamState.OK
            self.update_tracking_history()
            self.motion_model.update_pose(kf_cur.timestamp, kf_cur.position, kf_cur.quaternion)
            # after initialization you cannot use the motion model for the next frame's pose
            # prediction (the time ids of the initialized poses may not be consecutive)
            self.motion_model.is_ok = False
            self.intializer.reset()
            if kUseDynamicDesDistanceTh:
                self.descriptor_distance_sigma = self.dyn_config.update_descriptor_stat(
                    kf_ref, kf_cur, initializer_output.idxs_ref, initializer_output.idxs_cur)
        return  # EXIT (jump to next frame)
    # get the previous frame in the map as reference
    f_ref = self.map.get_frame(-1)
    #f_ref_2 = self.map.get_frame(-2)
    self.f_ref = f_ref
    # add the current frame f_cur to the map
    self.map.add_frame(f_cur)
    self.f_cur.kf_ref = self.kf_ref
    # reset the pose state flag
    self.pose_is_ok = False
    with self.map.update_lock:
        # check for map point replacements in the previous frame f_ref
        # (some points might have been replaced by local mapping during point fusion)
        self.f_ref.check_replaced_map_points()
        if kUseDynamicDesDistanceTh:
            print('descriptor_distance_sigma: ', self.descriptor_distance_sigma)
            self.local_mapping.descriptor_distance_sigma = self.descriptor_distance_sigma
        # update the (velocity) motion model: c1=ref_ref, c2=ref, c3=cur; c=cur, r=ref
        #self.velocity = np.dot(f_ref.pose, inv_T(f_ref_2.pose))                  # Tc2c1 = Tc2w * Twc1 (predicted Tcr)
        #self.predicted_pose = g2o.Isometry3d(np.dot(self.velocity, f_ref.pose))  # Tc3w = Tc2c1 * Tc2w (predicted Tcw)
        # set the initial guess for the current pose optimization
        if kUseMotionModel and self.motion_model.is_ok:
            print('using motion model for next pose prediction')
            # update the f_ref pose according to its reference keyframe
            # (the pose of the reference keyframe could have been updated by local mapping)
            self.f_ref.update_pose(
                self.tracking_history.relative_frame_poses[-1] * self.f_ref.kf_ref.isometry3d)
            # predict the pose by using the motion model
            self.predicted_pose, _ = self.motion_model.predict_pose(
                timestamp, self.f_ref.position, self.f_ref.orientation)
            f_cur.update_pose(self.predicted_pose)
        else:
            print('setting f_cur.pose <-- f_ref.pose')
            # use the reference frame pose as the initial guess
            f_cur.update_pose(f_ref.pose)
        # track the camera motion from f_ref to f_cur
        self.track_previous_frame(f_ref, f_cur)
        if not self.pose_is_ok:
            # if the previous track didn't go well, track the camera motion from kf_ref to f_cur
            self.track_keyframe(self.kf_ref, f_cur)
        # now, having a better estimate of the f_cur pose, we can find more map point matches:
        # find matches between {local map points} (points in the local map) and {unmatched keypoints of f_cur}
        if self.pose_is_ok:
            self.track_local_map(f_cur)
    # end block {with self.map.update_lock:}
    # TODO: add relocalization
    # HACK: since local mapping is not fast enough in Python (and tracking is not real-time),
    # give local mapping more time to process its queue.
    # N.B.: this must stay outside the `with self.map.update_lock:` block
    self.wait_for_local_mapping()
    with self.map.update_lock:
        # update the slam state
        if self.pose_is_ok:
            self.state = SlamState.OK
        else:
            self.state = SlamState.LOST
            Printer.red('tracking failure')
        # update the motion model state
        self.motion_model.is_ok = self.pose_is_ok
        if self.pose_is_ok:  # if tracking was successful
            # update the motion model
            self.motion_model.update_pose(timestamp, f_cur.position, f_cur.quaternion)
            f_cur.clean_vo_map_points()
            # do we need a new keyframe?
            need_new_kf = self.need_new_keyframe(f_cur)
            if need_new_kf:
                Printer.green('adding new KF with frame id %d: ' % (f_cur.id))
                if kLogKFinfoToFile:
                    self.kf_info_logger.info('adding new KF with frame id %d: ' % (f_cur.id))
                kf_new = KeyFrame(f_cur, img)
                self.kf_last = kf_new
                self.kf_ref = kf_new
                f_cur.kf_ref = kf_new
                self.local_mapping.push_keyframe(kf_new)
                if not kLocalMappingOnSeparateThread:
                    self.local_mapping.do_local_mapping()
            else:
                Printer.yellow('NOT KF')
            # From ORB-SLAM2:
            # Clean outliers once keyframe generation has been managed:
            # we allow points with high innovation (considered outliers by the Huber function)
            # to pass to the new keyframe, so that bundle adjustment will finally decide
            # if they are outliers or not. We don't want the next frame to estimate its
            # position with those points, so we discard them in the frame.
            f_cur.clean_outlier_map_points()
    if self.f_cur.kf_ref is None:
        self.f_cur.kf_ref = self.kf_ref
    # must stay after having updated the slam state (self.state)
    self.update_tracking_history()
    Printer.green("map: %d points, %d keyframes" %
                  (self.map.num_points(), self.map.num_keyframes()))
    #self.update_history()
    self.timer_main_track.refresh()
    duration = time.time() - time_start
    print('tracking duration: ', duration)
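

# --- Hedged sketch (an assumption, not pySLAM's actual MotionModel): the
# constant-velocity prediction hinted at by the commented-out lines in
# track() above. With T denoting Tcw (camera-from-world) 4x4 poses:
#   velocity  = T_ref @ inv(T_ref_prev)   # Tc2c1 = Tc2w * Twc1 (relative motion Tcr)
#   predicted = velocity @ T_ref          # Tc3w  = Tc2c1 * Tc2w (predicted Tcw)
import numpy as np

def predict_pose_const_velocity(T_ref_prev, T_ref):
    """Replay the last inter-frame motion once to guess the next camera pose."""
    velocity = T_ref @ np.linalg.inv(T_ref_prev)  # relative motion between the last two frames
    return velocity @ T_ref                       # predicted pose of the incoming frame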