def __init__(self, params):
    """Wire up the tracking / mapping / loop-closing components.

    NOTE(review): this def is an orphan fragment — it duplicates the
    ``SPTAM.__init__`` defined later in this file; confirm it is dead code.

    Args:
        params: configuration object (e.g. init_min_points,
            min_tracked_points_ratio) shared by all components.
    """
    self.params = params
    self.tracker = Tracking(params)
    self.motion_model = MotionModel()
    self.graph = CovisibilityGraph()
    self.mapping = MappingThread(self.graph, params)
    self.loop_closing = LoopClosing(self, params)
    # Pending SE(3) correction produced by loop closing; consumed in track()
    self.loop_correction = None
    self.reference = None    # reference keyframe
    self.preceding = None    # last keyframe
    self.current = None      # current frame
    # Boolean flags: 'initialized', 'paused', 'tracking', 'adding_keyframes_stopped'
    self.status = defaultdict(bool)
def __init__(self, params, cam):
    """Set up tracker state, the map, and the optimizers.

    NOTE(review): this def is an orphan fragment — it duplicates the
    ``Tracker.__init__`` defined later in this file; confirm it is dead code.

    Args:
        params: configuration object (pnp_min_measurements,
            pnp_max_iterations, init_min_points, ...).
        cam: camera model used to build frames.
    """
    self.params = params
    self.cam = cam
    self.motion_model = MotionModel()
    self.map = Map()
    self.preceding = None    # last keyframe
    self.current = None      # current frame
    self.status = defaultdict(bool)
    self.optimizer = BundleAdjustment()
    self.bundle_adjustment = LocalBA()
    self.min_measurements = params.pnp_min_measurements
    self.max_iterations = params.pnp_max_iterations
    self.timer = RunningAverageTimer()
    self.lines = True    # enable line features in addition to points
class SPTAM(object):
    """S-PTAM front-end: coordinates tracking, mapping and loop closing.

    Holds the shared state between the threads (reference/preceding
    keyframes, pending loop correction, pause/track status flags).
    """

    def __init__(self, params):
        self.params = params
        self.tracker = Tracking(params)
        self.motion_model = MotionModel()
        self.graph = CovisibilityGraph()
        self.mapping = MappingThread(self.graph, params)
        self.loop_closing = LoopClosing(self, params)
        # Pending SE(3) correction from loop closing, consumed by track()
        self.loop_correction = None
        self.reference = None    # reference keyframe
        self.preceding = None    # last keyframe
        self.current = None      # current frame
        # Boolean flags: 'initialized', 'paused', 'tracking', 'adding_keyframes_stopped'
        self.status = defaultdict(bool)

    def stop(self):
        """Shut down the background mapping and loop-closing threads."""
        self.mapping.stop()
        if self.loop_closing is not None:
            self.loop_closing.stop()

    def initialize(self, frame):
        """Bootstrap the map from the first stereo frame via triangulation.

        Raises:
            AssertionError: if triangulation yields fewer than
                ``params.init_min_points`` map points.
        """
        mappoints, measurements = frame.triangulate()
        assert len(mappoints) >= self.params.init_min_points, (
            'Not enough points to initialize map.')

        keyframe = frame.to_keyframe()
        keyframe.set_fixed(True)    # anchor the first keyframe in optimization
        self.graph.add_keyframe(keyframe)
        self.mapping.add_measurements(keyframe, mappoints, measurements)
        if self.loop_closing is not None:
            self.loop_closing.add_keyframe(keyframe)

        self.reference = keyframe
        self.preceding = keyframe
        self.current = keyframe
        self.status['initialized'] = True

        self.motion_model.update_pose(
            frame.timestamp, frame.position, frame.orientation)

    def track(self, frame):
        """Track one frame: predict pose, match local map points, refine the
        pose (motion-only BA), and promote the frame to a keyframe if needed."""
        while self.is_paused():
            time.sleep(1e-4)
        self.set_tracking(True)

        self.current = frame
        print('Tracking:', frame.idx, ' <- ', self.reference.id, self.reference.idx)

        # Constant-velocity motion model prediction as the initial pose.
        predicted_pose, _ = self.motion_model.predict_pose(frame.timestamp)
        frame.update_pose(predicted_pose)

        if self.loop_closing is not None:
            if self.loop_correction is not None:
                # Apply the pending loop-closure correction to the prediction.
                estimated_pose = g2o.Isometry3d(
                    frame.orientation, frame.position)
                estimated_pose = estimated_pose * self.loop_correction
                frame.update_pose(estimated_pose)
                self.motion_model.apply_correction(self.loop_correction)
                self.loop_correction = None

        local_mappoints = self.filter_points(frame)
        measurements = frame.match_mappoints(
            local_mappoints, Measurement.Source.TRACKING)

        print('measurements:', len(measurements), '   ', len(local_mappoints))

        tracked_map = set()
        for m in measurements:
            mappoint = m.mappoint
            # Refresh descriptors so long-term tracking stays robust.
            mappoint.update_descriptor(m.get_descriptor())
            mappoint.increase_measurement_count()
            tracked_map.add(mappoint)

        try:
            self.reference = self.graph.get_reference_frame(tracked_map)

            pose = self.tracker.refine_pose(frame.pose, frame.cam, measurements)
            frame.update_pose(pose)
            self.motion_model.update_pose(
                frame.timestamp, pose.position(), pose.orientation())
            tracking_is_ok = True
        except Exception:
            # Was a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit; only trap real errors.
            tracking_is_ok = False
            print('tracking failed!!!')

        if tracking_is_ok and self.should_be_keyframe(frame, measurements):
            print('new keyframe', frame.idx)
            keyframe = frame.to_keyframe()
            keyframe.update_reference(self.reference)
            keyframe.update_preceding(self.preceding)

            self.mapping.add_keyframe(keyframe, measurements)
            if self.loop_closing is not None:
                self.loop_closing.add_keyframe(keyframe)

            self.preceding = keyframe

        self.set_tracking(False)

    def filter_points(self, frame):
        """Collect local map points plausibly visible from `frame`.

        Seeds from the preceding and reference keyframes, keeps the points
        inside the current view frustum, then adds all points observed by
        the two seed keyframes (skipping duplicates and bad points).
        """
        local_mappoints = self.graph.get_local_map_v2(
            [self.preceding, self.reference])[0]

        can_view = frame.can_view(local_mappoints)
        print('filter points:', len(local_mappoints), can_view.sum(),
              len(self.preceding.mappoints()),
              len(self.reference.mappoints()))

        checked = set()
        filtered = []
        for i in np.where(can_view)[0]:
            pt = local_mappoints[i]
            if pt.is_bad():
                continue
            pt.increase_projection_count()
            filtered.append(pt)
            checked.add(pt)

        for reference in set([self.preceding, self.reference]):
            for pt in reference.mappoints():  # neglect can_view test
                if pt in checked or pt.is_bad():
                    continue
                pt.increase_projection_count()
                filtered.append(pt)

        return filtered

    def should_be_keyframe(self, frame, measurements):
        """Decide whether `frame` should be promoted to a keyframe.

        True when too few points were matched (< 20) or the ratio of
        matches vs. the reference keyframe drops below the threshold.
        """
        if self.adding_keyframes_stopped():
            return False

        n_matches = len(measurements)
        n_matches_ref = len(self.reference.measurements())

        print('keyframe check:', n_matches, '   ', n_matches_ref)

        if n_matches_ref == 0:
            # Guard against ZeroDivisionError: an empty reference means
            # tracking support is gone, so a new keyframe is needed.
            return True
        return ((n_matches / n_matches_ref) <
                self.params.min_tracked_points_ratio) or n_matches < 20

    def set_loop_correction(self, T):
        # Stored here; applied to the next tracked frame in track().
        self.loop_correction = T

    def is_initialized(self):
        return self.status['initialized']

    def pause(self):
        self.status['paused'] = True

    def unpause(self):
        self.status['paused'] = False

    def is_paused(self):
        return self.status['paused']

    def is_tracking(self):
        return self.status['tracking']

    def set_tracking(self, status):
        self.status['tracking'] = status

    def stop_adding_keyframes(self):
        self.status['adding_keyframes_stopped'] = True

    def resume_adding_keyframes(self):
        self.status['adding_keyframes_stopped'] = False

    def adding_keyframes_stopped(self):
        return self.status['adding_keyframes_stopped']
class SPTAM(object):
    """S-PTAM front-end.

    - The interaction between tracking and mapping happens through
      keyframes and the CovisibilityGraph.
    """

    def __init__(self, params):
        self.params = params
        self.tracker = Tracking(params)
        self.motion_model = MotionModel()
        self.graph = CovisibilityGraph()
        self.mapping = MappingThread(self.graph, params)
        self.loop_closing = LoopClosing(self, params)
        self.loop_correction = None
        self.reference = None    # reference keyframe which contains the most local map points
        self.preceding = None    # last keyframe
        self.current = None      # current frame
        self.status = defaultdict(bool)

    def stop(self):
        """Shut down the mapping and (if present) loop-closing threads."""
        self.mapping.stop()
        if self.loop_closing is not None:
            self.loop_closing.stop()

    def initialize(self, frame):
        """Use stereo triangulation to initialize the map.

        Args:
            frame (StereoFrame): new incoming stereo frame (features extracted).

        Raises:
            AssertionError: if fewer than ``params.init_min_points``
                points were triangulated.
        """
        mappoints, measurements = frame.triangulate()
        assert len(mappoints) >= self.params.init_min_points, (
            'Not enough points to initialize map.')

        # The first frame is always a keyframe, and it must stay fixed
        # so the optimization has a gauge anchor.
        keyframe = frame.to_keyframe()
        keyframe.set_fixed(True)
        self.graph.add_keyframe(keyframe)
        # All the measurements and mappoints are anchored to this very keyframe
        self.mapping.add_measurements(keyframe, mappoints, measurements)
        if self.loop_closing is not None:
            self.loop_closing.add_keyframe(keyframe)

        self.reference = keyframe
        self.preceding = keyframe
        self.current = keyframe
        self.status['initialized'] = True

        self.motion_model.update_pose(
            frame.timestamp, frame.position, frame.orientation)

    def track(self, frame):
        """Track one stereo frame.

        - Step 1: predict the pose with the constant-velocity model.
        - Step 2: use self.preceding and self.reference as seed frames to
          extract the local map points (self.filter_points(frame)) viewable
          from the current frame under the initial pose estimate.
        - Step 3: find 2D image matches for the 3D map points' feature
          descriptors; also update the descriptors of the matched map
          points to improve long-term tracking.
        - Step 4: update self.reference by querying the graph for the
          frame that contains most of the current local map point set.
        - Step 5: run a motion-only BA to refine the current frame pose.
        - Step 6: promote the current frame to a keyframe if
            - a. the number of matched 3D map points is less than 20, or
            - b. the ratio of matched map points in the current frame vs
              the reference frame is below a threshold.
        """
        while self.is_paused():
            time.sleep(1e-4)
        self.set_tracking(True)

        self.current = frame
        print('Tracking:', frame.idx, ' <- ', self.reference.id, self.reference.idx)

        # Step 1: predict the pose
        predicted_pose, _ = self.motion_model.predict_pose(frame.timestamp)
        frame.update_pose(predicted_pose)

        if self.loop_closing is not None:
            if self.loop_correction is not None:
                # Apply the pending loop-closure correction to the prediction.
                estimated_pose = g2o.Isometry3d(frame.orientation, frame.position)
                estimated_pose = estimated_pose * self.loop_correction
                frame.update_pose(estimated_pose)
                self.motion_model.apply_correction(self.loop_correction)
                self.loop_correction = None

        # Step 2: local map points seeded from self.reference and self.preceding
        local_mappoints = self.filter_points(frame)

        # Step 3: match the 3D map points in the current image by descriptor
        measurements = frame.match_mappoints(
            local_mappoints, Measurement.Source.TRACKING)
        print('measurements:', len(measurements), '   ', len(local_mappoints))

        tracked_map = set()
        # Update the map point feature descriptors
        for m in measurements:
            mappoint = m.mappoint
            mappoint.update_descriptor(m.get_descriptor())
            mappoint.increase_measurement_count()
            tracked_map.add(mappoint)

        try:
            # Step 4: find which keyframe contains the most seed points
            self.reference = self.graph.get_reference_frame(tracked_map)

            # Step 5: motion-only bundle adjustment
            pose = self.tracker.refine_pose(frame.pose, frame.cam, measurements)
            frame.update_pose(pose)
            self.motion_model.update_pose(
                frame.timestamp, pose.position(), pose.orientation())
            tracking_is_ok = True
        except Exception:
            # Was a bare `except:` — that would also swallow
            # KeyboardInterrupt/SystemExit and hide programming errors.
            tracking_is_ok = False
            print('tracking failed!!!')

        # Step 6: keyframe selection
        if tracking_is_ok and self.should_be_keyframe(frame, measurements):
            print('new keyframe', frame.idx)
            keyframe = frame.to_keyframe()
            keyframe.update_reference(self.reference)
            keyframe.update_preceding(self.preceding)

            self.mapping.add_keyframe(keyframe, measurements)
            if self.loop_closing is not None:
                self.loop_closing.add_keyframe(keyframe)

            self.preceding = keyframe

        self.set_tracking(False)

    def filter_points(self, frame):
        """Use the preceding and reference frames as seeds to extract the
        local 3D map points.

        - Step 1: use preceding and reference as seeds to get a set of
          local 3D map points.
        - Step 2: remove map points that cannot be viewed by the current
          frame under the initial pose estimate.
        - Step 3: add the 3D points observed by the preceding and
          reference frames into the list.
        """
        # Get local 3D map points
        local_mappoints = self.graph.get_local_map_v2(
            [self.preceding, self.reference])[0]

        # Check whether those map points are within the frustum of the
        # current viewpoint
        can_view = frame.can_view(local_mappoints)
        print('filter points:', len(local_mappoints), can_view.sum(),
              len(self.preceding.mappoints()),
              len(self.reference.mappoints()))

        checked = set()
        filtered = []
        for i in np.where(can_view)[0]:
            pt = local_mappoints[i]
            if pt.is_bad():
                continue
            pt.increase_projection_count()
            filtered.append(pt)
            checked.add(pt)

        # Add the 3D map points observed by the preceding and reference
        # frames into the local map points
        for reference in set([self.preceding, self.reference]):
            for pt in reference.mappoints():  # neglect can_view test
                if pt in checked or pt.is_bad():
                    continue
                pt.increase_projection_count()
                filtered.append(pt)

        return filtered

    def should_be_keyframe(self, frame, measurements):
        """Return True when `frame` should become a new keyframe."""
        if self.adding_keyframes_stopped():
            return False

        n_matches = len(measurements)
        n_matches_ref = len(self.reference.measurements())

        print('keyframe check:', n_matches, '   ', n_matches_ref)

        if n_matches_ref == 0:
            # Guard against ZeroDivisionError: with no reference matches
            # we certainly need a new keyframe.
            return True
        return ((n_matches / n_matches_ref) <
                self.params.min_tracked_points_ratio) or n_matches < 20

    def set_loop_correction(self, T):
        self.loop_correction = T

    def is_initialized(self):
        return self.status['initialized']

    def pause(self):
        self.status['paused'] = True

    def unpause(self):
        self.status['paused'] = False

    def is_paused(self):
        return self.status['paused']

    def is_tracking(self):
        return self.status['tracking']

    def set_tracking(self, status):
        self.status['tracking'] = status

    def stop_adding_keyframes(self):
        self.status['adding_keyframes_stopped'] = True

    def resume_adding_keyframes(self):
        self.status['adding_keyframes_stopped'] = False

    def adding_keyframes_stopped(self):
        return self.status['adding_keyframes_stopped']
class Tracker(object):
    """Single-threaded point+line tracker: runs tracking and local BA on
    the main thread (no mapping thread)."""

    def __init__(self, params, cam):
        self.params = params
        self.cam = cam
        self.motion_model = MotionModel()
        self.map = Map()
        self.preceding = None    # last keyframe
        self.current = None      # current frame
        self.status = defaultdict(bool)
        self.optimizer = BundleAdjustment()
        self.bundle_adjustment = LocalBA()
        self.min_measurements = params.pnp_min_measurements
        self.max_iterations = params.pnp_max_iterations
        self.timer = RunningAverageTimer()
        self.lines = True    # also triangulate/track line features

    def initialize(self, frame):
        """Bootstrap the map from the first stereo frame (points and,
        optionally, lines).

        Raises:
            AssertionError: if triangulation yields fewer than
                ``params.init_min_points`` map points.
        """
        keyframe = frame.to_keyframe()
        mappoints, measurements = keyframe.create_mappoints_from_triangulation()
        assert len(mappoints) >= self.params.init_min_points, (
            'Not enough points to initialize map.')
        keyframe.set_fixed(True)    # anchor the first keyframe
        self.extend_graph(keyframe, mappoints, measurements)

        if self.lines:
            maplines, line_measurements = keyframe.create_maplines_from_triangulation()
            print(f'Initialized {len(maplines)} lines')
            for mapline, measurement in zip(maplines, line_measurements):
                self.map.add_mapline(mapline)
                self.map.add_line_measurement(keyframe, mapline, measurement)
                keyframe.add_measurement(measurement)
                mapline.add_measurement(measurement)

        self.preceding = keyframe
        self.current = keyframe
        self.status['initialized'] = True
        self.motion_model.update_pose(
            frame.timestamp, frame.position, frame.orientation)

    def refine_pose(self, pose, cam, measurements):
        """Motion-only BA: optimize the camera pose against fixed map points.

        Raises:
            AssertionError: if fewer than ``self.min_measurements``
                measurements are available.
        """
        assert len(measurements) >= self.min_measurements, (
            'Not enough points')
        # A fresh optimizer per call: reusing one grows state over time.
        self.optimizer = BundleAdjustment()
        self.optimizer.add_pose(0, pose, cam, fixed=False)

        for i, m in enumerate(measurements):
            self.optimizer.add_point(i, m.mappoint.position, fixed=True)
            self.optimizer.add_edge(0, i, 0, m)

        self.optimizer.optimize(self.max_iterations)
        return self.optimizer.get_pose(0)

    def update(self, i, left_img, right_img, timestamp):
        """Process one stereo pair: build the frame, track it, and create
        a keyframe when warranted. Frame 0 initializes the map instead."""
        # Feature extraction takes 0.12s
        origin = g2o.Isometry3d()
        left_frame = Frame(
            i, origin, self.cam, self.params, left_img, timestamp)
        right_frame = Frame(
            i, self.cam.compute_right_camera_pose(origin),
            self.cam, self.params, right_img, timestamp)
        frame = StereoFrame(left_frame, right_frame)

        if i == 0:
            self.initialize(frame)
            return

        # All code below takes 0.05s
        self.current = frame
        predicted_pose, _ = self.motion_model.predict_pose(frame.timestamp)
        frame.update_pose(predicted_pose)

        # Getting mappoints and measurements takes 0.013s
        local_mappoints = self.get_local_map_points(frame)
        print(local_mappoints)
        if len(local_mappoints) == 0:
            # NOTE(review): hard exit on an empty local map — consider
            # raising instead so callers can recover.
            print('Nothing in local_mappoints! Exiting.')
            exit()

        measurements = frame.match_mappoints(local_mappoints)

        # Pose refinement takes 0.02s
        try:
            pose = self.refine_pose(frame.pose, self.cam, measurements)
            frame.update_pose(pose)
            self.motion_model.update_pose(
                frame.timestamp, pose.position(), pose.orientation())
            tracking_is_ok = True
        except Exception:
            # Was a bare `except:` — would also swallow KeyboardInterrupt.
            tracking_is_ok = False
            print('tracking failed!!!')

        if tracking_is_ok and self.should_be_keyframe(frame, measurements):
            # Keyframe creation takes 0.03s
            self.create_new_keyframe(frame)

    def optimize_map(self):
        """Run local BA on the main thread.

        Python doesn't really work with the multithreading model, so
        optimization stays on the main thread.
        """
        adjust_keyframes = self.map.search_adjust_keyframes()
        # Rebuild the optimizer: set_data time grows across iterations
        # if the same instance is reused.
        self.bundle_adjustment = LocalBA()
        self.bundle_adjustment.optimizer.set_verbose(True)
        self.bundle_adjustment.set_data(adjust_keyframes, [])
        self.bundle_adjustment.optimize(2)
        self.bundle_adjustment.update_poses()
        self.bundle_adjustment.update_points()

    def extend_graph(self, keyframe, mappoints, measurements):
        """Insert a keyframe and its point measurements into the map."""
        self.map.add_keyframe(keyframe)
        for mappoint, measurement in zip(mappoints, measurements):
            self.map.add_mappoint(mappoint)
            self.map.add_point_measurement(keyframe, mappoint, measurement)
            keyframe.add_measurement(measurement)
            mappoint.add_measurement(measurement)

    def create_new_keyframe(self, frame):
        """Promote `frame` to a keyframe, triangulating new points (and
        lines when enabled), and make it the new preceding keyframe."""
        keyframe = frame.to_keyframe()
        keyframe.update_preceding(self.preceding)
        mappoints, measurements = keyframe.create_mappoints_from_triangulation()
        self.extend_graph(keyframe, mappoints, measurements)

        if self.lines:
            maplines, line_measurements = keyframe.create_maplines_from_triangulation()
            frame.visualise_measurements(line_measurements)
            print(f'New Keyframe with {len(maplines)} lines')
            for mapline, measurement in zip(maplines, line_measurements):
                self.map.add_mapline(mapline)
                self.map.add_line_measurement(keyframe, mapline, measurement)
                keyframe.add_measurement(measurement)
                mapline.add_measurement(measurement)

        self.preceding = keyframe

    def get_local_map_points(self, frame):
        """Return the map points of the preceding keyframe as the local map
        (no can_view / is_bad filtering is applied here)."""
        filtered = []
        for pt in self.preceding.mappoints():  # neglect can_view test
            pt.increase_projection_count()
            filtered.append(pt)
        return filtered

    def get_local_map_lines(self, frame):
        """Return the non-bad map lines of the preceding keyframe."""
        checked = set()
        filtered = []
        for ln in self.preceding.maplines():  # neglect can_view test
            if ln in checked or ln.is_bad():
                continue
            ln.increase_projection_count()
            filtered.append(ln)
        return filtered

    def should_be_keyframe(self, frame, measurements):
        """True when the match ratio vs. the preceding keyframe drops
        below the threshold, or fewer than 20 points matched."""
        n_matches = len(measurements)
        n_matches_ref = len(self.preceding.measurements())
        if n_matches_ref == 0:
            # Guard against ZeroDivisionError.
            return True
        return ((n_matches / n_matches_ref) <
                self.params.min_tracked_points_ratio) or n_matches < 20
class SPTAM(object):
    """S-PTAM front-end connecting the three threads:

    - Tracking: estimate the robot pose and decide when a frame becomes a
      keyframe (3D points vs. 2D features).
    - Local Mapping: refine the 3D/2D correspondences, minimize the
      reprojection error and remove bad points.
    - Loop Closure: detect revisited places, estimate the relative
      transformation and correct the keyframes and map features.
    """

    def __init__(self, params):
        self.params = params
        # Thread: Tracking
        self.tracker = Tracking(params)
        self.motion_model = MotionModel()
        # Thread: Local Mapping
        self.graph = CovisibilityGraph()
        self.mapping = MappingThread(self.graph, params)
        # Thread: Loop Closure
        self.loop_closing = LoopClosing(self, params)
        self.loop_correction = None
        # Keyframe bookkeeping
        self.reference = None    # reference keyframe
        self.preceding = None    # last keyframe
        self.current = None      # current frame
        self.status = defaultdict(bool)

    def stop(self):
        """Stop the mapping thread and, if running, loop closure."""
        self.mapping.stop()
        if self.loop_closing is not None:
            self.loop_closing.stop()

    def initialize(self, frame):
        """Create the initial map and keyframe from the first stereo frame."""
        mappoints, measurements = frame.triangulate()
        assert len(mappoints) >= self.params.init_min_points, (
            'Not enough points to initialize map.')

        # Create the initial keyframe from the initial map
        keyframe = frame.to_keyframe()
        keyframe.set_fixed(True)
        self.graph.add_keyframe(keyframe)
        self.mapping.add_measurements(keyframe, mappoints, measurements)
        if self.loop_closing is not None:
            self.loop_closing.add_keyframe(keyframe)

        # Reference, preceding and current all start at the initial keyframe
        self.reference = keyframe
        self.preceding = keyframe
        self.current = keyframe
        self.status['initialized'] = True

        # Set the initial pose of the robot
        self.motion_model.update_pose(
            frame.timestamp, frame.position, frame.orientation)

    # THREAD - TRACKING
    def track(self, frame):
        """Track one frame through the full pipeline: pose prediction,
        matching, pose refinement and keyframe selection."""
        # While the system is paused, wait
        while self.is_paused():
            time.sleep(1e-4)
        self.set_tracking(True)

        # STEP - FEATURE EXTRACTION: capture the current frame
        self.current = frame
        print('Tracking:', frame.idx, ' <- ', self.reference.id, self.reference.idx)

        # STEP - POSE PREDICTION: predict the current position of the robot
        predicted_pose, _ = self.motion_model.predict_pose(frame.timestamp)
        frame.update_pose(predicted_pose)

        # If loop closing produced a correction, apply it to the prediction
        if self.loop_closing is not None:
            if self.loop_correction is not None:
                # Use g2o for the pose correction
                estimated_pose = g2o.Isometry3d(
                    frame.orientation, frame.position)
                estimated_pose = estimated_pose * self.loop_correction
                frame.update_pose(estimated_pose)
                self.motion_model.apply_correction(self.loop_correction)
                self.loop_correction = None

        # STEP - MATCHING: project map points and search for matches
        # in the neighbourhood
        local_mappoints = self.filter_points(frame)
        measurements = frame.match_mappoints(
            local_mappoints, Measurement.Source.TRACKING)
        print('measurements:', len(measurements), '   ', len(local_mappoints))

        # Compare the descriptors between map points and image features,
        # refreshing the stored descriptors along the way
        tracked_map = set()
        for m in measurements:
            mappoint = m.mappoint
            mappoint.update_descriptor(m.get_descriptor())
            mappoint.increase_measurement_count()
            tracked_map.add(mappoint)

        # STEP - POSE REFINEMENT
        try:
            self.reference = self.graph.get_reference_frame(tracked_map)
            # Refine the predicted pose against the matched map points
            pose = self.tracker.refine_pose(frame.pose, frame.cam, measurements)
            frame.update_pose(pose)
            self.motion_model.update_pose(
                frame.timestamp, pose.position(), pose.orientation())
            tracking_is_ok = True
        except Exception:
            # Was a bare `except:`, which also swallows
            # KeyboardInterrupt/SystemExit.
            tracking_is_ok = False
            print('tracking failed!!!')

        # STEP - KEYFRAME SELECTION
        if tracking_is_ok and self.should_be_keyframe(frame, measurements):
            print('new keyframe', frame.idx)
            keyframe = frame.to_keyframe()
            keyframe.update_reference(self.reference)
            keyframe.update_preceding(self.preceding)

            # Hand the new keyframe to the mapping thread
            self.mapping.add_keyframe(keyframe, measurements)

            # THREAD - LOOP CLOSURE: check whether this place was visited
            if self.loop_closing is not None:
                self.loop_closing.add_keyframe(keyframe)

            self.preceding = keyframe

        self.set_tracking(False)

    # Helper for STEP - MATCHING
    def filter_points(self, frame):
        """Return the local map points visible from `frame`."""
        # Project the map points seeded from preceding and reference
        local_mappoints = self.graph.get_local_map_v2(
            [self.preceding, self.reference])[0]

        # Keep only points within the current view
        can_view = frame.can_view(local_mappoints)
        print('filter points:', len(local_mappoints), can_view.sum(),
              len(self.preceding.mappoints()),
              len(self.reference.mappoints()))

        checked = set()
        filtered = []
        # Points in the current frustum: count the projection and keep them
        for i in np.where(can_view)[0]:
            pt = local_mappoints[i]
            if pt.is_bad():
                continue
            pt.increase_projection_count()
            filtered.append(pt)
            checked.add(pt)

        # Also take every point observed by the seed keyframes
        for reference in set([self.preceding, self.reference]):
            for pt in reference.mappoints():  # neglect can_view test
                if pt in checked or pt.is_bad():
                    continue
                pt.increase_projection_count()
                filtered.append(pt)

        # Return the filtered map points that are in the current view
        return filtered

    # Helper for STEP - KEYFRAME SELECTION
    def should_be_keyframe(self, frame, measurements):
        """True when the tracked-points ratio drops below the configured
        minimum or fewer than 20 matches remain."""
        if self.adding_keyframes_stopped():
            return False

        # Current matches vs. matches of the reference keyframe
        n_matches = len(measurements)
        n_matches_ref = len(self.reference.measurements())

        print('keyframe check:', n_matches, '   ', n_matches_ref)

        if n_matches_ref == 0:
            # Guard against ZeroDivisionError
            return True
        return ((n_matches / n_matches_ref) <
                self.params.min_tracked_points_ratio) or n_matches < 20

    # THREAD - LOOP CLOSURE
    # STEP - LOOP CORRECTION
    def set_loop_correction(self, T):
        self.loop_correction = T

    # Other helper methods
    def is_initialized(self):
        return self.status['initialized']

    def pause(self):
        self.status['paused'] = True

    def unpause(self):
        self.status['paused'] = False

    def is_paused(self):
        return self.status['paused']

    def is_tracking(self):
        return self.status['tracking']

    def set_tracking(self, status):
        self.status['tracking'] = status

    def stop_adding_keyframes(self):
        self.status['adding_keyframes_stopped'] = True

    def resume_adding_keyframes(self):
        self.status['adding_keyframes_stopped'] = False

    def adding_keyframes_stopped(self):
        return self.status['adding_keyframes_stopped']