def __init__(self, params, sensor_type):
    config_file = params["SLAM.settings_path"]
    vocab_file = params["SLAM.vocab_path"]
    # check that the configuration and vocabulary files exist
    if not os.path.exists(config_file):
        raise FileNotFoundError(config_file + " not found")
    if not os.path.exists(vocab_file):
        raise FileNotFoundError(vocab_file + " not found")
    # ORB_SLAM2 has no IMU interface, so STEREO_IMU and MONOCULAR_IMU
    # are mapped onto the STEREO and MONOCULAR sensors respectively
    if sensor_type == Sensor.MONOCULAR or sensor_type == Sensor.MONOCULAR_IMU:
        print("the selected input sensor is MONOCULAR")
        self.slam = orbslam2.System(vocab_file, config_file, orbslam2.Sensor.MONOCULAR)
        self.sensor_type = Sensor.MONOCULAR
    elif sensor_type == Sensor.STEREO or sensor_type == Sensor.STEREO_IMU:
        print("the selected input sensor is STEREO")
        self.slam = orbslam2.System(vocab_file, config_file, orbslam2.Sensor.STEREO)
        self.sensor_type = Sensor.STEREO
    elif sensor_type == Sensor.RGBD:
        print("the selected input sensor is RGBD")
        self.slam = orbslam2.System(vocab_file, config_file, orbslam2.Sensor.RGBD)
        self.sensor_type = sensor_type
    else:
        # without this, self.slam would be undefined for unknown sensor types
        raise ValueError("unsupported sensor type: " + str(sensor_type))
    self.slam.set_use_viewer(False)
    self.slam.initialize()
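# The snippet above assumes a project-level Sensor enum that mirrors the
# orbslam2 sensor modes plus the IMU variants it maps away. A minimal sketch
# of such an enum (the name Sensor and its members are assumptions taken from
# the code above, not part of the orbslam2 bindings):
from enum import Enum

class Sensor(Enum):
    MONOCULAR = 0
    STEREO = 1
    RGBD = 2
    MONOCULAR_IMU = 3
    STEREO_IMU = 4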
def main(args):
    sequence = []
    with open(os.path.join(args.seq, 'rgb_left.txt'), 'r') as f_left, \
         open(os.path.join(args.seq, 'rgb_right.txt'), 'r') as f_right:
        for line_left, line_right in zip(f_left, f_right):
            line_left, line_right = line_left.strip(), line_right.strip()
            if line_left.startswith('#'):
                continue
            ts, img_left_path = line_left.split()
            _, img_right_path = line_right.split()
            sequence.append((float(ts),
                             os.path.join(args.seq, img_left_path),
                             os.path.join(args.seq, img_right_path)))
    slam = orbslam2.System(args.vocab, args.config, orbslam2.Sensor.STEREO)
    slam.set_use_viewer(True)
    slam.initialize()
    for ts, path_left, path_right in sequence:
        img_left = cv2.imread(path_left)
        img_right = cv2.imread(path_right)
        slam.process_image_stereo(img_left, img_right, ts)
        sleep(0.1)
    save_trajectory(slam.get_trajectory_points(), 'trajectory.txt')
    slam.shutdown()
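# Many of these drivers call a save_trajectory helper that is not shown. A
# minimal sketch, assuming get_trajectory_points() returns one flat row per
# frame (timestamp followed by the 3x4 pose entries, as in the common Python
# bindings); the exact row layout and whitespace format are assumptions:
def save_trajectory(trajectory, filename):
    with open(filename, 'w') as f:
        for row in trajectory:
            # one line per pose: timestamp then the flattened pose values
            f.write(' '.join(str(value) for value in row) + '\n')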
def main(vocab_path, settings_path):
    slam = orbslam2.System(vocab_path, settings_path, orbslam2.Sensor.MONOCULAR)
    slam.set_use_viewer(True)
    slam.initialize()
    camera = Camera.instance(width=960, height=540, capture_width=1280, capture_height=720)
    print('-----')
    print('Start processing sequence ...')
    ts = 0
    # cv2.namedWindow("test")
    try:
        while True:
            img = camera.value.copy()
            t1 = time.time()
            slam.process_image_mono(img, float(ts))
            t2 = time.time()
            ttrack = t2 - t1
            print("frame id: " + str(ts), ttrack)
            time.sleep(0.01)
            ts += 1
    except KeyboardInterrupt:
        # without this, the loop never exits and the trajectory below is never saved
        pass
    save_trajectory(slam.get_trajectory_points(), 'trajectory.txt')
    slam.shutdown()
    return 0
def __init__(
    self,
    vocab_path="./configs/ORBvoc.txt",
    slam_settings_path="./configs/terrasentia_orb.yaml",
    visualize=False,
):
    super().__init__("orbslam2")
    self.slam = orbslam2.System(vocab_path, slam_settings_path, orbslam2.Sensor.MONOCULAR)
    self.slam.set_use_viewer(visualize)
    self.slam.initialize()
    self.slam.reset()
    self.start_time = self.get_clock().now().to_msg()
    # history=2 (KEEP_ALL) means no data is dropped, so a large buffer can
    # build up. That is acceptable here because map building does not have
    # to meet any safety-critical deadlines.
    q = QoSProfile(history=2)
    self.subscription = self.create_subscription(
        Image,
        "/terrasentia/usb_cam_node/image_raw",
        self.update_internal_state,
        qos_profile=q,
    )
    self.publisher = self.create_publisher(PoseStamped, "pose", qos_profile=q)
    self.bridge = CvBridge()
    self.get_logger().info("ORBSLAM2 successfully started")
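# The update_internal_state callback subscribed above is not shown. A
# hypothetical sketch, assuming the node converts the ROS Image with
# cv_bridge, feeds it to ORB-SLAM2, and republishes a stamped pose; the
# timestamp arithmetic and the empty pose body are illustrative assumptions:
def update_internal_state(self, msg):
    # convert sensor_msgs/Image to an OpenCV BGR array
    frame = self.bridge.imgmsg_to_cv2(msg, desired_encoding="bgr8")
    stamp = msg.header.stamp
    ts = stamp.sec + stamp.nanosec * 1e-9
    self.slam.process_image_mono(frame, ts)
    pose_msg = PoseStamped()
    pose_msg.header = msg.header
    # filling pose_msg.pose from the latest entry of get_trajectory_points()
    # is omitted here; the flattened pose would need to be unpacked first
    self.publisher.publish(pose_msg)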
def main(args):
    sequence = []
    with open(os.path.join(args.seq, 'rgb_left.txt'), 'r') as f_left, \
         open(os.path.join(args.seq, 'depth.txt'), 'r') as f_depth:
        for line_left, line_depth in zip(f_left, f_depth):
            line_left, line_depth = line_left.strip(), line_depth.strip()
            if line_left.startswith('#'):
                continue
            ts, img_left_path = line_left.split()
            _, depth_path = line_depth.split()
            sequence.append((float(ts),
                             os.path.join(args.seq, img_left_path),
                             os.path.join(args.seq, depth_path)))
    slam = orbslam2.System(args.vocab, args.config, orbslam2.Sensor.RGBD)
    slam.set_use_viewer(True)
    slam.initialize()
    for ts, path_left, path_depth in sequence:
        img_left = cv2.imread(path_left)
        depth = cv2.imread(path_depth, cv2.IMREAD_UNCHANGED)
        slam.process_image_rgbd(img_left, depth, ts)
        sleep(0.1)
    save_trajectory(slam.get_trajectory_points(), 'trajectory.txt')
    slam.shutdown()
def orbslam(imgs, timestamps, vocab, settings):
    num_images = len(imgs)
    slam = orbslam2.System(vocab, settings, orbslam2.Sensor.MONOCULAR)
    slam.set_use_viewer(False)
    slam.initialize()
    times_track = [0 for _ in range(num_images)]
    print('-----')
    print('Start processing sequence ...')
    print('Images in the sequence: {0}'.format(num_images))
    for idx in tnrange(num_images):
        image = cv2.imread(imgs[idx], cv2.IMREAD_UNCHANGED)
        tframe = timestamps[idx]
        if image is None:
            print("failed to load image at {0}".format(imgs[idx]))
            break
        t1 = time.time()
        slam.process_image_mono(image, tframe)
        t2 = time.time()
        ttrack = t2 - t1
        times_track[idx] = ttrack
        # sleep so processing does not run faster than the recorded frame rate
        t = 0
        if idx < num_images - 1:
            t = timestamps[idx + 1] - tframe
        elif idx > 0:
            t = tframe - timestamps[idx - 1]
        if ttrack < t:
            time.sleep(t - ttrack)
    times_track = sorted(times_track)
    total_time = sum(times_track)
    print('-----')
    print('median tracking time: {0}'.format(times_track[num_images // 2]))
    print('mean tracking time: {0}'.format(total_time / num_images))
    # convert poses and inverse poses into 4x4 homogeneous matrices by
    # appending the constant bottom row [0, 0, 0, 1]
    tmp = np.expand_dims([0, 0, 0, 1], axis=0)
    pose = np.array(slam.get_keyframe_poses())
    tframe = [t[0] for t in pose]
    pose = [np.concatenate((f[1:].reshape(3, 4), tmp), axis=0) for f in pose]
    inverse_pose = np.array(slam.get_inverse_keyframe_poses())
    inverse_pose = [
        np.concatenate((f[1:].reshape(3, 4), tmp), axis=0) for f in inverse_pose
    ]
    points = [np.array(frame) for frame in slam.get_keyframe_points()]
    slam.shutdown()
    return points, pose, inverse_pose, tframe
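# Hypothetical usage of the orbslam() helper above. The KITTI-style image
# paths, the 10 Hz timestamp assumption, and the vocabulary/settings file
# names are illustrative assumptions, not taken from the original:
import glob

imgs = sorted(glob.glob('sequence_00/image_0/*.png'))
timestamps = [0.1 * i for i in range(len(imgs))]  # assume 10 Hz if no times file
points, pose, inverse_pose, tframe = orbslam(
    imgs, timestamps, 'ORBvoc.txt', 'KITTI00-02.yaml')
print('keyframes:', len(pose))
print('first keyframe pose (4x4 homogeneous):\n', pose[0])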
def __init__(
    self,
    config,
    device=torch.device("cuda:0"),
    monocheckpoint="habitat_baselines/slambased/data/mp3d_resnet50.pth",
):
    super(ORBSLAM2MonodepthAgent, self).__init__(config)
    self.num_actions = config.NUM_ACTIONS
    self.dist_threshold_to_stop = config.DIST_TO_STOP
    self.slam_vocab_path = config.SLAM_VOCAB_PATH
    assert os.path.isfile(self.slam_vocab_path)
    self.slam_settings_path = config.SLAM_SETTINGS_PATH
    assert os.path.isfile(self.slam_settings_path)
    self.slam = orbslam2.System(
        self.slam_vocab_path, self.slam_settings_path, orbslam2.Sensor.RGBD
    )
    self.slam.set_use_viewer(False)
    self.slam.initialize()
    self.device = device
    self.map_size_meters = config.MAP_SIZE
    self.map_cell_size = config.MAP_CELL_SIZE
    self.pos_th = config.DIST_REACHED_TH
    self.next_wp_th = config.NEXT_WAYPOINT_TH
    self.angle_th = config.ANGLE_TH
    self.obstacle_th = config.MIN_PTS_IN_OBSTACLE
    self.depth_denorm = config.DEPTH_DENORM
    self.planned_waypoints = []
    self.mapper = DirectDepthMapper(
        camera_height=config.CAMERA_HEIGHT,
        near_th=config.D_OBSTACLE_MIN,
        far_th=config.D_OBSTACLE_MAX,
        h_min=config.H_OBSTACLE_MIN,
        h_max=config.H_OBSTACLE_MAX,
        map_size=config.MAP_SIZE,
        map_cell_size=config.MAP_CELL_SIZE,
        device=device,
    )
    self.planner = DifferentiableStarPlanner(
        max_steps=config.PLANNER_MAX_STEPS,
        preprocess=config.PREPROCESS_MAP,
        beta=config.BETA,
        device=device,
    )
    self.slam_to_world = 1.0
    self.timestep = 0.1
    self.timing = False
    self.checkpoint = monocheckpoint
    if not os.path.isfile(self.checkpoint):
        mp3d_url = "http://cmp.felk.cvut.cz/~mishkdmy/navigation/mp3d_ft_monodepth_resnet50.pth"
        # suncg_me_url = "http://cmp.felk.cvut.cz/~mishkdmy/navigation/suncg_me_resnet.pth"
        # suncg_mf_url = "http://cmp.felk.cvut.cz/~mishkdmy/navigation/suncg_mf_resnet.pth"
        url = mp3d_url
        print("No monodepth checkpoint found. Downloading...", url)
        download(url, self.checkpoint)
    self.monodepth = MonoDepthEstimator(self.checkpoint)
    self.reset()
    return
def Initialize_mapping_calibration(self, disparity_bool=True, slam_bool=False,
                                   file_capture=False, fisheye=True):
    # Initialize the undistortion/rectification maps and the disparity matcher;
    # call this once, outside the main loop.
    # Careful: always pass the left image to the left parameters and the right
    # image to the right parameters.
    if fisheye:
        self.map1l, self.map2l = cv.fisheye.initUndistortRectifyMap(
            self.K_l, self.D_l, self.R_l, self.P_l, (self.w, self.h), cv.CV_32FC1)
        self.map1r, self.map2r = cv.fisheye.initUndistortRectifyMap(
            self.K_r, self.D_r, self.R_r, self.P_r, (self.w, self.h), cv.CV_32FC1)
    else:
        self.map1l, self.map2l = cv.initUndistortRectifyMap(
            self.K_l, self.D_l, self.R_l, self.P_l, (self.w, self.h), cv.CV_32FC1)
        self.map1r, self.map2r = cv.initUndistortRectifyMap(
            self.K_r, self.D_r, self.R_r, self.P_r, (self.w, self.h), cv.CV_32FC1)
    if disparity_bool:
        # The accuracy and range of the disparity depend on these parameters
        self.left_matcher = cv.StereoSGBM_create(
            minDisparity=-self._minDisparity,
            numDisparities=16 * self.a,  # max disparity must be divisible by 16, e.g. 192, 256
            blockSize=self._blockSize,
            P1=8 * 3 * self.window_size**2,  # wsize default 3; 5 or 7 for SGBM on reduced-size images; 15 for full-size images (1300 px and above); 5 works nicely
            P2=32 * 3 * self.window_size**2,
            disp12MaxDiff=self._disp12MaxDiff,
            uniquenessRatio=self._uniquenessRatio,
            speckleWindowSize=self._speckleWindowSize,
            speckleRange=self._speckleRange,
            preFilterCap=self._preFilterCap,
            mode=cv.STEREO_SGBM_MODE_SGBM_3WAY)
        # Though this seems unnecessary, it lowers the computation time of the disparity map
        wls_filter = cv.ximgproc.createDisparityWLSFilter(
            matcher_left=self.left_matcher)
    if slam_bool:
        vocab_path = "Parameters/ORBvoc.txt"
        settings_path = "Parameters/EuRoC.yaml"
        self.slam = orbslam2.System(vocab_path, settings_path, orbslam2.Sensor.STEREO)
        self.slam.set_use_viewer(True)
        self.slam.initialize()
    if file_capture:
        self.f = open("Data.txt", "w+")
def main(vocab_path, settings_path, sequence_path, association_path):
    rgb_filenames, depth_filenames, timestamps = load_images(association_path)
    num_images = len(timestamps)
    slam = orbslam2.System(vocab_path, settings_path, orbslam2.Sensor.RGBD)
    slam.set_use_viewer(True)
    slam.initialize()
    times_track = [0 for _ in range(num_images)]
    print('-----')
    print('Start processing sequence ...')
    print('Images in the sequence: {0}'.format(num_images))
    for idx in range(num_images):
        rgb_image = cv2.imread(os.path.join(sequence_path, rgb_filenames[idx]),
                               cv2.IMREAD_UNCHANGED)
        depth_image = cv2.imread(os.path.join(sequence_path, depth_filenames[idx]),
                                 cv2.IMREAD_UNCHANGED)
        tframe = timestamps[idx]
        if rgb_image is None:
            print("failed to load image at {0}".format(rgb_filenames[idx]))
            return 1
        if depth_image is None:
            print("failed to load depth image at {0}".format(depth_filenames[idx]))
            return 1
        t1 = time.time()
        slam.process_image_rgbd(rgb_image, depth_image, tframe)
        t2 = time.time()
        ttrack = t2 - t1
        times_track[idx] = ttrack
        t = 0
        if idx < num_images - 1:
            t = timestamps[idx + 1] - tframe
        elif idx > 0:
            t = tframe - timestamps[idx - 1]
        if ttrack < t:
            time.sleep(t - ttrack)
    save_trajectory(slam.get_trajectory_points(), 'trajectory.txt')
    slam.shutdown()
    times_track = sorted(times_track)
    total_time = sum(times_track)
    print('-----')
    print('median tracking time: {0}'.format(times_track[num_images // 2]))
    print('mean tracking time: {0}'.format(total_time / num_images))
    return 0
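# The load_images helper used above is not shown. A minimal sketch for a TUM
# RGB-D association file, assuming each non-comment line has the form
# "rgb_ts rgb/xxx.png depth_ts depth/xxx.png" (the column order follows the
# standard associate.py output and is an assumption here):
def load_images(association_path):
    rgb_filenames, depth_filenames, timestamps = [], [], []
    with open(association_path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            ts, rgb, _, depth = line.split()
            timestamps.append(float(ts))
            rgb_filenames.append(rgb)
            depth_filenames.append(depth)
    return rgb_filenames, depth_filenames, timestamps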
def main(vocab_path, settings_path, path_to_image_folder, path_to_times_file,
         save_file='trajectory.txt', with_time=False):
    image_filenames, timestamps = load_images(path_to_image_folder, path_to_times_file)
    num_images = len(image_filenames)
    slam = orbslam2.System(vocab_path, settings_path, orbslam2.Sensor.MONOCULAR)
    slam.set_use_viewer(True)
    slam.initialize()
    times_track = [0 for _ in range(num_images)]
    print('-----')
    print('Start processing sequence ...')
    print('Images in the sequence: {0}'.format(num_images))
    for idx in range(num_images):
        image = cv2.imread(image_filenames[idx], cv2.IMREAD_UNCHANGED)
        tframe = timestamps[idx]
        if image is None:
            print("failed to load image at {0}".format(image_filenames[idx]))
            return 1
        t1 = time.time()
        slam.process_image_mono(image, tframe)
        t2 = time.time()
        ttrack = t2 - t1
        times_track[idx] = ttrack
        t = 0
        if idx < num_images - 1:
            t = timestamps[idx + 1] - tframe
        elif idx > 0:
            t = tframe - timestamps[idx - 1]
        if ttrack < t:
            time.sleep(t - ttrack)
    # save_trajectory(slam.get_trajectory_points(), 'trajectory.txt')
    save_trajectory(slam.get_trajectory_points(), save_file, with_time=with_time)
    slam.shutdown()
    times_track = sorted(times_track)
    total_time = sum(times_track)
    print('-----')
    print('median tracking time: {0}'.format(times_track[num_images // 2]))
    print('mean tracking time: {0}'.format(total_time / num_images))
    return 0
def __init__(self, config, device=torch.device("cuda:0")):
    self.num_actions = config.NUM_ACTIONS
    self.dist_threshold_to_stop = config.DIST_TO_STOP
    self.slam_vocab_path = config.SLAM_VOCAB_PATH
    assert os.path.isfile(self.slam_vocab_path)
    self.slam_settings_path = config.SLAM_SETTINGS_PATH
    assert os.path.isfile(self.slam_settings_path)
    self.slam = orbslam2.System(self.slam_vocab_path, self.slam_settings_path,
                                orbslam2.Sensor.RGBD)
    self.slam.set_use_viewer(False)
    self.slam.initialize()
    self.device = device
    self.map_size_meters = config.MAP_SIZE
    self.map_cell_size = config.MAP_CELL_SIZE
    self.pos_th = config.DIST_REACHED_TH
    self.next_wp_th = config.NEXT_WAYPOINT_TH
    self.angle_th = config.ANGLE_TH
    self.obstacle_th = config.MIN_PTS_IN_OBSTACLE
    self.depth_denorm = config.DEPTH_DENORM
    self.planned_waypoints = []
    self.mapper = DirectDepthMapper(
        camera_height=config.CAMERA_HEIGHT,
        near_th=config.D_OBSTACLE_MIN,
        far_th=config.D_OBSTACLE_MAX,
        h_min=config.H_OBSTACLE_MIN,
        h_max=config.H_OBSTACLE_MAX,
        map_size=config.MAP_SIZE,
        map_cell_size=config.MAP_CELL_SIZE,
        device=device,
    )
    self.planner = DifferentiableStarPlanner(
        max_steps=config.PLANNER_MAX_STEPS,
        preprocess=config.PREPROCESS_MAP,
        beta=config.BETA,
        device=device,
    )
    self.slam_to_world = 1.0
    self.timestep = 0.1
    self.timing = False
    self.reset()
    # print('self', self, dir(self))
    # debug dump of all public attributes, plus shapes of numpy arrays/tensors
    for attr_name in dir(self):
        if '__' not in attr_name:
            print(attr_name, getattr(self, attr_name), type(getattr(self, attr_name)))
            type_name = str(type(getattr(self, attr_name)))
            if 'numpy' in type_name or 'ensor' in type_name:
                print(getattr(getattr(self, attr_name), 'shape'))
    return
def __init__(self, fps=10., scale=20., height=40., online=False,
             settings_path='carla.yaml',
             vocab_path='~/ORB_SLAM2/Vocabulary/ORBvoc.txt'):
    self.slam = orbslam2.System(os.path.expanduser(vocab_path), settings_path,
                                orbslam2.Sensor.MONOCULAR)
    self.slam.set_use_viewer(True)
    self.slam.initialize()
    self.fps = fps
    self.scale = scale
    self.height = height
    self.prev_line = None
    self.online = online
    self.reset_between = False
    self.num_train_images = 0
    self.blank_image = np.zeros(0)
    self.tframe = 0
    self.coord_transform = lambda x: x
def main(args):
    sequence = []
    with open(os.path.join(args.seq, 'rgb_left.txt'), 'r') as f:
        for line in f:
            line = line.strip()
            if line.startswith('#'):
                continue
            ts, img_path = line.split()
            sequence.append((float(ts), os.path.join(args.seq, img_path)))
    slam = orbslam2.System(args.vocab, args.config, orbslam2.Sensor.MONOCULAR)
    slam.set_use_viewer(True)
    slam.initialize()
    for ts, path in sequence:
        img = cv2.imread(path)
        slam.process_image_mono(img, ts)
        sleep(0.1)
    save_trajectory(slam.get_trajectory_points(), 'trajectory.txt')
    slam.shutdown()
def Initialize_Realsense(self, slam_bool=False, file_capture=False):
    # Configure depth and color streams
    self.pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, self.w, self.h, rs.format.z16, 60)
    config.enable_stream(rs.stream.color, self.w, self.h, rs.format.bgr8, 60)
    # Start streaming
    self.pipeline.start(config)
    align_to = rs.stream.color
    self.align = rs.align(align_to)
    if slam_bool:
        vocab_path = "Parameters/ORBvoc.txt"
        settings_path = "Parameters/Realsense.yaml"
        self.slam = orbslam2.System(vocab_path, settings_path, orbslam2.Sensor.RGBD)
        self.slam.set_use_viewer(True)
        self.slam.initialize()
    if file_capture:
        self.f = open("Data.txt", "w+")
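# A hypothetical per-frame loop for the RealSense setup above, assuming the
# standard pyrealsense2 align/convert pattern, numpy imported as np, and that
# the settings YAML's DepthMapFactor matches the Z16 depth scale; this method
# is not part of the original class:
def process_realsense_frame(self):
    frames = self.pipeline.wait_for_frames()
    aligned = self.align.process(frames)  # align the depth frame to the color stream
    depth_frame = aligned.get_depth_frame()
    color_frame = aligned.get_color_frame()
    if not depth_frame or not color_frame:
        return
    color = np.asanyarray(color_frame.get_data())
    depth = np.asanyarray(depth_frame.get_data())  # uint16, millimetres for Z16
    ts = frames.get_timestamp() / 1000.0  # ms -> s
    if getattr(self, 'slam', None) is not None:
        self.slam.process_image_rgbd(color, depth, ts)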
def __init__(self, config, follower, device=torch.device("cuda:0")):
    self.slam_vocab_path = config.SLAM_VOCAB_PATH
    assert os.path.isfile(self.slam_vocab_path)
    self.slam_settings_path = config.SLAM_SETTINGS_PATH
    assert os.path.isfile(self.slam_settings_path)
    self.slam = orbslam2.System(self.slam_vocab_path, self.slam_settings_path,
                                orbslam2.Sensor.RGBD)
    self.slam.set_use_viewer(False)
    self.slam.initialize()
    self.device = device
    self.map_size_meters = config.MAP_SIZE
    self.map_cell_size = config.MAP_CELL_SIZE
    self.obstacle_th = config.MIN_PTS_IN_OBSTACLE
    self.depth_denorm = config.DEPTH_DENORM
    self.mapper = DirectDepthMapper(
        camera_height=config.CAMERA_HEIGHT,
        near_th=config.D_OBSTACLE_MIN,
        far_th=config.D_OBSTACLE_MAX,
        h_min=config.H_OBSTACLE_MIN,
        h_max=config.H_OBSTACLE_MAX,
        map_size=config.MAP_SIZE,
        map_cell_size=config.MAP_CELL_SIZE,
        device=device,
    )
    self.slam_to_world = 1.0
    self.timestep = 0.1
    self.timing = False
    self.follower = follower
    self.reset()
    return
def main(vocab_path, settings_path, sequence_path, map_file):
    rgb_filenames, timestamps = load_images(sequence_path)
    num_images = len(timestamps)
    slam = orbslam2.System(vocab_path, settings_path, orbslam2.Sensor.MONOCULAR)
    slam.set_use_viewer(True)
    slam.set_map_file(map_file)  # set the map file before initialization
    slam.initialize()
    times_track = [0 for _ in range(num_images)]
    print('-----')
    print('Start processing sequence ...')
    print('Images in the sequence: {0}'.format(num_images))
    for idx in range(num_images):
        image = cv2.imread(os.path.join(sequence_path, rgb_filenames[idx]),
                           cv2.IMREAD_UNCHANGED)
        tframe = timestamps[idx]
        if image is None:
            print("failed to load image at {0}".format(
                os.path.join(sequence_path, rgb_filenames[idx])))
            return 1
        t1 = time.time()
        slam.process_image_mono(image, tframe)
        t2 = time.time()
        ttrack = t2 - t1
        times_track[idx] = ttrack
        t = 0
        if idx < num_images - 1:
            t = timestamps[idx + 1] - tframe
        elif idx > 0:
            t = tframe - timestamps[idx - 1]
        if ttrack < t:
            time.sleep(t - ttrack)
    slam.save_map("saved_maps/map_1.bin")
    slam.reset_and_load_map("saved_maps/map_1.bin")
    # run the same sequence again against the loaded map
    for idx in range(num_images):
        image = cv2.imread(os.path.join(sequence_path, rgb_filenames[idx]),
                           cv2.IMREAD_UNCHANGED)
        tframe = timestamps[idx]
        if image is None:
            print("failed to load image at {0}".format(
                os.path.join(sequence_path, rgb_filenames[idx])))
            return 1
        t1 = time.time()
        slam.process_image_mono(image, tframe)
        t2 = time.time()
        print(slam.get_tracking_state())
        ttrack = t2 - t1
        times_track[idx] = ttrack
        t = 0
        if idx < num_images - 1:
            t = timestamps[idx + 1] - tframe
        elif idx > 0:
            t = tframe - timestamps[idx - 1]
        if ttrack < t:
            time.sleep(t - ttrack)
    slam.save_map("saved_maps/map_2.bin")
    save_trajectory(slam.get_trajectory_points(), 'trajectory.txt')
    slam.shutdown()
    times_track = sorted(times_track)
    total_time = sum(times_track)
    print('-----')
    print('median tracking time: {0}'.format(times_track[num_images // 2]))
    print('mean tracking time: {0}'.format(total_time / num_images))
    return 0
def __init__(self, tonic: Tonic, vocab_path, settings_path):
    self.tonic = tonic
    self.slam = orbslam2.System(vocab_path, settings_path, orbslam2.Sensor.MONOCULAR)
    self.prevtime = None
    self.to_world = Pose3DtoWorld()
def main(vocab_path, settings_path, sequence_path, masktp):
    rgb_filenames, timestamps = load_images(sequence_path)
    num_images = len(timestamps)
    slam = orbslam2.System(vocab_path, settings_path, orbslam2.Sensor.MONOCULAR)
    # slam.set_use_viewer(True)
    slam.set_use_viewer(False)
    slam.initialize()
    times_track = [0 for _ in range(num_images)]
    print('-----')
    print('Start processing sequence ...')
    print('Images in the sequence: {0}'.format(num_images))
    for idx in range(num_images):
        image = cv2.imread(os.path.join(sequence_path, rgb_filenames[idx]),
                           cv2.IMREAD_UNCHANGED)
        # NOTE: the segmented image2 is computed but the raw image is what is
        # passed to SLAM below
        image2 = seg_process(image, masktp)
        '''
        x, y = process_img(image, [640, 360], device, model.cuda(), masktp)
        z = img_as_ubyte(y)
        z = cv2.resize(z, (image.shape[1], image.shape[0]))
        gz = cv2.cvtColor(z, cv2.COLOR_BGR2GRAY)
        image2 = cv2.bitwise_and(image, image, mask=gz)
        cv2.imshow('img2', z)
        print("hi2")
        k = cv2.waitKey(1)
        if k == 27:
            exit(0)
        '''
        tframe = timestamps[idx]
        if image is None:
            print("failed to load image at {0}".format(
                os.path.join(sequence_path, rgb_filenames[idx])))
            return 1
        t1 = time.time()
        slam.process_image_mono(image, tframe)
        t2 = time.time()
        ttrack = t2 - t1
        times_track[idx] = ttrack
        t = 0
        if idx < num_images - 1:
            t = timestamps[idx + 1] - tframe
        elif idx > 0:
            t = tframe - timestamps[idx - 1]
        if ttrack < t:
            # the 0.0004 factor shortens the inter-frame wait instead of
            # replaying at the original frame rate
            time.sleep((t - ttrack) * 0.0004)
    save_trajectory(slam.get_trajectory_points(), 'trajectory.txt')
    slam.shutdown()
    times_track = sorted(times_track)
    total_time = sum(times_track)
    print('-----')
    print('median tracking time: {0}'.format(times_track[num_images // 2]))
    print('mean tracking time: {0}'.format(total_time / num_images))
    return 0
                    type=str, default=None, help="path of the map to be loaded")
parser.add_argument('--start', type=int, default=0, help="Number of starting frame")
parser.add_argument('--end', type=int, default=None, help="Number of ending frame")
args = parser.parse_args()
vocab_path = args.vocab_path
settings_path = args.settings_path
data_tuple = read_folder_as_tuples(args.images_path)
slam = orbslam2.System(vocab_path, settings_path, orbslam2.Sensor.MONOCULAR)
slam.set_use_viewer(True)
slam.initialize()
slam.osmap_init()
filenames, timestamps = load_images(args.images_path)
# slam.activate_localisation_only()
first_datapoint = data_tuple[args.start]
slam.process_image_mono(first_datapoint[0], first_datapoint[1])
new_ids = []
if args.end is None:
    end = len(data_tuple) - 1
else:
    # the original left `end` unset when the warning fired; assign it first
    end = args.end
    if abs(args.start - args.end) < 10:
        print("WARNING!!! the number of usable frames is less than 10!")
if args.load_map is not None:
def __init__(self, vocab_path, settings_path):
    self.slam = orbslam2.System(vocab_path, settings_path, orbslam2.Sensor.MONOCULAR)
def run_orbslam(output_queue, input_queue, vocab_file, settings_file, mode):
    """
    Actually run the ORB-SLAM2 system. This is done in a separate process to
    isolate memory leaks, and in case it crashes.
    :param output_queue: queue for the ready signal and the final results
    :param input_queue: queue of (img1, img2, timestamp) tuples to process
    :param vocab_file: path to the ORB vocabulary file
    :param settings_file: path to the settings YAML file
    :param mode: the SensorMode to run the system in
    :return:
    """
    import orbslam2
    import logging
    import trials.slam.tracking_state
    logging.getLogger(__name__).info("Starting ORBSLAM2...")
    sensor_mode = orbslam2.Sensor.RGBD
    if mode == SensorMode.MONOCULAR:
        sensor_mode = orbslam2.Sensor.MONOCULAR
    elif mode == SensorMode.STEREO:
        sensor_mode = orbslam2.Sensor.STEREO
    tracking_stats = {}
    orbslam_system = orbslam2.System(vocab_file, settings_file, sensor_mode)
    orbslam_system.set_use_viewer(False)
    orbslam_system.initialize()
    # Tell the parent process we've set up correctly and are ready to go.
    output_queue.put(True)
    logging.getLogger(__name__).info("ORBSLAM2 Ready.")
    running = True
    while running:
        in_data = input_queue.get(block=True)
        if isinstance(in_data, tuple) and len(in_data) == 3:
            img1, img2, timestamp = in_data
            if mode == SensorMode.MONOCULAR:
                orbslam_system.process_image_mono(img1, timestamp)
            elif mode == SensorMode.STEREO:
                orbslam_system.process_image_stereo(img1, img2, timestamp)
            elif mode == SensorMode.RGBD:
                orbslam_system.process_image_rgbd(img1, img2, timestamp)
            tracking_state = orbslam_system.get_tracking_state()
            if (tracking_state == orbslam2.TrackingState.SYSTEM_NOT_READY or
                    tracking_state == orbslam2.TrackingState.NO_IMAGES_YET or
                    tracking_state == orbslam2.TrackingState.NOT_INITIALIZED):
                tracking_stats[timestamp] = trials.slam.tracking_state.TrackingState.NOT_INITIALIZED
            elif tracking_state == orbslam2.TrackingState.OK:
                tracking_stats[timestamp] = trials.slam.tracking_state.TrackingState.OK
            else:
                tracking_stats[timestamp] = trials.slam.tracking_state.TrackingState.LOST
        else:
            # Non-matching input indicates the end of processing, stop the main loop
            logging.getLogger(__name__).info(
                "Got terminate input, finishing up and sending results.")
            running = False
    # send the final trajectory to the parent
    output_queue.put((orbslam_system.get_trajectory_points(), tracking_stats))
    # shut down the system. This is going to crash it, but that's ok, because it's a subprocess
    orbslam_system.shutdown()
    logging.getLogger(__name__).info("Finished running ORBSLAM2")
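# A hypothetical sketch of launching run_orbslam in its own process, matching
# the queue protocol above (a 3-tuple per frame, anything else to terminate);
# the multiprocessing wiring is an assumption, not shown in the original:
import multiprocessing

def start_orbslam_subprocess(vocab_file, settings_file, mode):
    input_queue = multiprocessing.Queue()
    output_queue = multiprocessing.Queue()
    proc = multiprocessing.Process(
        target=run_orbslam,
        args=(output_queue, input_queue, vocab_file, settings_file, mode))
    proc.start()
    output_queue.get(block=True)  # wait for the ready signal
    return proc, input_queue, output_queue

# feed frames, then terminate and collect the results:
# input_queue.put((img1, img2, timestamp))
# input_queue.put(None)
# trajectory, tracking_stats = output_queue.get(block=True)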
def main(vocab_path, settings_path, path_to_left_folder, path_to_right_folder,
         path_to_times_file):
    left_filenames, right_filenames, timestamps = load_images(
        path_to_left_folder, path_to_right_folder, path_to_times_file)
    # read stereo rectification parameters from the settings file
    m1l, m2l, m1r, m2r = load_stereo_rectification(settings_path)
    num_images = len(left_filenames)
    slam = orbslam2.System(vocab_path, settings_path, orbslam2.Sensor.STEREO)
    slam.set_use_viewer(True)
    slam.initialize()
    times_track = [0 for _ in range(num_images)]
    print('-----')
    print('Start processing sequence ...')
    print('Images in the sequence: {0}'.format(num_images))
    for idx in range(num_images):
        left_image = cv2.imread(left_filenames[idx], cv2.IMREAD_UNCHANGED)
        right_image = cv2.imread(right_filenames[idx], cv2.IMREAD_UNCHANGED)
        tframe = timestamps[idx]
        if left_image is None:
            print("failed to load image at {0}".format(left_filenames[idx]))
            return 1
        if right_image is None:
            print("failed to load image at {0}".format(right_filenames[idx]))
            return 1
        left_image = cv2.remap(left_image, m1l, m2l, cv2.INTER_LINEAR)
        right_image = cv2.remap(right_image, m1r, m2r, cv2.INTER_LINEAR)
        t1 = time.time()
        slam.process_image_stereo(left_image, right_image, tframe)
        t2 = time.time()
        ttrack = t2 - t1
        times_track[idx] = ttrack
        t = 0
        if idx < num_images - 1:
            t = timestamps[idx + 1] - tframe
        elif idx > 0:
            t = tframe - timestamps[idx - 1]
        if ttrack < t:
            time.sleep(t - ttrack)
    save_trajectory(slam.get_trajectory_points(), 'trajectory.txt')
    slam.shutdown()
    times_track = sorted(times_track)
    total_time = sum(times_track)
    print('-----')
    print('median tracking time: {0}'.format(times_track[num_images // 2]))
    print('mean tracking time: {0}'.format(total_time / num_images))
    return 0
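# The load_stereo_rectification helper above is not shown. A sketch assuming
# the settings YAML stores per-camera K, D, R, P matrices and the image size
# under LEFT.*/RIGHT.* keys, as in the stock ORB-SLAM2 EuRoC stereo example;
# the key names and image-size fields are assumptions:
def load_stereo_rectification(settings_path):
    fs = cv2.FileStorage(settings_path, cv2.FILE_STORAGE_READ)
    w = int(fs.getNode('LEFT.width').real())
    h = int(fs.getNode('LEFT.height').real())
    maps = []
    for side in ('LEFT', 'RIGHT'):
        K = fs.getNode(side + '.K').mat()
        D = fs.getNode(side + '.D').mat()
        R = fs.getNode(side + '.R').mat()
        P = fs.getNode(side + '.P').mat()
        # use the 3x3 part of the projection matrix as the new camera matrix
        m1, m2 = cv2.initUndistortRectifyMap(K, D, R, P[:3, :3], (w, h), cv2.CV_32FC1)
        maps.extend([m1, m2])
    fs.release()
    m1l, m2l, m1r, m2r = maps
    return m1l, m2l, m1r, m2r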
def main(vocab_path, settings_path, sequence_path, map_file):
    rgb_filenames, timestamps = load_images(sequence_path)
    num_images = len(timestamps)
    slam = orbslam2.System(vocab_path, settings_path, orbslam2.Sensor.MONOCULAR)
    slam.set_use_viewer(True)
    slam.initialize()
    times_track = [0 for _ in range(num_images)]
    print('-----')
    print('Start processing sequence ...')
    print('Images in the sequence: {0}'.format(num_images))
    iterations = 0
    while iterations < 2:
        slam.reset()
        frame_count = 0
        for idx in range(num_images):
            image = cv2.imread(os.path.join(sequence_path, rgb_filenames[idx]),
                               cv2.IMREAD_UNCHANGED)
            tframe = timestamps[idx]
            if image is None:
                print("failed to load image at {0}".format(
                    os.path.join(sequence_path, rgb_filenames[idx])))
                return 1
            t1 = time.time()
            slam.process_image_mono(image, tframe)
            t2 = time.time()
            ttrack = t2 - t1
            times_track[idx] = ttrack
            t = 0
            if idx < num_images - 1:
                t = timestamps[idx + 1] - tframe
            elif idx > 0:
                t = tframe - timestamps[idx - 1]
            if ttrack < t:
                time.sleep(t - ttrack)
            print(frame_count, slam.get_tracking_state())
            # stop feeding frames once tracking is established
            if slam.get_tracking_state() == orbslam2.TrackingState.OK:
                break
            frame_count += 1
        iterations += 1
    save_trajectory(slam.get_trajectory_points(), 'trajectory.txt')
    save_trajectory(slam.get_keyframe_points(), 'keyframe.txt')  # actual keyframes
    points = slam.get_map_points()
    print(points)
    slam.shutdown()
    times_track = sorted(times_track)
    total_time = sum(times_track)
    print('-----')
    print('median tracking time: {0}'.format(times_track[num_images // 2]))
    print('mean tracking time: {0}'.format(total_time / num_images))
    return 0