def merge_kaptures(kapture_path_list: List[str],
                   merged_path: str,
                   keep_sensor_ids: bool,
                   images_import_strategy: TransferAction = TransferAction.skip,
                   skip: Optional[List[str]] = None,
                   force: bool = False) -> None:
    """
    Merge a list of kapture datasets into a new one.

    :param kapture_path_list: list of paths to the top directory of the kapture datasets to merge
    :param merged_path: path to the merged top directory kapture to create
    :param keep_sensor_ids: if True, will keep the original sensor identifiers. Otherwise, might rename them.
    :param images_import_strategy: choose how to import actual image files.
    :param skip: list of kapture data type names to optionally skip
                 (trajectories, records_camera, descriptors, ...)
    :param force: If True, silently overwrite kapture files if already exists.
    """
    os.makedirs(merged_path, exist_ok=True)
    delete_existing_kapture_files(merged_path, force_erase=force)

    # Map user-facing type names to the kapture classes to skip.
    # Insertion order preserves the historical skip-list ordering.
    skippable_types = {
        'trajectories': kapture.Trajectories,
        'records_camera': kapture.RecordsCamera,
        'records_lidar': kapture.RecordsLidar,
        'records_wifi': kapture.RecordsWifi,
        'records_gnss': kapture.RecordsGnss,
        'keypoints': kapture.Keypoints,
        'descriptors': kapture.Descriptors,
        'global_features': kapture.GlobalFeatures,
        'matches': kapture.Matches,
        'points3d': kapture.Points3d,
        'observations': kapture.Observations,
    }
    # 'skip=None' default avoids the shared-mutable-default pitfall; behaves like the old [].
    skip_names = skip or []
    skip_list = [kapture_type
                 for name, kapture_type in skippable_types.items()
                 if name in skip_names]

    # load all datasets to merge
    kapture_data_list = []
    for kapture_path in kapture_path_list:
        logger.info(f'Loading {kapture_path}')
        kapture_data = kapture_from_dir(kapture_path)
        kapture_data_list.append(kapture_data)

    if keep_sensor_ids:
        merged_kapture = merge_keep_ids(kapture_data_list, skip_list, kapture_path_list, merged_path,
                                        images_import_strategy)
    else:
        merged_kapture = merge_remap(kapture_data_list, skip_list, kapture_path_list, merged_path,
                                     images_import_strategy)

    logger.info('Writing merged kapture data...')
    kapture_to_dir(merged_path, merged_kapture)
def import_image_folder(
        images_path: str,
        kapture_path: str,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.skip) -> None:
    """
    Imports the images of a folder to a kapture. This creates only images and cameras.

    :param images_path: path to directory containing the images.
    :param kapture_path: path to kapture root directory.
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    """
    os.makedirs(kapture_path, exist_ok=True)
    delete_existing_kapture_files(kapture_path, force_erase=force_overwrite_existing)

    cameras = kapture.Sensors()
    images = kapture.RecordsCamera()

    # collect all files under images_path, relative to it
    file_list = [os.path.relpath(os.path.join(dirpath, filename), images_path)
                 for dirpath, dirs, filenames in os.walk(images_path)
                 for filename in filenames]
    file_list = sorted(file_list)

    logger.info('starting conversion...')
    for n, filename in enumerate(file_list):
        # test if file is a valid image
        try:
            # lazy load
            with Image.open(path.join(images_path, filename)) as im:
                width, height = im.size
                model_params = [width, height]
        except (OSError, PIL.UnidentifiedImageError):
            # It is not a valid image: skip it
            # fixed: the f-string previously had no placeholder and logged "(unknown)"
            logger.info(f'Skipping invalid image file {filename}')
            continue

        camera_id = f'sensor{n}'
        images[(n, camera_id)] = path_secure(filename)  # don't forget windows
        cameras[camera_id] = kapture.Camera(kapture.CameraType.UNKNOWN_CAMERA, model_params)

    # import (copy) image files.
    logger.info('import image files ...')
    filename_list = [f for _, _, f in kapture.flatten(images)]
    import_record_data_from_dir_auto(images_path, kapture_path, filename_list, images_import_method)

    # pack into kapture format
    imported_kapture = kapture.Kapture(sensors=cameras, records_camera=images)
    logger.info('writing imported data...')
    kapture_to_dir(kapture_path, imported_kapture)
def _import_colmap_overcast_reference(robotcar_path, kapture_path, force_overwrite_existing):
    """
    Convert the Colmap overcast-reference feature DB into a kapture 'mapping' dataset.

    Does nothing (beyond a warning) when the Colmap DB file is absent.

    :param robotcar_path: path to the robotcar top directory
    :param kapture_path: path to the kapture top directory
    :param force_overwrite_existing: silently overwrite kapture files if already exists
    """
    colmap_db_path = path.join(robotcar_path, "3D-models/overcast-reference.db")
    # guard clause: bail out early when the reference DB is missing
    if not path.exists(colmap_db_path):
        logger.warning(
            f'Colmap feature DB {colmap_db_path} does not exist... skipping.')
        return

    kapture_train_dir = path.join(kapture_path, "mapping")
    delete_existing_kapture_files(kapture_train_dir, force_overwrite_existing)
    # Convert Colmap reference DB to kapture (no reconstruction dir, images referenced only)
    kapture_train_data = import_colmap(
        kapture_dir_path=kapture_train_dir,
        colmap_database_filepath=colmap_db_path,
        colmap_reconstruction_dir_path='',
        colmap_images_dir_path=path.join(robotcar_path, "images"),
        no_geometric_filtering=True,
        force_overwrite_existing=force_overwrite_existing,
        images_import_strategy=TransferAction.skip)
    logger.info(f'saving feature DB to kapture {kapture_train_dir} ...')
    kapture_to_dir(kapture_train_dir, kapture_train_data)
def import_robotcar_seasons(
        robotcar_path: str,  # noqa: C901: function a bit long but not too complex
        kapture_path: str,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.skip,
        import_feature_db: bool = False,
        skip_reconstruction: bool = False,
        rig_collapse: bool = False,
        use_colmap_intrinsics: bool = False,
        import_v1: bool = False) -> None:
    """
    Read the RobotCar Seasons data, creates several kaptures with training and query data.

    :param robotcar_path: path to the robotcar top directory
    :param kapture_path: path to the kapture top directory
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    :param import_feature_db: if True, will import the features from the database
    :param skip_reconstruction: if True, will skip the reconstruction part from the training data
    :param rig_collapse: if True, will collapse the rig
    :param use_colmap_intrinsics: if True, will use the colmap intrinsics
    :param import_v1: if True, will use the version 1 of the format
    """
    os.makedirs(kapture_path, exist_ok=True)

    # sensors and rigs are shared by all the per-location kaptures built below
    cameras = import_robotcar_cameras(path.join(robotcar_path, 'intrinsics'))
    rigs = import_robotcar_rig(path.join(robotcar_path, 'extrinsics'))

    logger.info("Importing test data")
    # Test data
    # image paths look like <condition>/<camera>/<timestamp>.jpg
    image_pattern = re.compile(
        r'(?P<condition>.+)/(?P<camera>\w+)/(?P<timestamp>\d+)\.jpg')
    queries_path = path.join(robotcar_path, '3D-models', 'individual',
                             'queries_per_location')
    # one query kapture per location id (parsed from the query file name)
    kapture_imported_query = {}
    for root, dirs, files in os.walk(queries_path):
        for query_file in files:
            records_camera = kapture.RecordsCamera()
            # Get list of query images
            with open(path.join(queries_path, query_file)) as f:
                for line in f:
                    matches = image_pattern.match(line)
                    image_path = line.strip()
                    if not matches:
                        logger.warning(f"Error matching line in {image_path}")
                        continue
                    matches = matches.groupdict()
                    timestamp = int(matches['timestamp'])
                    camera = str(matches['camera'])
                    # condition = str(matches['condition']) : not used ?
                    records_camera[timestamp, camera] = image_path

            # query files are named like '..._<location id>.<ext>'
            (query_name, _) = query_file.split('.')
            kapture_test = kapture.Kapture(sensors=cameras,
                                           rigs=rigs,
                                           records_camera=records_camera)
            kapture_imported_query[int(
                query_name.split('_')[-1])] = kapture_test

    # Training data
    logger.info("Importing training data")
    colmap_reconstructions_path = path.join(robotcar_path, '3D-models',
                                            'individual',
                                            'colmap_reconstructions')
    # one training kapture per location id (parsed from the reconstruction dir name)
    kapture_imported_training = {}
    for root, dirs, files in os.walk(colmap_reconstructions_path):
        for colmap_reconstruction in dirs:
            # reconstruction dirs are named like '<loc_id>_<...>'
            (loc_id, _) = colmap_reconstruction.split('_')
            kapture_reconstruction_dir = path.join(kapture_path,
                                                   f"{int(loc_id):02d}",
                                                   "mapping")
            delete_existing_kapture_files(kapture_reconstruction_dir,
                                          force_overwrite_existing)
            logger.info(f'Converting reconstruction {loc_id} to kapture ...')
            kapture_reconstruction_data = import_robotcar_colmap_location(
                robotcar_path,
                path.join(colmap_reconstructions_path, colmap_reconstruction),
                kapture_reconstruction_dir,
                rigs,
                skip_reconstruction)
            # replace intrinsics with the ones found in the text files
            if not use_colmap_intrinsics:
                kapture_reconstruction_data.sensors = cameras
            kapture_imported_training[int(
                loc_id)] = kapture_reconstruction_data

    # v2 format: mutates both the query and training kaptures in place
    if not import_v1:
        _import_robotcar_v2_train(robotcar_path, kapture_imported_query,
                                  kapture_imported_training, image_pattern)

    # apply rig collapse
    if rig_collapse:
        logger.info('replacing camera poses with rig poses.')
        for kapture_mapping in kapture_imported_training.values():
            kapture.rigs_recover_inplace(kapture_mapping.trajectories,
                                         rigs, ['rear'])

    # IO operations
    robotcar_image_path = path.join(robotcar_path, "images")
    # write each per-location query kapture and import its image files
    for loc_id, kapture_query in kapture_imported_query.items():
        loc_id_str = f"{loc_id:02d}"
        logger.info(f'writing test data: {loc_id_str}')
        kapture_test_dir = path.join(kapture_path, loc_id_str, "query")
        delete_existing_kapture_files(kapture_test_dir,
                                      force_overwrite_existing)
        if not kapture_query.records_camera:  # all images were removed
            continue
        kapture_to_dir(kapture_test_dir, kapture_query)
        query_images = [
            f for _, _, f in kapture.flatten(kapture_query.records_camera)
        ]
        import_record_data_from_dir_auto(robotcar_image_path, kapture_test_dir,
                                         query_images, images_import_method)

    # write each per-location mapping kapture and import its image files
    for loc_id, kapture_mapping in kapture_imported_training.items():
        loc_id_str = f"{loc_id:02d}"
        logger.info(f'writing mapping data: {loc_id_str}')
        kapture_reconstruction_dir = path.join(kapture_path, f"{loc_id:02d}",
                                               "mapping")
        kapture_to_dir(kapture_reconstruction_dir, kapture_mapping)
        mapping_images = [
            f for _, _, f in kapture.flatten(kapture_mapping.records_camera)
        ]
        import_record_data_from_dir_auto(robotcar_image_path,
                                         kapture_reconstruction_dir,
                                         mapping_images, images_import_method)

    # optionally import the Colmap overcast-reference feature DB as 'mapping'
    if import_feature_db:
        _import_colmap_overcast_reference(robotcar_path, kapture_path,
                                          force_overwrite_existing)
def pycolmap_localize_from_loaded_data(
        kapture_data: kapture.Kapture,
        kapture_path: str,
        tar_handlers: TarCollection,
        kapture_query_data: kapture.Kapture,
        output_path: str,
        pairsfile_path: str,
        max_error: float,
        min_inlier_ratio: float,
        min_num_iterations: int,
        max_num_iterations: int,
        confidence: float,
        keypoints_type: Optional[str],
        duplicate_strategy: DuplicateCorrespondencesStrategy,
        rerank_strategy: RerankCorrespondencesStrategy,
        write_detailed_report: bool,
        force: bool) -> None:
    """
    Localize images using pycolmap.

    :param kapture_data: loaded kapture mapping data (incl. points3d)
    :param kapture_path: path to the kapture to use
    :param tar_handlers: collection of pre-opened tar archives
    :param kapture_query_data: loaded kapture query data (query images)
    :param output_path: path to the write the localization results
    :param pairsfile_path: pairs to use
    :param max_error: RANSAC inlier threshold in pixel
    :param min_inlier_ratio: abs_pose_options.ransac_options.min_inlier_ratio
    :param min_num_iterations: abs_pose_options.ransac_options.min_num_trials
    :param max_num_iterations: abs_pose_options.ransac_options.max_num_trials
    :param confidence: abs_pose_options.ransac_options.confidence
    :param keypoints_type: types of keypoints (and observations) to use
    :param duplicate_strategy: strategy to handle duplicate correspondences (either kpt_id and/or pt3d_id)
    :param rerank_strategy: strategy to reorder pairs before handling duplicate correspondences
    :param write_detailed_report: if True, write a json file with inliers, reprojection error for each query
    :param force: Silently overwrite kapture files if already exists
    """
    assert has_pycolmap
    if not (kapture_data.records_camera and kapture_data.sensors and kapture_data.keypoints
            and kapture_data.matches and kapture_data.points3d and kapture_data.observations):
        raise ValueError('records_camera, sensors, keypoints, matches, '
                         'points3d, observations are mandatory for map+query')
    if not (kapture_query_data.records_camera and kapture_query_data.sensors):
        raise ValueError('records_camera, sensors are mandatory for query')

    if keypoints_type is None:
        keypoints_type = try_get_only_key_from_collection(kapture_data.keypoints)
    assert keypoints_type is not None
    assert keypoints_type in kapture_data.keypoints
    assert keypoints_type in kapture_data.matches

    if kapture_data.rigs is not None and kapture_data.trajectories is not None:
        # make sure, rigs are not used in trajectories.
        logger.info('remove rigs notation.')
        rigs_remove_inplace(kapture_data.trajectories, kapture_data.rigs)
        kapture_data.rigs.clear()

    if kapture_query_data.trajectories is not None:
        logger.warning("Input query data contains trajectories: they will be ignored")
        kapture_query_data.trajectories.clear()

    os.umask(0o002)
    os.makedirs(output_path, exist_ok=True)
    delete_existing_kapture_files(output_path, force_erase=force)

    # load pairsfile: query image name -> list of mapping image names
    pairs = {}
    with open(pairsfile_path, 'r') as fid:
        table = kapture.io.csv.table_from_file(fid)
        for img_query, img_map, _ in table:
            if img_query not in pairs:
                pairs[img_query] = []
            pairs[img_query].append(img_map)

    kapture_data.matches[keypoints_type].normalize()
    keypoints_filepaths = keypoints_to_filepaths(kapture_data.keypoints[keypoints_type],
                                                 keypoints_type,
                                                 kapture_path,
                                                 tar_handlers)
    # keep only the observations for the requested keypoints type
    obs_for_keypoints_type = {
        point_id: per_keypoints_type_subdict[keypoints_type]
        for point_id, per_keypoints_type_subdict in kapture_data.observations.items()
        if keypoints_type in per_keypoints_type_subdict
    }
    # invert observations: (image name, keypoint id) -> 3d point id
    point_id_from_obs = {
        (img_name, kp_id): point_id
        for point_id in obs_for_keypoints_type.keys()
        for img_name, kp_id in obs_for_keypoints_type[point_id]
    }
    query_images = [(timestamp, sensor_id, image_name)
                    for timestamp, sensor_id, image_name in kapture.flatten(
                        kapture_query_data.records_camera)]

    # kapture for localized images + pose
    trajectories = kapture.Trajectories()
    for timestamp, sensor_id, image_name in tqdm(
            query_images,
            disable=logging.getLogger().level >= logging.CRITICAL):
        if image_name not in pairs:
            continue
        # N number of correspondences
        # points2D - Nx2 array with pixel coordinates
        # points3D - Nx3 array with world coordinates
        points2D = []
        points3D = []
        keypoints_filepath = keypoints_filepaths[image_name]
        kapture_keypoints_query = image_keypoints_from_file(
            filepath=keypoints_filepath,
            dsize=kapture_data.keypoints[keypoints_type].dsize,
            dtype=kapture_data.keypoints[keypoints_type].dtype)
        query_cam = kapture_query_data.sensors[sensor_id]
        assert isinstance(query_cam, kapture.Camera)
        col_cam_id, width, height, params, _ = get_colmap_camera(query_cam)
        cfg = {
            'model': CAMERA_MODEL_NAME_ID[col_cam_id][0],
            'width': int(width),
            'height': int(height),
            'params': params
        }

        points2D, _, points3D, stats = get_correspondences(
            kapture_data, keypoints_type, kapture_path, tar_handlers,
            image_name, pairs[image_name], point_id_from_obs,
            kapture_keypoints_query, None, duplicate_strategy, rerank_strategy)

        # compute absolute pose
        # inlier_threshold - RANSAC inlier threshold in pixels
        # answer - dictionary containing the RANSAC output
        ret = pycolmap.absolute_pose_estimation(points2D, points3D, cfg,
                                                max_error, min_inlier_ratio,
                                                min_num_iterations,
                                                max_num_iterations, confidence)

        # add pose to output kapture
        if ret['success'] and ret['num_inliers'] > 0:
            pose = kapture.PoseTransform(ret['qvec'], ret['tvec'])
            if write_detailed_report:
                num_2dpoints = len(points2D)
                # fixed: np.float was removed in NumPy 1.24 — use the builtin float as dtype
                points2D_final, K, distortion = get_camera_matrix_from_kapture(
                    np.array(points2D, dtype=float), query_cam)
                points2D_final = list(points2D_final.reshape((num_2dpoints, 2)))
                inliers = np.where(ret['inliers'])[0].tolist()
                reprojection_error = compute_reprojection_error(
                    pose, ret['num_inliers'], inliers, points2D_final,
                    points3D, K, distortion)
                cache = {
                    "num_correspondences": len(points3D),
                    # NOTE(review): "num_inliers" stores the inlier index list, while "inliers"
                    # stores the RANSAC mask; possibly ret['num_inliers'] was intended here.
                    # Kept as-is to preserve the existing report format — confirm with consumers.
                    "num_inliers": inliers,
                    "inliers": ret['inliers'],
                    "reprojection_error": reprojection_error,
                    "stats": stats
                }
                cache_path = os.path.join(output_path,
                                          f'pycolmap_cache/{image_name}.json')
                save_to_json(cache, cache_path)
            trajectories[timestamp, sensor_id] = pose

    kapture_data_localized = kapture.Kapture(
        sensors=kapture_query_data.sensors,
        trajectories=trajectories,
        records_camera=kapture_query_data.records_camera,
        rigs=kapture_query_data.rigs)
    kapture.io.csv.kapture_to_dir(output_path, kapture_data_localized)
def import_extended_cmu_seasons(
        cmu_path: str,
        top_kaptures_path: str,
        slice_range: List[int],
        import_all_files: bool = False,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.skip) -> None:
    """
    Import extended CMU data to kapture. Will make training and query kaptures for every CMU slice.

    :param cmu_path: path to the top directory of the CMU dataset files
    :param top_kaptures_path: top directory for the kaptures to create
    :param slice_range: range of CMU slices to import
    :param import_all_files: if True, will import all files
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    """
    os.makedirs(top_kaptures_path, exist_ok=True)

    cameras = import_extended_cmu_seasons_intrinsics(
        path.join(cmu_path, 'intrinsics.txt'))

    for slice_n in slice_range:
        # prepare paths
        slice_path = os.path.join(cmu_path, f'slice{slice_n}')
        training_images_path = os.path.join(slice_path, 'database')
        query_images_path = os.path.join(slice_path, 'query')
        gt_trajectories_path = os.path.join(
            slice_path, f'ground-truth-database-images-slice{slice_n}.txt')
        query_image_list = os.path.join(slice_path,
                                        f'test-images-slice{slice_n}.txt')
        query_gt_dir = os.path.join(slice_path, 'camera-poses')
        query_gt_list = [
            os.path.join(query_gt_dir, f) for f in os.listdir(query_gt_dir)
        ]

        # Import training images
        kapture_training_path = path.join(top_kaptures_path, f'slice{slice_n}', "mapping")
        # use the force_erase keyword consistently (the query import below already did)
        delete_existing_kapture_files(kapture_training_path,
                                      force_erase=force_overwrite_existing)
        training_records_camera, training_trajectories = import_extended_cmu_seasons_images(
            gt_trajectories_path)
        training_kapture = kapture.Kapture(
            sensors=cameras,
            records_camera=training_records_camera,
            trajectories=training_trajectories)
        if import_all_files:
            _add_images_from_folder(training_images_path, training_kapture)
        kapture_to_dir(kapture_training_path, training_kapture)
        # finally import images
        if images_import_method != TransferAction.skip:
            filename_list = [
                f for _, _, f in kapture.flatten(training_kapture.records_camera)
            ]
            logger.info(f'importing {len(filename_list)} image files ...')
            import_record_data_from_dir_auto(training_images_path,
                                             kapture_training_path,
                                             filename_list,
                                             images_import_method)

        # Import query images
        kapture_query_path = path.join(top_kaptures_path, f'slice{slice_n}', "query")
        delete_existing_kapture_files(kapture_query_path,
                                      force_erase=force_overwrite_existing)
        query_records_camera, query_trajectories = import_extended_cmu_seasons_images(
            query_image_list)
        query_kapture = kapture.Kapture(sensors=cameras,
                                        records_camera=query_records_camera,
                                        trajectories=query_trajectories)

        # import query gt when possible
        query_gt_kapture = []
        # renamed loop variable: it previously shadowed the camera-poses directory path
        for query_gt_filepath in query_gt_list:
            query_gt_records_camera, query_gt_trajectories = import_extended_cmu_seasons_images(
                query_gt_filepath)
            query_gt_kapture.append(
                kapture.Kapture(sensors=cameras,
                                records_camera=query_gt_records_camera,
                                trajectories=query_gt_trajectories))
        data_to_merge = [query_kapture] + query_gt_kapture
        query_kapture = merge_keep_ids(
            data_to_merge,
            skip_list=[],
            data_paths=["" for _ in range(len(data_to_merge))],
            kapture_path="",
            images_import_method=TransferAction.skip)
        if import_all_files:
            _add_images_from_folder(query_images_path, query_kapture)
        kapture_to_dir(kapture_query_path, query_kapture)
        # finally import images
        if images_import_method != TransferAction.skip:
            filename_list = [
                f for _, _, f in kapture.flatten(query_kapture.records_camera)
            ]
            logger.info(f'importing {len(filename_list)} image files ...')
            import_record_data_from_dir_auto(query_images_path,
                                             kapture_query_path,
                                             filename_list,
                                             images_import_method)
def import_7scenes(d7scenes_path: str,
                   kapture_dir_path: str,
                   force_overwrite_existing: bool = False,
                   images_import_method: TransferAction = TransferAction.skip,
                   partition: Optional[str] = None
                   ) -> None:
    """
    Imports RGB-D Dataset 7-Scenes dataset and save them as kapture.

    :param d7scenes_path: path to the 7scenes sequence root path
    :param kapture_dir_path: path to kapture top directory
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    :param partition: if specified = 'mapping' or 'query'. Requires d7scenes_path/TestSplit.txt or TrainSplit.txt
                      to exists.
    """
    os.makedirs(kapture_dir_path, exist_ok=True)
    delete_existing_kapture_files(kapture_dir_path, force_erase=force_overwrite_existing)

    logger.info('loading all content ...')
    # 7-scenes files are named like [<sequence>/]frame-<frame_id>.<suffix>.<ext>
    # e.g. seq-01/frame-000000.color.jpg
    d7s_filename_re = re.compile(r'((?P<sequence>.+)/)?frame-(?P<frame_id>\d{6})\.(?P<suffix>\w*)\.(?P<ext>\w*)')

    # populate all relevant files
    d7s_filenames = (path_secure(path.relpath(path.join(dp, fn), d7scenes_path))
                     for dp, _, fs in os.walk(d7scenes_path) for fn in fs)

    logger.info('populating 7-scenes files ...')
    # filename -> parsed name parts; non-matching files are silently dropped
    d7s_filenames = {filename: d7s_filename_re.search(filename).groupdict()
                     for filename in sorted(d7s_filenames)
                     if d7s_filename_re.search(filename)}

    # reorg as shot[seq, id] = {color: , depth: , pose: , ...}
    shots = {}
    # note: the enumerate index here is unused; timestamps are (re)assigned in the next loop
    for timestamp, (filename, file_attribs) in enumerate(d7s_filenames.items()):
        shot_id = (file_attribs.get('sequence'), file_attribs['frame_id'])
        shots.setdefault(shot_id, {})[file_attribs['suffix']] = filename

    # fake timestamps
    for timestamp, shot_id in enumerate(shots):
        shots[shot_id]['timestamp'] = timestamp

    # if given, filter partition
    if partition is not None:
        # read the authors split file
        partition_filepath = path.join(d7scenes_path, PARTITION_FILENAMES[partition])
        if not path.isfile(partition_filepath):
            raise FileNotFoundError(f'partition file is missing: {partition_filepath}.')

        # split file lines look like 'sequenceN'; convert to 'seq-NN' directory names
        with open(partition_filepath, 'rt') as file:
            split_sequences = [f'seq-{int(seq.strip()[len("sequence"):]):02}' for seq in file.readlines()]
        assert len(split_sequences) > 0
        # filter out
        shots = {(seq, frame): shot
                 for (seq, frame), shot in shots.items()
                 if seq in split_sequences}

    if len(shots) == 0:
        raise FileNotFoundError('no file found: make sure the path to 7scenes sequence is valid.')

    # eg. shots['seq-01', '000000'] =
    #       {
    #           'color': 'seq-01/frame-000000.color.jpg',
    #           'depth': 'seq-01/frame-000000.depth.png',
    #           'pose': 'seq-01/frame-000000.pose.txt',
    #           'timestamp': 0}

    # images + depth maps
    logger.info('populating image and depth maps files ...')
    snapshots = kapture.RecordsCamera()
    depth_maps = kapture.RecordsDepth()
    for shot in shots.values():
        snapshots[shot['timestamp'], RGB_SENSOR_ID] = shot['color']
        kapture_depth_map_filename = shot['depth'][:-len('.png')]  # kapture depth files are not png
        depth_maps[shot['timestamp'], DEPTH_SENSOR_ID] = kapture_depth_map_filename

    # poses
    logger.info('import poses files ...')
    trajectories = kapture.Trajectories()
    for shot in shots.values():
        pose_filepath = path.join(d7scenes_path, shot['pose'])
        pose_mat = np.loadtxt(pose_filepath)  # camera-to-world, 4×4 matrix in homogeneous coordinates
        rotation_mat = pose_mat[0:3, 0:3]
        position_vec = pose_mat[0:3, 3]
        rotation_quat = quaternion.from_rotation_matrix(rotation_mat)
        pose_world_from_cam = kapture.PoseTransform(r=rotation_quat, t=position_vec)
        # kapture trajectories store cam-from-world, so invert the 7-scenes pose
        pose_cam_from_world = pose_world_from_cam.inverse()
        trajectories[shot['timestamp'], RGBD_SENSOR_ID] = pose_cam_from_world

    # sensors
    """
    From authors: The RGB and depth camera have not been calibrated and we can’t provide calibration parameters at the
    moment. The recorded frames correspond to the raw, uncalibrated camera images. In the KinectFusion pipeline we used
    the following default intrinsics for the depth camera: Principle point (320,240), Focal length (585,585).
    """
    # NOTE(review): the same (depth) default intrinsics are applied to the RGB sensor,
    # since no RGB calibration is provided by the authors (see quote above).
    sensors = kapture.Sensors()
    camera_type = kapture.CameraType.SIMPLE_PINHOLE
    camera_params = [640, 480, 585, 320, 240]  # w, h, f, cx, cy
    sensors[RGB_SENSOR_ID] = kapture.Camera(
        name=RGB_SENSOR_ID,
        camera_type=camera_type,
        camera_params=camera_params
    )
    sensors[DEPTH_SENSOR_ID] = kapture.Camera(
        name=DEPTH_SENSOR_ID,
        camera_type=camera_type,
        camera_params=camera_params,
        sensor_type='depth'
    )

    # bind camera and depth sensor into a rig
    logger.info('building rig with camera and depth sensor ...')
    rigs = kapture.Rigs()
    # identity poses: RGB and depth are treated as co-located in the rig
    rigs[RGBD_SENSOR_ID, RGB_SENSOR_ID] = kapture.PoseTransform()
    rigs[RGBD_SENSOR_ID, DEPTH_SENSOR_ID] = kapture.PoseTransform()

    # import (copy) image files.
    logger.info('copying image files ...')
    image_filenames = [f for _, _, f in kapture.flatten(snapshots)]
    import_record_data_from_dir_auto(d7scenes_path, kapture_dir_path, image_filenames, images_import_method)

    # import (copy) depth map files.
    logger.info('converting depth files ...')
    depth_map_filenames = kapture.io.records.records_to_filepaths(depth_maps, kapture_dir_path)
    hide_progress = logger.getEffectiveLevel() > logging.INFO
    for depth_map_filename, depth_map_filepath_kapture in tqdm(depth_map_filenames.items(), disable=hide_progress):
        depth_map_filepath_7scenes = path.join(d7scenes_path, depth_map_filename + '.png')
        depth_map = np.array(Image.open(depth_map_filepath_7scenes))
        # change invalid depth from 65535 to 0
        depth_map[depth_map == 65535] = 0
        # depth maps is in mm in 7scenes, convert it to meters
        depth_map = depth_map.astype(np.float32) * 1.0e-3
        kapture.io.records.records_depth_to_file(depth_map_filepath_kapture, depth_map)

    # pack into kapture format
    imported_kapture = kapture.Kapture(
        records_camera=snapshots,
        records_depth=depth_maps,
        rigs=rigs,
        trajectories=trajectories,
        sensors=sensors)

    logger.info('writing imported data ...')
    kapture_to_dir(kapture_dir_path, imported_kapture)
def import_virtual_gallery(input_root_path: str,
                           configuration: str,
                           light_range: List[int],
                           loop_range: List[int],
                           camera_range: List[int],
                           occlusion_range: List[int],
                           as_rig: bool,
                           images_import_method: TransferAction,
                           kapture_path: str,
                           force_overwrite_existing: bool = False) -> None:
    """
    Creates a kapture with a virtual gallery.

    :param input_root_path: root path of virtual gallery
    :param configuration: training, testing or all (both)
    :param light_range: list of lights to include
    :param loop_range: list of training loops to include
    :param camera_range: list of training cameras to include
    :param occlusion_range: list of testing occlusion levels to include
    :param as_rig: in training trajectories, writes the position of the rig instead of individual cameras
    :param images_import_method: choose how to import actual image files.
    :param kapture_path: path to kapture top directory
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    """
    # Check for existing files
    os.makedirs(kapture_path, exist_ok=True)
    delete_existing_kapture_files(kapture_path, force_overwrite_existing)

    # offset keeps training and testing records on disjoint id ranges
    offset = 0
    cameras = kapture.Sensors()
    images = kapture.RecordsCamera()
    trajectories = kapture.Trajectories()
    rigs = kapture.Rigs()

    # Process all training data
    if configuration == "training" or configuration == "all":
        logger.info("Reading training files")
        camera_range_set = set(camera_range)
        training_intrinsics = import_training_intrinsics(input_root_path, light_range, loop_range, camera_range_set)
        training_extrinsics = import_training_extrinsics(input_root_path, light_range, loop_range, camera_range_set)

        convert_training_intrinsics(training_intrinsics, cameras)
        convert_training_extrinsics(offset, training_extrinsics, images, trajectories, as_rig)
        rigs.update(training_rig_config)

        offset += len(training_extrinsics)
    # Process all testing data
    if configuration == "testing" or configuration == "all":
        logger.info("Reading testing files")
        testing_intrinsics = import_testing_intrinsics(input_root_path, light_range, occlusion_range)
        testing_extrinsics = import_testing_extrinsics(input_root_path, light_range, occlusion_range)

        convert_testing_intrinsics(testing_intrinsics, cameras)
        convert_testing_extrinsics(offset, testing_extrinsics, images, trajectories)

        offset += len(testing_extrinsics)

    logger.info("Writing imported data to disk")
    # 'rigs or None' drops the rigs file entirely when no rig was configured
    kapture_data = kapture.Kapture(sensors=cameras, records_camera=images,
                                   trajectories=trajectories, rigs=rigs or None)
    # import images
    image_list = [name for _, _, name in kapture.flatten(kapture_data.records_camera)]
    import_record_data_from_dir_auto(input_root_path, kapture_path, image_list, images_import_method)
    kapture_to_dir(kapture_path, kapture_data)
def import_idl_dataset_cvpr17(idl_dataset_path: str,
                              gt_path: Union[str, None],
                              kapture_path: str,
                              force_overwrite_existing: bool = False,
                              images_import_method: TransferAction = TransferAction.skip) -> None:
    """
    Reads the IDL dataset and copy it to a kapture.

    :param idl_dataset_path: path to the IDL dataset
    :param gt_path: ground truth data path
    :param kapture_path: path to the kapture top directory to create
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    """
    os.makedirs(kapture_path, exist_ok=True)
    delete_existing_kapture_files(kapture_path, force_erase=force_overwrite_existing)

    cameras = kapture.Sensors()
    images = kapture.RecordsCamera()
    trajectories = kapture.Trajectories()

    file_list = [os.path.relpath(os.path.join(dirpath, filename), idl_dataset_path)
                 for dirpath, dirs, filenames in os.walk(idl_dataset_path)
                 for filename in filenames]
    file_list = sorted(file_list)

    logger.info('starting conversion...')
    for n, filename in enumerate(file_list):
        # test if file is a valid image
        try:
            # lazy load
            with Image.open(path.join(idl_dataset_path, filename)) as im:
                width, height = im.size
                model_params = [width, height]
        except (OSError, PIL.UnidentifiedImageError):
            # narrowed from a bare 'except Exception': only image-read failures are
            # expected here; log the skip instead of swallowing silently
            # (consistent with import_image_folder)
            logger.info(f'Skipping invalid image file {filename}')
            continue

        camera_id = f'sensor{n}'
        images[(n, camera_id)] = path_secure(filename)  # don't forget windows
        model = kapture.CameraType.UNKNOWN_CAMERA
        if gt_path is not None:
            # replace image extension with .camera
            file_gt_path = os.path.splitext(os.path.join(gt_path, filename))[0] + ".camera"
            if os.path.isfile(file_gt_path):
                with open(file_gt_path) as fin:
                    lines = fin.readlines()
                    lines = (line.rstrip().split() for line in lines)  # split fields
                    lines = list(lines)
                # .camera file layout: rows 0-2 intrinsics (K), rows 4-6 rotation,
                # row 7 center of projection, row 8 image size
                fx = float(lines[0][0])
                cx = float(lines[0][2])
                fy = float(lines[1][1])
                cy = float(lines[1][2])
                width_file = float(lines[8][0])
                height_file = float(lines[8][1])
                assert (width_file == width)
                assert (height_file == height)
                model = kapture.CameraType.PINHOLE
                model_params = [width, height, fx, fy, cx, cy]

                rotation_matrix = [[float(v) for v in line] for line in lines[4:7]]
                rotation = quaternion.from_rotation_matrix(rotation_matrix)
                center_of_projection = [float(v) for v in lines[7]]
                # .camera stores world-from-cam; kapture trajectories want cam-from-world
                pose = kapture.PoseTransform(rotation, center_of_projection).inverse()
                trajectories[(n, camera_id)] = pose
        cameras[camera_id] = kapture.Camera(model, model_params)

    # if no trajectory were added, no need to create the file
    if not trajectories:
        trajectories = None

    # import (copy) image files.
    logger.info('import image files ...')
    filename_list = [f for _, _, f in kapture.flatten(images)]
    import_record_data_from_dir_auto(idl_dataset_path, kapture_path, filename_list, images_import_method)

    # pack into kapture format
    imported_kapture = kapture.Kapture(sensors=cameras, records_camera=images, trajectories=trajectories)
    logger.info('writing imported data...')
    kapture_to_dir(kapture_path, imported_kapture)
def extract_kapture_keypoints(kapture_root, config, output_dir='', overwrite=False):
    """
    Extract r2d2 keypoints and descriptors to the kapture format directly.

    :param kapture_root: path to the kapture root directory holding the images
    :param config: dict of r2d2 parameters (checkpoint, thresholds, scales, top_k, ...)
    :param output_dir: root of the output directory for features (defaults to kapture_root)
    :param overwrite: if True, discard any existing keypoints/descriptors and re-extract
    """
    print('extract_kapture_keypoints...')
    kdata = kapture_from_dir(kapture_root, matches_pairsfile_path=None,
                             skip_list=[kapture.GlobalFeatures,
                                        kapture.Matches,
                                        kapture.Points3d,
                                        kapture.Observations])
    export_dir = output_dir if output_dir else kapture_root  # root of output directory for features
    os.makedirs(export_dir, exist_ok=True)

    assert kdata.records_camera is not None
    image_list = [filename for _, _, filename in kapture.flatten(kdata.records_camera)]
    # resume extraction if some features exist
    try:
        # load existing features, if any
        kdata.keypoints = keypoints_from_dir(export_dir, None)
        kdata.descriptors = descriptors_from_dir(export_dir, None)
        if kdata.keypoints is not None and kdata.descriptors is not None and not overwrite:
            # keep only images that are missing either keypoints or descriptors
            image_list = [name for name in image_list
                          if name not in kdata.keypoints or name not in kdata.descriptors]
    except FileNotFoundError:
        # no previous features on disk: extract everything
        pass
    except Exception:
        # was a bare `except:` which also swallowed SystemExit/KeyboardInterrupt;
        # narrowed to Exception while keeping the best-effort resume behavior
        logging.exception("Error with importing existing local features.")

    # clear features first if overwriting
    if overwrite:
        delete_existing_kapture_files(export_dir, True, only=[kapture.Descriptors, kapture.Keypoints])

    if len(image_list) == 0:
        print('All features were already extracted')
        return
    else:
        print(f'Extracting r2d2 features for {len(image_list)} images')

    iscuda = common.torch_set_gpu([torch.cuda.is_available()])

    # load the network...
    net = load_network(config['checkpoint'])
    if iscuda:
        net = net.cuda()

    # create the non-maxima detector
    detector = NonMaxSuppression(
        rel_thr=config['reliability_thr'],
        rep_thr=config['repeatability_thr'])

    # feature dtype/dsize are fixed by the first extracted image (or the loaded features)
    keypoints_dtype = None if kdata.keypoints is None else kdata.keypoints.dtype
    descriptors_dtype = None if kdata.descriptors is None else kdata.descriptors.dtype
    keypoints_dsize = None if kdata.keypoints is None else kdata.keypoints.dsize
    descriptors_dsize = None if kdata.descriptors is None else kdata.descriptors.dsize
    for image_name in image_list:
        img_path = get_image_fullpath(kapture_root, image_name)

        if img_path.endswith('.txt'):
            # BUG FIX: the original did `images = open(img_path).read().splitlines() + images`,
            # referencing an undefined `images` variable (leftover from r2d2's standalone
            # extract script) and crashing with NameError. A '.txt' record is not an image:
            # skip it.
            print(f'Skipping non-image file {img_path}')
            continue

        print(f"\nExtracting features for {img_path}")
        img = Image.open(img_path).convert('RGB')
        img = norm_RGB(img)[None]
        if iscuda:
            img = img.cuda()

        # extract keypoints/descriptors for a single image
        xys, desc, scores = extract_multiscale(net, img, detector,
                                               scale_f=config['scale_f'],
                                               min_scale=config['min_scale'],
                                               max_scale=config['max_scale'],
                                               min_size=config['min_size'],
                                               max_size=config['max_size'],
                                               verbose=True)

        xys = xys.cpu().numpy()
        desc = desc.cpu().numpy()
        scores = scores.cpu().numpy()
        # keep the top_k highest-scoring detections (argsort is ascending, take the tail);
        # `-top_k or None` makes top_k == 0 mean "keep everything"
        idxs = scores.argsort()[-config['top_k'] or None:]

        xys = xys[idxs]
        desc = desc[idxs]

        if keypoints_dtype is None or descriptors_dtype is None:
            # first image defines the feature configuration; write the csv headers once
            keypoints_dtype = xys.dtype
            descriptors_dtype = desc.dtype
            keypoints_dsize = xys.shape[1]
            descriptors_dsize = desc.shape[1]
            kdata.keypoints = kapture.Keypoints('r2d2', keypoints_dtype, keypoints_dsize)
            kdata.descriptors = kapture.Descriptors('r2d2', descriptors_dtype, descriptors_dsize)
            keypoints_config_absolute_path = get_csv_fullpath(kapture.Keypoints, export_dir)
            descriptors_config_absolute_path = get_csv_fullpath(kapture.Descriptors, export_dir)
            keypoints_to_file(keypoints_config_absolute_path, kdata.keypoints)
            descriptors_to_file(descriptors_config_absolute_path, kdata.descriptors)
        else:
            # subsequent images must match the established configuration
            assert kdata.keypoints.type_name == 'r2d2'
            assert kdata.descriptors.type_name == 'r2d2'
            assert kdata.keypoints.dtype == xys.dtype
            assert kdata.descriptors.dtype == desc.dtype
            assert kdata.keypoints.dsize == xys.shape[1]
            assert kdata.descriptors.dsize == desc.shape[1]

        keypoints_fullpath = get_keypoints_fullpath(export_dir, image_name)
        print(f"Saving {xys.shape[0]} keypoints to {keypoints_fullpath}")
        image_keypoints_to_file(keypoints_fullpath, xys)
        kdata.keypoints.add(image_name)

        descriptors_fullpath = get_descriptors_fullpath(export_dir, image_name)
        print(f"Saving {desc.shape[0]} descriptors to {descriptors_fullpath}")
        image_descriptors_to_file(descriptors_fullpath, desc)
        kdata.descriptors.add(image_name)

    if not keypoints_check_dir(kdata.keypoints, export_dir) or \
            not descriptors_check_dir(kdata.descriptors, export_dir):
        print('local feature extraction ended successfully but not all files were saved')
def import_image_list(images_list_filenames: List[str],
                      images_dirpath: str,
                      kapture_path: str,
                      force_overwrite_existing: bool = False,
                      images_import_method: TransferAction = TransferAction.skip) -> None:
    """
    Imports the list of images to a kapture. This creates only images and cameras.

    :param images_list_filenames: list of text files containing image file names
    :param images_dirpath: path to images directory.
    :param kapture_path: path to kapture root directory.
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    """
    assert isinstance(images_list_filenames, list)
    os.makedirs(kapture_path, exist_ok=True)
    delete_existing_kapture_files(kapture_path, force_erase=force_overwrite_existing)

    cameras = kapture.Sensors()
    images = kapture.RecordsCamera()

    # offset makes (timestamp, sensor) ids unique across the multiple input list files
    offset = 0
    logger.info('starting conversion...')
    for images_list_filename in images_list_filenames:
        logger.info(f'loading {images_list_filename}')
        with open(images_list_filename) as file:
            images_list = file.readlines()
            # remove end line char and empty lines
            images_list = [line.rstrip() for line in images_list if line != '\n']

            # each line is: <image> [<camera model> <model params...>]
            for i, image_line in enumerate(images_list):  # idiom: enumerate over range(len(...))
                line = image_line.split()
                image_file_name = line[0]
                if len(line) > 1:
                    # camera model and parameters are given on the line: trust them
                    model = line[1]
                    model_params = line[2:]
                else:
                    # no model given: fall back to UNKNOWN_CAMERA sized from the image header
                    model = kapture.CameraType.UNKNOWN_CAMERA.value
                    try:
                        # lazy open
                        with Image.open(path.join(images_dirpath, image_file_name)) as im:
                            width, height = im.size
                            model_params = [width, height]
                    except (OSError, PIL.UnidentifiedImageError):
                        # It is not a valid image: skip it
                        logger.info(f'Skipping invalid image file {image_file_name}')
                        continue

                camera_id = f'sensor{i + offset}'
                cameras[camera_id] = kapture.Camera(model, model_params)
                images[(i + offset, camera_id)] = image_file_name
            offset += len(images_list)

    # import (copy) image files.
    logger.info('import image files ...')
    filename_list = [f for _, _, f in kapture.flatten(images)]
    import_record_data_from_dir_auto(images_dirpath, kapture_path, filename_list, images_import_method)

    # pack into kapture format
    imported_kapture = kapture.Kapture(sensors=cameras, records_camera=images)
    logger.info('writing imported data...')
    kapture_to_dir(kapture_path, imported_kapture)
def import_opensfm(
        opensfm_root_dir: str,
        kapture_root_dir: str,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.copy) -> None:
    """
    Convert an openSfM structure to a kapture on disk.
    Also copy, move or link the images files if necessary.

    :param opensfm_root_dir: the openSfM top directory
    :param kapture_root_dir: top directory of kapture created
    :param force_overwrite_existing: if true, will remove existing kapture data without prompting the user
    :param images_import_method: action to apply on images: link, copy, move or do nothing.
    :return: None (the kapture is written to kapture_root_dir)
    """
    disable_tqdm = logger.getEffectiveLevel() != logging.INFO
    # load reconstruction
    opensfm_reconstruction_filepath = path.join(opensfm_root_dir, 'reconstruction.json')
    with open(opensfm_reconstruction_filepath, 'rt') as f:
        opensfm_reconstruction = json.load(f)
    # remove the single list @ root: only the first reconstructed model is imported
    opensfm_reconstruction = opensfm_reconstruction[0]

    # prepare space for output
    os.makedirs(kapture_root_dir, exist_ok=True)
    delete_existing_kapture_files(kapture_root_dir, force_erase=force_overwrite_existing)

    # import cameras
    kapture_sensors = kapture.Sensors()
    assert 'cameras' in opensfm_reconstruction
    # import cameras: each openSfM camera id becomes a kapture sensor id
    for osfm_camera_id, osfm_camera in opensfm_reconstruction['cameras'].items():
        camera = import_camera(osfm_camera, name=osfm_camera_id)
        kapture_sensors[osfm_camera_id] = camera

    # import shots
    logger.info('importing images and trajectories ...')
    kapture_images = kapture.RecordsCamera()
    kapture_trajectories = kapture.Trajectories()
    opensfm_image_dir_path = path.join(opensfm_root_dir, 'images')
    assert 'shots' in opensfm_reconstruction
    image_timestamps, image_sensors = {}, {}  # used later to retrieve the timestamp of an image.
    # fake timestamps: the enumeration index over shots serves as the timestamp
    for timestamp, (image_filename, shot) in enumerate(opensfm_reconstruction['shots'].items()):
        sensor_id = shot['camera']
        image_timestamps[image_filename] = timestamp
        image_sensors[image_filename] = sensor_id
        # in OpenSfm, (sensor, timestamp) is not unique.
        # pose is stored as an axis-angle rotation vector + translation
        rotation_vector = shot['rotation']
        q = quaternion.from_rotation_vector(rotation_vector)
        translation = shot['translation']
        # capture_time = shot['capture_time'] # may be invalid
        # gps_position = shot['gps_position']
        kapture_images[timestamp, sensor_id] = image_filename
        kapture_trajectories[timestamp, sensor_id] = kapture.PoseTransform(r=q, t=translation)

    # copy image files
    filename_list = [f for _, _, f in kapture.flatten(kapture_images)]
    import_record_data_from_dir_auto(
        source_record_dirpath=opensfm_image_dir_path,
        destination_kapture_dirpath=kapture_root_dir,
        filename_list=filename_list,
        copy_strategy=images_import_method)

    # Imports Gnss
    kapture_gnss = _import_gnss(opensfm_root_dir, kapture_sensors, image_sensors, image_timestamps, disable_tqdm)
    # Imports descriptors, keypoints and matches
    kapture_descriptors, kapture_keypoints, kapture_matches = _import_features_and_matches(opensfm_root_dir,
                                                                                           kapture_root_dir,
                                                                                           disable_tqdm)

    # import 3-D points (optional in the openSfM reconstruction)
    if 'points' in opensfm_reconstruction:
        logger.info('importing points 3-D')
        opensfm_points = opensfm_reconstruction['points']
        points_data = []
        # sorted for a deterministic point ordering
        for point_id in sorted(opensfm_points):
            point_data = opensfm_points[point_id]
            # kapture points3d rows are [x, y, z, r, g, b]
            point_data = point_data['coordinates'] + point_data['color']
            points_data.append(point_data)
        kapture_points = kapture.Points3d(points_data)
    else:
        kapture_points = None

    # saving kapture csv files
    logger.info('saving kapture files')
    kapture_data = kapture.Kapture(sensors=kapture_sensors, records_camera=kapture_images,
                                   records_gnss=kapture_gnss, trajectories=kapture_trajectories,
                                   keypoints=kapture_keypoints, descriptors=kapture_descriptors,
                                   matches=kapture_matches, points3d=kapture_points)
    kapture_to_dir(kapture_root_dir, kapture_data)
def pose_approximation(mapping_path: str,
                       query_path: str,
                       output_path: str,
                       global_features_type: Optional[str],
                       topk: int,
                       force_overwrite_existing: bool,
                       method: PoseApproximationMethods,
                       additional_parameters: dict):
    """
    compute approximated pose from image retrieval results

    :param mapping_path: input path to kapture input root directory
    :type mapping_path: str
    :param query_path: input path to a kapture root directory
    :type query_path: str
    :param output_path: output path to pairsfile
    :type output_path: str
    :param global_features_type: type of global_features, name of the global_features subfolder;
        if None, it is deduced when the mapping kapture holds exactly one type
    :param topk: the max number of top retained images
    :type topk: int
    :param force_overwrite_existing: silently overwrite output kapture files if they already exist
    :param method: pose approximation / interpolation method to use
    :param additional_parameters: store method specific args
    :type additional_parameters: dict
    """
    assert mapping_path != query_path
    os.makedirs(output_path, exist_ok=True)
    delete_existing_kapture_files(output_path, force_erase=force_overwrite_existing)

    logger.info(f'pose_approximation. loading mapping: {mapping_path}')
    # tar handlers must stay open while feature files are read; features are stacked
    # into memory before the handlers close
    with get_all_tar_handlers(mapping_path) as mapping_tar_handlers:
        kdata_map = kapture_from_dir(mapping_path, None, skip_list=[kapture.Keypoints,
                                                                    kapture.Descriptors,
                                                                    kapture.Matches,
                                                                    kapture.Observations,
                                                                    kapture.Points3d],
                                     tar_handlers=mapping_tar_handlers)
        assert kdata_map.sensors is not None
        assert kdata_map.records_camera is not None
        assert kdata_map.global_features is not None
        if global_features_type is None:
            # deduce the type when the mapping kapture holds exactly one
            global_features_type = try_get_only_key_from_collection(kdata_map.global_features)
        assert global_features_type is not None
        assert global_features_type in kdata_map.global_features

        global_features_config = GlobalFeaturesConfig(kdata_map.global_features[global_features_type].type_name,
                                                      kdata_map.global_features[global_features_type].dtype,
                                                      kdata_map.global_features[global_features_type].dsize,
                                                      kdata_map.global_features[global_features_type].metric_type)

        logger.info(f'computing pairs with {global_features_type}...')

        map_global_features_to_filepaths = global_features_to_filepaths(
            kdata_map.global_features[global_features_type],
            global_features_type,
            mapping_path,
            mapping_tar_handlers
        )
        # sorted for a deterministic stacking order
        mapping_list = list(sorted(map_global_features_to_filepaths.items()))
        map_stacked_features = stack_global_features(global_features_config, mapping_list)

    logger.info(f'pose_approximation. loading query: {query_path}')
    with get_all_tar_handlers(query_path) as query_tar_handlers:
        kdata_query = kapture_from_dir(query_path, None, skip_list=[kapture.Keypoints,
                                                                    kapture.Descriptors,
                                                                    kapture.Matches,
                                                                    kapture.Observations,
                                                                    kapture.Points3d],
                                       tar_handlers=query_tar_handlers)
        assert kdata_query.sensors is not None
        assert kdata_query.records_camera is not None
        assert kdata_query.global_features is not None
        assert global_features_type in kdata_query.global_features

        # mapping and query global features must share the same configuration
        kdata_mapping_gfeat = kdata_map.global_features[global_features_type]
        kdata_query_gfeat = kdata_query.global_features[global_features_type]
        assert kdata_mapping_gfeat.type_name == kdata_query_gfeat.type_name
        assert kdata_mapping_gfeat.dtype == kdata_query_gfeat.dtype
        assert kdata_mapping_gfeat.dsize == kdata_query_gfeat.dsize

        query_global_features_to_filepaths = global_features_to_filepaths(
            kdata_query_gfeat,
            global_features_type,
            query_path,
            query_tar_handlers
        )
        query_list = list(sorted(query_global_features_to_filepaths.items()))
        query_stacked_features = stack_global_features(global_features_config, query_list)

    logger.info('computing pose approximation from with'
                f' {kdata_map.global_features[global_features_type].type_name}...')

    # main code
    # weights: per-query interpolation weights over the topk retrieved mapping images
    weights = get_interpolation_weights(method,
                                        query_stacked_features,
                                        map_stacked_features,
                                        topk,
                                        additional_parameters)
    out_trajectories = get_interpolated_pose(kdata_map, kdata_query, weights)
    out_kapture = kapture.Kapture(sensors=kdata_query.sensors,
                                  records_camera=kdata_query.records_camera,
                                  trajectories=out_trajectories)
    kapture_to_dir(output_path, out_kapture)
    logger.info('all done')
def import_12scenes(d12scenes_path: str,
                    kapture_dir_path: str,
                    force_overwrite_existing: bool = False,
                    images_import_method: TransferAction = TransferAction.skip,
                    partition: Optional[str] = None
                    ) -> None:
    """
    Imports RGB-D Dataset 12-Scenes dataset and save them as kapture.

    :param d12scenes_path: path to the 12scenes sequence root path
    :param kapture_dir_path: path to kapture top directory
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    :param partition: if specified = 'mapping' or 'query'.
    """
    os.makedirs(kapture_dir_path, exist_ok=True)
    delete_existing_kapture_files(kapture_dir_path, force_erase=force_overwrite_existing)

    logger.info('loading all content ...')

    # file names look like frame-000000.color.jpg / frame-000000.depth.png / frame-000000.pose.txt
    d7s_filename_re = re.compile(r'frame-(?P<frame_id>\d{6})\.(?P<suffix>\w*)\.(?P<ext>\w*)')

    # populate all relevant files
    d12images_path = os.path.join(d12scenes_path, 'data')
    d7s_filenames = (path_secure(path.relpath(path.join(dp, fn), d12images_path))
                     for dp, _, fs in os.walk(d12images_path) for fn in fs)

    logger.info('populating 12-scenes files ...')
    # keep only files matching the frame pattern, keyed by their relative path
    d7s_filenames = {filename: d7s_filename_re.search(filename).groupdict()
                     for filename in sorted(d7s_filenames)
                     if d7s_filename_re.search(filename)}

    # reorg as shot[seq, id] = {color: , depth: , pose: , ...}
    shots = {}
    for timestamp, (filename, file_attribs) in enumerate(d7s_filenames.items()):
        shot_id = int(file_attribs['frame_id'])
        shots.setdefault(shot_id, {})[file_attribs['suffix']] = filename

    # fake timestamps: enumeration order of the shot ids
    for timestamp, shot_id in enumerate(shots):
        shots[shot_id]['timestamp'] = timestamp

    # if given, filter partition
    if partition is not None:
        # read the authors split file
        partition_filepath = path.join(d12scenes_path, 'split.txt')
        if not path.isfile(partition_filepath):
            raise FileNotFoundError(f'partition file is missing: {partition_filepath}.')

        with open(partition_filepath, 'rt') as file:
            # note from dsac++; the first sequence is used for testing, everything else for training
            d7s_split_exp = r'^sequence(?P<sequence>\d+) \[frames=(?P<count>\d+)\] \[start=(?P<start_frame>\d+) ;' \
                            r' end=(?P<end_frame>\d+)\]$'
            d7s_split_re = re.compile(d7s_split_exp)
            split_sequences = [re.match(d7s_split_re, line) for line in file.readlines()]
        if len(split_sequences) < 1 or not split_sequences[0]:
            raise ValueError('failed to parse split.txt file')
        # frame range of the first (test) sequence, inclusive bounds
        test_split = (int(split_sequences[0].group('start_frame')), int(split_sequences[0].group('end_frame')))

        # filter out
        if partition == "query":
            # query = frames inside the test split
            shots = {frame: shot
                     for frame, shot in shots.items()
                     if test_split[0] <= frame <= test_split[1]
                     }
        elif partition == "mapping":
            # mapping = every frame outside the test split
            shots = {frame: shot
                     for frame, shot in shots.items()
                     if frame < test_split[0] or frame > test_split[1]
                     }
        else:
            raise ValueError('invalid partition name')

    if len(shots) == 0:
        raise FileNotFoundError('no file found: make sure the path to 12scenes sequence is valid.')

    # eg. shots['000000'] =
    #       {
    #         'color': 'seq-01/frame-000000.color.jpg',
    #         'depth': 'seq-01/frame-000000.depth.png',
    #         'pose': 'seq-01/frame-000000.pose.txt',
    #         'timestamp': 0}

    # images + depth maps
    logger.info('populating image and depth maps files ...')
    snapshots = kapture.RecordsCamera()
    depth_maps = kapture.RecordsDepth()
    for shot in shots.values():
        snapshots[shot['timestamp'], RGB_SENSOR_ID] = shot['color']
        kapture_depth_map_filename = shot['depth'][:-len('.png')]  # kapture depth files are not png
        depth_maps[shot['timestamp'], DEPTH_SENSOR_ID] = kapture_depth_map_filename
        kapture_registered_depth_map_filename = shot['depth'][:-len('.png')] + '.reg'  # kapture depth files are not png
        depth_maps[shot['timestamp'], REG_DEPTH_SENSOR_ID] = kapture_registered_depth_map_filename

    # poses
    logger.info('import poses files ...')
    trajectories = kapture.Trajectories()
    for shot in shots.values():
        pose_filepath = path.join(d12images_path, shot['pose'])
        pose_mat = np.loadtxt(pose_filepath)  # camera-to-world, 4×4 matrix in homogeneous coordinates
        # some pose files contain 'INF' entries (invalid tracking): skip those frames
        with open(pose_filepath, 'r') as file:
            if 'INF' in file.read():
                timestamp = shot['timestamp']
                image_name = shot['color']
                logger.debug(f'ts={timestamp}, name={image_name}: ignored inf pose')
                continue
        rotation_mat = pose_mat[0:3, 0:3]
        position_vec = pose_mat[0:3, 3]
        rotation_quat = quaternion.from_rotation_matrix(rotation_mat)
        pose_world_from_cam = kapture.PoseTransform(r=rotation_quat, t=position_vec)
        # kapture trajectories store world-to-camera
        pose_cam_from_world = pose_world_from_cam.inverse()
        trajectories[shot['timestamp'], RGBD_SENSOR_ID] = pose_cam_from_world

    # sensors
    """ Read info.txt """
    info_filepath = path.join(d12scenes_path, 'info.txt')
    if not path.isfile(info_filepath):
        raise FileNotFoundError(f'info file is missing: {info_filepath}.')
    # info.txt lines look like: key = value
    with open(info_filepath, 'rt') as file:
        info_dict = {}
        for line in file.readlines():
            line_splits = line.rstrip().split(' = ')
            info_dict[line_splits[0]] = line_splits[1]

    sensors = kapture.Sensors()
    camera_type = kapture.CameraType.PINHOLE
    assert 'm_calibrationColorIntrinsic' in info_dict
    assert 'm_colorWidth' in info_dict
    assert 'm_colorHeight' in info_dict
    # intrinsics are stored as a flattened 4x4 matrix; pick fx (0), fy (5), cx (2), cy (6)
    rgb_intrinsics = [float(v) for v in info_dict['m_calibrationColorIntrinsic'].split(' ')]
    # w, h, fx, fy, cx, cy
    rgb_camera_params = [int(info_dict['m_colorWidth']), int(info_dict['m_colorHeight']),
                         rgb_intrinsics[0], rgb_intrinsics[5], rgb_intrinsics[2], rgb_intrinsics[6]]
    sensors[RGB_SENSOR_ID] = kapture.Camera(
        name=RGB_SENSOR_ID,
        camera_type=camera_type,
        camera_params=rgb_camera_params
    )
    assert 'm_calibrationDepthIntrinsic' in info_dict
    assert 'm_depthWidth' in info_dict
    assert 'm_depthHeight' in info_dict
    depth_intrinsics = [float(v) for v in info_dict['m_calibrationDepthIntrinsic'].split(' ')]
    # w, h, fx, fy, cx, cy
    depth_camera_params = [int(info_dict['m_depthWidth']), int(info_dict['m_depthHeight']),
                           depth_intrinsics[0], depth_intrinsics[5], depth_intrinsics[2], depth_intrinsics[6]]
    sensors[DEPTH_SENSOR_ID] = kapture.Camera(
        name=DEPTH_SENSOR_ID,
        camera_type=camera_type,
        camera_params=depth_camera_params,
        sensor_type='depth'
    )
    # the registered depth sensor shares the RGB intrinsics (depth re-projected into the RGB frame)
    sensors[REG_DEPTH_SENSOR_ID] = kapture.Camera(
        name=REG_DEPTH_SENSOR_ID,
        camera_type=camera_type,
        camera_params=rgb_camera_params,
        sensor_type='depth'
    )

    # bind camera and depth sensor into a rig
    logger.info('building rig with camera and depth sensor ...')
    rigs = kapture.Rigs()
    rigs[RGBD_SENSOR_ID, RGB_SENSOR_ID] = kapture.PoseTransform()
    rigs[RGBD_SENSOR_ID, DEPTH_SENSOR_ID] = kapture.PoseTransform()
    rigs[RGBD_SENSOR_ID, REG_DEPTH_SENSOR_ID] = kapture.PoseTransform()

    # import (copy) image files.
    logger.info('copying image files ...')
    image_filenames = [f for _, _, f in kapture.flatten(snapshots)]
    import_record_data_from_dir_auto(d12images_path, kapture_dir_path, image_filenames, images_import_method)

    # import (copy) depth map files.
    logger.info('converting depth files ...')
    depth_map_filenames = kapture.io.records.records_to_filepaths(depth_maps, kapture_dir_path)
    hide_progress = logger.getEffectiveLevel() > logging.INFO
    for depth_map_filename, depth_map_filepath_kapture in tqdm(depth_map_filenames.items(), disable=hide_progress):
        if '.reg' in depth_map_filename:
            # registered maps are generated below from their source depth map
            continue
        depth_map_filepath_12scenes = path.join(d12images_path, depth_map_filename + '.png')
        depth_map = np.array(Image.open(depth_map_filepath_12scenes))
        # depth maps is in mm in 12scenes, convert it to meters
        depth_map = depth_map.astype(np.float32) * 1.0e-3
        kapture.io.records.records_depth_to_file(depth_map_filepath_kapture, depth_map)
        # register depth to rgb
        reg_depth_map = register_depth(get_K(camera_type, depth_camera_params), get_K(camera_type, rgb_camera_params),
                                       np.eye(4), depth_map, rgb_camera_params[0], rgb_camera_params[1])
        kapture.io.records.records_depth_to_file(depth_map_filepath_kapture + '.reg', reg_depth_map)

    # pack into kapture format
    imported_kapture = kapture.Kapture(
        records_camera=snapshots,
        records_depth=depth_maps,
        rigs=rigs,
        trajectories=trajectories,
        sensors=sensors)

    logger.info('writing imported data ...')
    kapture_to_dir(kapture_dir_path, imported_kapture)
def pycolmap_rig_localize_from_loaded_data(kapture_data: kapture.Kapture,
                                           kapture_path: str,
                                           tar_handlers: TarCollection,
                                           kapture_query_data: kapture.Kapture,
                                           output_path: str,
                                           pairsfile_path: str,
                                           rig_ids: List[str],
                                           apply_rigs_remove: bool,
                                           max_error: float,
                                           min_inlier_ratio: float,
                                           min_num_iterations: int,
                                           max_num_iterations: int,
                                           confidence: float,
                                           keypoints_type: Optional[str],
                                           duplicate_strategy: DuplicateCorrespondencesStrategy,
                                           rerank_strategy: RerankCorrespondencesStrategy,
                                           write_detailed_report: bool,
                                           force: bool) -> None:
    """
    Localize images from a multi camera rig using pycolmap

    :param kapture_data: loaded kapture data (incl. points3d)
    :param kapture_path: path to the kapture to use
    :param tar_handlers: collection of pre-opened tar archives
    :param kapture_query_data: loaded query kapture data (images to localize)
    :param output_path: path to the write the localization results
    :param pairsfile_path: pairs to use
    :param rig_ids: list of rig ids that should be localized
    :param apply_rigs_remove: apply rigs remove before saving poses to disk
    :param max_error: RANSAC inlier threshold in pixel, shared between all cameras
    :param min_inlier_ratio: abs_pose_options.ransac_options.min_inlier_ratio
    :param min_num_iterations: abs_pose_options.ransac_options.min_num_trials
    :param max_num_iterations: abs_pose_options.ransac_options.max_num_trials
    :param confidence: abs_pose_options.ransac_options.confidence
    :param keypoints_type: types of keypoints (and observations) to use
    :param duplicate_strategy: strategy to handle duplicate correspondences
    :param rerank_strategy: strategy to rerank correspondences
    :param write_detailed_report: if True, dump a per-timestamp json report next to the output
    :param force: Silently overwrite kapture files if already exists.
    """
    assert has_pycolmap
    if not (kapture_data.records_camera and kapture_data.sensors and kapture_data.keypoints and
            kapture_data.matches and kapture_data.points3d and kapture_data.observations):
        raise ValueError('records_camera, sensors, keypoints, matches, '
                         'points3d, observations are mandatory for map+query')
    if not (kapture_query_data.records_camera and kapture_query_data.sensors):
        raise ValueError('records_camera, sensors are mandatory for query')

    if keypoints_type is None:
        keypoints_type = try_get_only_key_from_collection(kapture_data.keypoints)
    assert keypoints_type is not None
    assert keypoints_type in kapture_data.keypoints
    assert keypoints_type in kapture_data.matches

    assert kapture_query_data.rigs is not None
    assert len(kapture_query_data.rigs) >= 1
    if len(rig_ids) == 0:
        rig_ids = get_top_level_rig_ids(kapture_query_data.rigs)

    final_camera_list = get_all_cameras_from_rig_ids(rig_ids, kapture_query_data.sensors, kapture_query_data.rigs)
    assert len(final_camera_list) > 0

    if kapture_query_data.trajectories:
        logger.warning("Input query data contains trajectories: they will be ignored")
        kapture_query_data.trajectories.clear()

    os.umask(0o002)
    os.makedirs(output_path, exist_ok=True)
    delete_existing_kapture_files(output_path, force_erase=force)

    # load pairsfile: query image -> list of retrieved mapping images
    pairs = {}
    with open(pairsfile_path, 'r') as fid:
        table = kapture.io.csv.table_from_file(fid)
        for img_query, img_map, _ in table:
            if img_query not in pairs:
                pairs[img_query] = []
            pairs[img_query].append(img_map)

    kapture_data.matches[keypoints_type].normalize()
    keypoints_filepaths = keypoints_to_filepaths(kapture_data.keypoints[keypoints_type],
                                                 keypoints_type,
                                                 kapture_path,
                                                 tar_handlers)
    # restrict observations to the requested keypoints type
    obs_for_keypoints_type = {point_id: per_keypoints_type_subdict[keypoints_type]
                              for point_id, per_keypoints_type_subdict in kapture_data.observations.items()
                              if keypoints_type in per_keypoints_type_subdict}
    # reverse index: (image, keypoint id) -> 3-D point id
    point_id_from_obs = {(img_name, kp_id): point_id
                         for point_id in obs_for_keypoints_type.keys()
                         for img_name, kp_id in obs_for_keypoints_type[point_id]}
    timestamps = list(kapture_query_data.records_camera.keys())

    # kapture for localized images + pose
    trajectories = kapture.Trajectories()
    progress_bar = tqdm(total=len(timestamps), disable=logging.getLogger().level >= logging.CRITICAL)
    for timestamp in timestamps:
        for rig_id in final_camera_list.keys():
            # with S number of sensors
            # N number of correspondences
            # points2D - SxNx2 array with pixel coordinates
            # points3D - SxNx3 array with world coordinates
            # tvec - Sx3 array with rig relative translations
            # qvec - Sx4 array with rig relative quaternions
            # cameras_dict - array of dict of length S
            points2D = []
            points3D = []
            tvec = []
            qvec = []
            cameras_dict = []
            cameras = []  # Sx2 array for reproj error
            stats = []
            for sensor_id, relative_pose in final_camera_list[rig_id].items():
                if (timestamp, sensor_id) not in kapture_query_data.records_camera:
                    continue
                img_query = kapture_query_data.records_camera[(timestamp, sensor_id)]
                if img_query not in pairs:
                    # no retrieval pairs for this image: nothing to match against
                    continue
                keypoints_filepath = keypoints_filepaths[img_query]
                kapture_keypoints_query = image_keypoints_from_file(
                    filepath=keypoints_filepath,
                    dsize=kapture_data.keypoints[keypoints_type].dsize,
                    dtype=kapture_data.keypoints[keypoints_type].dtype)

                tvec.append(relative_pose.t_raw)
                qvec.append(relative_pose.r_raw)

                col_cam_id, width, height, params, _ = get_colmap_camera(kapture_query_data.sensors[sensor_id])
                cameras_dict.append({'model': CAMERA_MODEL_NAMES[col_cam_id],
                                     'width': int(width),
                                     'height': int(height),
                                     'params': params})
                points2D_it, _, points3D_it, stats_it = get_correspondences(
                    kapture_data, keypoints_type, kapture_path, tar_handlers,
                    img_query, pairs[img_query], point_id_from_obs, kapture_keypoints_query,
                    None, duplicate_strategy, rerank_strategy)
                if write_detailed_report:
                    cameras.append(kapture_query_data.sensors[sensor_id])
                    stats.append(stats_it)
                points2D.append(points2D_it)
                points3D.append(points3D_it)
            if len(cameras_dict) == 0:
                # no usable camera for this rig at this timestamp
                progress_bar and progress_bar.update(1)
                continue
            # compute absolute pose
            # inlier_threshold - RANSAC inlier threshold in pixels
            # answer - dictionary containing the RANSAC output
            ret = pycolmap.rig_absolute_pose_estimation(points2D, points3D, cameras_dict, qvec, tvec,
                                                        max_error, min_inlier_ratio, min_num_iterations,
                                                        max_num_iterations, confidence)
            # add pose to output kapture
            if ret['success'] and ret['num_inliers'] > 0:
                pose = kapture.PoseTransform(ret['qvec'], ret['tvec'])
                trajectories[timestamp, rig_id] = pose
                if write_detailed_report:
                    points2D_final = []
                    camera_params = []
                    for points2D_it, query_cam in zip(points2D, cameras):
                        num_2dpoints = len(points2D_it)
                        # BUG FIX: np.float (deprecated NumPy 1.20, removed 1.24) -> np.float64
                        points2D_final_it, K, distortion = get_camera_matrix_from_kapture(
                            np.array(points2D_it, dtype=np.float64), query_cam)
                        points2D_final_it = list(points2D_final_it.reshape((num_2dpoints, 2)))
                        points2D_final.append(points2D_final_it)
                        camera_params.append((K, distortion))
                    num_correspondences = [len(points2D_it) for points2D_it in points2D]
                    # convert ret['inliers']: a flat boolean list over all cameras' correspondences
                    # into per-camera lists of inlier indexes
                    indexes_flat = [i for i, points2D_it in enumerate(points2D) for _ in points2D_it]
                    inliers = [[] for _ in range(len(points2D))]
                    for i, (is_inlier, cam_index) in enumerate(zip(ret['inliers'], indexes_flat)):
                        if is_inlier:
                            inliers[cam_index].append(i)
                    # rebase flat indexes to the start of each camera's correspondence range
                    cumulative_len_correspondences = []
                    s = 0
                    for num_correspondences_it in num_correspondences:
                        cumulative_len_correspondences.append(s)
                        s += num_correspondences_it
                    inliers = [[v - cumulative_len_correspondences[i] for v in inliers[i]]
                               for i in range(len(inliers))]
                    num_inliers = [len(inliers_it) for inliers_it in inliers]
                    per_image_reprojection_error = []
                    for tvec_it, qvec_it, points2D_it, points3D_it, inliers_it, camera_params_it in zip(
                            tvec, qvec, points2D_final, points3D, inliers, camera_params):
                        if len(inliers_it) == 0:
                            per_image_reprojection_error.append(np.nan)
                        else:
                            pose_relative_it = kapture.PoseTransform(r=qvec_it, t=tvec_it)  # rig to sensor
                            # pose = world to rig
                            pose_it = kapture.PoseTransform.compose([pose_relative_it, pose])  # world to sensor
                            reprojection_error = compute_reprojection_error(
                                pose_it, len(inliers_it), inliers_it,
                                points2D_it, points3D_it,
                                camera_params_it[0], camera_params_it[1])
                            per_image_reprojection_error.append(reprojection_error)
                    cache = {
                        "num_correspondences": num_correspondences,
                        "num_inliers": num_inliers,
                        "inliers": inliers,
                        "reprojection_error": per_image_reprojection_error,
                        "stats": stats
                    }
                    cache_path = os.path.join(output_path, f'pycolmap_rig_cache/{timestamp}.json')
                    save_to_json(cache, cache_path)
        progress_bar and progress_bar.update(1)
    progress_bar and progress_bar.close()

    # save output kapture
    if apply_rigs_remove:
        rigs_remove_inplace(trajectories, kapture_query_data.rigs)
    kapture_query_data.trajectories = trajectories
    kapture.io.csv.kapture_to_dir(output_path, kapture_query_data)
def import_nvm(nvm_file_path: str,
               nvm_images_path: str,
               kapture_path: str,
               filter_list_path: Optional[str],
               ignore_trajectories: bool,
               add_reconstruction: bool,
               force_overwrite_existing: bool = False,
               images_import_method: TransferAction = TransferAction.skip) -> None:
    """
    Imports nvm data to kapture format.

    :param nvm_file_path: path to nvm file
    :param nvm_images_path: path to NVM images directory.
    :param kapture_path: path to kapture root directory.
    :param filter_list_path: path to the optional file containing a list of images to process
    :param ignore_trajectories: if True, will not create trajectories
    :param add_reconstruction: if True, will add observations, keypoints and 3D points.
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    """
    # TODO implement [optional calibration]
    # doc : http://ccwu.me/vsfm/doc.html#nvm
    os.makedirs(kapture_path, exist_ok=True)
    delete_existing_kapture_files(kapture_path, force_erase=force_overwrite_existing)

    logger.info('loading all content...')
    # if there is a filter list, parse it
    # keep it as Set[str] to easily find images
    if filter_list_path:
        with open(filter_list_path) as file:
            file_content = file.readlines()
        # remove end line char and empty lines
        filter_list = {line.rstrip() for line in file_content if line != '\n'}
    else:
        filter_list = None

    # now do the nvm
    with open(nvm_file_path) as file:
        nvm_content = file.readlines()
    # remove end line char and empty lines
    nvm_content = [line.rstrip() for line in nvm_content if line != '\n']
    # only NVM_V3 is supported
    assert nvm_content[0] == "NVM_V3"
    # offset represents the line pointer
    offset = 1
    # camera_id_offset keeps tracks of used camera_id in case of multiple reconstructed models
    camera_id_offset = 0
    # point_id_offset keeps tracks of used point_id in case of multiple reconstructed models
    point_id_offset = 0

    cameras = kapture.Sensors()
    images = kapture.RecordsCamera()
    trajectories = kapture.Trajectories() if not ignore_trajectories else None
    # BUGFIX: the conditional was duplicated ("... if add_reconstruction else None if add_reconstruction
    # else None"); a single conditional expression is the intended form.
    observations = kapture.Observations() if add_reconstruction else None
    keypoints = kapture.Keypoints('sift', np.float32, 2) if add_reconstruction else None
    points3d = [] if add_reconstruction else None

    # break if number of cameras == 0 or reached end of file
    while True:
        # <Model1> <Model2> ...
        # Each reconstructed <model> contains the following
        # <Number of cameras> <List of cameras>
        # <Number of 3D points> <List of points>
        # In practice,
        # <Number of cameras>
        # <List of cameras>, one per line
        # <Number of 3D points>
        # <List of points>, one per line
        number_of_cameras = int(nvm_content[offset])
        offset += 1
        if number_of_cameras == 0:  # a line with <0> signify the end of models
            break

        logger.debug('importing model cameras...')
        # parse all cameras for current model
        image_idx_to_image_name = parse_cameras(number_of_cameras,
                                                nvm_content,
                                                offset,
                                                camera_id_offset,
                                                filter_list,
                                                nvm_images_path,
                                                cameras,
                                                images,
                                                trajectories)
        offset += number_of_cameras
        camera_id_offset += number_of_cameras

        # parse all points3d
        number_of_points = int(nvm_content[offset])
        offset += 1
        if points3d is not None and number_of_points > 0:
            assert keypoints is not None
            assert observations is not None
            logger.debug('importing model points...')
            parse_points3d(kapture_path,
                           number_of_points,
                           nvm_content,
                           offset,
                           point_id_offset,
                           image_idx_to_image_name,
                           filter_list,
                           points3d,
                           keypoints,
                           observations)
            point_id_offset += number_of_points
        # always advance past the points block, even when reconstruction is skipped
        offset += number_of_points

        # reached end of file?
        if offset >= len(nvm_content):
            break

    # do not export values if none were found.
    if points3d is not None:
        points3d = kapture.Points3d(points3d)

    # import (copy) image files.
    logger.info('import image files ...')
    images_filenames = [f for _, _, f in kapture.flatten(images)]
    import_record_data_from_dir_auto(nvm_images_path, kapture_path, images_filenames, images_import_method)

    # pack into kapture format
    imported_kapture = kapture.Kapture(sensors=cameras, records_camera=images, trajectories=trajectories,
                                       points3d=points3d, keypoints=keypoints, observations=observations)
    logger.info('writing imported data...')
    kapture_to_dir(kapture_path, imported_kapture)
def pose_approximation_from_pairsfile(input_path: str,
                                      pairsfile_path: str,
                                      output_path: str,
                                      query_path: Optional[str],
                                      topk: Optional[int],
                                      method: str,
                                      additional_parameters: dict,
                                      force: bool):
    """
    Localize query images by averaging the (weighted) poses of their paired mapping images.

    :param input_path: path to the kapture mapping data root directory
    :param pairsfile_path: path to the pairs file listing (query image, mapping image, score) rows
    :param output_path: path to the kapture output directory where the localized trajectories are written
    :param query_path: optional path to the kapture query data; when None, mapping data is used as query too
    :param topk: optional maximum number of pairs kept per query image
    :param method: 'equal_weighted_barycenter' for uniform weights; any other value weights by score**alpha
    :param additional_parameters: must contain key 'alpha' unless method is 'equal_weighted_barycenter'
    :param force: silently overwrite kapture files if already exists
    """
    os.makedirs(output_path, exist_ok=True)
    delete_existing_kapture_files(output_path, force_erase=force)

    logger.info(f'pose_approximation. loading mapping: {input_path}')
    # reconstruction data is not needed for pose averaging, skip it to speed up loading
    kdata = kapture_from_dir(input_path, None, skip_list=[kapture.Keypoints,
                                                         kapture.Descriptors,
                                                         kapture.GlobalFeatures,
                                                         kapture.Matches,
                                                         kapture.Points3d,
                                                         kapture.Observations])
    if query_path is not None:
        logger.info(f'pose_approximation. loading query: {query_path}')
        kdata_query = kapture_from_dir(query_path, skip_list=[kapture.Keypoints,
                                                              kapture.Descriptors,
                                                              kapture.GlobalFeatures,
                                                              kapture.Matches,
                                                              kapture.Points3d,
                                                              kapture.Observations])
    else:
        # no separate query set: localize the mapping images against themselves
        kdata_query = kdata

    logger.info(f'pose_approximation. loading pairs: {pairsfile_path}')
    similarity_dict = get_ordered_pairs_from_file(pairsfile_path, kdata_query.records_camera,
                                                  kdata.records_camera, topk)
    query_images = set(similarity_dict.keys())

    # output kapture: only the sensors/records of images that actually appear in the pairsfile
    kdata_result = kapture.Kapture(sensors=kapture.Sensors(),
                                   records_camera=kapture.RecordsCamera(),
                                   trajectories=kapture.Trajectories())
    for timestamp, cam_id, image_name in kapture.flatten(kdata_query.records_camera):
        if image_name not in query_images:
            continue
        if cam_id not in kdata_result.sensors:
            kdata_result.sensors[cam_id] = kdata_query.sensors[cam_id]
        kdata_result.records_camera[(timestamp, cam_id)] = image_name

    # flatten rigs so every mapping pose is expressed per camera
    if kdata.rigs is None:
        map_trajectories = kdata.trajectories
    else:
        map_trajectories = kapture.rigs_remove(kdata.trajectories, kdata.rigs)

    # reverse lookup tables: image name -> pose / (timestamp, cam_id)
    training_trajectories_reversed = {image_name: map_trajectories[(timestamp, cam_id)]
                                      for timestamp, cam_id, image_name in kapture.flatten(kdata.records_camera)
                                      if (timestamp, cam_id) in map_trajectories}
    records_camera_reversed = {image_name: (timestamp, cam_id)
                               for timestamp, cam_id, image_name in kapture.flatten(kdata_result.records_camera)}

    for image_name, similar_images in similarity_dict.items():
        # averaging is done on world-from-camera poses, hence the inverse() on both ends
        pose_inv_list = [training_trajectories_reversed[k].inverse() for k, _ in similar_images]
        timestamp = records_camera_reversed[image_name][0]
        cam_id = records_camera_reversed[image_name][1]
        if method == 'equal_weighted_barycenter':
            weight_list = [1.0 / len(pose_inv_list) for _ in range(len(pose_inv_list))]
        else:
            # score-based weighting: normalize score**alpha so weights sum to 1
            assert 'alpha' in additional_parameters
            alpha = additional_parameters['alpha']
            weights = np.zeros((len(pose_inv_list),))
            for i, (_, score) in enumerate(similar_images):
                weights[i] = score
            weights[:] = weights[:]**(alpha)
            weights[:] = weights[:] / np.sum(weights[:])
            weight_list = weights.tolist()
        final_pose = average_pose_transform_weighted(pose_inv_list, weight_list).inverse()
        kdata_result.trajectories[(timestamp, cam_id)] = final_pose

    kapture_to_dir(output_path, kdata_result)
    logger.info('all done')
def import_robotcar_seasons(robotcar_path: str,
                            kapture_path: str,
                            force_overwrite_existing: bool = False,
                            images_import_method: TransferAction = TransferAction.skip,
                            skip_reconstruction: bool = False,
                            rig_collapse: bool = False,
                            use_colmap_intrinsics: bool = False,
                            import_v1: bool = False) -> None:
    """
    Read the RobotCar Seasons data, creates several kaptures with training and query data.

    :param robotcar_path: path to the robotcar top directory
    :param kapture_path: path to the kapture top directory
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    :param skip_reconstruction: if True, will skip the reconstruction part from the training data
    :param rig_collapse: if True, will collapse the rig
    :param use_colmap_intrinsics: if True, keep the intrinsics of the colmap reconstructions instead of
                                  replacing them with the ones read from the 'intrinsics' text files
    :param import_v1: if True, skip reading robotcar_v2_train.txt (no training submaps are built)
    """
    kapture_path = path.join(kapture_path, "base")
    os.makedirs(kapture_path, exist_ok=True)

    cameras = import_robotcar_cameras(path.join(robotcar_path, 'intrinsics'))
    rigs = import_robotcar_rig(path.join(robotcar_path, 'extrinsics'))

    logger.info("Importing test data")
    # Test data: one query kapture per location, indexed by the numeric location id
    image_pattern = re.compile(r'(?P<condition>.+)/(?P<camera>\w+)/(?P<timestamp>\d+)\.jpg')
    queries_path = path.join(robotcar_path, '3D-models', 'individual', 'queries_per_location')
    kapture_imported_query = {}
    for root, dirs, files in os.walk(queries_path):
        for query_file in files:
            records_camera = kapture.RecordsCamera()
            # Get list of query images
            with open(path.join(queries_path, query_file)) as f:
                for line in f:
                    matches = image_pattern.match(line)
                    image_path = line.strip()
                    if not matches:
                        logger.warning(f"Error matching line in {image_path}")
                        continue
                    matches = matches.groupdict()
                    timestamp = int(matches['timestamp'])
                    camera = str(matches['camera'])
                    records_camera[timestamp, camera] = image_path

            # query file names look like 'queries_location_NN.txt' -> keyed by NN
            (query_name, _) = query_file.split('.')
            kapture_test = kapture.Kapture(sensors=cameras, rigs=rigs, records_camera=records_camera)
            kapture_imported_query[int(query_name.split('_')[-1])] = kapture_test

    # Reference map data
    logger.info("Importing reference map")
    colmap_reconstructions_path = path.join(robotcar_path, '3D-models', 'individual', 'colmap_reconstructions')
    kapture_imported_mapping = {}
    for root, dirs, files in os.walk(colmap_reconstructions_path):
        for colmap_reconstruction in dirs:
            (loc_id, _) = colmap_reconstruction.split('_')
            kapture_reconstruction_dir = path.join(kapture_path, f"{int(loc_id):02d}", "mapping")
            delete_existing_kapture_files(kapture_reconstruction_dir, force_erase=force_overwrite_existing)
            logger.info(f'Converting reconstruction {loc_id} to kapture ...')
            kapture_reconstruction_data = import_robotcar_colmap_location(
                robotcar_path,
                path.join(colmap_reconstructions_path, colmap_reconstruction),
                kapture_reconstruction_dir,
                rigs,
                skip_reconstruction)
            # replace intrinsics with the ones found in the text files
            if not use_colmap_intrinsics:
                kapture_reconstruction_data.sensors = cameras
            kapture_imported_mapping[int(loc_id)] = kapture_reconstruction_data

    # BUGFIX: initialize unconditionally so the rig-collapse loop and the final write loop
    # below do not raise NameError when import_v1 is True.
    kapture_imported_training = {}  # stores kapture for each submap
    if not import_v1:
        queries_per_location = {image_name: (ts, cam_id, loc_id)
                                for loc_id, kdata_test in kapture_imported_query.items()
                                for ts, cam_id, image_name in kapture.flatten(kdata_test.records_camera)}
        # read robotcar_v2_train.txt
        v2_train_data = read_robotcar_v2_train(robotcar_path)
        for image_name, pose in v2_train_data.items():
            ts, cam_id, loc_id = queries_per_location[image_name]
            assert cam_id == 'rear'
            # create kapture object for submap if it doesn't exist
            if loc_id not in kapture_imported_training:
                kapture_loc_id = kapture.Kapture(sensors=cameras, rigs=rigs)
                kapture_loc_id.records_camera = kapture.RecordsCamera()
                kapture_loc_id.trajectories = kapture.Trajectories()
                kapture_imported_training[loc_id] = kapture_loc_id
            kapture_imported_training[loc_id].records_camera[ts, cam_id] = image_name
            kapture_imported_training[loc_id].trajectories[ts, cam_id] = pose

            matches = image_pattern.match(image_name)
            if not matches:
                logger.warning(f"Error matching line in {image_name}")
                continue
            matches = matches.groupdict()
            condition = str(matches['condition'])
            timestamp = str(matches['timestamp'])
            # added left and right images in records_camera
            left_image_name = condition + '/' + 'left' + '/' + timestamp + '.jpg'
            right_image_name = condition + '/' + 'right' + '/' + timestamp + '.jpg'
            kapture_imported_training[loc_id].records_camera[ts, 'left'] = left_image_name
            kapture_imported_training[loc_id].records_camera[ts, 'right'] = right_image_name

            # remove entries from query
            del kapture_imported_query[loc_id].records_camera[ts][cam_id]
            del kapture_imported_query[loc_id].records_camera[ts]['left']
            del kapture_imported_query[loc_id].records_camera[ts]['right']
            del kapture_imported_query[loc_id].records_camera[ts]

        # all remaining query images are kept; reading robotcar_v2_test.txt is not necessary

    # apply rig collapse
    if rig_collapse:
        logger.info('replacing camera poses with rig poses.')
        for kdata_mapping in kapture_imported_mapping.values():
            kapture.rigs_recover_inplace(kdata_mapping.trajectories, rigs, 'rear')
        for kdata_training in kapture_imported_training.values():
            kapture.rigs_recover_inplace(kdata_training.trajectories, rigs, 'rear')

    # IO operations
    robotcar_image_path = path.join(robotcar_path, "images")
    for loc_id, kdata_query in kapture_imported_query.items():
        loc_id_str = f"{loc_id:02d}"
        logger.info(f'writing test data: {loc_id_str}')
        kapture_test_dir = path.join(kapture_path, loc_id_str, "query")
        delete_existing_kapture_files(kapture_test_dir, force_erase=force_overwrite_existing)
        if not kdata_query.records_camera:  # all images were removed
            continue
        kapture_to_dir(kapture_test_dir, kdata_query)
        query_images = [f for _, _, f in kapture.flatten(kdata_query.records_camera)]
        import_record_data_from_dir_auto(robotcar_image_path, kapture_test_dir,
                                         query_images, images_import_method)

    for loc_id, kdata_mapping in kapture_imported_mapping.items():
        loc_id_str = f"{loc_id:02d}"
        logger.info(f'writing mapping data: {loc_id_str}')
        kapture_reconstruction_dir = path.join(kapture_path, f"{loc_id:02d}", "mapping")
        delete_existing_kapture_files(kapture_reconstruction_dir, force_erase=force_overwrite_existing)
        kapture_to_dir(kapture_reconstruction_dir, kdata_mapping)
        mapping_images = [f for _, _, f in kapture.flatten(kdata_mapping.records_camera)]
        import_record_data_from_dir_auto(robotcar_image_path, kapture_reconstruction_dir,
                                         mapping_images, images_import_method)

    for loc_id, kdata_training in kapture_imported_training.items():
        loc_id_str = f"{loc_id:02d}"
        logger.info(f'writing training data: {loc_id_str}')
        kapture_training_dir = path.join(kapture_path, f"{loc_id:02d}", "training")
        delete_existing_kapture_files(kapture_training_dir, force_erase=force_overwrite_existing)
        kapture_to_dir(kapture_training_dir, kdata_training)
        mapping_images = [f for _, _, f in kapture.flatten(kdata_training.records_camera)]
        import_record_data_from_dir_auto(robotcar_image_path, kapture_training_dir,
                                         mapping_images, images_import_method)
def pyransaclib_localize_from_loaded_data(kapture_data: kapture.Kapture,
                                          kapture_path: str,
                                          tar_handlers: TarCollection,
                                          kapture_query_data: kapture.Kapture,
                                          output_path: str,
                                          pairsfile_path: str,
                                          inlier_threshold: float,
                                          number_lo_steps: int,
                                          min_num_iterations: int,
                                          max_num_iterations: int,
                                          refine_poses: bool,
                                          keypoints_type: Optional[str],
                                          duplicate_strategy: DuplicateCorrespondencesStrategy,
                                          rerank_strategy: RerankCorrespondencesStrategy,
                                          write_detailed_report: bool,
                                          force: bool) -> None:
    """
    Localize images using pyransaclib.

    :param kapture_data: loaded kapture mapping data (incl. points3d)
    :param kapture_path: path to the kapture to use
    :param tar_handlers: collection of pre-opened tar archives
    :param kapture_query_data: loaded kapture query data (records_camera and sensors)
    :param output_path: path to the write the localization results
    :param pairsfile_path: pairs to use
    :param inlier_threshold: RANSAC inlier threshold in pixel
    :param number_lo_steps: number of local optimization iterations in LO-MSAC. Use 0 to use MSAC
    :param min_num_iterations: minimum number of ransac loops
    :param max_num_iterations: maximum number of ransac loops
    :param refine_poses: refine poses with pycolmap
    :param keypoints_type: types of keypoints (and observations) to use
    :param duplicate_strategy: strategy to handle duplicate 2D-3D correspondences
    :param rerank_strategy: strategy to rerank 2D-3D correspondences
    :param write_detailed_report: if True, write a per-image json cache with inliers/reprojection stats
    :param force: Silently overwrite kapture files if already exists.
    """
    assert has_pyransaclib
    if refine_poses:
        assert has_pycolmap
    # mapping data must carry a full reconstruction; query data only needs images + sensors
    if not (kapture_data.records_camera and kapture_data.sensors and kapture_data.keypoints and
            kapture_data.matches and kapture_data.points3d and kapture_data.observations):
        raise ValueError('records_camera, sensors, keypoints, matches, '
                         'points3d, observations are mandatory for map+query')
    if not (kapture_query_data.records_camera and kapture_query_data.sensors):
        raise ValueError('records_camera, sensors are mandatory for query')

    if keypoints_type is None:
        keypoints_type = try_get_only_key_from_collection(kapture_data.keypoints)
    assert keypoints_type is not None
    assert keypoints_type in kapture_data.keypoints
    assert keypoints_type in kapture_data.matches

    if kapture_data.rigs is not None and kapture_data.trajectories is not None:
        # make sure, rigs are not used in trajectories.
        logger.info('remove rigs notation.')
        rigs_remove_inplace(kapture_data.trajectories, kapture_data.rigs)
        kapture_data.rigs.clear()

    if kapture_query_data.trajectories is not None:
        logger.warning("Input query data contains trajectories: they will be ignored")
        kapture_query_data.trajectories.clear()

    os.umask(0o002)
    os.makedirs(output_path, exist_ok=True)
    delete_existing_kapture_files(output_path, force_erase=force)

    # load pairsfile: query image name -> list of paired mapping image names
    pairs = {}
    with open(pairsfile_path, 'r') as fid:
        table = kapture.io.csv.table_from_file(fid)
        for img_query, img_map, _ in table:
            if img_query not in pairs:
                pairs[img_query] = []
            pairs[img_query].append(img_map)

    kapture_data.matches[keypoints_type].normalize()
    keypoints_filepaths = keypoints_to_filepaths(kapture_data.keypoints[keypoints_type],
                                                 keypoints_type,
                                                 kapture_path,
                                                 tar_handlers)
    # restrict observations to the selected keypoints type, then build (image, keypoint) -> 3D point id
    obs_for_keypoints_type = {point_id: per_keypoints_type_subdict[keypoints_type]
                              for point_id, per_keypoints_type_subdict in kapture_data.observations.items()
                              if keypoints_type in per_keypoints_type_subdict}
    point_id_from_obs = {(img_name, kp_id): point_id
                         for point_id in obs_for_keypoints_type.keys()
                         for img_name, kp_id in obs_for_keypoints_type[point_id]}
    query_images = [(timestamp, sensor_id, image_name)
                    for timestamp, sensor_id, image_name in kapture.flatten(kapture_query_data.records_camera)]

    # kapture for localized images + pose
    trajectories = kapture.Trajectories()
    progress_bar = tqdm(total=len(query_images),
                        disable=logging.getLogger().level >= logging.CRITICAL)
    for timestamp, sensor_id, image_name in query_images:
        if image_name not in pairs:
            continue
        keypoints_filepath = keypoints_filepaths[image_name]
        kapture_keypoints_query = image_keypoints_from_file(
            filepath=keypoints_filepath,
            dsize=kapture_data.keypoints[keypoints_type].dsize,
            dtype=kapture_data.keypoints[keypoints_type].dtype)
        query_cam = kapture_query_data.sensors[sensor_id]
        assert isinstance(query_cam, kapture.Camera)
        num_keypoints = kapture_keypoints_query.shape[0]
        kapture_keypoints_query, K, distortion = get_camera_matrix_from_kapture(kapture_keypoints_query, query_cam)
        kapture_keypoints_query = kapture_keypoints_query.reshape((num_keypoints, 2))

        # pyransaclib expects undistorted, principal-point-centered keypoints
        cv2_keypoints_query = np.copy(kapture_keypoints_query)
        if np.count_nonzero(distortion) > 0:
            epsilon = np.finfo(np.float64).eps
            stop_criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 500, epsilon)
            cv2_keypoints_query = cv2.undistortPointsIter(cv2_keypoints_query, K, distortion,
                                                          R=None, P=K, criteria=stop_criteria)
        cv2_keypoints_query = cv2_keypoints_query.reshape((num_keypoints, 2))
        # center keypoints
        for i in range(cv2_keypoints_query.shape[0]):
            cv2_keypoints_query[i, 0] = cv2_keypoints_query[i, 0] - K[0, 2]
            cv2_keypoints_query[i, 1] = cv2_keypoints_query[i, 1] - K[1, 2]

        # distorted keypoints are only needed for pose refinement and for the report
        kpts_query = kapture_keypoints_query if (refine_poses or write_detailed_report) else None
        points2D, points2D_undistorted, points3D, stats = get_correspondences(kapture_data, keypoints_type,
                                                                              kapture_path, tar_handlers,
                                                                              image_name, pairs[image_name],
                                                                              point_id_from_obs,
                                                                              kpts_query, cv2_keypoints_query,
                                                                              duplicate_strategy, rerank_strategy)

        # compute absolute pose
        # inlier_threshold - RANSAC inlier threshold in pixels
        # answer - dictionary containing the RANSAC output
        ret = pyransaclib.ransaclib_localization(image_name, K[0, 0], K[1, 1],
                                                 points2D_undistorted, points3D,
                                                 inlier_threshold, number_lo_steps,
                                                 min_num_iterations, max_num_iterations)

        # add pose to output kapture
        if ret['success'] and ret['num_inliers'] > 0:
            pose = kapture.PoseTransform(ret['qvec'], ret['tvec'])

            if refine_poses:
                # build a boolean inlier mask over all correspondences for pycolmap
                inlier_mask = np.zeros((len(points2D),), dtype=bool)
                inlier_mask[ret['inliers']] = True
                inlier_mask = inlier_mask.tolist()
                col_cam_id, width, height, params, _ = get_colmap_camera(query_cam)
                cfg = {'model': CAMERA_MODEL_NAME_ID[col_cam_id][0],
                       'width': int(width),
                       'height': int(height),
                       'params': params}
                ret_refine = pycolmap.pose_refinement(pose.t_raw, pose.r_raw,
                                                      points2D, points3D, inlier_mask, cfg)
                if ret_refine['success']:
                    pose = kapture.PoseTransform(ret_refine['qvec'], ret_refine['tvec'])
                    logger.debug(f'{image_name} refinement success, new pose: {pose}')

            if write_detailed_report:
                reprojection_error = compute_reprojection_error(pose, ret['num_inliers'], ret['inliers'],
                                                                points2D, points3D, K, distortion)
                cache = {"num_correspondences": len(points3D),
                         "num_inliers": ret['num_inliers'],
                         "inliers": ret['inliers'],
                         "reprojection_error": reprojection_error,
                         "stats": stats}
                cache_path = os.path.join(output_path, f'pyransaclib_cache/{image_name}.json')
                save_to_json(cache, cache_path)
            trajectories[timestamp, sensor_id] = pose

        progress_bar.update(1)
    progress_bar.close()

    kapture_data_localized = kapture.Kapture(sensors=kapture_query_data.sensors,
                                             trajectories=trajectories,
                                             records_camera=kapture_query_data.records_camera,
                                             rigs=kapture_query_data.rigs)
    kapture.io.csv.kapture_to_dir(output_path, kapture_data_localized)
def import_7scenes(d7scenes_path: str,
                   kapture_dir_path: str,
                   force_overwrite_existing: bool = False,
                   images_import_method: TransferAction = TransferAction.skip) -> None:
    """
    Imports RGB-D Dataset 7-Scenes dataset and save them as kapture.

    :param d7scenes_path: path to the 7scenes sequence root path
    :param kapture_dir_path: path to kapture top directory
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    """
    os.makedirs(kapture_dir_path, exist_ok=True)
    delete_existing_kapture_files(kapture_dir_path, force_erase=force_overwrite_existing)

    logger.info('loading all content ...')

    POSE_SUFFIX = 'pose'
    RGB_SUFFIX = 'color'
    DEPTH_SUFFIX = 'depth'
    CAMERA_ID = 'kinect'
    d7s_filename_re = re.compile(r'frame-(?P<timestamp>\d{6})\.(?P<suffix>\w*)\.(?P<ext>\w*)')

    # index sequence files by base name, keeping the parsed timestamp/suffix/ext fields
    parsed_by_filename = {}
    for dirpath, _, basenames in os.walk(d7scenes_path):
        for basename in basenames:
            candidate = path.basename(path.join(dirpath, basename))
            parsed = d7s_filename_re.match(candidate)
            if parsed:
                parsed_by_filename[candidate] = parsed.groupdict()

    if not parsed_by_filename:
        raise ValueError('no pose file found: make sure the path to 7scenes sequence is valid.')

    # images
    logger.info('populating image files ...')
    snapshots = kapture.RecordsCamera()
    rgb_entries = sorted((int(fields['timestamp']), filename)
                         for filename, fields in parsed_by_filename.items()
                         if fields['suffix'] == RGB_SUFFIX)
    for timestamp, image_filename in rgb_entries:
        snapshots[timestamp, CAMERA_ID] = image_filename

    # poses
    logger.info('import poses files ...')
    trajectories = kapture.Trajectories()
    for filename, fields in parsed_by_filename.items():
        if fields['suffix'] != POSE_SUFFIX:
            continue
        timestamp = int(fields['timestamp'])
        # the text file holds a camera-to-world homogeneous 4x4 matrix
        pose_mat = np.loadtxt(path.join(d7scenes_path, filename))
        rotation_quat = quaternion.from_rotation_matrix(pose_mat[0:3, 0:3])
        pose_world_from_cam = kapture.PoseTransform(r=rotation_quat, t=pose_mat[0:3, 3])
        # kapture trajectories store camera-from-world poses
        trajectories[timestamp, CAMERA_ID] = pose_world_from_cam.inverse()

    # sensors
    # From authors: the RGB and depth cameras are not calibrated; the KinectFusion pipeline
    # used default depth intrinsics: principal point (320,240), focal length (585,585).
    sensors = kapture.Sensors()
    sensors[CAMERA_ID] = kapture.Camera(
        name='kinect',
        camera_type=kapture.CameraType.SIMPLE_PINHOLE,
        camera_params=[640, 480, 585, 320, 240]  # w, h, f, cx, cy
    )

    # import (copy) image files.
    logger.info('copying image files ...')
    image_filenames = [filename for _, _, filename in kapture.flatten(snapshots)]
    import_record_data_from_dir_auto(d7scenes_path, kapture_dir_path, image_filenames, images_import_method)

    # pack into kapture format
    imported_kapture = kapture.Kapture(records_camera=snapshots,
                                       trajectories=trajectories,
                                       sensors=sensors)

    logger.info('writing imported data ...')
    kapture_to_dir(kapture_dir_path, imported_kapture)
def import_7scenes(d7scenes_path: str,
                   kapture_dir_path: str,
                   force_overwrite_existing: bool = False,
                   images_import_method: TransferAction = TransferAction.skip,
                   partition: Optional[str] = None
                   ) -> None:
    """
    Imports RGB-D Dataset 7-Scenes dataset and save them as kapture.

    :param d7scenes_path: path to the 7scenes sequence root path
    :param kapture_dir_path: path to kapture top directory
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    :param partition: if specified = 'mapping' or 'query'. Requires d7scenes_path/TestSplit.txt or TrainSplit.txt
                      to exists.
    """
    os.makedirs(kapture_dir_path, exist_ok=True)
    delete_existing_kapture_files(kapture_dir_path, force_erase=force_overwrite_existing)

    logger.info('loading all content ...')
    # optional 'seq-NN/' prefix, then frame-NNNNNN.<suffix>.<ext>
    d7s_filename_re = re.compile(r'((?P<sequence>.+)/)?frame-(?P<frame_id>\d{6})\.(?P<suffix>\w*)\.(?P<ext>\w*)')

    # populate all relevant files (paths relative to the sequence root, normalized separators)
    d7s_filenames = (path_secure(path.relpath(path.join(dp, fn), d7scenes_path))
                     for dp, _, fs in os.walk(d7scenes_path) for fn in fs)

    logger.info('populating 7-scenes files ...')
    d7s_filenames = {filename: d7s_filename_re.search(filename).groupdict()
                     for filename in sorted(d7s_filenames)
                     if d7s_filename_re.search(filename)}

    # reorg as shot[seq, id] = {color: , depth: , pose: , ...}
    shots = {}
    for timestamp, (filename, file_attribs) in enumerate(d7s_filenames.items()):
        shot_id = (file_attribs.get('sequence'), file_attribs['frame_id'])
        shots.setdefault(shot_id, {})[file_attribs['suffix']] = filename

    # fake timestamps: sequential index over shots, since 7-scenes has no real capture times
    for timestamp, shot_id in enumerate(shots):
        shots[shot_id]['timestamp'] = timestamp

    # if given, filter partition
    if partition is not None:
        # read the authors split file
        partition_filepath = path.join(d7scenes_path, PARTITION_FILENAMES[partition])
        if not path.isfile(partition_filepath):
            raise FileNotFoundError(f'partition file is missing: {partition_filepath}.')

        with open(partition_filepath, 'rt') as file:
            # split files list entries as 'sequenceN' -> converted to the 'seq-NN' directory names
            split_sequences = [f'seq-{int(seq.strip()[len("sequence"):]):02}' for seq in file.readlines()]
        assert len(split_sequences) > 0
        # filter out
        shots = {(seq, frame): shot
                 for (seq, frame), shot in shots.items()
                 if seq in split_sequences}

    if len(shots) == 0:
        raise FileNotFoundError('no file found: make sure the path to 7scenes sequence is valid.')

    # eg. shots['seq-01', '000000'] =
    #       {
    #           'color': 'seq-01/frame-000000.color.jpg',
    #           'depth': 'seq-01/frame-000000.depth.png',
    #           'pose': 'seq-01/frame-000000.pose.txt',
    #           'timestamp': 0}

    # images + depth maps
    logger.info('populating image and depth maps files ...')
    snapshots = kapture.RecordsCamera()
    depth_maps = kapture.RecordsDepth()
    for shot in shots.values():
        snapshots[shot['timestamp'], RGB_SENSOR_ID] = shot['color']
        kapture_depth_map_filename = shot['depth'][:-len('.png')]  # kapture depth files are not png
        depth_maps[shot['timestamp'], DEPTH_SENSOR_ID] = kapture_depth_map_filename
        # a second, rgb-registered copy of each depth map is recorded under its own sensor id
        kapture_registered_depth_map_filename = shot['depth'][:-len('.png')] + '.reg'  # kapture depth files are not png
        depth_maps[shot['timestamp'], REG_DEPTH_SENSOR_ID] = kapture_registered_depth_map_filename

    # poses
    logger.info('import poses files ...')
    trajectories = kapture.Trajectories()
    for shot in shots.values():
        pose_filepath = path.join(d7scenes_path, shot['pose'])
        pose_mat = np.loadtxt(pose_filepath)  # camera-to-world, 4×4 matrix in homogeneous coordinates
        rotation_mat = pose_mat[0:3, 0:3]
        position_vec = pose_mat[0:3, 3]
        rotation_quat = quaternion.from_rotation_matrix(rotation_mat)
        pose_world_from_cam = kapture.PoseTransform(r=rotation_quat, t=position_vec)
        # kapture trajectories store camera-from-world poses
        pose_cam_from_world = pose_world_from_cam.inverse()
        trajectories[shot['timestamp'], RGBD_SENSOR_ID] = pose_cam_from_world

    # sensors
    """
    From authors: The RGB and depth camera have not been calibrated and we can’t provide calibration parameters at the
    moment. The recorded frames correspond to the raw, uncalibrated camera images. In the KinectFusion pipeline we used
    the following default intrinsics for the depth camera: Principle point (320,240), Focal length (585,585).

    ----
    We use the extr. kinect camera parameters from https://projet.liris.cnrs.fr/voir/activities-dataset/kinect-calibration.html.
    """
    sensors = kapture.Sensors()
    # camera_type = kapture.CameraType.OPENCV
    # camera_params = [640, 480, 5.2161910696979987e+02, 5.2132946256749767e+02, 3.1755491910920682e+02,
    #                  2.5921654718027673e+02,
    #                  2.5673002693536984e-01, -9.3976085633794137e-01, -1.8605549188751580e-03,
    #                  -2.2232238578189420e-03]  # w, h, f, cx, cy, k1, k2, p1, p2, k3
    camera_type = kapture.CameraType.SIMPLE_PINHOLE
    # camera_params = [640, 480, 5.2161910696979987e+02, 5.2132946256749767e+02, 3.1755491910920682e+02,
    #                  2.5921654718027673e+02]  # w, h, fx, fy, cx, cy
    camera_params = [640, 480, 525, 320, 240]  # w, h, f, cx, cy
    sensors[RGB_SENSOR_ID] = kapture.Camera(
        name=RGB_SENSOR_ID,
        camera_type=camera_type,
        camera_params=camera_params
    )
    # depth_camera_type = kapture.CameraType.OPENCV
    # depth_camera_params = [640, 480, 5.8818670481438744e+02, 5.8724220649505514e+02, 3.1076280589210484e+02,
    #                        2.2887144980135292e+02,
    #                        -1.8932947734719333e-01, 1.1358015104098631e+00, -4.4260345347128536e-03,
    #                        -5.4869578635708153e-03, -2.2460143607712921e+00]  # w, h, f, cx, cy, k1, k2, p1, p2, k3
    depth_camera_type = kapture.CameraType.SIMPLE_PINHOLE
    # depth_camera_params = [640, 480, 5.8818670481438744e+02, 5.8724220649505514e+02, 3.1076280589210484e+02,
    #                        2.2887144980135292e+02]  # w, h, fx, fy, cx, cy
    depth_camera_params = [640, 480, 585, 320, 240]  # w, h, f, cx, cy
    sensors[DEPTH_SENSOR_ID] = kapture.Camera(
        name=DEPTH_SENSOR_ID,
        camera_type=depth_camera_type,
        camera_params=depth_camera_params,
        sensor_type='depth'
    )
    # the registered depth stream shares the RGB intrinsics (it is reprojected into the RGB frame)
    sensors[REG_DEPTH_SENSOR_ID] = kapture.Camera(
        name=REG_DEPTH_SENSOR_ID,
        camera_type=depth_camera_type,
        camera_params=camera_params,
        sensor_type='depth'
    )

    # bind camera and depth sensor into a rig
    # R, T: extrinsic transform between the kinect depth and rgb sensors (see link in note above)
    R = np.array([[9.9996518012567637e-01, 2.6765126468950343e-03, -7.9041012313000904e-03],
                  [-2.7409311281316700e-03, 9.9996302803027592e-01, -8.1504520778013286e-03],
                  [7.8819942130445332e-03, 8.1718328771890631e-03, 9.9993554558014031e-01]])
    T = np.array([-2.5558943178152542e-02, 1.0109636268061706e-04, 2.0318321729487039e-03])
    Rt = np.vstack((np.hstack((R, T.reshape(3, 1))), np.array([0, 0, 0, 1])))
    logger.info('building rig with camera and depth sensor ...')
    rigs = kapture.Rigs()
    rigs[RGBD_SENSOR_ID, RGB_SENSOR_ID] = kapture.PoseTransform(quaternion.from_rotation_matrix(R), T)
    rigs[RGBD_SENSOR_ID, REG_DEPTH_SENSOR_ID] = kapture.PoseTransform(quaternion.from_rotation_matrix(R), T)
    rigs[RGBD_SENSOR_ID, DEPTH_SENSOR_ID] = kapture.PoseTransform()

    # import (copy) image files.
    logger.info('copying image files ...')
    image_filenames = [f for _, _, f in kapture.flatten(snapshots)]
    import_record_data_from_dir_auto(d7scenes_path, kapture_dir_path, image_filenames, images_import_method)

    # import (copy) depth map files.
    logger.info('converting depth files ...')
    depth_map_filenames = kapture.io.records.records_to_filepaths(depth_maps, kapture_dir_path)
    hide_progress = logger.getEffectiveLevel() > logging.INFO
    for depth_map_filename, depth_map_filepath_kapture in tqdm(depth_map_filenames.items(), disable=hide_progress):
        if '.reg' in depth_map_filename:
            # .reg files are generated below from their source depth map, not read from disk
            continue
        depth_map_filepath_7scenes = path.join(d7scenes_path, depth_map_filename + '.png')
        depth_map = np.array(Image.open(depth_map_filepath_7scenes))
        # change invalid depth from 65535 to 0
        depth_map[depth_map == 65535] = 0
        # depth maps is in mm in 7scenes, convert it to meters
        depth_map = depth_map.astype(np.float32) * 1.0e-3
        kapture.io.records.records_depth_to_file(depth_map_filepath_kapture, depth_map)
        # register depth to rgb
        reg_depth_map = register_depth(get_K(depth_camera_type, depth_camera_params),
                                       get_K(camera_type, camera_params),
                                       Rt, depth_map, camera_params[0], camera_params[1])
        kapture.io.records.records_depth_to_file(depth_map_filepath_kapture + '.reg', reg_depth_map)

    # pack into kapture format
    imported_kapture = kapture.Kapture(
        records_camera=snapshots,
        records_depth=depth_maps,
        rigs=rigs,
        trajectories=trajectories,
        sensors=sensors)

    logger.info('writing imported data ...')
    kapture_to_dir(kapture_dir_path, imported_kapture)
def import_opensfm(
        opensfm_rootdir: str,
        kapture_rootdir: str,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.copy) -> None:
    """
    Imports an OpenSfM project directory into the kapture format.

    Converts, in order: cameras, shots (images + trajectories), image files,
    GNSS data from pre-extracted exif files, local features
    (keypoints + descriptors), matches, and 3-D points, then writes the
    kapture csv files.

    :param opensfm_rootdir: path to the OpenSfM top directory (contains reconstruction.json)
    :param kapture_rootdir: path to the kapture top directory to create
    :param force_overwrite_existing: if True, silently overwrite kapture files if they already exist.
    :param images_import_method: choose how to import actual image files.
    """
    disable_tqdm = logger.getEffectiveLevel() != logging.INFO
    # load reconstruction
    opensfm_reconstruction_filepath = path.join(opensfm_rootdir, 'reconstruction.json')
    with open(opensfm_reconstruction_filepath, 'rt') as f:
        opensfm_reconstruction = json.load(f)
    # remove the single list @ root
    opensfm_reconstruction = opensfm_reconstruction[0]

    # prepare space for output
    os.makedirs(kapture_rootdir, exist_ok=True)
    delete_existing_kapture_files(kapture_rootdir, force_erase=force_overwrite_existing)

    # import cameras
    kapture_sensors = kapture.Sensors()
    assert 'cameras' in opensfm_reconstruction
    for osfm_camera_id, osfm_camera in opensfm_reconstruction['cameras'].items():
        camera = import_camera(osfm_camera, name=osfm_camera_id)
        kapture_sensors[osfm_camera_id] = camera

    # import shots
    logger.info('importing images and trajectories ...')
    kapture_images = kapture.RecordsCamera()
    kapture_trajectories = kapture.Trajectories()
    opensfm_image_dirpath = path.join(opensfm_rootdir, 'images')
    assert 'shots' in opensfm_reconstruction
    # used later to retrieve the timestamp/sensor of an image by filename.
    image_timestamps, image_sensors = {}, {}
    # in OpenSfM, (sensor, timestamp) is not unique: fabricate timestamps from enumeration order.
    for timestamp, (image_filename, shot) in enumerate(opensfm_reconstruction['shots'].items()):
        sensor_id = shot['camera']
        image_timestamps[image_filename] = timestamp
        image_sensors[image_filename] = sensor_id
        # OpenSfM stores the pose as a rotation (axis-angle) vector + translation
        rotation_vector = shot['rotation']
        q = quaternion.from_rotation_vector(rotation_vector)
        translation = shot['translation']
        # capture_time = shot['capture_time']  # may be invalid
        # gps_position = shot['gps_position']
        kapture_images[timestamp, sensor_id] = image_filename
        kapture_trajectories[timestamp, sensor_id] = kapture.PoseTransform(r=q, t=translation)

    # copy image files
    filename_list = [f for _, _, f in kapture.flatten(kapture_images)]
    import_record_data_from_dir_auto(
        source_record_dirpath=opensfm_image_dirpath,
        destination_kapture_dirpath=kapture_rootdir,
        filename_list=filename_list,
        copy_strategy=images_import_method)

    # gps from pre-extracted exif, in exif/image_name.jpg.exif
    kapture_gnss = None
    opensfm_exif_dirpath = path.join(opensfm_rootdir, 'exif')
    opensfm_exif_suffix = '.exif'
    if path.isdir(opensfm_exif_dirpath):
        logger.info('importing GNSS from exif ...')
        camera_ids = set(image_sensors.values())
        # add a gps sensor for each camera
        map_cam_to_gnss_sensor = {cam_id: 'GPS_' + cam_id for cam_id in camera_ids}
        for gnss_id in map_cam_to_gnss_sensor.values():
            kapture_sensors[gnss_id] = kapture.Sensor(sensor_type='gnss', sensor_params=['EPSG:4326'])
        kapture_gnss = kapture.RecordsGnss()
        opensfm_exif_filepath_list = (path.join(dirpath, filename)
                                      for dirpath, _, filename_list in os.walk(opensfm_exif_dirpath)
                                      for filename in filename_list
                                      if filename.endswith(opensfm_exif_suffix))
        for opensfm_exif_filepath in tqdm(opensfm_exif_filepath_list, disable=disable_tqdm):
            # exif files mirror the image tree: strip dir prefix and '.exif' suffix to get the image name
            image_filename = path.relpath(opensfm_exif_filepath,
                                          opensfm_exif_dirpath)[:-len(opensfm_exif_suffix)]
            image_timestamp = image_timestamps[image_filename]
            image_sensor_id = image_sensors[image_filename]
            gnss_timestamp = image_timestamp
            gnss_sensor_id = map_cam_to_gnss_sensor[image_sensor_id]
            with open(opensfm_exif_filepath, 'rt') as f:
                js_root = json.load(f)
                if 'gps' not in js_root:
                    logger.warning(f'NO GPS data in "{opensfm_exif_filepath}"')
                    continue
                gps_coords = {
                    'x': js_root['gps']['longitude'],
                    'y': js_root['gps']['latitude'],
                    'z': js_root['gps'].get('altitude', 0.0),
                    'dop': js_root['gps'].get('dop', 0),
                    'utc': 0,
                }
                logger.debug(f'found GPS data for ({gnss_timestamp}, {gnss_sensor_id}) '
                             f'in "{opensfm_exif_filepath}"')
                kapture_gnss[gnss_timestamp, gnss_sensor_id] = kapture.RecordGnss(**gps_coords)

    # import features (keypoints + descriptors); types are lazily created from the first file seen
    kapture_keypoints = None  # kapture.Keypoints(type_name='opensfm', dsize=4, dtype=np.float64)
    kapture_descriptors = None  # kapture.Descriptors(type_name='opensfm', dsize=128, dtype=np.uint8)
    opensfm_features_dirpath = path.join(opensfm_rootdir, 'features')
    opensfm_features_suffix = '.features.npz'
    if path.isdir(opensfm_features_dirpath):
        logger.info('importing keypoints and descriptors ...')
        opensfm_features_file_list = (path.join(dp, fn)
                                      for dp, _, fs in os.walk(opensfm_features_dirpath)
                                      for fn in fs)
        opensfm_features_file_list = (filepath
                                      for filepath in opensfm_features_file_list
                                      if filepath.endswith(opensfm_features_suffix))
        for opensfm_feature_filename in tqdm(opensfm_features_file_list, disable=disable_tqdm):
            image_filename = path.relpath(opensfm_feature_filename,
                                          opensfm_features_dirpath)[:-len(opensfm_features_suffix)]
            opensfm_image_features = np.load(opensfm_feature_filename)
            opensfm_image_keypoints = opensfm_image_features['points']
            opensfm_image_descriptors = opensfm_image_features['descriptors']
            logger.debug(f'parsing keypoints and descriptors in {opensfm_feature_filename}')
            if kapture_keypoints is None:
                # HAHOG = Hessian Affine feature point detector + HOG descriptor
                kapture_keypoints = kapture.Keypoints(
                    type_name='HessianAffine',
                    dsize=opensfm_image_keypoints.shape[1],
                    dtype=opensfm_image_keypoints.dtype)
            if kapture_descriptors is None:
                kapture_descriptors = kapture.Descriptors(
                    type_name='HOG',
                    dsize=opensfm_image_descriptors.shape[1],
                    dtype=opensfm_image_descriptors.dtype)
            # convert keypoints file
            keypoint_filepath = kapture.io.features.get_features_fullpath(
                data_type=kapture.Keypoints,
                kapture_dirpath=kapture_rootdir,
                image_filename=image_filename)
            kapture.io.features.image_keypoints_to_file(
                filepath=keypoint_filepath, image_keypoints=opensfm_image_keypoints)
            # register the file
            kapture_keypoints.add(image_filename)
            # convert descriptors file
            descriptor_filepath = kapture.io.features.get_features_fullpath(
                data_type=kapture.Descriptors,
                kapture_dirpath=kapture_rootdir,
                image_filename=image_filename)
            kapture.io.features.image_descriptors_to_file(
                filepath=descriptor_filepath, image_descriptors=opensfm_image_descriptors)
            # register the file
            kapture_descriptors.add(image_filename)

    # import matches
    kapture_matches = kapture.Matches()
    opensfm_matches_suffix = '_matches.pkl.gz'
    opensfm_matches_dirpath = path.join(opensfm_rootdir, 'matches')
    if path.isdir(opensfm_matches_dirpath):
        logger.info('importing matches ...')
        opensfm_matches_file_list = (path.join(dp, fn)
                                     for dp, _, fs in os.walk(opensfm_matches_dirpath)
                                     for fn in fs)
        opensfm_matches_file_list = (filepath
                                     for filepath in opensfm_matches_file_list
                                     if filepath.endswith(opensfm_matches_suffix))
        for opensfm_matches_filename in tqdm(opensfm_matches_file_list, disable=disable_tqdm):
            image_filename_1 = path.relpath(opensfm_matches_filename,
                                            opensfm_matches_dirpath)[:-len(opensfm_matches_suffix)]
            logger.debug(f'parsing matches in {image_filename_1}')
            with gzip.open(opensfm_matches_filename, 'rb') as f:
                opensfm_matches = pickle.load(f)
                for image_filename_2, opensfm_image_matches in opensfm_matches.items():
                    image_pair = (image_filename_1, image_filename_2)
                    # register the pair to kapture
                    kapture_matches.add(*image_pair)
                    # convert the bin file to kapture
                    kapture_matches_filepath = kapture.io.features.get_matches_fullpath(
                        image_filename_pair=image_pair,
                        kapture_dirpath=kapture_rootdir)
                    kapture_image_matches = np.hstack([
                        opensfm_image_matches.astype(np.float64),
                        # no matches scoring = assume all to one
                        np.ones(shape=(opensfm_image_matches.shape[0], 1), dtype=np.float64)])
                    kapture.io.features.image_matches_to_file(kapture_matches_filepath,
                                                              kapture_image_matches)

    # import 3-D points
    if 'points' in opensfm_reconstruction:
        logger.info('importing points 3-D')
        opensfm_points = opensfm_reconstruction['points']
        points_data = []
        for point_id in sorted(opensfm_points):
            point_data = opensfm_points[point_id]
            # kapture Points3d rows are [x, y, z, r, g, b]
            point_data = point_data['coordinates'] + point_data['color']
            points_data.append(point_data)
        kapture_points = kapture.Points3d(points_data)
    else:
        kapture_points = None

    # saving kapture csv files
    logger.info('saving kapture files')
    kapture_data = kapture.Kapture(
        sensors=kapture_sensors,
        records_camera=kapture_images,
        records_gnss=kapture_gnss,
        trajectories=kapture_trajectories,
        keypoints=kapture_keypoints,
        descriptors=kapture_descriptors,
        matches=kapture_matches,
        points3d=kapture_points)
    kapture.io.csv.kapture_to_dir(dirpath=kapture_rootdir, kapture_data=kapture_data)
def import_bundler(
        bundler_path: str,
        image_list_path: str,
        image_dir_path: str,
        kapture_dir_path: str,
        ignore_trajectories: bool,
        add_reconstruction: bool,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.skip) -> None:
    """
    Imports bundler data and save them as kapture.

    :param bundler_path: path to the bundler model file
    :param image_list_path: path to the file containing the list of image names
    :param image_dir_path: input path to bundler image directory.
    :param kapture_dir_path: path to kapture top directory
    :param ignore_trajectories: if True, will not import the trajectories
    :param add_reconstruction: if True, will create 3D points and observations
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    """
    os.makedirs(kapture_dir_path, exist_ok=True)
    delete_existing_kapture_files(kapture_dir_path, force_erase=force_overwrite_existing)

    logger.info('loading all content...')
    # if there is a filter list, parse it
    with open(image_list_path) as file:
        file_content = file.readlines()
    # remove end line char and empty lines
    image_list = [line.rstrip() for line in file_content if line != '\n']

    with open(bundler_path) as file:
        bundler_content = file.readlines()
    # remove end line char and empty lines
    bundler_content = [line.rstrip() for line in bundler_content if line != '\n']
    # only the v0.3 bundler format is supported
    assert bundler_content[0] == "# Bundle file v0.3"
    # second line of the header is: <num_cameras> <num_points>
    line_1 = bundler_content[1].split()
    number_of_cameras = int(line_1[0])
    number_of_points = int(line_1[1])
    # offset = index of the first camera record (the 2 header lines are skipped)
    offset = 2
    number_of_lines_per_camera = 5  # 1 camera + 3 rotation + 1 translation

    cameras = kapture.Sensors()
    images = kapture.RecordsCamera()
    trajectories = kapture.Trajectories() if not ignore_trajectories else None
    points3d = [] if add_reconstruction else None
    keypoints = kapture.Keypoints('sift', np.float32, 2) if add_reconstruction else None
    observations = kapture.Observations() if add_reconstruction else None
    image_mapping = []  # bundler camera_id -> (name, width, height)
    for i in range(0, number_of_cameras):
        # each camera record spans number_of_lines_per_camera consecutive lines
        start_index = i * number_of_lines_per_camera + offset
        # the i-th camera corresponds to the i-th name of the image list file
        file_name = image_list[i]

        # process camera info: <focal> <k1> <k2>
        line_camera = bundler_content[start_index].split()
        focal_length = float(line_camera[0])
        k1 = float(line_camera[1])
        k2 = float(line_camera[2])

        # lazy open: bundler does not store image sizes, read them from the image file itself
        with Image.open(path.join(image_dir_path, file_name)) as im:
            width, height = im.size
        image_mapping.append((file_name, width, height))
        # principal point is assumed at the image center (bundler convention)
        camera = kapture.Camera(
            MODEL,
            [width, height, focal_length, width / 2, height / 2, k1, k2])
        camera_id = f'sensor{i}'
        cameras[camera_id] = camera

        # process extrinsics: 3 rotation-matrix rows then 1 translation line
        rotation_matrix = [[float(v) for v in line.split()]
                           for line in bundler_content[start_index + 1:start_index + 4]]
        quaternion_wxyz = quaternion.from_rotation_matrix(rotation_matrix)
        translation = np.array([float(v) for v in bundler_content[start_index + 4].split()])
        pose = kapture.PoseTransform(quaternion_wxyz, translation)

        # The Bundler model uses a coordinate system that differs from the *computer vision camera
        # coordinate system*. More specifically, they use the camera coordinate system typically used
        # in *computer graphics*. In this camera coordinate system, the camera is looking down the
        # `-z`-axis, with the `x`-axis pointing to the right and the `y`-axis pointing upwards.
        # rotation Pi around the x axis to get the *computer vision camera
        # coordinate system*
        rotation_around_x = quaternion.quaternion(0.0, 1.0, 0.0, 0.0)
        transformation = kapture.PoseTransform(rotation_around_x, np.array([0, 0, 0]))

        images[(i, camera_id)] = file_name
        if trajectories is not None:
            # transformation.inverse() is equal to transformation (rotation around -Pi or Pi around X is the same)
            trajectories[(i, camera_id)] = kapture.PoseTransform.compose(
                [transformation, pose, transformation])

    if points3d is not None and number_of_points > 0:
        assert keypoints is not None
        assert observations is not None
        # point records start right after the last camera record
        offset += number_of_cameras * number_of_lines_per_camera
        number_of_lines_per_point = 3  # position color viewlist

        # (image_name, bundler_keypoint_id ) -> keypoint_id
        known_keypoints = {}
        # image_name -> list of [x, y] keypoints, index in list == kapture keypoint id
        local_keypoints = {}
        for i in range(0, number_of_points):
            start_index = i * number_of_lines_per_point + offset
            position = [float(v) for v in bundler_content[start_index].split()]
            # apply transformation (same Pi-around-x change of basis as for the poses)
            position = [position[0], -position[1], -position[2]]
            color = [float(v) for v in bundler_content[start_index + 1].split()]

            # <view list>: length of the list + [<camera> <key> <x> <y>]
            # x, y origin is the center of the image
            view_list = bundler_content[start_index + 2].split()
            number_of_observations = int(view_list[0])

            for j in range(number_of_observations):
                # each observation is a stride-4 group after the leading count
                camera_id = int(view_list[1 + 4 * j + 0])
                keypoint_id = int(view_list[1 + 4 * j + 1])
                x = float(view_list[1 + 4 * j + 2])
                y = float(view_list[1 + 4 * j + 3])

                file_name, width, height = image_mapping[camera_id]
                # put (0,0) in upper left corner
                x += (width / 2)
                y += (height / 2)

                # init local_keypoints if needed
                if file_name not in local_keypoints:
                    local_keypoints[file_name] = []
                # do not add the same keypoint twice
                if (file_name, keypoint_id) not in known_keypoints:
                    # in the kapture format, keypoint id is different. Note that it starts from 0
                    known_keypoints[(file_name, keypoint_id)] = len(local_keypoints[file_name])
                    local_keypoints[file_name].append([x, y])
                keypoint_idx = known_keypoints[(file_name, keypoint_id)]
                # i is both the bundler point index and the kapture 3-D point id
                observations.add(i, file_name, keypoint_idx)

            points3d.append(position + color)
        points3d = np.array(points3d)

        # finally, convert local_keypoints to np.ndarray and add them to the global keypoints variable
        keypoints = kapture.Keypoints('sift', np.float32, 2)
        for image_filename, keypoints_array in local_keypoints.items():
            keypoints_np_array = np.array(keypoints_array).astype(np.float32)
            keypoints_out_path = kapture.io.features.get_keypoints_fullpath(
                kapture_dir_path, image_filename)
            kapture.io.features.image_keypoints_to_file(
                keypoints_out_path, keypoints_np_array)
            keypoints.add(image_filename)

    if points3d is not None:
        points3d = kapture.Points3d(points3d)

    # import (copy) image files.
    logger.info('import image files ...')
    filename_list = [f for _, _, f in kapture.flatten(images)]
    import_record_data_from_dir_auto(image_dir_path, kapture_dir_path,
                                     filename_list, images_import_method)

    # pack into kapture format
    imported_kapture = kapture.Kapture(
        sensors=cameras,
        records_camera=images,
        trajectories=trajectories,
        points3d=points3d,
        keypoints=keypoints,
        observations=observations)
    logger.info('writing imported data...')
    kapture_to_dir(kapture_dir_path, imported_kapture)
def extract_kapture_global(kapture_root, config, output_dir='', overwrite=False):
    """
    Extracts NetVLAD global features for all images of a kapture dataset.

    Already-extracted features are skipped unless overwrite is set, in which
    case existing global features are deleted first.

    :param kapture_root: path to the kapture top directory (images are read from here)
    :param config: dict with keys 'checkpoint', 'grayscale', 'pca_dim'
    :param output_dir: where to write the features; defaults to kapture_root when empty
    :param overwrite: if True, delete and re-extract existing global features
    """
    # note: lazy %-style args so the dict is only formatted when INFO is enabled
    logging.info('Extracting NetVLAD features with configuration:\n%s', config)
    # use kapture io to identify image paths and loop
    kdata = kapture_from_dir(kapture_root, matches_pairsfile_path=None,
                             skip_list=[kapture.Matches, kapture.Points3d, kapture.Observations,
                                        kapture.Keypoints, kapture.Descriptors])
    assert kdata.records_camera is not None
    export_dir = output_dir if output_dir else kapture_root  # root of output directory for features
    os.makedirs(export_dir, exist_ok=True)

    image_list = [filename for _, _, filename in kapture.flatten(kdata.records_camera)]
    # resume extraction if some features exist
    try:
        # load features if there are any
        kdata.global_features = global_features_from_dir(export_dir, None)
        if kdata.global_features is not None and not overwrite:
            image_list = [name for name in image_list if name not in kdata.global_features]
    except FileNotFoundError:
        pass
    except Exception:
        # best-effort resume: a corrupt feature dir must not abort extraction
        logging.exception("Error with importing existing global features.")

    # clear features first if overwriting
    if overwrite:
        delete_existing_kapture_files(export_dir, True, only=[kapture.GlobalFeatures])

    if len(image_list) == 0:
        print('All features were already extracted')
        return
    else:
        print(f'Extracting NetVLAD features for {len(image_list)} images')

    # for the global descriptor type specification
    global_dtype = None if kdata.global_features is None else kdata.global_features.dtype
    global_dsize = None if kdata.global_features is None else kdata.global_features.dsize

    # setup network: 1 channel for grayscale input, 3 for color
    tf.reset_default_graph()
    if config['grayscale']:
        tf_batch = tf.placeholder(dtype=tf.float32, shape=[None, None, None, 1])
    else:
        tf_batch = tf.placeholder(dtype=tf.float32, shape=[None, None, None, 3])
    # load network and checkpoint
    net = nets.vgg16NetvladPca(tf_batch)
    saver = tf.train.Saver()
    sess = tf.Session()
    checkpoint = chkpt_path + '/' + config['checkpoint']
    saver.restore(sess, checkpoint)

    for image_name in image_list:
        img_path = get_image_fullpath(kapture_root, image_name)
        if img_path.endswith('.txt'):
            # records may reference list files rather than images; nothing to extract from them
            logging.warning('skipping non-image record: %s', img_path)
            continue
        print(f"\nExtracting features for {img_path}")
        if config['grayscale']:
            image = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
        else:
            # was `cv2.imread(img_path, cv2.COLOR_BGR2RGB)`: imread takes an imread FLAG,
            # not a color-conversion code, so no RGB conversion happened. Do it explicitly.
            image = cv2.imread(img_path, cv2.IMREAD_COLOR)
            if image is not None:
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        if image is None:
            # imread returns None on unreadable files instead of raising
            logging.warning('could not read image: %s', img_path)
            continue
        if config['grayscale']:
            # add batch and channel axes: (H, W) -> (1, H, W, 1)
            image = np.expand_dims(np.expand_dims(image, axis=0), axis=-1)
        else:
            # add batch axis: (H, W, 3) -> (1, H, W, 3)
            image = np.expand_dims(image, axis=0)
        # keep only the first pca_dim components of the PCA-reduced descriptor
        descriptor = sess.run(net, feed_dict={tf_batch: image})[:, :config['pca_dim']]
        descriptor = np.squeeze(descriptor)

        # write global descriptor type specification on first descriptor
        if global_dtype is None:
            global_dtype = descriptor.dtype
            global_dsize = len(descriptor)
            kdata.global_features = kapture.GlobalFeatures('netvlad', global_dtype, global_dsize)
            global_descriptors_config_abs_path = get_csv_fullpath(kapture.GlobalFeatures, export_dir)
            descriptors_to_file(global_descriptors_config_abs_path, kdata.global_features)
        else:
            # all descriptors of a dataset must share one type specification
            assert kdata.global_features.type_name == "netvlad"
            assert kdata.global_features.dtype == descriptor.dtype
            assert kdata.global_features.dsize == len(descriptor)
        # get output paths
        global_descriptors_abs_path = get_global_features_fullpath(export_dir, image_name)
        image_global_features_to_file(global_descriptors_abs_path, descriptor)
        kdata.global_features.add(image_name)

    # sess.close() # close session before initializing again for next submap

    if not global_features_check_dir(kdata.global_features, export_dir):
        print('global feature extraction ended successfully but not all files were saved')