def import_record_data_from_dir_auto(
        source_record_dirpath: str,
        destination_kapture_dirpath: str,
        filename_list: Iterable[str],
        copy_strategy: TransferAction = TransferAction.copy) -> None:
    """
    Imports record_data from a given directory.
    Automatically replicate the folder hierarchy from source to kapture record directory.
    If you want to change the file organisation use import_record_data_from_dir_explicit instead.
    The actual import can be done using actual file copy (copy/move), or symlinks (absolute/relative).

    :param source_record_dirpath: input path to directory where to import data from.
    :param destination_kapture_dirpath: input root path to kapture root directory, where to import data to.
    :param filename_list: input list of filenames to import (filenames are relative path to source_record_dirpath).
                          This image list can be obtained from file walking (populate_files_in_dirpath)
                          or using an already populated kapture.RecordsData. Prefer RecordsData since it will
                          only copy required files, eg:
                          filename_list = [f for _, _, f in kapture.flatten(kapture_data.records_camera)]
    :param copy_strategy: transfer action (copy, move, absolute/relative symlink, root link, skip)
                          applied to every file.
    """
    if copy_strategy == TransferAction.root_link:
        # link the whole record directory at once instead of per-file transfer.
        import_record_data_from_dir_link_dir(source_record_dirpath, destination_kapture_dirpath)
    else:
        # filename_list may be a single-pass iterator: it is iterated by BOTH generators
        # below, which would interleave/exhaust it. Materialize it once first.
        filename_list = list(filename_list)
        source_filepath_list = (path.join(source_record_dirpath, record_filename)
                                for record_filename in filename_list)
        kapture_filepath_list = (get_record_fullpath(destination_kapture_dirpath, record_filename)
                                 for record_filename in filename_list)
        transfer_files_from_dir(
            source_filepath_list,
            kapture_filepath_list,
            copy_strategy
        )
def import_silda(silda_dir_path: str,
                 destination_kapture_dir_path: str,
                 fallback_cam_model: str = 'FOV',
                 do_split_cams: bool = False,
                 corpus: Optional[str] = None,
                 replace_pose_rig: bool = False,
                 force_overwrite_existing: bool = False,
                 images_import_strategy: TransferAction = TransferAction.link_absolute) -> None:
    """
    Imports data from silda dataset into the kapture format.

    :param silda_dir_path: path to the silda top directory
    :param destination_kapture_dir_path: input path to kapture directory.
    :param fallback_cam_model: camera model to fallback when necessary
    :param do_split_cams: If true, re-organises and renames the image files to split apart cameras.
    :param corpus: the corpus to be imported, among 'mapping', 'query'. If None, imports everything.
    :param replace_pose_rig: if True, replaces poses of individual cameras with poses of the rig.
    :param force_overwrite_existing: if true, Silently overwrite kapture files if already exists.
    :param images_import_strategy: how to copy image files.
    """
    # sanity check: normalize both paths to absolute, secured form.
    silda_dir_path = path_secure(path.abspath(silda_dir_path))
    destination_kapture_dir_path = path_secure(path.abspath(destination_kapture_dir_path))
    # root_link links the image directory as a whole, so it cannot be combined with
    # per-camera file re-organisation.
    if TransferAction.root_link == images_import_strategy and do_split_cams:
        raise ValueError('impossible to only link images directory and applying split cam.')
    # progress bars are only shown at verbose (below INFO) log levels.
    hide_progress_bars = logger.getEffectiveLevel() >= logging.INFO

    # prepare output directory
    kapture.io.structure.delete_existing_kapture_files(destination_kapture_dir_path,
                                                       force_overwrite_existing)
    os.makedirs(destination_kapture_dir_path, exist_ok=True)

    # images ###########################################################################################################
    logger.info('Processing images ...')
    # silda-images
    # ...
    # ├── 1445_0.png
    # ├── 1445_1.png
    # ...
    silda_images_root_path = path.join(silda_dir_path, 'silda-images')
    # list all png files (its PNG in silda) using a generator.
    if corpus is not None:
        assert corpus in SILDA_CORPUS_SPLIT_FILENAMES
        # if corpus specified, filter by those which directory name match corpus.
        logger.debug(f'only importing {corpus} part.')
        # the corpus split file lists one image filename per line.
        corpus_file_path = path.join(silda_dir_path, SILDA_CORPUS_SPLIT_FILENAMES[corpus])
        with open(corpus_file_path, 'rt') as corpus_file:
            corpus_filenames = corpus_file.readlines()
            image_filenames_original = sorted(filename.strip()
                                              for filename in corpus_filenames)
    else:
        # NOTE(review): only the bare filename is kept (dir_path is discarded) —
        # assumes silda-images is a flat directory; confirm for nested layouts.
        image_filenames_original = sorted(filename
                                          for dir_path, sd, fs in os.walk(silda_images_root_path)
                                          for filename in fs
                                          if filename.endswith('.png'))

    image_filenames_kapture = []  # image filenames as stored in the kapture tree
    snapshots = kapture.RecordsCamera()
    image_name_to_ids = {}  # '1445_0.png' -> (1445, 0)
    for image_filename_original in tqdm(image_filenames_original, disable=hide_progress_bars):
        # retrieve info from image filename; the pattern provides named groups
        # 'filename', 'timestamp' and 'cam_id' (see usage below).
        name_parts_match = SILDA_IMAGE_NAME_PATTERN.match(image_filename_original)
        assert name_parts_match is not None
        shot_info: Dict[str, Any]
        shot_info = name_parts_match.groupdict()
        shot_info['timestamp'] = int(shot_info['timestamp'])  # To avoid warnings about type of the value
        # eg. file_info = {'filename': '1445_0.png', 'timestamp': 1445, 'cam_id': '0'}
        # create a path of the image into NLE dir
        if do_split_cams:
            # re-organise images with subfolders per corpus/camera/timestamp.png
            kapture_image_filename = path.join(shot_info['cam_id'],
                                               '{:04d}.png'.format(shot_info['timestamp']))
        else:
            # keep the original file hierarchy
            kapture_image_filename = image_filename_original

        image_filenames_kapture.append(kapture_image_filename)
        snapshots[shot_info['timestamp'], shot_info['cam_id']] = kapture_image_filename
        image_name_to_ids[shot_info['filename']] = (shot_info['timestamp'], shot_info['cam_id'])

    assert len(image_filenames_kapture) == len(image_filenames_original)
    # intrinsics #######################################################################################################
    cameras = _import_cameras(silda_dir_path, snapshots, fallback_cam_model)

    # extrinsics #######################################################################################################
    trajectories = _import_trajectories(silda_dir_path, image_name_to_ids, hide_progress_bars)

    # rigs
    rigs = _make_rigs(replace_pose_rig, trajectories)

    # pack it all together
    kapture_data = kapture.Kapture(sensors=cameras,
                                   records_camera=snapshots,
                                   trajectories=trajectories,
                                   rigs=rigs)
    logger.info('saving to Kapture  ...')
    kapture.io.csv.kapture_to_dir(destination_kapture_dir_path, kapture_data)

    # finally import images
    if images_import_strategy != TransferAction.skip:
        # importing image files
        logger.info(f'importing {len(image_filenames_original)} images ...')
        assert len(image_filenames_original) == len(image_filenames_kapture)
        # NOTE(review): the loop variable below is named *_kapture but actually iterates
        # the ORIGINAL filenames — correct pairing (both lists are index-aligned),
        # misleading name.
        image_file_paths_original = [path.join(silda_images_root_path, image_filename_kapture)
                                     for image_filename_kapture in image_filenames_original]
        image_file_paths_kapture = [get_image_fullpath(destination_kapture_dir_path, image_filename_kapture)
                                    for image_filename_kapture in image_filenames_kapture]
        transfer_files_from_dir(image_file_paths_original,
                                image_file_paths_kapture,
                                images_import_strategy)
    logger.info('done.')
def export_opensfm(kapture_root_dir: str,
                   opensfm_root_dir: str,
                   force_overwrite_existing: bool = False,
                   images_export_method: TransferAction = TransferAction.copy) -> None:
    """
    Export the kapture data to an openSfM format

    :param kapture_root_dir: full path to the top kapture directory
    :param opensfm_root_dir: path of the directory where to store the data in openSfM format
    :param force_overwrite_existing: if true, will remove existing openSfM data without prompting the user.
    :param images_export_method: transfer action applied to image files.
    """
    # only show tqdm progress bars at verbose (below INFO) levels.
    hide_progress = logger.getEffectiveLevel() > logging.INFO
    # load reconstruction
    kapture_data = kapture.io.csv.kapture_from_dir(kapture_root_dir)

    # export cameras: keep camera sensors only, then convert each to the opensfm model.
    camera_sensors = {sensor_id: sensor
                      for sensor_id, sensor in kapture_data.sensors.items()
                      if sensor.sensor_type == 'camera'}
    opensfm_cameras = {sensor_id: export_opensfm_camera(sensor)
                       for sensor_id, sensor in camera_sensors.items()}

    # export shots
    # opensfm_shots = {image_filename: shot}
    # shot = {camera, rotation, translation, capture_time, gps_position, ...}
    opensfm_shots = {}
    flattened_records = kapture.flatten(kapture_data.records_camera)
    for timestamp, sensor_id, image_name in tqdm(flattened_records, disable=hide_progress):
        shot = {
            'capture_time': 0,  # in ms != timestamp
            'camera': sensor_id,
        }
        # pose is optional: only exported when a trajectory entry exists.
        if (timestamp, sensor_id) in kapture_data.trajectories:
            pose = kapture_data.trajectories[timestamp, sensor_id]
            shot['rotation'] = quaternion.as_rotation_vector(pose.r).tolist()
            shot['translation'] = pose.t.flatten().tolist()
        opensfm_shots[image_name] = shot

    # pack it
    reconstruction = {
        'cameras': opensfm_cameras,
        'shots': opensfm_shots,
    }

    # images
    logger.info(f'writing image files "{path.join(opensfm_root_dir, "images")}".')
    image_names = [name for _, _, name in kapture.flatten(kapture_data.records_camera)]
    source_image_paths = [get_record_fullpath(kapture_root_dir, name)
                          for name in image_names]
    destination_image_paths = [path.join(opensfm_root_dir, 'images', name)
                               for name in image_names]
    transfer_files_from_dir(
        source_filepath_list=source_image_paths,
        destination_filepath_list=destination_image_paths,
        force_overwrite=force_overwrite_existing,
        copy_strategy=images_export_method,
    )

    _export_opensfm_features_and_matches(image_names, kapture_data,
                                         kapture_root_dir, opensfm_root_dir,
                                         hide_progress)

    # export 3D-points files
    if kapture_data.points3d is not None:
        logger.info('exporting points 3-D')
        reconstruction['points'] = {}
        points_iter = tqdm(enumerate(kapture_data.points3d), disable=hide_progress)
        for point_id, (x, y, z, r, g, b) in points_iter:
            reconstruction['points'][point_id] = {
                'coordinates': [x, y, z],
                'color': [r, g, b],
            }

    # write json files #################################################################################################
    os.makedirs(opensfm_root_dir, exist_ok=True)
    # write reconstruction.json
    reconstruction_file_path = path.join(opensfm_root_dir, 'reconstruction.json')
    logger.info(f'writing reconstruction file "{reconstruction_file_path}".')
    with open(reconstruction_file_path, 'wt') as file_handle:
        json.dump([reconstruction], file_handle, indent=4)
    # write camera_models.json
    camera_models_file_path = path.join(opensfm_root_dir, 'camera_models.json')
    logger.info(f'writing camera models file "{camera_models_file_path}".')
    with open(camera_models_file_path, 'wt') as file_handle:
        json.dump(opensfm_cameras, file_handle, indent=4)
def export_openmvg_sfm_data(kapture_path: str,
                            kapture_data: kapture.Kapture,
                            openmvg_sfm_data_file_path: str,
                            openmvg_image_root_path: str,
                            image_action: TransferAction,
                            image_path_flatten: bool,
                            force: bool,
                            kapture_to_openmvg_view_ids: Optional[dict] = None) -> Dict:
    """
    Convert the kapture data into an openMVG dataset stored as a dictionary.
    The format is defined here:
    https://openmvg.readthedocs.io/en/latest/software/SfM/SfM_OutputFormat/

    :param kapture_data: the kapture data
    :param kapture_path: top directory of the kapture data and the images
    :param openmvg_sfm_data_file_path: input path to the SfM data file to be written.
    :param openmvg_image_root_path: input path to openMVG image directory to be created.
    :param image_action: action to apply on images: link, copy, move or do nothing.
    :param image_path_flatten: flatten image path (eg. to avoid image name collision in openMVG regions).
    :param force: if true, will remove existing openMVG data without prompting the user.
    :param kapture_to_openmvg_view_ids: input/output mapping of kapture image name to corresponding
                                        openmvg view id. If None, a fresh mapping is created.
    :return: an SfM_data, the openmvg structure, stored as a dictionary ready to be serialized
    """
    # BUG FIX: the default was previously a shared mutable dict ({}), so successive calls
    # without this argument leaked view ids from one export into the next.
    if kapture_to_openmvg_view_ids is None:
        kapture_to_openmvg_view_ids = {}
    if kapture_data.cameras is None or kapture_data.records_camera is None:
        raise ValueError('export_openmvg_sfm_data needs kapture camera and records_camera.')

    if image_action == TransferAction.root_link:
        raise NotImplementedError('root link is not implemented, use skip instead.')

    # refer to the original image dir when skipping image transfer.
    if image_action == TransferAction.skip:
        openmvg_image_root_path = get_image_fullpath(kapture_path)

    if openmvg_image_root_path is None:
        raise ValueError(f'openmvg_image_root_path must be defined to be able to perform {image_action}.')

    # make sure directory is ready to contain openmvg_sfm_data_file_path
    os.makedirs(path.dirname(openmvg_sfm_data_file_path), exist_ok=True)

    # Check we don't have other sensors defined
    if len(kapture_data.sensors) != len(kapture_data.cameras):
        extra_sensor_number = len(kapture_data.sensors) - len(kapture_data.cameras)
        logger.warning(f'We will ignore {extra_sensor_number} sensors that are not camera')

    # openmvg does not support rigs
    if kapture_data.rigs:
        logger.info('remove rigs notation.')
        rigs_remove_inplace(kapture_data.trajectories, kapture_data.rigs)
        kapture_data.rigs.clear()

    # Compute root path and camera used in records
    kapture_to_openmvg_cam_ids = {}  # kapture_cam_id -> openmvg_cam_id
    # assign a stable view id (enumeration order) to every image not already mapped.
    for i, (_, _, kapture_image_name) in enumerate(kapture.flatten(kapture_data.records_camera)):
        if kapture_image_name not in kapture_to_openmvg_view_ids:
            kapture_to_openmvg_view_ids[kapture_image_name] = i

    # registries emulate cereal's polymorphic pointer serialization ids.
    # polymorphic_status = PolymorphicStatus({}, 1, 1)
    polymorphic_registry = CerealPointerRegistry(id_key=JSON_KEY.POLYMORPHIC_ID,
                                                 value_key=JSON_KEY.POLYMORPHIC_NAME)
    ptr_wrapper_registry = CerealPointerRegistry(id_key=JSON_KEY.ID,
                                                 value_key=JSON_KEY.DATA)

    logger.debug('exporting intrinsics ...')
    openmvg_sfm_data_intrinsics = export_openmvg_intrinsics(
        kapture_cameras=kapture_data.cameras,
        kapture_to_openmvg_cam_ids=kapture_to_openmvg_cam_ids,
        polymorphic_registry=polymorphic_registry,
        ptr_wrapper_registry=ptr_wrapper_registry,
    )

    logger.debug('exporting views ...')
    openmvg_sfm_data_views = export_openmvg_views(
        kapture_cameras=kapture_data.cameras,
        kapture_images=kapture_data.records_camera,
        kapture_trajectories=kapture_data.trajectories,
        kapture_to_openmvg_cam_ids=kapture_to_openmvg_cam_ids,
        kapture_to_openmvg_view_ids=kapture_to_openmvg_view_ids,
        polymorphic_registry=polymorphic_registry,
        ptr_wrapper_registry=ptr_wrapper_registry,
        image_path_flatten=image_path_flatten,
    )

    logger.debug('exporting poses ...')
    openmvg_sfm_data_poses = export_openmvg_poses(
        kapture_images=kapture_data.records_camera,
        kapture_trajectories=kapture_data.trajectories,
        kapture_to_openmvg_view_ids=kapture_to_openmvg_view_ids)

    # structure : correspond to kapture observations + 3D points
    logger.debug('exporting structure ...')
    openmvg_sfm_data_structure = export_openmvg_structure(
        kapture_points_3d=kapture_data.points3d,
        kapture_to_openmvg_view_ids=kapture_to_openmvg_view_ids,
        kapture_observations=kapture_data.observations,
        kapture_keypoints=kapture_data.keypoints,
        kapture_path=kapture_path)

    openmvg_sfm_data = {
        JSON_KEY.SFM_DATA_VERSION: OPENMVG_SFM_DATA_VERSION_NUMBER,
        JSON_KEY.ROOT_PATH: path.abspath(openmvg_image_root_path),
        JSON_KEY.INTRINSICS: openmvg_sfm_data_intrinsics,
        JSON_KEY.VIEWS: openmvg_sfm_data_views,
        JSON_KEY.EXTRINSICS: openmvg_sfm_data_poses,
        JSON_KEY.STRUCTURE: openmvg_sfm_data_structure,
        JSON_KEY.CONTROL_POINTS: [],
    }

    logger.debug(f'Saving to openmvg {openmvg_sfm_data_file_path}...')
    with open(openmvg_sfm_data_file_path, "w") as fid:
        json.dump(openmvg_sfm_data, fid, indent=4)

    # do the actual image transfer
    if image_action != TransferAction.skip:
        job_copy = [
            (  # source path -> dest path
                get_image_fullpath(kapture_path, kapture_image_name),
                path.join(openmvg_image_root_path,
                          get_openmvg_image_path(kapture_image_name, image_path_flatten)))
            for _, _, kapture_image_name in kapture.flatten(kapture_data.records_camera)]
        # guard against empty records: zip(*[]) would raise on unpacking.
        if job_copy:
            source_filepath_list, destination_filepath_list = zip(*job_copy)
            transfer_files_from_dir(
                source_filepath_list=source_filepath_list,
                destination_filepath_list=destination_filepath_list,
                copy_strategy=image_action,
                force_overwrite=force)