def get_keypoints_from_database(database: COLMAPDatabase,
                                records_camera: kapture.RecordsCamera,
                                kapture_dirpath: str,
                                keypoint_name: str = 'SIFT') -> Optional[kapture.Keypoints]:
    """
    Writes keypoints files and return the kapture keypoints from the colmap database.
    Requires records_camera timestamp == colmap_image_id

    :param database: colmap database.
    :param records_camera: input images.
    :param kapture_dirpath: input root path to kapture.
    :param keypoint_name: name of the keypoints detector (by default, in colmap, its SIFT, but can be imported)
    :return: kapture keypoints
    """
    image_filenames = set()
    dtype = np.float32
    dsize = None  # usually 6, will be retrieved on first keypoints of DB
    # DB query. Empty blobs are replaced by an empty array of the expected dtype:
    # np.zeros without an explicit dtype defaults to float64 and would break the
    # dtype consistency assert below when the first image has 0 keypoints.
    colmap_keypoints_request = (
        (image_id,
         blob_to_array(data, dtype, (rows, cols)) if (rows > 0 and cols > 0) else np.zeros((0, 6), dtype=dtype))
        for image_id, rows, cols, data in database.execute("SELECT image_id, rows, cols, data FROM keypoints"))
    hide_progressbar = logger.getEffectiveLevel() > logging.INFO
    for colmap_image_id, image_keypoints in tqdm(colmap_keypoints_request, disable=hide_progressbar):
        if dsize is None:
            # first record fixes the expected keypoint size
            assert image_keypoints.dtype == dtype
            dsize = int(image_keypoints.shape[1])
        elif dsize != image_keypoints.shape[1]:
            raise ValueError('inconsistent keypoints size or type.')
        # retrieve image path from image_id (colmap image id is used as the kapture timestamp)
        assert len(records_camera[colmap_image_id]) == 1
        timestamp = colmap_image_id
        image_filename = next((v for v in records_camera[timestamp].values()), None)
        assert image_filename
        keypoints_filepath = kapture.io.features.get_keypoints_fullpath(kapture_dirpath, image_filename)
        if image_keypoints.shape[0] == 0:
            logger.warning(f'image={image_filename} has 0 keypoints')
        # save the actual file
        kapture.io.features.image_keypoints_to_file(keypoints_filepath, image_keypoints)
        # register it into kapture
        image_filenames.add(image_filename)

    if image_filenames:
        return kapture.Keypoints(keypoint_name, dtype, dsize, image_filenames)
    else:
        return None
def sub_kapture_from_img_list(kdata, kdata_path, img_list, pairs):
    """
    Build a sub-kapture containing only the given images and image pairs.

    :param kdata: input kapture data
    :param kdata_path: root path of the kapture data, used to locate match files
    :param img_list: list of image names to keep
    :param pairs: list of image-name pairs to keep as matches
    :return: a new kapture.Kapture restricted to img_list / pairs
    """
    trajectories = kapture.Trajectories()
    sensors = kapture.Sensors()
    records = kapture.RecordsCamera()
    keypoints = kapture.Keypoints(kdata.keypoints._tname, kdata.keypoints._dtype, kdata.keypoints._dsize)
    # `is not None` instead of `!= None` (PEP 8 singleton comparison)
    if kdata.descriptors is not None:
        descriptors = kapture.Descriptors(kdata.descriptors._tname, kdata.descriptors._dtype,
                                          kdata.descriptors._dsize)
    else:
        descriptors = None
    matches = kapture.Matches()

    # reverse lookup: image name -> (timestamp, sensor_id)
    timestamp_sensor_id_from_image_name = {img_name: (timestamp, sensor_id)
                                           for timestamp, sensor_id, img_name
                                           in kapture.flatten(kdata.records_camera)}
    for img in img_list:
        timestamp, sensor_id = timestamp_sensor_id_from_image_name[img]
        pose = kdata.trajectories[timestamp][sensor_id]
        sensors[sensor_id] = kdata.sensors[sensor_id]
        records[timestamp, sensor_id] = img
        trajectories[timestamp, sensor_id] = pose
        keypoints.add(img)
        if descriptors is not None:
            descriptors.add(img)

    # keep only pairs whose match file actually exists on disk
    for i in pairs:
        image_matches_filepath = get_matches_fullpath((i[0], i[1]), kdata_path)
        if os.path.exists(image_matches_filepath):
            matches.add(i[0], i[1])
    matches.normalize()

    return kapture.Kapture(sensors=sensors, trajectories=trajectories, records_camera=records,
                           descriptors=descriptors, keypoints=keypoints, matches=matches)
def sub_kapture_from_img_list(kdata, img_list, pairs, keypoints_type, descriptors_type):
    """
    Build a sub-kapture restricted to the given images and image pairs.

    :param kdata: input kapture data
    :param img_list: list of image names to keep
    :param pairs: list of image-name pairs to keep as matches
    :param keypoints_type: key of the keypoints collection to copy
    :param descriptors_type: key of the descriptors collection to copy
    :return: a new kapture.Kapture restricted to img_list / pairs
    """
    trajectories = kapture.Trajectories()
    sensors = kapture.Sensors()
    records = kapture.RecordsCamera()
    keypoints = kapture.Keypoints(kdata.keypoints[keypoints_type].type_name,
                                  kdata.keypoints[keypoints_type].dtype,
                                  kdata.keypoints[keypoints_type].dsize)
    if kdata.descriptors is not None and descriptors_type in kdata.descriptors:
        descriptors = kapture.Descriptors(kdata.descriptors[descriptors_type].type_name,
                                          kdata.descriptors[descriptors_type].dtype,
                                          kdata.descriptors[descriptors_type].dsize,
                                          kdata.descriptors[descriptors_type].keypoints_type,
                                          kdata.descriptors[descriptors_type].metric_type)
    else:
        descriptors = None
    matches = kapture.Matches()

    # reverse lookup: image name -> (timestamp, sensor_id)
    timestamp_sensor_id_from_image_name = {img_name: (timestamp, sensor_id)
                                           for timestamp, sensor_id, img_name
                                           in kapture.flatten(kdata.records_camera)}
    for img in img_list:
        timestamp, sensor_id = timestamp_sensor_id_from_image_name[img]
        sensors[sensor_id] = kdata.sensors[sensor_id]
        records[timestamp, sensor_id] = img
        if (timestamp, sensor_id) in kdata.trajectories:
            pose = kdata.trajectories[timestamp][sensor_id]
            trajectories[timestamp, sensor_id] = pose
        keypoints.add(img)
        # bug fix: test the local `descriptors` — it is None when descriptors_type is
        # absent even though kdata.descriptors is not None; the original test on
        # kdata.descriptors raised AttributeError in that case.
        if descriptors is not None:
            descriptors.add(img)

    for i in pairs:
        if i in kdata.matches[keypoints_type]:
            matches.add(i[0], i[1])
    matches.normalize()

    # do not wrap a None descriptors into a {type: None} dict
    return kapture.Kapture(sensors=sensors, trajectories=trajectories, records_camera=records,
                           descriptors={descriptors_type: descriptors} if descriptors is not None else None,
                           keypoints={keypoints_type: keypoints},
                           matches={keypoints_type: matches})
def test_init_keypoints_unknown(self):
    """An arbitrary (unknown) detector name is accepted and kept as type_name."""
    kpts = kapture.Keypoints('UNKNOWN', int, 64, ['a/a.jpg'])
    self.assertEqual('UNKNOWN', kpts.type_name)
def test_init_keypoints_sift(self):
    """SIFT keypoints keep their type name and deduplicate repeated image names."""
    image_names = ['a/a.jpg', 'b/b.jpg', 'c/c.jpg', 'c/c.jpg']
    kpts = kapture.Keypoints('SIFT', float, 4, image_names)
    self.assertEqual('SIFT', kpts.type_name)
    self.assertEqual(3, len(kpts))
    self.assertIn('a/a.jpg', kpts)
def extract_kapture_keypoints(kapture_root, config, output_dir='', overwrite=False):
    """
    Extract r2d2 keypoints and descriptors to the kapture format directly.

    :param kapture_root: path to the kapture root directory containing the input images
    :param config: dict of r2d2 parameters (checkpoint, thresholds, scales, top_k, ...)
    :param output_dir: root of the output directory for features (defaults to kapture_root)
    :param overwrite: if True, delete existing features and re-extract everything
    """
    print('extract_kapture_keypoints...')
    kdata = kapture_from_dir(kapture_root, matches_pairsfile_path=None,
                             skip_list=[kapture.GlobalFeatures,
                                        kapture.Matches,
                                        kapture.Points3d,
                                        kapture.Observations])
    export_dir = output_dir if output_dir else kapture_root  # root of output directory for features
    os.makedirs(export_dir, exist_ok=True)

    assert kdata.records_camera is not None
    image_list = [filename for _, _, filename in kapture.flatten(kdata.records_camera)]
    # resume extraction if some features exist
    try:
        # load existing features, if any
        kdata.keypoints = keypoints_from_dir(export_dir, None)
        kdata.descriptors = descriptors_from_dir(export_dir, None)
        if kdata.keypoints is not None and kdata.descriptors is not None and not overwrite:
            image_list = [name for name in image_list
                          if name not in kdata.keypoints or name not in kdata.descriptors]
    except FileNotFoundError:
        pass
    except Exception:
        # was a bare `except:` which also swallowed SystemExit/KeyboardInterrupt
        logging.exception("Error with importing existing local features.")

    # clear features first if overwriting
    if overwrite:
        delete_existing_kapture_files(export_dir, True, only=[kapture.Descriptors, kapture.Keypoints])

    if len(image_list) == 0:
        print('All features were already extracted')
        return
    else:
        print(f'Extracting r2d2 features for {len(image_list)} images')

    iscuda = common.torch_set_gpu([torch.cuda.is_available()])

    # load the network...
    net = load_network(config['checkpoint'])
    if iscuda:
        net = net.cuda()

    # create the non-maxima detector
    detector = NonMaxSuppression(
        rel_thr=config['reliability_thr'],
        rep_thr=config['repeatability_thr'])

    # dtype/dsize of existing features (None until the first image is processed)
    keypoints_dtype = None if kdata.keypoints is None else kdata.keypoints.dtype
    descriptors_dtype = None if kdata.descriptors is None else kdata.descriptors.dtype
    keypoints_dsize = None if kdata.keypoints is None else kdata.keypoints.dsize
    descriptors_dsize = None if kdata.descriptors is None else kdata.descriptors.dsize

    for image_name in image_list:
        img_path = get_image_fullpath(kapture_root, image_name)
        if img_path.endswith('.txt'):
            # bug fix: the original extended an undefined `images` variable here (NameError);
            # text files are not images, simply skip them.
            continue

        print(f"\nExtracting features for {img_path}")
        img = Image.open(img_path).convert('RGB')
        W, H = img.size
        img = norm_RGB(img)[None]
        if iscuda:
            img = img.cuda()

        # extract keypoints/descriptors for a single image
        xys, desc, scores = extract_multiscale(net, img, detector,
                                               scale_f=config['scale_f'],
                                               min_scale=config['min_scale'],
                                               max_scale=config['max_scale'],
                                               min_size=config['min_size'],
                                               max_size=config['max_size'],
                                               verbose=True)

        xys = xys.cpu().numpy()
        desc = desc.cpu().numpy()
        scores = scores.cpu().numpy()
        # keep the top_k best-scoring keypoints (argsort is ascending, take the tail)
        idxs = scores.argsort()[-config['top_k'] or None:]

        xys = xys[idxs]
        desc = desc[idxs]

        if keypoints_dtype is None or descriptors_dtype is None:
            # first image: infer dtype/dsize and write the kapture feature config files
            keypoints_dtype = xys.dtype
            descriptors_dtype = desc.dtype
            keypoints_dsize = xys.shape[1]
            descriptors_dsize = desc.shape[1]
            kdata.keypoints = kapture.Keypoints('r2d2', keypoints_dtype, keypoints_dsize)
            kdata.descriptors = kapture.Descriptors('r2d2', descriptors_dtype, descriptors_dsize)
            keypoints_config_absolute_path = get_csv_fullpath(kapture.Keypoints, export_dir)
            descriptors_config_absolute_path = get_csv_fullpath(kapture.Descriptors, export_dir)
            keypoints_to_file(keypoints_config_absolute_path, kdata.keypoints)
            descriptors_to_file(descriptors_config_absolute_path, kdata.descriptors)
        else:
            # subsequent images must match the recorded feature type/shape
            assert kdata.keypoints.type_name == 'r2d2'
            assert kdata.descriptors.type_name == 'r2d2'
            assert kdata.keypoints.dtype == xys.dtype
            assert kdata.descriptors.dtype == desc.dtype
            assert kdata.keypoints.dsize == xys.shape[1]
            assert kdata.descriptors.dsize == desc.shape[1]

        keypoints_fullpath = get_keypoints_fullpath(export_dir, image_name)
        print(f"Saving {xys.shape[0]} keypoints to {keypoints_fullpath}")
        image_keypoints_to_file(keypoints_fullpath, xys)
        kdata.keypoints.add(image_name)

        descriptors_fullpath = get_descriptors_fullpath(export_dir, image_name)
        print(f"Saving {desc.shape[0]} descriptors to {descriptors_fullpath}")
        image_descriptors_to_file(descriptors_fullpath, desc)
        kdata.descriptors.add(image_name)

    if not keypoints_check_dir(kdata.keypoints, export_dir) or \
            not descriptors_check_dir(kdata.descriptors, export_dir):
        print('local feature extraction ended successfully but not all files were saved')
def import_from_colmap_images_txt(colmap_images_filepath: str,
                                  kapture_dirpath: Optional[str] = None
                                  ) -> Tuple[kapture.RecordsCamera, kapture.Trajectories,
                                             Optional[kapture.Keypoints]]:
    """
    Imports RecordsCamera, Trajectories and Keypoints from colmap images.txt

    :param colmap_images_filepath: path to colmap images.txt file
    :param kapture_dirpath: path to kapture root path. If not given (None), keypoints are not created.
    :return: kapture images, trajectories and keypoints
    """
    # colmap images file format is :
    # Image list with two lines of data per image:
    #   IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME
    #   POINTS2D[] as (X, Y, POINT3D_ID)
    images = kapture.RecordsCamera()
    trajectories = kapture.Trajectories()
    keypoints = None
    image_names = []  # recorded in file order, to pair pose lines with their POINTS2D lines

    # first pass: IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME
    # to images and trajectories
    with open(colmap_images_filepath, 'r') as colmap_images_file:
        lines = colmap_images_file.readlines()
        lines = (line for line in lines if not line.startswith('#'))  # eliminate comments
        # keep only even-indexed lines (the pose lines); the original comment said the opposite
        lines = (line for i, line in enumerate(lines) if (i % 2) == 0)
        # split by space and or comma
        lines = (re.findall(colmap_reconstruction_split_pattern, line.rstrip())
                 for line in lines)  # split fields
        # but make sure not to split spaces in file names
        lines = (line[0:9] + [' '.join(line[9:])] for line in lines)
        for fields in lines:
            # IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME
            timestamp = int(fields[0])  # use image ID as timestamp
            q = [float(v) for v in fields[1:5]]
            t = [float(v) for v in fields[5:8]]
            pose = kapture.PoseTransform(q, t)
            camera_id = get_camera_kapture_id_from_colmap_id(int(fields[8]))
            image_name = fields[9]
            images[timestamp, camera_id] = image_name
            trajectories[timestamp, camera_id] = pose
            image_names.append(image_name)

    # second pass: keypoints, observations and points 3d
    if kapture_dirpath is not None:
        # second pass: POINTS2D[] as (X, Y, POINT3D_ID)
        image_names_with_keypoints = set()
        # observations = kapture.Observations()
        with open(colmap_images_filepath, 'r') as colmap_images_file:
            lines = colmap_images_file.readlines()
            lines = (line for line in lines if not line.startswith('#'))  # eliminate comments
            # keep only odd-indexed lines (the POINTS2D lines)
            lines = (line for i, line in enumerate(lines) if (i % 2) == 1)
            # split by space and or comma
            lines = (re.findall(colmap_reconstruction_split_pattern, line.rstrip())
                     for line in lines)  # split fields
            for image_name, fields in zip(image_names, lines):
                # keep only (X, Y) of each (X, Y, POINT3D_ID) triplet
                image_keypoints_colmap = np.array(fields).reshape((-1, 3))[:, 0:2].astype(np.float32)
                # register as keypoints if there is at least one
                if image_keypoints_colmap.shape[0] > 0:
                    keypoints_filepath = kapture.io.features.get_keypoints_fullpath(kapture_dirpath, image_name)
                    kapture.io.features.image_keypoints_to_file(keypoints_filepath, image_keypoints_colmap)
                    image_names_with_keypoints.add(image_name)
                    # TODO: observations

        if image_names_with_keypoints:
            keypoints = kapture.Keypoints('SIFT', np.float32, 2, image_names_with_keypoints)

    return images, trajectories, keypoints
def import_bundler(bundler_path: str,
                   image_list_path: str,
                   image_dir_path: str,
                   kapture_dir_path: str,
                   ignore_trajectories: bool,
                   add_reconstruction: bool,
                   force_overwrite_existing: bool = False,
                   images_import_method: TransferAction = TransferAction.skip) -> None:
    """
    Imports bundler data and save them as kapture.

    :param bundler_path: path to the bundler model file
    :param image_list_path: path to the file containing the list of image names
    :param image_dir_path: input path to bundler image directory.
    :param kapture_dir_path: path to kapture top directory
    :param ignore_trajectories: if True, will not import the trajectories
    :param add_reconstruction: if True, will create 3D points and observations
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    """
    os.makedirs(kapture_dir_path, exist_ok=True)
    delete_existing_kapture_files(kapture_dir_path, force_erase=force_overwrite_existing)

    logger.info('loading all content...')
    # if there is a filter list, parse it
    with open(image_list_path) as file:
        file_content = file.readlines()
    # remove end line char and empty lines
    image_list = [line.rstrip() for line in file_content if line != '\n']

    with open(bundler_path) as file:
        bundler_content = file.readlines()
    # remove end line char and empty lines
    bundler_content = [line.rstrip() for line in bundler_content if line != '\n']
    assert bundler_content[0] == "# Bundle file v0.3"
    # <num_cameras> <num_points>
    line_1 = bundler_content[1].split()
    number_of_cameras = int(line_1[0])
    number_of_points = int(line_1[1])
    offset = 2  # line pointer: first camera record starts after the 2 header lines
    number_of_lines_per_camera = 5  # 1 camera + 3 rotation + 1 translation

    cameras = kapture.Sensors()
    images = kapture.RecordsCamera()
    trajectories = kapture.Trajectories() if not ignore_trajectories else None
    points3d = [] if add_reconstruction else None
    keypoints = kapture.Keypoints('sift', np.float32, 2) if add_reconstruction else None
    observations = kapture.Observations() if add_reconstruction else None

    image_mapping = []  # bundler camera_id -> (name, width, height)
    for i in range(0, number_of_cameras):
        start_index = i * number_of_lines_per_camera + offset
        file_name = image_list[i]

        # process camera info
        line_camera = bundler_content[start_index].split()
        focal_length = float(line_camera[0])
        k1 = float(line_camera[1])
        k2 = float(line_camera[2])

        # lazy open: only read image size, do not decode pixels
        with Image.open(path.join(image_dir_path, file_name)) as im:
            width, height = im.size
        image_mapping.append((file_name, width, height))
        # principal point assumed at the image center
        camera = kapture.Camera(MODEL, [width, height, focal_length, width / 2, height / 2, k1, k2])
        camera_id = f'sensor{i}'
        cameras[camera_id] = camera

        # process extrinsics
        rotation_matrix = [[float(v) for v in line.split()]
                           for line in bundler_content[start_index + 1:start_index + 4]]
        quaternion_wxyz = quaternion.from_rotation_matrix(rotation_matrix)
        translation = np.array([float(v) for v in bundler_content[start_index + 4].split()])
        pose = kapture.PoseTransform(quaternion_wxyz, translation)

        # The Bundler model uses a coordinate system that differs from the *computer vision camera
        # coordinate system*. More specifically, they use the camera coordinate system typically used
        # in *computer graphics*. In this camera coordinate system, the camera is looking down the
        # `-z`-axis, with the `x`-axis pointing to the right and the `y`-axis pointing upwards.
        # rotation Pi around the x axis to get the *computer vision camera
        # coordinate system*
        rotation_around_x = quaternion.quaternion(0.0, 1.0, 0.0, 0.0)
        transformation = kapture.PoseTransform(rotation_around_x, np.array([0, 0, 0]))

        # bundler camera index `i` doubles as the kapture timestamp
        images[(i, camera_id)] = file_name
        if trajectories is not None:
            # transformation.inverse() is equal to transformation
            # (rotation around -Pi or Pi around X is the same)
            trajectories[(i, camera_id)] = kapture.PoseTransform.compose([transformation, pose, transformation])

    if points3d is not None and number_of_points > 0:
        assert keypoints is not None
        assert observations is not None
        offset += number_of_cameras * number_of_lines_per_camera
        number_of_lines_per_point = 3  # position color viewlist

        # (image_name, bundler_keypoint_id ) -> keypoint_id
        known_keypoints = {}
        local_keypoints = {}
        for i in range(0, number_of_points):
            start_index = i * number_of_lines_per_point + offset
            position = [float(v) for v in bundler_content[start_index].split()]
            # apply transformation (same graphics -> vision axis flip as for the poses)
            position = [position[0], -position[1], -position[2]]
            color = [float(v) for v in bundler_content[start_index + 1].split()]

            # <view list>: length of the list + [<camera> <key> <x> <y>]
            # x, y origin is the center of the image
            view_list = bundler_content[start_index + 2].split()
            number_of_observations = int(view_list[0])

            for j in range(number_of_observations):
                camera_id = int(view_list[1 + 4 * j + 0])
                keypoint_id = int(view_list[1 + 4 * j + 1])
                x = float(view_list[1 + 4 * j + 2])
                y = float(view_list[1 + 4 * j + 3])

                file_name, width, height = image_mapping[camera_id]
                # put (0,0) in upper left corner
                x += (width / 2)
                y += (height / 2)

                # init local_keypoints if needed
                if file_name not in local_keypoints:
                    local_keypoints[file_name] = []
                # do not add the same keypoint twice
                if (file_name, keypoint_id) not in known_keypoints:
                    # in the kapture format, keypoint id is different. Note that it starts from 0
                    known_keypoints[(file_name, keypoint_id)] = len(local_keypoints[file_name])
                    local_keypoints[file_name].append([x, y])
                keypoint_idx = known_keypoints[(file_name, keypoint_id)]
                observations.add(i, file_name, keypoint_idx)
            points3d.append(position + color)
        points3d = np.array(points3d)

        # finally, convert local_keypoints to np.ndarray and add them to the global keypoints variable
        keypoints = kapture.Keypoints('sift', np.float32, 2)
        for image_filename, keypoints_array in local_keypoints.items():
            keypoints_np_array = np.array(keypoints_array).astype(np.float32)
            keypoints_out_path = kapture.io.features.get_keypoints_fullpath(kapture_dir_path, image_filename)
            kapture.io.features.image_keypoints_to_file(keypoints_out_path, keypoints_np_array)
            keypoints.add(image_filename)

    if points3d is not None:
        points3d = kapture.Points3d(points3d)

    # import (copy) image files.
    logger.info('import image files ...')
    filename_list = [f for _, _, f in kapture.flatten(images)]
    import_record_data_from_dir_auto(image_dir_path, kapture_dir_path, filename_list, images_import_method)

    # pack into kapture format
    imported_kapture = kapture.Kapture(sensors=cameras, records_camera=images, trajectories=trajectories,
                                       points3d=points3d, keypoints=keypoints, observations=observations)
    logger.info('writing imported data...')
    kapture_to_dir(kapture_dir_path, imported_kapture)
# NOTE(review): fragment of a larger d2net extraction loop; `keypoints`, `descriptors`,
# `scores`, `kdata`, `args` and the *_dtype/*_dsize variables are defined by the
# enclosing function, which is not visible in this chunk.
if args.max_keypoints != float("+inf"):
    # keep the last (the highest) indexes
    idx_keep = scores.argsort()[-min(len(keypoints), args.max_keypoints):]
    keypoints = keypoints[idx_keep]
    descriptors = descriptors[idx_keep]
if keypoints_dtype is None or descriptors_dtype is None:
    # first image: infer dtype/dsize and write the kapture feature config files
    keypoints_dtype = keypoints.dtype
    descriptors_dtype = descriptors.dtype
    keypoints_dsize = keypoints.shape[1]
    descriptors_dsize = descriptors.shape[1]
    kdata.keypoints = kapture.Keypoints('d2net', keypoints_dtype, keypoints_dsize)
    kdata.descriptors = kapture.Descriptors('d2net', descriptors_dtype, descriptors_dsize)
    keypoints_config_absolute_path = get_csv_fullpath(kapture.Keypoints, args.kapture_root)
    descriptors_config_absolute_path = get_csv_fullpath(kapture.Descriptors, args.kapture_root)
    keypoints_to_file(keypoints_config_absolute_path, kdata.keypoints)
    descriptors_to_file(descriptors_config_absolute_path, kdata.descriptors)
else:
    # subsequent images must match the recorded feature type/shape
    assert kdata.keypoints.type_name == 'd2net'
    assert kdata.descriptors.type_name == 'd2net'
    assert kdata.keypoints.dtype == keypoints.dtype
    assert kdata.descriptors.dtype == descriptors.dtype
    assert kdata.keypoints.dsize == keypoints.shape[1]
    assert kdata.descriptors.dsize == descriptors.shape[1]
def extract_kapture_keypoints(args):
    """
    Extract r2d2 keypoints and descriptors to the kapture format directly

    :param args: parsed command line arguments
        (kapture_root, model, gpu, reliability_thr, repeatability_thr, scales, sizes, top_k)
    """
    print('extract_kapture_keypoints...')
    kdata = kapture_from_dir(args.kapture_root, matches_pairsfile_path=None,
                             skip_list=[kapture.GlobalFeatures,
                                        kapture.Matches,
                                        kapture.Points3d,
                                        kapture.Observations])
    assert kdata.records_camera is not None
    image_list = [filename for _, _, filename in kapture.flatten(kdata.records_camera)]
    # resume: only process images that still miss keypoints or descriptors
    if kdata.keypoints is not None and kdata.descriptors is not None:
        image_list = [name for name in image_list
                      if name not in kdata.keypoints or name not in kdata.descriptors]

    if len(image_list) == 0:
        print('All features were already extracted')
        return
    else:
        print(f'Extracting r2d2 features for {len(image_list)} images')

    iscuda = common.torch_set_gpu(args.gpu)

    # load the network...
    net = load_network(args.model)
    if iscuda:
        net = net.cuda()

    # create the non-maxima detector
    detector = NonMaxSuppression(rel_thr=args.reliability_thr,
                                 rep_thr=args.repeatability_thr)

    # dtype/dsize of existing features (None until the first image is processed)
    keypoints_dtype = None if kdata.keypoints is None else kdata.keypoints.dtype
    descriptors_dtype = None if kdata.descriptors is None else kdata.descriptors.dtype
    keypoints_dsize = None if kdata.keypoints is None else kdata.keypoints.dsize
    descriptors_dsize = None if kdata.descriptors is None else kdata.descriptors.dsize

    for image_name in image_list:
        img_path = get_image_fullpath(args.kapture_root, image_name)
        print(f"\nExtracting features for {img_path}")
        img = Image.open(img_path).convert('RGB')
        W, H = img.size
        img = norm_RGB(img)[None]
        if iscuda:
            img = img.cuda()

        # extract keypoints/descriptors for a single image
        xys, desc, scores = extract_multiscale(net, img, detector,
                                               scale_f=args.scale_f,
                                               min_scale=args.min_scale,
                                               max_scale=args.max_scale,
                                               min_size=args.min_size,
                                               max_size=args.max_size,
                                               verbose=True)

        xys = xys.cpu().numpy()
        desc = desc.cpu().numpy()
        scores = scores.cpu().numpy()
        # keep the top_k best-scoring keypoints (argsort is ascending, take the tail)
        idxs = scores.argsort()[-args.top_k or None:]

        xys = xys[idxs]
        desc = desc[idxs]

        if keypoints_dtype is None or descriptors_dtype is None:
            # first image: infer dtype/dsize and write the kapture feature config files
            keypoints_dtype = xys.dtype
            descriptors_dtype = desc.dtype
            keypoints_dsize = xys.shape[1]
            descriptors_dsize = desc.shape[1]
            kdata.keypoints = kapture.Keypoints('r2d2', keypoints_dtype, keypoints_dsize)
            kdata.descriptors = kapture.Descriptors('r2d2', descriptors_dtype, descriptors_dsize)
            keypoints_config_absolute_path = get_csv_fullpath(kapture.Keypoints, args.kapture_root)
            descriptors_config_absolute_path = get_csv_fullpath(kapture.Descriptors, args.kapture_root)
            keypoints_to_file(keypoints_config_absolute_path, kdata.keypoints)
            descriptors_to_file(descriptors_config_absolute_path, kdata.descriptors)
        else:
            # subsequent images must match the recorded feature type/shape
            assert kdata.keypoints.type_name == 'r2d2'
            assert kdata.descriptors.type_name == 'r2d2'
            assert kdata.keypoints.dtype == xys.dtype
            assert kdata.descriptors.dtype == desc.dtype
            assert kdata.keypoints.dsize == xys.shape[1]
            assert kdata.descriptors.dsize == desc.shape[1]

        keypoints_fullpath = get_keypoints_fullpath(args.kapture_root, image_name)
        print(f"Saving {xys.shape[0]} keypoints to {keypoints_fullpath}")
        image_keypoints_to_file(keypoints_fullpath, xys)
        kdata.keypoints.add(image_name)

        descriptors_fullpath = get_descriptors_fullpath(args.kapture_root, image_name)
        print(f"Saving {desc.shape[0]} descriptors to {descriptors_fullpath}")
        image_descriptors_to_file(descriptors_fullpath, desc)
        kdata.descriptors.add(image_name)

    if not keypoints_check_dir(kdata.keypoints, args.kapture_root) or \
            not descriptors_check_dir(kdata.descriptors, args.kapture_root):
        print('local feature extraction ended successfully but not all files were saved')
def _import_features_and_matches(opensfm_root_dir, kapture_root_dir, disable_tqdm)\
        -> Tuple[kapture.Descriptors, kapture.Keypoints, kapture.Matches]:
    """
    Imports OpenSfM local features (keypoints + descriptors) and matches into kapture files.

    :param opensfm_root_dir: root of the OpenSfM model directory ('features/' and 'matches/' subdirs)
    :param kapture_root_dir: root of the output kapture directory
    :param disable_tqdm: if True, hides the progress bars
    :return: (descriptors, keypoints, matches); descriptors/keypoints stay None
             when no feature file was found
    """
    # import features (keypoints + descriptors)
    kapture_keypoints = None  # kapture.Keypoints(type_name='opensfm', dsize=4, dtype=np.float64)
    kapture_descriptors = None  # kapture.Descriptors(type_name='opensfm', dsize=128, dtype=np.uint8)
    opensfm_features_dir_path = path.join(opensfm_root_dir, 'features')
    opensfm_features_suffix = '.features.npz'
    if path.isdir(opensfm_features_dir_path):
        logger.info('importing keypoints and descriptors ...')
        # recursively collect all *.features.npz files under features/
        opensfm_features_file_list = (path.join(dp, fn)
                                      for dp, _, fs in os.walk(opensfm_features_dir_path) for fn in fs)
        opensfm_features_file_list = (filepath
                                      for filepath in opensfm_features_file_list
                                      if filepath.endswith(opensfm_features_suffix))
        for opensfm_feature_filename in tqdm(opensfm_features_file_list, disable=disable_tqdm):
            # image name = feature file path relative to features/, without the suffix
            image_filename = path.relpath(opensfm_feature_filename,
                                          opensfm_features_dir_path)[:-len(opensfm_features_suffix)]
            opensfm_image_features = np.load(opensfm_feature_filename)
            opensfm_image_keypoints = opensfm_image_features['points']
            opensfm_image_descriptors = opensfm_image_features['descriptors']
            logger.debug(f'parsing keypoints and descriptors in {opensfm_feature_filename}')
            if kapture_keypoints is None:
                # print(type(opensfm_image_keypoints.dtype))
                # HAHOG = Hessian Affine feature point detector + HOG descriptor
                kapture_keypoints = kapture.Keypoints(
                    type_name='HessianAffine',
                    dsize=opensfm_image_keypoints.shape[1],
                    dtype=opensfm_image_keypoints.dtype)
            if kapture_descriptors is None:
                kapture_descriptors = kapture.Descriptors(
                    type_name='HOG',
                    dsize=opensfm_image_descriptors.shape[1],
                    dtype=opensfm_image_descriptors.dtype)

            # convert keypoints file
            keypoint_file_path = kapture.io.features.get_features_fullpath(
                data_type=kapture.Keypoints,
                kapture_dirpath=kapture_root_dir,
                image_filename=image_filename)
            kapture.io.features.image_keypoints_to_file(
                filepath=keypoint_file_path,
                image_keypoints=opensfm_image_keypoints)
            # register the file
            kapture_keypoints.add(image_filename)

            # convert descriptors file
            descriptor_file_path = kapture.io.features.get_features_fullpath(
                data_type=kapture.Descriptors,
                kapture_dirpath=kapture_root_dir,
                image_filename=image_filename)
            kapture.io.features.image_descriptors_to_file(
                filepath=descriptor_file_path,
                image_descriptors=opensfm_image_descriptors)
            # register the file
            kapture_descriptors.add(image_filename)

    # import matches
    kapture_matches = kapture.Matches()
    opensfm_matches_suffix = '_matches.pkl.gz'
    opensfm_matches_dir_path = path.join(opensfm_root_dir, 'matches')
    if path.isdir(opensfm_matches_dir_path):
        logger.info('importing matches ...')
        opensfm_matches_file_list = (path.join(dp, fn)
                                     for dp, _, fs in os.walk(opensfm_matches_dir_path) for fn in fs)
        opensfm_matches_file_list = (filepath
                                     for filepath in opensfm_matches_file_list
                                     if filepath.endswith(opensfm_matches_suffix))
        for opensfm_matches_filename in tqdm(opensfm_matches_file_list, disable=disable_tqdm):
            image_filename_1 = path.relpath(opensfm_matches_filename,
                                            opensfm_matches_dir_path)[:-len(opensfm_matches_suffix)]
            logger.debug(f'parsing matches in {image_filename_1}')
            # NOTE(review): pickle.load on data files — only safe for trusted OpenSfM models
            with gzip.open(opensfm_matches_filename, 'rb') as f:
                opensfm_matches = pickle.load(f)
                for image_filename_2, opensfm_image_matches in opensfm_matches.items():
                    image_pair = (image_filename_1, image_filename_2)
                    # register the pair to kapture
                    kapture_matches.add(*image_pair)
                    # convert the bin file to kapture
                    kapture_matches_filepath = kapture.io.features.get_matches_fullpath(
                        image_filename_pair=image_pair,
                        kapture_dirpath=kapture_root_dir)
                    kapture_image_matches = np.hstack([
                        opensfm_image_matches.astype(np.float64),
                        # no matches scoring = assume all to one
                        np.ones(shape=(opensfm_image_matches.shape[0], 1), dtype=np.float64)])
                    kapture.io.features.image_matches_to_file(kapture_matches_filepath, kapture_image_matches)

    return kapture_descriptors, kapture_keypoints, kapture_matches
# NOTE(review): fragment of a larger d2net extraction loop (per-type feature variant);
# `keypoints`, `descriptors`, `scores`, `kdata`, `args` and the *_dtype/*_dsize
# variables come from the enclosing function, which is not visible in this chunk.
if args.max_keypoints != float("+inf"):
    # keep the last (the highest) indexes
    idx_keep = scores.argsort()[-min(len(keypoints), args.max_keypoints):]
    keypoints = keypoints[idx_keep]
    descriptors = descriptors[idx_keep]
if keypoints_dtype is None or descriptors_dtype is None:
    # first image: infer dtype/dsize and write the per-type feature config files
    keypoints_dtype = keypoints.dtype
    descriptors_dtype = descriptors.dtype
    keypoints_dsize = keypoints.shape[1]
    descriptors_dsize = descriptors.shape[1]
    kdata.keypoints[args.keypoints_type] = kapture.Keypoints(
        'd2net', keypoints_dtype, keypoints_dsize)
    kdata.descriptors[args.descriptors_type] = kapture.Descriptors(
        'd2net', descriptors_dtype, descriptors_dsize, args.keypoints_type, 'L2')
    keypoints_config_absolute_path = get_feature_csv_fullpath(
        kapture.Keypoints, args.keypoints_type, args.kapture_root)
    descriptors_config_absolute_path = get_feature_csv_fullpath(
        kapture.Descriptors, args.descriptors_type, args.kapture_root)
    keypoints_to_file(keypoints_config_absolute_path, kdata.keypoints[args.keypoints_type])
    descriptors_to_file(descriptors_config_absolute_path, kdata.descriptors[args.descriptors_type])
else:
    # consistency check against already-recorded features
    # NOTE(review): this chunk is truncated mid-statement in SOURCE
    assert kdata.keypoints[
def import_nvm(nvm_file_path: str,
               nvm_images_path: str,
               kapture_path: str,
               filter_list_path: Optional[str],
               ignore_trajectories: bool,
               add_reconstruction: bool,
               force_overwrite_existing: bool = False,
               images_import_method: TransferAction = TransferAction.skip) -> None:
    """
    Imports nvm data to kapture format.

    :param nvm_file_path: path to nvm file
    :param nvm_images_path: path to NVM images directory.
    :param kapture_path: path to kapture root directory.
    :param filter_list_path: path to the optional file containing a list of images to process
    :param ignore_trajectories: if True, will not create trajectories
    :param add_reconstruction: if True, will add observations, keypoints and 3D points.
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    """
    # TODO implement [optional calibration]
    # doc : http://ccwu.me/vsfm/doc.html#nvm
    os.makedirs(kapture_path, exist_ok=True)
    delete_existing_kapture_files(kapture_path, force_erase=force_overwrite_existing)

    logger.info('loading all content...')
    # if there is a filter list, parse it
    # keep it as Set[str] to easily find images
    if filter_list_path:
        with open(filter_list_path) as file:
            file_content = file.readlines()
        # remove end line char and empty lines
        filter_list = {line.rstrip() for line in file_content if line != '\n'}
    else:
        filter_list = None

    # now do the nvm
    with open(nvm_file_path) as file:
        nvm_content = file.readlines()
    # remove end line char and empty lines
    nvm_content = [line.rstrip() for line in nvm_content if line != '\n']
    # only NVM_V3 is supported
    assert nvm_content[0] == "NVM_V3"
    # offset represents the line pointer
    offset = 1
    # camera_id_offset keeps tracks of used camera_id in case of multiple reconstructed models
    camera_id_offset = 0
    # point_id_offset keeps tracks of used point_id in case of multiple reconstructed models
    point_id_offset = 0

    cameras = kapture.Sensors()
    images = kapture.RecordsCamera()
    trajectories = kapture.Trajectories() if not ignore_trajectories else None
    # bug fix: the original line carried a duplicated `if add_reconstruction else None`
    observations = kapture.Observations() if add_reconstruction else None
    keypoints = kapture.Keypoints('sift', np.float32, 2) if add_reconstruction else None
    points3d = [] if add_reconstruction else None

    # break if number of cameras == 0 or reached end of file
    while True:
        # <Model1> <Model2> ...
        # Each reconstructed <model> contains the following
        # <Number of cameras> <List of cameras>
        # <Number of 3D points> <List of points>
        # In practice,
        # <Number of cameras>
        # <List of cameras>, one per line
        # <Number of 3D points>
        # <List of points>, one per line
        number_of_cameras = int(nvm_content[offset])
        offset += 1
        if number_of_cameras == 0:  # a line with <0> signify the end of models
            break

        logger.debug('importing model cameras...')
        # parse all cameras for current model
        image_idx_to_image_name = parse_cameras(number_of_cameras,
                                                nvm_content,
                                                offset,
                                                camera_id_offset,
                                                filter_list,
                                                nvm_images_path,
                                                cameras,
                                                images,
                                                trajectories)
        offset += number_of_cameras
        camera_id_offset += number_of_cameras

        # parse all points3d
        number_of_points = int(nvm_content[offset])
        offset += 1
        if points3d is not None and number_of_points > 0:
            assert keypoints is not None
            assert observations is not None
            logger.debug('importing model points...')
            parse_points3d(kapture_path, number_of_points, nvm_content, offset, point_id_offset,
                           image_idx_to_image_name, filter_list, points3d, keypoints, observations)
            point_id_offset += number_of_points
        offset += number_of_points
        # reached end of file?
        if offset >= len(nvm_content):
            break

    # do not export values if none were found.
    if points3d is not None:
        points3d = kapture.Points3d(points3d)

    # import (copy) image files.
    logger.info('import image files ...')
    images_filenames = [f for _, _, f in kapture.flatten(images)]
    import_record_data_from_dir_auto(nvm_images_path, kapture_path, images_filenames, images_import_method)

    # pack into kapture format
    imported_kapture = kapture.Kapture(sensors=cameras, records_camera=images, trajectories=trajectories,
                                       points3d=points3d, keypoints=keypoints, observations=observations)
    logger.info('writing imported data...')
    kapture_to_dir(kapture_path, imported_kapture)
def import_openmvg_regions(openmvg_regions_directory_path, kapture_data, kapture_path):
    """
    Import openMVG regions (keypoints + descriptors) into kapture.

    Reads "image_describer.json" to identify the feature type, then converts the
    per-image .feat / .desc files found in the openMVG regions directory into
    kapture keypoints / descriptors files, registering them on kapture_data.

    :param openmvg_regions_directory_path: input path to the openMVG regions directory
    :param kapture_data: kapture data; its keypoints/descriptors members are overwritten
    :param kapture_path: root path of the kapture where feature files are written
    """
    # look for the "image_describer.json"
    image_describer_path = path.join(openmvg_regions_directory_path, 'image_describer.json')
    if not path.isfile(image_describer_path):
        # no describer file: nothing to import, silently give up
        logger.debug(f'file not found : {image_describer_path}')
        return

    with open(image_describer_path) as f:
        image_describer = json.load(f)

    # retrieve what type of keypoints it is.
    keypoints_type = image_describer.get('regions_type', {}).get('polymorphic_name', 'UNDEFINED')
    keypoints_name = {'SIFT_Regions': 'SIFT',
                      'AKAZE_Float_Regions': 'AKAZE'
                      }.get(keypoints_type, keypoints_type)
    # openMVG .feat rows are 4 floats per keypoint (x, y, scale, orientation)
    kapture_keypoints = kapture.Keypoints(type_name=keypoints_name, dtype=float, dsize=4)

    # retrieve what type of descriptors it is.
    descriptors_type = image_describer.get('image_describer', {}).get('polymorphic_name', 'UNDEFINED')
    descriptors_props = {
        'SIFT_Image_describer': dict(type_name='SIFT', dtype=np.int32, dsize=128),
        'AKAZE_Image_describer_SURF': dict(type_name='AKAZE', dtype=np.int32, dsize=128),
    }.get(descriptors_type)
    if not descriptors_props:
        raise ValueError(f'conversion of {descriptors_type} descriptors not implemented.')
    kapture_descriptors = kapture.Descriptors(**descriptors_props)

    # populate regions files in openMVG directory
    # https://github.com/openMVG/openMVG/blob/master/src/openMVG/features/scalar_regions.hpp#L23
    for _, _, image_name in kapture.flatten(kapture_data.records_camera):
        openmvg_image_name = path.splitext(path.basename(image_name))[0]
        # keypoints
        openmvg_keypoints_filepath = path.join(openmvg_regions_directory_path, openmvg_image_name + '.feat')
        if path.isfile(openmvg_keypoints_filepath):
            # there is a keypoints file in openMVG, lets add it to kapture
            # ndmin=2: without it, a file with a single keypoint is squeezed to 1-D
            # and the shape check below raises IndexError.
            keypoints_data = np.loadtxt(openmvg_keypoints_filepath, ndmin=2)
            assert keypoints_data.shape[1] == 4
            kapture_keypoints.add(image_name)
            # and convert file
            kapture_keypoints_filepath = kapture.io.features.get_keypoints_fullpath(kapture_path, image_name)
            array_to_file(kapture_keypoints_filepath, keypoints_data)

        # descriptors
        openmvg_descriptors_filepath = path.join(openmvg_regions_directory_path, openmvg_image_name + '.desc')
        if path.isfile(openmvg_descriptors_filepath):
            # descriptors shape is inferred from the keypoints of the same image,
            # so the .feat file must have been read above.
            assert path.isfile(openmvg_keypoints_filepath)
            # openMVG .desc layout: 8-byte feature count header, then raw descriptor data
            descriptors_data_bytes = np.fromfile(openmvg_descriptors_filepath, dtype=np.uint8)
            nb_features = keypoints_data.shape[0]
            descriptors_shape = descriptors_data_bytes[0:8].view(descriptors_props['dtype'])
            assert descriptors_shape[0] == nb_features
            descriptors_data = descriptors_data_bytes[8:].view(np.uint8).reshape((nb_features, 128))
            kapture_descriptors.add(image_name)
            # and convert file
            kapture_descriptors_filepath = kapture.io.features.get_descriptors_fullpath(kapture_path, image_name)
            array_to_file(kapture_descriptors_filepath, descriptors_data)

    kapture_data.keypoints = kapture_keypoints
    kapture_data.descriptors = kapture_descriptors
def add_frames(self, frames: List[Frame], points3d: List[Keypoint]):
    """
    Add a batch of frames and their triangulated 3-D points to the kapture.

    Writes the (possibly rescaled) frame images to disk, records camera poses,
    rebuilds the points3d table from the filtered keypoints, and registers one
    keypoint file + observations per frame.

    :param frames: frames to export; frames without a posterior pose are skipped
    :param points3d: candidate 3-D keypoints; filtered by quality thresholds below
    """
    k = self.kapture
    # lazily create the kapture sub-structures on first use
    if k.records_camera is None:
        k.records_camera = kt.RecordsCamera()
    if k.trajectories is None:
        k.trajectories = kt.Trajectories()
    if k.keypoints is None:
        # 2-D float32 keypoints (u, v) under the default keypoint type
        k.keypoints = {self.default_kp_type: kt.Keypoints(self.default_kp_type, np.float32, 2)}
    if k.points3d is None:
        k.points3d = kt.Points3d()
    if k.observations is None:
        k.observations = kt.Observations()

    def check_kp(kp):
        # keep only well-observed keypoints: enough inliers in absolute count and ratio
        return not kp.bad_qlt and kp.inlier_count > self.min_pt3d_obs and kp.inlier_count / kp.total_count > self.min_pt3d_ratio

    kp_ids, pts3d = zip(*[(kp.id, kp.pt3d) for kp in points3d if check_kp(kp)])
    # sort by keypoint id so that pt3d_ids maps original kp id -> row index in pt3d_arr
    I = np.argsort(kp_ids)
    pt3d_ids = dict(zip(np.array(kp_ids)[I], np.arange(len(I))))
    pt3d_arr = np.array(pts3d)[I, :]
    # append a constant color per point (mid-gray 128 for each channel)
    k.points3d = kt.Points3d(np.concatenate((pt3d_arr, np.ones_like(pt3d_arr) * 128), axis=1))

    for f in frames:
        if not f.pose.post:
            # no posterior pose estimated for this frame: skip it entirely
            continue
        id = f.frame_num
        img = f.orig_image
        img_file = os.path.join(self.default_cam[1], 'frame%06d.%s' % (id, self.img_format))
        img_fullpath = get_record_fullpath(self.path, img_file)
        os.makedirs(os.path.dirname(img_fullpath), exist_ok=True)
        if not np.isclose(self.scale, 1.0):
            # resample to the export scale; INTER_AREA is suited to downscaling
            img = cv2.resize(img, None, fx=self.scale, fy=self.scale, interpolation=cv2.INTER_AREA)
        if self.img_format == self.IMG_FORMAT_PNG:
            cv2.imwrite(img_fullpath, img, (cv2.IMWRITE_PNG_COMPRESSION, 9))
        elif self.img_format == self.IMG_FORMAT_JPG:
            cv2.imwrite(img_fullpath, img, (cv2.IMWRITE_JPEG_QUALITY, self.jpg_qlt))
        else:
            assert False, 'Invalid image format: %s' % (self.img_format, )

        # key images/poses by (frame number, default camera id)
        record_id = (id, self.default_cam[0])
        k.records_camera[record_id] = img_file
        # NOTE(review): `if 1` is always true, so the negated-pose branch is dead;
        # looks like a leftover debug toggle — confirm before removing.
        pose = f.pose.post if 1 else (-f.pose.post)
        k.trajectories[record_id] = kt.PoseTransform(r=pose.quat.components, t=pose.loc)
        k.keypoints[self.default_kp_type].add(img_file)

        uvs = np.zeros((len(f.kps_uv), 2), np.float32)
        i = 0
        for kp_id, uv in f.kps_uv.items():
            if kp_id in pt3d_ids:
                # observation index must match the row written into uvs below,
                # so i is only advanced for keypoints that have a 3-D point
                k.observations.add(int(pt3d_ids[kp_id]), self.default_kp_type, img_file, i)
                # rescale uv from the frame's internal scale to the export scale
                # (assumes uv is in pixels at f.img_sc — TODO confirm with Frame)
                uvs[i, :] = uv / f.img_sc * self.scale
                i += 1
        # only the first i rows were filled; trim before writing
        image_keypoints_to_file(get_keypoints_fullpath(self.default_kp_type, self.path, img_file), uvs[:i, :])
def colmap_localize_from_loaded_data(kapture_data: kapture.Kapture,
                                     kapture_path: str,
                                     tar_handlers: Optional[TarCollection],
                                     colmap_path: str,
                                     input_database_path: str,
                                     input_reconstruction_path: str,
                                     colmap_binary: str,
                                     keypoints_type: Optional[str],
                                     use_colmap_matches_importer: bool,
                                     image_registrator_options: List[str],
                                     skip_list: List[str],
                                     force: bool) -> None:
    """
    Localize images on a colmap model with the kapture data.

    :param kapture_data: kapture data to use
    :param kapture_path: path to the kapture to use
    :param tar_handlers: collection of preloaded tar archives
    :param colmap_path: path to the colmap build
    :param input_database_path: path to the map colmap.db
    :param input_reconstruction_path: path to the map reconstruction folder
    :param colmap_binary: path to the colmap binary executable
    :param keypoints_type: type of keypoints, name of the keypoints subfolder
    :param use_colmap_matches_importer: if True, run colmap matches_importer for geometric verification
    :param image_registrator_options: options for the image registrator
    :param skip_list: list of steps to skip
    :param force: Silently overwrite kapture files if already exists.
    """
    os.makedirs(colmap_path, exist_ok=True)

    if not (kapture_data.records_camera and kapture_data.sensors and kapture_data.keypoints and kapture_data.matches):
        raise ValueError('records_camera, sensors, keypoints, matches are mandatory')
    if kapture_data.trajectories:
        # localization recomputes poses: existing ones must not leak into the result
        logger.warning("Input data contains trajectories: they will be ignored")
        kapture_data.trajectories.clear()
    else:
        kapture_data.trajectories = kapture.Trajectories()

    # COLMAP does not fully support rigs.
    if kapture_data.rigs is not None and kapture_data.trajectories is not None:
        # make sure, rigs are not used in trajectories.
        logger.info('remove rigs notation.')
        rigs_remove_inplace(kapture_data.trajectories, kapture_data.rigs)
        kapture_data.rigs.clear()

    # Prepare output
    # Set fixed name for COLMAP database
    colmap_db_path = path.join(colmap_path, 'colmap.db')
    image_list_path = path.join(colmap_path, 'images.list')
    reconstruction_path = path.join(colmap_path, "reconstruction")
    if 'delete_existing' not in skip_list:
        safe_remove_file(colmap_db_path, force)
        safe_remove_file(image_list_path, force)
        safe_remove_any_path(reconstruction_path, force)
    os.makedirs(reconstruction_path, exist_ok=True)

    # Copy colmap db to output
    if not os.path.exists(colmap_db_path):
        shutil.copy(input_database_path, colmap_db_path)

    # find correspondences between the colmap db and the kapture data
    images_all = {image_path: (ts, cam_id)
                  for ts, shot in kapture_data.records_camera.items()
                  for cam_id, image_path in shot.items()}

    colmap_db = COLMAPDatabase.connect(colmap_db_path)
    colmap_image_ids = database_extra.get_colmap_image_ids_from_db(colmap_db)
    colmap_images = database_extra.get_images_from_database(colmap_db)
    colmap_db.close()

    # dict ( kapture_camera -> colmap_camera_id )
    colmap_camera_ids = {images_all[image_path][1]: colmap_cam_id
                         for image_path, colmap_cam_id in colmap_images
                         if image_path in images_all}

    # only register images not already present in the colmap db
    images_to_add = {image_path: value
                     for image_path, value in images_all.items()
                     if image_path not in colmap_image_ids}
    flatten_images_to_add = [(ts, kapture_cam_id, image_path)
                             for image_path, (ts, kapture_cam_id) in images_to_add.items()]

    if 'import_to_db' not in skip_list:
        logger.info("Step 1: Add precomputed keypoints and matches to colmap db")
        if keypoints_type is None:
            keypoints_type = try_get_only_key_from_collection(kapture_data.keypoints)
        assert keypoints_type is not None
        assert keypoints_type in kapture_data.keypoints
        assert keypoints_type in kapture_data.matches

        # register cameras missing from the colmap db
        cameras_to_add = kapture.Sensors()
        for _, (_, kapture_cam_id) in images_to_add.items():
            if kapture_cam_id not in colmap_camera_ids:
                kapture_cam = kapture_data.sensors[kapture_cam_id]
                cameras_to_add[kapture_cam_id] = kapture_cam
        colmap_db = COLMAPDatabase.connect(colmap_db_path)
        colmap_added_camera_ids = database_extra.add_cameras_to_database(cameras_to_add, colmap_db)
        colmap_camera_ids.update(colmap_added_camera_ids)

        colmap_added_image_ids = database_extra.add_images_to_database_from_flatten(
            colmap_db, flatten_images_to_add, kapture_data.trajectories, colmap_camera_ids)
        colmap_image_ids.update(colmap_added_image_ids)

        colmap_image_ids_reversed = {v: k for k, v in colmap_image_ids.items()}  # colmap_id : name

        # add new features
        colmap_keypoints = database_extra.get_keypoints_set_from_database(colmap_db, colmap_image_ids_reversed)
        keypoints_all = kapture_data.keypoints[keypoints_type]
        keypoints_to_add = {name for name in keypoints_all if name not in colmap_keypoints}
        keypoints_to_add = kapture.Keypoints(keypoints_all.type_name, keypoints_all.dtype, keypoints_all.dsize,
                                             keypoints_to_add)
        database_extra.add_keypoints_to_database(colmap_db, keypoints_to_add,
                                                 keypoints_type, kapture_path,
                                                 tar_handlers,
                                                 colmap_image_ids)

        # add new matches
        colmap_matches = kapture.Matches(database_extra.get_matches_set_from_database(colmap_db,
                                                                                      colmap_image_ids_reversed))
        colmap_matches.normalize()

        matches_all = kapture_data.matches[keypoints_type]
        matches_to_add = kapture.Matches({pair for pair in matches_all if pair not in colmap_matches})
        database_extra.add_matches_to_database(colmap_db, matches_to_add,
                                               keypoints_type, kapture_path,
                                               tar_handlers,
                                               colmap_image_ids,
                                               export_two_view_geometry=not use_colmap_matches_importer)
        colmap_db.close()

    if use_colmap_matches_importer:
        logger.info('Step 2: Run geometric verification')
        logger.debug('running colmap matches_importer...')
        if keypoints_type is None:
            keypoints_type = try_get_only_key_from_collection(kapture_data.matches)
        assert keypoints_type is not None
        assert keypoints_type in kapture_data.matches
        # compute two view geometry
        colmap_lib.run_matches_importer_from_kapture_matches(
            colmap_binary,
            colmap_use_cpu=True,
            colmap_gpu_index=None,
            colmap_db_path=colmap_db_path,
            kapture_matches=kapture_data.matches[keypoints_type],
            force=force)
    else:
        logger.info('Step 2: Run geometric verification - skipped')

    if 'image_registrator' not in skip_list:
        logger.info("Step 3: Run image_registrator")
        # run image_registrator
        colmap_lib.run_image_registrator(
            colmap_binary,
            colmap_db_path,
            input_reconstruction_path,
            reconstruction_path,
            image_registrator_options
        )

    # run model_converter
    if 'model_converter' not in skip_list:
        logger.info("Step 4: Export reconstruction results to txt")
        colmap_lib.run_model_converter(
            colmap_binary,
            reconstruction_path,
            reconstruction_path
        )
def import_opensfm(
        opensfm_rootdir: str,
        kapture_rootdir: str,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.copy) -> None:
    """
    Import an OpenSfM project into a kapture directory.

    Converts cameras, shots (images + trajectories), optional GNSS from exif,
    features (keypoints + descriptors), matches and 3-D points, then writes
    the kapture csv files.

    :param opensfm_rootdir: root path of the OpenSfM project
    :param kapture_rootdir: root path of the output kapture
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: transfer strategy for the image files
    """
    # hide progress bars unless log level is INFO or more verbose
    # (consistent with the rest of this module).
    disable_tqdm = logger.getEffectiveLevel() > logging.INFO
    # load reconstruction
    opensfm_reconstruction_filepath = path.join(opensfm_rootdir, 'reconstruction.json')
    with open(opensfm_reconstruction_filepath, 'rt') as f:
        opensfm_reconstruction = json.load(f)
    # remove the single list @ root
    opensfm_reconstruction = opensfm_reconstruction[0]

    # prepare space for output
    os.makedirs(kapture_rootdir, exist_ok=True)
    delete_existing_kapture_files(kapture_rootdir, force_erase=force_overwrite_existing)

    # import cameras
    kapture_sensors = kapture.Sensors()
    assert 'cameras' in opensfm_reconstruction
    for osfm_camera_id, osfm_camera in opensfm_reconstruction['cameras'].items():
        camera = import_camera(osfm_camera, name=osfm_camera_id)
        kapture_sensors[osfm_camera_id] = camera

    # import shots
    logger.info('importing images and trajectories ...')
    kapture_images = kapture.RecordsCamera()
    kapture_trajectories = kapture.Trajectories()
    opensfm_image_dirpath = path.join(opensfm_rootdir, 'images')
    assert 'shots' in opensfm_reconstruction
    image_timestamps, image_sensors = {}, {}  # used later to retrieve the timestamp of an image.
    for timestamp, (image_filename, shot) in enumerate(opensfm_reconstruction['shots'].items()):
        sensor_id = shot['camera']
        image_timestamps[image_filename] = timestamp
        image_sensors[image_filename] = sensor_id
        # in OpenSfm, (sensor, timestamp) is not unique.
        rotation_vector = shot['rotation']
        q = quaternion.from_rotation_vector(rotation_vector)
        translation = shot['translation']
        # capture_time = shot['capture_time']  # may be invalid
        # gps_position = shot['gps_position']
        kapture_images[timestamp, sensor_id] = image_filename
        kapture_trajectories[timestamp, sensor_id] = kapture.PoseTransform(r=q, t=translation)

    # copy image files
    filename_list = [f for _, _, f in kapture.flatten(kapture_images)]
    import_record_data_from_dir_auto(source_record_dirpath=opensfm_image_dirpath,
                                     destination_kapture_dirpath=kapture_rootdir,
                                     filename_list=filename_list,
                                     copy_strategy=images_import_method)

    # gps from pre-extracted exif, in exif/image_name.jpg.exif
    kapture_gnss = None
    opensfm_exif_dirpath = path.join(opensfm_rootdir, 'exif')
    opensfm_exif_suffix = '.exif'
    if path.isdir(opensfm_exif_dirpath):
        logger.info('importing GNSS from exif ...')
        camera_ids = set(image_sensors.values())
        # add a gps sensor for each camera
        map_cam_to_gnss_sensor = {cam_id: 'GPS_' + cam_id for cam_id in camera_ids}
        for gnss_id in map_cam_to_gnss_sensor.values():
            kapture_sensors[gnss_id] = kapture.Sensor(sensor_type='gnss', sensor_params=['EPSG:4326'])
        # build epsg_code for all cameras
        kapture_gnss = kapture.RecordsGnss()
        opensfm_exif_filepath_list = (path.join(dirpath, filename)
                                      for dirpath, _, filename_list in os.walk(opensfm_exif_dirpath)
                                      for filename in filename_list
                                      if filename.endswith(opensfm_exif_suffix))
        for opensfm_exif_filepath in tqdm(opensfm_exif_filepath_list, disable=disable_tqdm):
            image_filename = path.relpath(opensfm_exif_filepath, opensfm_exif_dirpath)[:-len(opensfm_exif_suffix)]
            image_timestamp = image_timestamps[image_filename]
            image_sensor_id = image_sensors[image_filename]
            gnss_timestamp = image_timestamp
            gnss_sensor_id = map_cam_to_gnss_sensor[image_sensor_id]
            with open(opensfm_exif_filepath, 'rt') as f:
                js_root = json.load(f)
            if 'gps' not in js_root:
                logger.warning(f'NO GPS data in "{opensfm_exif_filepath}"')
                continue
            gps_coords = {
                'x': js_root['gps']['longitude'],
                'y': js_root['gps']['latitude'],
                'z': js_root['gps'].get('altitude', 0.0),
                'dop': js_root['gps'].get('dop', 0),
                'utc': 0,
            }
            logger.debug(f'found GPS data for ({gnss_timestamp}, {gnss_sensor_id}) in "{opensfm_exif_filepath}"')
            kapture_gnss[gnss_timestamp, gnss_sensor_id] = kapture.RecordGnss(**gps_coords)

    # import features (keypoints + descriptors)
    kapture_keypoints = None  # kapture.Keypoints(type_name='opensfm', dsize=4, dtype=np.float64)
    kapture_descriptors = None  # kapture.Descriptors(type_name='opensfm', dsize=128, dtype=np.uint8)
    opensfm_features_dirpath = path.join(opensfm_rootdir, 'features')
    opensfm_features_suffix = '.features.npz'
    if path.isdir(opensfm_features_dirpath):
        logger.info('importing keypoints and descriptors ...')
        opensfm_features_file_list = (path.join(dp, fn)
                                      for dp, _, fs in os.walk(opensfm_features_dirpath) for fn in fs)
        opensfm_features_file_list = (filepath
                                      for filepath in opensfm_features_file_list
                                      if filepath.endswith(opensfm_features_suffix))
        for opensfm_feature_filename in tqdm(opensfm_features_file_list, disable=disable_tqdm):
            image_filename = path.relpath(opensfm_feature_filename,
                                          opensfm_features_dirpath)[:-len(opensfm_features_suffix)]
            opensfm_image_features = np.load(opensfm_feature_filename)
            opensfm_image_keypoints = opensfm_image_features['points']
            opensfm_image_descriptors = opensfm_image_features['descriptors']
            logger.debug(f'parsing keypoints and descriptors in {opensfm_feature_filename}')
            if kapture_keypoints is None:
                # HAHOG = Hessian Affine feature point detector + HOG descriptor
                kapture_keypoints = kapture.Keypoints(type_name='HessianAffine',
                                                      dsize=opensfm_image_keypoints.shape[1],
                                                      dtype=opensfm_image_keypoints.dtype)
            if kapture_descriptors is None:
                kapture_descriptors = kapture.Descriptors(type_name='HOG',
                                                          dsize=opensfm_image_descriptors.shape[1],
                                                          dtype=opensfm_image_descriptors.dtype)
            # convert keypoints file
            keypoint_filepath = kapture.io.features.get_features_fullpath(data_type=kapture.Keypoints,
                                                                          kapture_dirpath=kapture_rootdir,
                                                                          image_filename=image_filename)
            kapture.io.features.image_keypoints_to_file(filepath=keypoint_filepath,
                                                        image_keypoints=opensfm_image_keypoints)
            # register the file
            kapture_keypoints.add(image_filename)

            # convert descriptors file
            descriptor_filepath = kapture.io.features.get_features_fullpath(data_type=kapture.Descriptors,
                                                                            kapture_dirpath=kapture_rootdir,
                                                                            image_filename=image_filename)
            kapture.io.features.image_descriptors_to_file(filepath=descriptor_filepath,
                                                          image_descriptors=opensfm_image_descriptors)
            # register the file
            kapture_descriptors.add(image_filename)

    # import matches
    kapture_matches = kapture.Matches()
    opensfm_matches_suffix = '_matches.pkl.gz'
    opensfm_matches_dirpath = path.join(opensfm_rootdir, 'matches')
    if path.isdir(opensfm_matches_dirpath):
        logger.info('importing matches ...')
        opensfm_matches_file_list = (path.join(dp, fn)
                                     for dp, _, fs in os.walk(opensfm_matches_dirpath) for fn in fs)
        opensfm_matches_file_list = (filepath
                                     for filepath in opensfm_matches_file_list
                                     if filepath.endswith(opensfm_matches_suffix))
        for opensfm_matches_filename in tqdm(opensfm_matches_file_list, disable=disable_tqdm):
            image_filename_1 = path.relpath(opensfm_matches_filename,
                                            opensfm_matches_dirpath)[:-len(opensfm_matches_suffix)]
            logger.debug(f'parsing matches in {image_filename_1}')
            with gzip.open(opensfm_matches_filename, 'rb') as f:
                opensfm_matches = pickle.load(f)
            for image_filename_2, opensfm_image_matches in opensfm_matches.items():
                image_pair = (image_filename_1, image_filename_2)
                # register the pair to kapture
                kapture_matches.add(*image_pair)
                # convert the bin file to kapture
                kapture_matches_filepath = kapture.io.features.get_matches_fullpath(
                    image_filename_pair=image_pair,
                    kapture_dirpath=kapture_rootdir)
                kapture_image_matches = np.hstack([
                    opensfm_image_matches.astype(np.float64),
                    # no matches scoring = assume all to one
                    np.ones(shape=(opensfm_image_matches.shape[0], 1), dtype=np.float64)])
                kapture.io.features.image_matches_to_file(kapture_matches_filepath, kapture_image_matches)

    # import 3-D points
    if 'points' in opensfm_reconstruction:
        logger.info('importing points 3-D')
        opensfm_points = opensfm_reconstruction['points']
        points_data = []
        for point_id in sorted(opensfm_points):
            point_data = opensfm_points[point_id]
            point_data = point_data['coordinates'] + point_data['color']
            points_data.append(point_data)
        kapture_points = kapture.Points3d(points_data)
    else:
        kapture_points = None

    # saving kapture csv files
    logger.info('saving kapture files')
    kapture_data = kapture.Kapture(sensors=kapture_sensors,
                                   records_camera=kapture_images,
                                   records_gnss=kapture_gnss,
                                   trajectories=kapture_trajectories,
                                   keypoints=kapture_keypoints,
                                   descriptors=kapture_descriptors,
                                   matches=kapture_matches,
                                   points3d=kapture_points)
    kapture.io.csv.kapture_to_dir(dirpath=kapture_rootdir, kapture_data=kapture_data)