def add_pose_to_query_kapture(kdata_src, kdata_trg, img_name):
    timestamp_sensor_id_from_image_name_src = {
        img_name: (timestamp, sensor_id)
        for timestamp, sensor_id, img_name in kapture.flatten(kdata_src.records_camera)
    }
    if img_name not in timestamp_sensor_id_from_image_name_src:
        logger.info(f'{img_name} was not found in the localized kapture; this should not be possible, something went wrong')
        return False
    timestamp_src, sensor_id_src = timestamp_sensor_id_from_image_name_src[img_name]

    timestamp_sensor_id_from_image_name_trg = {
        img_name: (timestamp, sensor_id)
        for timestamp, sensor_id, img_name in kapture.flatten(kdata_trg.records_camera)
    }
    if img_name not in timestamp_sensor_id_from_image_name_trg:
        logger.info(f'{img_name} not found in query kapture')
        return False
    timestamp_trg, sensor_id_trg = timestamp_sensor_id_from_image_name_trg[img_name]

    if (timestamp_src, sensor_id_src) not in kdata_src.trajectories:
        logger.info(f'{img_name} was not localized')
        return False
    kdata_trg.trajectories[timestamp_trg, sensor_id_trg] = kdata_src.trajectories[timestamp_src, sensor_id_src]
    return True
def test_t265_db_only(self):
    kapture_data = import_colmap_database(self._database_filepath, self._kapture_dirpath,
                                          no_geometric_filtering=True)

    # check the numbers
    self.assertEqual(2, len(kapture_data.sensors))
    self.assertEqual(6, len(kapture_data.trajectories))
    self.assertEqual(6, len(kapture_data.records_camera))

    # check camera ids
    camera_ids_expected = {'cam_00001', 'cam_00002'}  # may evolve in the future, not crucial
    camera_ids_actual = set(kapture_data.sensors.keys())
    self.assertEqual(camera_ids_expected, camera_ids_actual)
    # check camera ids are consistent in trajectories
    camera_ids_trajectories = set(cam_id for _, cam_id, _ in kapture.flatten(kapture_data.trajectories))
    self.assertEqual(camera_ids_actual, camera_ids_trajectories)
    # check camera ids are consistent in records_camera
    camera_ids_records = set(cam_id for _, cam_id, _ in kapture.flatten(kapture_data.records_camera))
    self.assertEqual(camera_ids_actual, camera_ids_records)

    # check camera parameters
    cam1 = kapture_data.sensors['cam_00001']
    self.assertIsInstance(cam1, kapture.Camera)
    self.assertEqual('camera', cam1.sensor_type)
    self.assertEqual(kapture.CameraType.OPENCV_FISHEYE, cam1.camera_type)
    params_expected = [848.0, 800.0, 284.468, 285.51, 424.355, 393.742, 0.0008, 0.031, -0.03, 0.005]
    # assertAlmostEqual does not compare sequences element-wise: check each parameter individually
    self.assertEqual(len(params_expected), len(cam1.camera_params))
    for expected, actual in zip(params_expected, cam1.camera_params):
        self.assertAlmostEqual(expected, actual)

    # check records
    timestamp, cam_id, image = next(kapture.flatten(kapture_data.records_camera, is_sorted=True))
    self.assertEqual(1, timestamp)
    self.assertEqual('cam_00002', cam_id)
    self.assertEqual('rightraw/frame_000000001.jpg', image)

    # check trajectories
    timestamp, cam_id, pose = next(kapture.flatten(kapture_data.trajectories, is_sorted=True))
    self.assertEqual(1, timestamp)
    self.assertEqual('cam_00002', cam_id)
    pose_expected = kapture.PoseTransform(
        r=[0.9540331248716523, -0.03768128483784883, -0.2972570621910482, -0.0062565444214723875],
        t=[2.7109402281860904, 0.13236653865769618, -2.868626176500939])
    self.assertTrue(equal_poses(pose_expected, pose))

    # this sample has no keypoints, descriptors nor matches
    self.assertFalse(path.exists(path.join(self._kapture_dirpath, 'reconstruction')))
def test_nested_dict(self):
    test_values = {'a': {'a': 1, 'c': 3, 'b': 4},
                   'c': {'z': 0, 'y': 9, 'd': 5},
                   'b': {'u': 7, 'a': 13, 'c': 1}}
    expected_list = [('a', 'a', 1), ('a', 'c', 3), ('a', 'b', 4),
                     ('c', 'z', 0), ('c', 'y', 9), ('c', 'd', 5),
                     ('b', 'u', 7), ('b', 'a', 13), ('b', 'c', 1)]
    expected_list_sorted = [('a', 'a', 1), ('a', 'b', 4), ('a', 'c', 3),
                            ('b', 'a', 13), ('b', 'c', 1), ('b', 'u', 7),
                            ('c', 'd', 5), ('c', 'y', 9), ('c', 'z', 0)]
    actual_list = list(kapture.flatten(test_values, is_sorted=False))
    actual_list_sorted = list(kapture.flatten(test_values, is_sorted=True))
    self.assertEqual(actual_list, expected_list)
    self.assertEqual(actual_list_sorted, expected_list_sorted)
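
# Illustrative note (not part of the test suite): kapture.flatten walks every level of a
# nested mapping and yields one tuple per leaf; with is_sorted=True, keys are sorted at
# each level. The same mechanism applies to kapture types such as RecordsCamera:
#
#   for timestamp, sensor_id, image_name in kapture.flatten(records_camera, is_sorted=True):
#       ...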
def equal_nested_dict_or_set(data_a, data_b, name_to_log, expected_type=None) -> bool:
    """
    Compare two instances of dictionary or set.

    :return: True if they are identical, False otherwise.
    """
    if expected_type is not None:
        # do type checking
        for data_x in [data_a, data_b]:
            if data_x is not None and not isinstance(data_x, expected_type):
                raise TypeError(f'expecting type {expected_type} in {name_to_log} (got {type(data_x)})')

    # early check if one (or both) are None
    if data_a is None and data_b is None:
        return True
    elif data_a is None and data_b is not None:
        return False
    elif data_a is not None and data_b is None:
        return False

    # check values
    flattened_a = list(flatten(data_a, is_sorted=True))
    flattened_b = list(flatten(data_b, is_sorted=True))
    are_equal = (flattened_a == flattened_b)
    if not are_equal:
        log_difference(flattened_a, flattened_b, name_to_log)
    return are_equal
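
# Usage sketch for equal_nested_dict_or_set (illustrative; assumes `import kapture` as in
# the rest of this module). Both inputs are flattened with is_sorted=True, so two
# structures compare equal whenever they hold the same leaf tuples, regardless of
# insertion order:
def _example_equal_nested_dict_or_set():
    records_a = kapture.RecordsCamera()
    records_a[0, 'cam0'] = 'images/frame_000.jpg'
    records_b = kapture.RecordsCamera()
    records_b[0, 'cam0'] = 'images/frame_000.jpg'
    assert equal_nested_dict_or_set(records_a, records_b, 'records_camera',
                                    expected_type=kapture.RecordsCamera)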
def setUp(self):
    self._rigs = kapture.Rigs()
    looking_not_straight = quaternion.from_rotation_vector([0, np.deg2rad(5.), 0])
    self._rigs['rig', 'cam1'] = kapture.PoseTransform(t=[-10, 0, 0], r=looking_not_straight).inverse()
    self._rigs['rig', 'cam2'] = kapture.PoseTransform(t=[+10, 0, 0], r=looking_not_straight.inverse()).inverse()
    self._trajectories_rigs = kapture.Trajectories()
    for timestamp, ratio in enumerate(np.linspace(0., 1., num=8)):
        looking_around = quaternion.from_rotation_vector([0, np.deg2rad(360. * ratio), 0])
        self._trajectories_rigs[timestamp, 'rig'] = kapture.PoseTransform(t=[0, 0, -100.], r=looking_around)

    self._trajectories_cams = kapture.Trajectories()
    for timestamp, rig_id, pose_rig_from_world in kapture.flatten(self._trajectories_rigs, is_sorted=True):
        for rig_id2, cam_id, pose_cam_from_rig in kapture.flatten(self._rigs):
            assert rig_id == rig_id2
            pose_cam_from_world = kapture.PoseTransform.compose([pose_cam_from_rig, pose_rig_from_world])
            self._trajectories_cams[timestamp, cam_id] = pose_cam_from_world
def equal_image_features(data_a: Optional[Union[kapture.Keypoints, kapture.Descriptors, kapture.GlobalFeatures]],
                         data_b: Optional[Union[kapture.Keypoints, kapture.Descriptors, kapture.GlobalFeatures]]
                         ) -> bool:
    """
    Compare two instances of kapture features (keypoints, descriptors or global features).

    :param data_a: first set of features
    :param data_b: second set of features
    :return: True if they are identical, False otherwise.
    """
    # early check if one is None
    if data_a is None and data_b is None:
        return True
    elif data_a is None and data_b is not None:
        return False
    elif data_a is not None and data_b is None:
        return False
    # should not happen because of the previous lines; use assert to help the IDE figure out the types
    assert data_a is not None
    assert data_b is not None
    if data_a.type_name != data_b.type_name or data_a.dtype != data_b.dtype or data_a.dsize != data_b.dsize:
        return False
    flattened_a = list(flatten(data_a, is_sorted=True))
    flattened_b = list(flatten(data_b, is_sorted=True))
    are_equal = (flattened_a == flattened_b)
    if not are_equal:
        log_difference(flattened_a, flattened_b, 'equal_image_features')
    return are_equal
def get_interpolated_pose(kdata_map: kapture.Kapture,
                          kdata_query: kapture.Kapture,
                          weights: Dict[str, List[Tuple[str, float]]]):
    """
    Compute the approximated pose for all query images given the precomputed weights.

    :param kdata_map: map images + their poses as kapture data
    :type kdata_map: kapture.Kapture
    :param kdata_query: query images as kapture data
    :type kdata_query: kapture.Kapture
    :param weights: weights for the pose interpolation
    :type weights: Dict[str, List[Tuple[str, float]]]
    :return: trajectories holding the interpolated query poses
    """
    output_trajectories = kapture.Trajectories()
    assert kdata_map.trajectories is not None
    assert kdata_map.records_camera is not None
    reverse_map_records_camera = {image_name: (timestamp, sensor_id)
                                  for timestamp, sensor_id, image_name
                                  in kapture.flatten(kdata_map.records_camera)}
    if kdata_map.rigs is not None:
        input_trajectories = kapture.rigs_remove(kdata_map.trajectories, kdata_map.rigs)
    else:
        input_trajectories = kdata_map.trajectories

    assert kdata_query.records_camera is not None
    reverse_query_records_camera = {image_name: (timestamp, sensor_id)
                                    for timestamp, sensor_id, image_name
                                    in kapture.flatten(kdata_query.records_camera)}

    for query_image_name, weighted_map_images in weights.items():
        pose_inv_list = [input_trajectories[reverse_map_records_camera[name]].inverse()
                         for name, _ in weighted_map_images]
        weight_list = [w for _, w in weighted_map_images]
        output_trajectories[reverse_query_records_camera[query_image_name]] = \
            average_pose_transform_weighted(pose_inv_list, weight_list).inverse()
    return output_trajectories
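
# Usage sketch (illustrative; image names and weights are hypothetical). weights maps each
# query image name to (map image name, weight) pairs; the query pose is the weighted
# average of the inverted map poses, inverted back at the end:
#
#   weights = {'query/q001.jpg': [('mapping/m010.jpg', 0.7),
#                                 ('mapping/m011.jpg', 0.3)]}
#   query_trajectories = get_interpolated_pose(kdata_map, kdata_query, weights)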
def equal_trajectories(trajectories_a: Optional[kapture.Trajectories],
                       trajectories_b: Optional[kapture.Trajectories]) -> bool:
    """
    Compare two instances of kapture.Trajectories.
    Poses are compared with is_distance_within_threshold(pose_transform_distance()).

    :param trajectories_a: first trajectory
    :param trajectories_b: second trajectory
    :return: True if they are identical, False otherwise.
    """
    if trajectories_a is None and trajectories_b is None:
        return True
    elif trajectories_a is None and trajectories_b is not None:
        return False
    elif trajectories_a is not None and trajectories_b is None:
        return False
    flattened_a = list(flatten(trajectories_a, is_sorted=True))
    flattened_b = list(flatten(trajectories_b, is_sorted=True))
    if len(flattened_a) != len(flattened_b):
        getLogger().debug('equal_trajectories: a and b do not have the same number of elements')
        return False
    for (timestamp_a, sensor_id_a, pose_a), (timestamp_b, sensor_id_b, pose_b) in zip(flattened_a, flattened_b):
        if timestamp_a != timestamp_b or sensor_id_a != sensor_id_b:
            getLogger().debug(
                f'equal_trajectories: ({timestamp_a}, {sensor_id_a}, {pose_a.r_raw}, {pose_a.t_raw}) !='
                f' ({timestamp_b}, {sensor_id_b}, {pose_b.r_raw}, {pose_b.t_raw})')
            return False
        if not equal_poses(pose_a, pose_b):
            getLogger().debug(
                f'equal_trajectories: ({timestamp_a}, {sensor_id_a}, {pose_a.r_raw}, {pose_a.t_raw}) '
                f'is not close to '
                f'({timestamp_b}, {sensor_id_b}, {pose_b.r_raw}, {pose_b.t_raw})')
            return False
    return True
def test_dict_of_list(self):
    test_values = {'a': [1, 4, 3], 'z': [1, 0], 'y': [9, 5]}
    expected_list = [('a', 1), ('a', 4), ('a', 3),
                     ('z', 1), ('z', 0),
                     ('y', 9), ('y', 5)]
    expected_list_sorted = [('a', 1), ('a', 3), ('a', 4),
                            ('y', 5), ('y', 9),
                            ('z', 0), ('z', 1)]
    actual_list = list(kapture.flatten(test_values, is_sorted=False))
    actual_list_sorted = list(kapture.flatten(test_values, is_sorted=True))
    self.assertEqual(actual_list, expected_list)
    self.assertEqual(actual_list_sorted, expected_list_sorted)
def equal_sensors(sensors_a: Optional[kapture.Sensors], sensors_b: Optional[kapture.Sensors]) -> bool:
    """
    Compare two instances of kapture.Sensors.
    model_params for cameras are considered equal if np.isclose says so.

    :param sensors_a: first sensor definition
    :param sensors_b: second sensor definition
    :return: True if they are identical, False otherwise.
    """
    if sensors_a is None and sensors_b is None:
        return True
    elif sensors_a is None and sensors_b is not None:
        return False
    elif sensors_a is not None and sensors_b is None:
        return False
    flattened_a = list(flatten(sensors_a, is_sorted=True))
    flattened_b = list(flatten(sensors_b, is_sorted=True))
    if len(flattened_a) != len(flattened_b):
        getLogger().debug('equal_sensors: a and b do not have the same number of elements')
        return False
    for (sensor_id_a, sensor_a), (sensor_id_b, sensor_b) in zip(flattened_a, flattened_b):
        # handling special case: name_a='' and name_b=None
        equal_id = sensor_id_a == sensor_id_b
        equal_name = (not sensor_a.name and not sensor_b.name) or (sensor_a.name == sensor_b.name)
        equal_type = sensor_a.sensor_type == sensor_b.sensor_type
        if not equal_id or not equal_name or not equal_type:
            getLogger().debug(f'equal_sensors: ({sensor_id_a}, {sensor_a}) != ({sensor_id_b}, {sensor_b})')
            return False
        equal_params = False
        if sensor_a.sensor_type in kapture.ALL_CAMERA_SENSOR_TYPES:
            assert isinstance(sensor_a, Camera)
            assert isinstance(sensor_b, Camera)
            if sensor_a.sensor_type != sensor_b.sensor_type:
                return False
            if sensor_a.camera_type == sensor_b.camera_type:
                equal_params = equal_camera_params(sensor_a.camera_params, sensor_b.camera_params)
        else:
            equal_params = sensor_a.sensor_params == sensor_b.sensor_params
        if not equal_params:
            getLogger().debug(f'equal_sensors: ({sensor_id_a}, {sensor_a}) != ({sensor_id_b}, {sensor_b})')
            return False
    return True
def _import_cameras(silda_dir_path, snapshots, fallback_cam_model) -> kapture.Sensors:
    logger.info('Processing sensors ...')
    cameras = kapture.Sensors()
    # use hard-coded intrinsics evaluated using colmap
    # 1 OPENCV_FISHEYE 1024 1024 393.299 394.815 512 512 -0.223483 0.117325 -0.0326138 0.00361082
    # fx, fy, cx, cy, omega
    # 1 FOV 1024 1024 300 300 512 512 0.899632
    cam_id_list = sorted(set(cam_id for _, cam_id, _ in kapture.flatten(snapshots)))
    for cam_id in cam_id_list:
        # pick an image for that cam id
        random_image_intrinsic = next(
            f'{timestamp}_{cam_id}.intrinsics'  # keep only the filename (that is what SILDA expects)
            for timestamp, cid, filename in kapture.flatten(snapshots)
            if cid == cam_id)
        logger.debug(f'camera {cam_id} intrinsics : picking at random: ("{random_image_intrinsic}")')
        intrinsic_filepath = path.join(silda_dir_path, 'camera-intrinsics', random_image_intrinsic)
        logger.debug(f'loading file: "{intrinsic_filepath}"')
        silda_proj_params = np.loadtxt(intrinsic_filepath)
        # only retrieve the principal point from the intrinsics,
        # because the rest corresponds to a fisheye model not available in colmap.
        principal_point = (silda_proj_params[0:2] * SILDA_IMAGE_SIZE).flatten().tolist()
        projection = fallback_cam_model
        if 'OPENCV_FISHEYE' == projection:
            focal_length = [393.299, 394.815]
            fisheye_coefficients = [-0.223483, 0.117325, -0.0326138, 0.00361082]
            # fx, fy, cx, cy, k1, k2, k3, k4
            proj_params = focal_length + principal_point + fisheye_coefficients
        elif 'FOV' == projection:
            # use hard-coded intrinsics from the Torsten reconstruction, i.e.:
            # 217.294036, 217.214703, 512.000000, 507.897400, -0.769113
            focal_length = [217.294036, 217.214703]
            # principal_point = [512.000000, 507.897400]
            omega = [-0.769113]
            # fx, fy, cx, cy, omega
            proj_params = focal_length + principal_point + omega
        else:
            raise ValueError('Only accepts OPENCV_FISHEYE or FOV as projection model.')
        camera = kapture.Camera(projection, SILDA_IMAGE_SIZE.tolist() + proj_params)
        cameras[cam_id] = camera
    return cameras
def trajectories_to_ply_stream(stream, trajectories: kapture.Trajectories, axis_length: float = 1.) -> None:
    """
    Writes the trajectories to a stream.
    trajectories[ts][device_id] = [pose]

    :param stream: an open stream to write to
    :param trajectories: trajectories to write
    :param axis_length: length of the axis
    """
    pose_list = (pose_tr
                 for _, _, pose_tr in kapture.flatten(trajectories, is_sorted=True)
                 if not np.any(np.isnan(pose_tr.t)))  # filter out if no position
    # create 4 points per pose: 1 for the center, 3 for the axes
    points_colored_list = []
    for pose_tr in pose_list:
        axis = get_axis_in_world(pose_tr, axis_length)
        points_colored_list += [p + AXIS_COLORS[i] for i, p in enumerate(axis.tolist())]
    # write points into ply
    header_to_ply_stream(stream, nb_vertex=len(points_colored_list))
    for p3d in points_colored_list:
        line = ['{:<25}'.format(i) for i in p3d[0:3]]
        line += ['{:<4}'.format(i) for i in p3d[3:6]]
        stream.write(' '.join(line) + '\n')
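
# Usage sketch (illustrative; the output path is hypothetical, assumes `import kapture`
# as in the rest of this module). Each pose contributes 4 colored points (1 center +
# 3 axis tips), so the written PLY is easy to inspect in any point-cloud viewer:
def _example_trajectories_to_ply():
    trajectories = kapture.Trajectories()
    trajectories[0, 'cam0'] = kapture.PoseTransform(r=[1., 0., 0., 0.], t=[0., 0., 0.])
    with open('/tmp/trajectories.ply', 'w') as stream:
        trajectories_to_ply_stream(stream, trajectories, axis_length=0.5)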
def merge_trajectories(trajectories_list: List[Optional[kapture.Trajectories]],
                       rig_mappings: List[Dict[str, str]],
                       sensor_mappings: List[Dict[str, str]]) -> kapture.Trajectories:
    """
    Merge several trajectories into a single one, with new identifiers for the rigs and the sensors.

    :param trajectories_list: list of trajectories to merge
    :param rig_mappings: mapping of the rig identifiers to their new identifiers
    :param sensor_mappings: mapping of the sensor identifiers to their new identifiers
    :return: merged trajectories
    """
    assert len(trajectories_list) > 0
    assert len(trajectories_list) == len(rig_mappings)
    assert len(trajectories_list) == len(sensor_mappings)

    merged_trajectories = kapture.Trajectories()
    for trajectories, rig_mapping, sensor_mapping in zip(trajectories_list, rig_mappings, sensor_mappings):
        if trajectories is None:
            continue
        for timestamp, sensor_id, pose in kapture.flatten(trajectories):
            if sensor_id in rig_mapping:
                new_sensor_id = rig_mapping[sensor_id]
            else:
                new_sensor_id = sensor_mapping[sensor_id]
            merged_trajectories[(timestamp, new_sensor_id)] = pose
    return merged_trajectories
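
# Usage sketch (illustrative; identifiers are hypothetical, assumes `import kapture` as
# in the rest of this module). Identifiers found in a rig_mapping are renamed as rigs,
# all others go through the matching sensor_mapping:
def _example_merge_trajectories():
    traj_a = kapture.Trajectories()
    traj_a[0, 'cam0'] = kapture.PoseTransform(r=[1., 0., 0., 0.], t=[0., 0., 0.])
    traj_b = kapture.Trajectories()
    traj_b[0, 'cam0'] = kapture.PoseTransform(r=[1., 0., 0., 0.], t=[1., 0., 0.])
    merged = merge_trajectories([traj_a, traj_b],
                                rig_mappings=[{}, {}],
                                sensor_mappings=[{'cam0': 'cam0_a'}, {'cam0': 'cam0_b'}])
    assert set(merged[0].keys()) == {'cam0_a', 'cam0_b'}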
def _import_robotcar_v2_train(robotcar_path, kapture_imported_query, kapture_imported_training, image_pattern):
    queries_per_location = {image_name: (ts, cam_id, loc_id)
                            for loc_id, kapture_test in kapture_imported_query.items()
                            for ts, cam_id, image_name in kapture.flatten(kapture_test.records_camera)}
    # read robotcar_v2_train.txt
    v2_train_data = read_robotcar_v2_train(robotcar_path)
    for image_name, pose in v2_train_data.items():
        ts, cam_id, loc_id = queries_per_location[image_name]
        assert cam_id == 'rear'
        kapture_imported_training[loc_id].records_camera[ts, cam_id] = image_name
        kapture_imported_training[loc_id].trajectories[ts, cam_id] = pose
        matches = image_pattern.match(image_name)
        if not matches:
            logger.warning(f"Error matching image name {image_name}")
            continue
        matches = matches.groupdict()
        condition = str(matches['condition'])
        timestamp = str(matches['timestamp'])
        # add the left and right images to records_camera
        left_image_name = condition + '/' + 'left' + '/' + timestamp + '.jpg'
        right_image_name = condition + '/' + 'right' + '/' + timestamp + '.jpg'
        kapture_imported_training[loc_id].records_camera[ts, 'left'] = left_image_name
        kapture_imported_training[loc_id].records_camera[ts, 'right'] = right_image_name

        # remove the entries from the query
        del kapture_imported_query[loc_id].records_camera[ts][cam_id]
        del kapture_imported_query[loc_id].records_camera[ts]['left']
        del kapture_imported_query[loc_id].records_camera[ts]['right']
        del kapture_imported_query[loc_id].records_camera[ts]
def print_records(kapture_data, output_stream, show_detail, show_all,
                  timestamp_unit=None, timestamp_formatting=None) -> None:
    """
    Prints the records and trajectories to the output stream.
    """
    # records (+ trajectories)
    for record_name in ['trajectories', 'records_camera', 'records_lidar', 'records_wifi',
                        'records_bluetooth', 'records_gnss', 'records_accelerometer',
                        'records_gyroscope', 'records_magnetic']:
        records_field = getattr(kapture_data, record_name)
        records = list(kapture.flatten(records_field)) if records_field is not None else []
        nb_record = None if records_field is None else len(records)
        if not show_detail:
            print_key_value(f'nb {record_name}', nb_record, output_stream, show_none=show_all)
        elif records_field is not None or show_all:
            print_title(f'{record_name}', output_stream)
            if records_field is not None and nb_record > 0:
                timestamp_range = (min(records_field), max(records_field))
                timestamp_range_str = format_timestamp_range(timestamp_range, timestamp_unit, timestamp_formatting)
                sensors_ids = records_field.sensors_ids
                print_key_value(' ├─ timestamp range', timestamp_range_str, output_stream, show_none=show_all)
                print_key_value(' ├─ sensors', f'{len(sensors_ids)}: {sensors_ids}', output_stream,
                                show_none=show_all)
                print_key_value(' └─ nb total', nb_record, output_stream, show_none=show_all)
def test_export(self):
    temp_kapture_dirpath = path.join(self._tempdir.name, 'kapture')
    shutil.copytree(self._kapture_dirpath, temp_kapture_dirpath)
    kapture_data = kapture.io.csv.kapture_from_dir(temp_kapture_dirpath)
    images_filepaths = images_to_filepaths(kapture_data.records_camera, temp_kapture_dirpath)
    # make sure there is no EXIF in images
    for image_filepath in images_filepaths.values():
        clear_exif(image_filepath)

    # insert gps to exif
    export_gps_to_exif(kapture_data=kapture_data, kapture_dirpath=temp_kapture_dirpath)

    rebuilt_records = kapture.RecordsGnss()
    for timestamp, cam_id, image_name in kapture.flatten(kapture_data.records_camera):
        image_filepath = get_image_fullpath(temp_kapture_dirpath, image_name)
        exif_data = read_exif(image_filepath)
        rebuilt_records[timestamp, 'GPS_' + cam_id] = convert_gps_to_kapture_record(exif_data)

    self.assertTrue(equal_records_gnss(kapture_data.records_gnss, rebuilt_records))
def export_openmvg_poses(kapture_images: kapture.RecordsCamera,
                         kapture_trajectories: kapture.Trajectories,
                         kapture_to_openmvg_view_ids: Dict[str, int],
                         ) -> List:
    """
    :param kapture_images: input kapture images to export
    :param kapture_trajectories: input kapture poses
    :param kapture_to_openmvg_view_ids: input dict that maps kapture image ids to openMVG view ids.
    :return: extrinsics to be serialized
    """
    extrinsics = []
    # process all images
    for timestamp, kapture_cam_id, kapture_image_name in kapture.flatten(kapture_images):
        assert kapture_image_name in kapture_to_openmvg_view_ids
        openmvg_view_id = kapture_to_openmvg_view_ids[kapture_image_name]
        # retrieve image pose from trajectories
        if timestamp in kapture_trajectories:
            # there is a pose for that timestamp
            # The poses are stored both as priors (in the 'views' table) and as known poses (in the 'extrinsics' table)
            assert kapture_cam_id in kapture_trajectories[timestamp]
            pose_tr = kapture_trajectories[timestamp].get(kapture_cam_id)
            prior_q = pose_tr.r
            prior_t = pose_tr.inverse().t_raw
            pose_data = {JSON_KEY.CENTER: prior_t,
                         JSON_KEY.ROTATION: quaternion.as_rotation_matrix(prior_q).tolist()}
            extrinsics.append({JSON_KEY.KEY: openmvg_view_id, JSON_KEY.VALUE: pose_data})
    return extrinsics
def merge_table_key3(table_list, sensor_mappings: List[Dict[str, str]],
                     table_constructor, subdict_constructor=dict):
    """
    Merge several tables with 3 keys (eg. timestamp, device_id, sensor_id) into one.
    If there are multiple entries for a key, keep only the first one.

    :param sensor_mappings: mapping of the sensor identifiers to their new identifiers
    :param table_list: list of tables to merge.
    :param table_constructor: the class type of table.
    :param subdict_constructor: used to create a new Dict type
    :return: table_merged
    """
    assert len(table_list) > 0
    assert len(table_list) == len(sensor_mappings)
    if not all(table is None or isinstance(table, table_constructor) for table in table_list):
        raise TypeError('unexpected type.')
    table_merged = table_constructor()
    # keep None tables in the list, so each table stays aligned with its sensor_mapping
    for table, sensor_mapping in zip(table_list, sensor_mappings):
        if table is None:
            continue
        for key1, sensor_id, key3, entry in kapture.flatten(table):
            new_sensor_id = sensor_mapping[sensor_id]
            if (key1, new_sensor_id) not in table_merged:
                # if timestamp, sensor_id not there yet, create an instance of dict record
                table_merged[key1, new_sensor_id] = subdict_constructor()
            table_merged[key1, new_sensor_id].setdefault(key3, entry)
    return table_merged
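
# Usage sketch (illustrative; sensor ids are hypothetical). For a 3-key table such as
# records_wifi (timestamp, sensor_id, BSSID -> signal), every entry is re-keyed through
# its sensor_mapping, and setdefault makes the first entry win on collisions:
#
#   merged = merge_table_key3([wifi_a, wifi_b],
#                             sensor_mappings=[{'wifi0': 'wifi0_a'}, {'wifi0': 'wifi0_b'}],
#                             table_constructor=kapture.RecordsWifi)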
def get_poses(k_data: kapture.Kapture, image_set: Union[Set[str], List[str]]
              ) -> List[Tuple[str, kapture.PoseTransform]]:
    """
    Computes the poses for a set of images within a kapture.

    :param k_data: the kapture
    :param image_set: set of image names
    :return: list of (image name, pose)
    """
    assert k_data.trajectories is not None
    if isinstance(image_set, list):
        image_set = set(image_set)
    assert isinstance(image_set, set)
    assert isinstance(k_data, kapture.Kapture)

    # apply rigs to trajectories
    if k_data.rigs is not None:
        trajectories = kapture.rigs_remove(k_data.trajectories, k_data.rigs)
    else:
        trajectories = k_data.trajectories

    poses = []
    for timestamp, device_id, filename in kapture.flatten(k_data.records_camera, is_sorted=True):
        if filename in image_set and (timestamp, device_id) in trajectories:
            pose = trajectories[(timestamp, device_id)]
            poses.append((filename, pose))
    return poses
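
# Usage sketch (illustrative; the kapture object and image names are hypothetical).
# Rigs, if present, are resolved first, so the returned poses are always per camera:
#
#   poses = get_poses(kdata, ['query/q001.jpg', 'query/q002.jpg'])
#   for image_name, pose in poses:
#       print(image_name, pose.r_raw, pose.t_raw)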
def export_to_colmap_images_txt(colmap_images_filepath: str,
                                images: kapture.RecordsCamera,
                                trajectories: kapture.Trajectories,
                                colmap_camera_ids: Dict[str, int],
                                colmap_image_ids: Dict[str, int],
                                image_to_keypoints: Dict[str, List[Tuple[float, float, int]]]) -> None:
    """
    Exports kapture to the colmap reconstruction file "images.txt".

    :param colmap_images_filepath: path to the colmap file "images.txt" to be written.
    :param images: images list to export
    :param trajectories: poses to export
    :param colmap_camera_ids: correspondences between kapture camera id and colmap camera id
    :param colmap_image_ids: correspondences between kapture image id (image path) and colmap image id
    :param image_to_keypoints: input image_filename -> [(x, y, point_id), (x, y, point_id), ...]
    """
    assert path.basename(colmap_images_filepath) == 'images.txt'
    assert isinstance(images, kapture.RecordsCamera)
    assert isinstance(trajectories, kapture.Trajectories)
    assert isinstance(colmap_camera_ids, dict)
    assert isinstance(colmap_image_ids, dict)
    assert isinstance(image_to_keypoints, dict)
    images_flattened = list(kapture.flatten(images))
    images_colmap_header = '# Image list with two lines of data per image:\n' \
                           '# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\n' \
                           '# POINTS2D[] as (X, Y, POINT3D_ID)\n' \
                           '# NB IMAGES : {}\n'.format(len(images_flattened))
    with open(colmap_images_filepath, 'w') as fid:
        fid.write(images_colmap_header)
        for timestamp, sensor_id, sensing_filepath in images_flattened:
            colmap_cam_id = colmap_camera_ids[sensor_id]
            colmap_image_id = colmap_image_ids[sensing_filepath]
            # retrieve image pose from trajectories
            if timestamp not in trajectories:
                logger.debug('timestamp:{} not in trajectories'.format(timestamp))
                continue
            if sensor_id not in trajectories[timestamp]:
                logger.debug('camera {} not found in trajectories for timestamp {}.'.format(sensor_id, timestamp))
                continue
            pose_tr = trajectories[timestamp].get(sensor_id)
            line = [colmap_image_id] + pose_tr.r_raw + pose_tr.t_raw + [colmap_cam_id, sensing_filepath]
            fid.write(' '.join('{}'.format(i) for i in line) + '\n')
            if sensing_filepath in image_to_keypoints:
                # POINTS2D[] as (X, Y, POINT3D_ID)
                p2d = [(str(x), str(y), str(point_id))
                       for x, y, point_id in image_to_keypoints[sensing_filepath]]
                fid.write(' '.join('{} {} {}'.format(i, j, k) for i, j, k in p2d))
            fid.write('\n')
def merge_points3d_and_observations(pts3d_obs: List[Tuple[Optional[kapture.Points3d],
                                                          Optional[kapture.Observations]]]
                                    ) -> Tuple[kapture.Points3d, kapture.Observations]:
    """
    Merge a list of points3d with their observations.

    :param pts3d_obs: list of points3d with observations to merge
    :return: merged points3d associated to observations
    """
    assert len(pts3d_obs) > 0
    merged_points3d = kapture.Points3d()
    merged_observations = kapture.Observations()
    point3d_offset = 0
    for points3d, observations in pts3d_obs:
        if points3d is None:
            continue
        merged_points3d = kapture.Points3d(np.vstack([merged_points3d, points3d]))
        if observations is not None:
            for point3d_idx, (image_path, keypoint_idx) in kapture.flatten(observations):
                merged_observations.add(point3d_idx + point3d_offset, image_path, keypoint_idx)
        # offset for the next batch: total number of points merged so far
        point3d_offset = merged_points3d.shape[0]
    return merged_points3d, merged_observations
def merge_table_key2(table_list, sensor_mappings: List[Dict[str, str]], table_constructor):
    """
    Merge several tables with 2 keys (eg. timestamp, device_id) into one.
    If several tables define the same entry, the last one wins.

    :param sensor_mappings: mapping of the sensor identifiers to their new identifiers
    :param table_list: list of tables to merge.
    :param table_constructor: the class type of table.
    :return: table_merged
    """
    assert len(table_list) > 0
    assert len(table_list) == len(sensor_mappings)
    if not all(table is None or isinstance(table, table_constructor) for table in table_list):
        raise TypeError('unexpected type.')
    table_merged = table_constructor()
    # keep None tables in the list, so each table stays aligned with its sensor_mapping
    for table, sensor_mapping in zip(table_list, sensor_mappings):
        if table is None:
            continue
        for key1, sensor_id, entry in kapture.flatten(table):
            new_sensor_id = sensor_mapping[sensor_id]
            table_merged[key1, new_sensor_id] = entry
    return table_merged
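
# Usage sketch (illustrative; ids and paths are hypothetical, assumes `import kapture`
# as in the rest of this module). Entries are re-keyed through their sensor_mapping
# before insertion:
def _example_merge_table_key2():
    records_a = kapture.RecordsCamera()
    records_a[0, 'cam0'] = 'a/frame_000.jpg'
    records_b = kapture.RecordsCamera()
    records_b[0, 'cam0'] = 'b/frame_000.jpg'
    merged = merge_table_key2([records_a, records_b],
                              sensor_mappings=[{'cam0': 'cam0_a'}, {'cam0': 'cam0_b'}],
                              table_constructor=kapture.RecordsCamera)
    assert merged[0, 'cam0_a'] == 'a/frame_000.jpg'
    assert merged[0, 'cam0_b'] == 'b/frame_000.jpg'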
def test_import_openmvg(self) -> None:
    """
    Test the import_openmvg function on a small JSON file while linking the images.
    """
    self.assertTrue(path.isdir(self._openmvg_sample_path))
    self.assertTrue(path.exists(self._kapture_path), "Kapture directory exists")
    sfm_file = path.join(self._openmvg_sample_path, 'sfm_data_small.json')
    # on windows, without admin rights, fails with OSError: symbolic link privilege not held
    # see https://docs.python.org/3.6/library/os.html#os.symlink
    logger.info(f'Running on "{sys.platform}" which is {"" if self.isWindows else "not "}a Windows platform')
    file_operation = TransferAction.skip if self.isWindows else TransferAction.link_relative
    import_openmvg(sfm_file, self._kapture_path, file_operation, True)
    # test presence or absence of kapture files
    cameras_file_path = path.join(self._kapture_path, kcsv.CSV_FILENAMES[kapture.Sensors])
    self.assertTrue(path.isfile(cameras_file_path), "Camera file written")
    rigs_file_path = path.join(self._kapture_path, kcsv.CSV_FILENAMES[kapture.Rigs])
    self.assertFalse(path.isfile(rigs_file_path), "Rigs file should be missing")
    records_file_path = path.join(self._kapture_path, kcsv.CSV_FILENAMES[kapture.RecordsCamera])
    self.assertTrue(path.isfile(records_file_path), "Camera Records file written")
    lidars_file_path = path.join(self._kapture_path, kcsv.CSV_FILENAMES[kapture.RecordsLidar])
    self.assertFalse(path.isfile(lidars_file_path), "Lidar Records file should be missing")
    trajectories_file_path = path.join(self._kapture_path, kcsv.CSV_FILENAMES[kapture.Trajectories])
    self.assertTrue(path.isfile(trajectories_file_path), "Trajectories file written")

    # reload the data and verify
    kapture_data = kcsv.kapture_from_dir(self._kapture_path)
    self._verify_data(kapture_data)
    if not self.isWindows:
        # test images path
        all_records_camera = list(kapture.flatten(kapture_data.records_camera))
        for _, _, name in all_records_camera:
            img_path = get_image_fullpath(self._kapture_path, name)
            self.assertTrue(path.islink(img_path), f"image link {img_path}")
def match_features(self, kapture_data):
    image_list = [filename
                  for _, _, filename in kapture.flatten(kapture_data.records_camera)]
    descriptors = []
    descriptor_type = kapture_data.descriptors.dtype
    descriptor_size = kapture_data.descriptors.dsize
    for image_path in image_list:
        descriptors_full_path = get_descriptors_fullpath(kapture_data.kapture_path, image_path)
        descriptors.append(image_descriptors_from_file(descriptors_full_path, descriptor_type, descriptor_size))
    kapture_data.matches = kapture.Matches()
    if self._sequential_length is None:
        self._sequential_length = len(image_list)
    for i in tqdm(range(len(image_list))):
        for j in range(i + 1, min(len(image_list), i + self._sequential_length)):
            matches = self._matcher.match_descriptors(descriptors[i], descriptors[j])
            if self._minimal_score is not None:
                mask = matches[:, 2] > self._minimal_score
                matches = matches[mask]
            kapture_data.matches.add(image_list[i], image_list[j])
            matches_full_path = get_matches_fullpath((image_list[i], image_list[j]), kapture_data.kapture_path)
            image_matches_to_file(matches_full_path, matches)
def export_image_list(kapture_path: str, output_path: str, export_camera_params: bool, force: bool) -> None:
    """
    Export the image list in a text file.

    :param kapture_path: top directory of the kapture
    :param output_path: path of the image list file
    :param export_camera_params: if True, add camera parameters after every file name
    :param force: silently overwrite the image list file if it already exists.
    """
    os.makedirs(os.path.dirname(output_path), exist_ok=True)
    safe_remove_file(output_path, force)
    kapture_to_export = kapture_from_dir(kapture_path)
    output_content = []
    logger.info('starting conversion...')
    for _, sensor_id, filename in kapture.flatten(kapture_to_export.records_camera, is_sorted=True):
        line = filename
        if export_camera_params:
            camera = kapture_to_export.sensors[sensor_id]
            assert isinstance(camera, kapture.Camera)
            line += ' ' + ' '.join(camera.sensor_params)
        output_content.append(line)
    logger.info('writing exported data...')
    with open(output_path, 'w') as fid:
        fid.write('\n'.join(output_content))
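
# Usage sketch (illustrative; paths are hypothetical). With export_camera_params=True,
# each line holds the image name followed by the camera's sensor_params:
#
#   export_image_list('/data/my_dataset', '/data/my_dataset/image_list.txt',
#                     export_camera_params=True, force=True)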
def add_image_to_kapture(kdata_src, kdata_src_path, kdata_trg, img_name, pairs, add_pose=False):
    timestamp_sensor_id_from_image_name = {
        img_name: (timestamp, sensor_id)
        for timestamp, sensor_id, img_name in kapture.flatten(kdata_src.records_camera)
    }
    timestamp, sensor_id = timestamp_sensor_id_from_image_name[img_name]
    kdata_trg.sensors[sensor_id] = kdata_src.sensors[sensor_id]
    kdata_trg.records_camera[timestamp, sensor_id] = img_name
    kdata_trg.keypoints.add(img_name)
    if kdata_trg.descriptors is not None:
        kdata_trg.descriptors.add(img_name)
    if add_pose:
        kdata_trg.trajectories[timestamp, sensor_id] = kdata_src.trajectories[timestamp, sensor_id]
    if os.path.exists(kdata_src_path) and len(pairs) != 0:
        kdata_trg.matches = kapture.Matches()
        for i in pairs:
            image_matches_filepath = get_matches_fullpath((i[0], i[1]), kdata_src_path)
            if os.path.exists(image_matches_filepath):
                kdata_trg.matches.add(i[0], i[1])
        kdata_trg.matches.normalize()
    return kdata_trg
def merge_table_key3(table_list, table_constructor, subdict_constructor=dict):
    """
    Merge several tables with 3 keys (eg. records_wifi). For records with the same
    timestamp and sensor identifier, keep only the first one.

    :param table_list: list of tables to merge.
    :param table_constructor: the class type of table.
    :param subdict_constructor: used to create a new Dict type
    :return: table_merged
    """
    assert len(table_list) > 0
    table_list = [table for table in table_list if table is not None]
    if not all(isinstance(table, table_constructor) for table in table_list):
        raise TypeError('unexpected type.')
    table_merged = table_constructor()
    for table in table_list:
        for key1, key2, key3, entry in kapture.flatten(table):
            if (key1, key2) not in table_merged:
                # if timestamp, sensor_id not there yet, create an instance of dict record
                table_merged[key1, key2] = subdict_constructor()
            table_merged[key1, key2].setdefault(key3, entry)
    return table_merged
def convert_results_to_kapture(query_path: str, results: str, outpath: str):
    """
    Convert a results file with lines `image_name qw qx qy qz tx ty tz` into a kapture dataset.
    """
    skip_heavy_useless = [kapture.Trajectories,
                          kapture.RecordsLidar, kapture.RecordsWifi,
                          kapture.Keypoints, kapture.Descriptors, kapture.GlobalFeatures,
                          kapture.Matches, kapture.Points3d, kapture.Observations]
    kapture_query = kapture_from_dir(query_path, skip_list=skip_heavy_useless)
    inverse_records_camera = {image_name: (timestamp, sensor_id)
                              for timestamp, sensor_id, image_name
                              in kapture.flatten(kapture_query.records_camera)}
    trajectories = kapture.Trajectories()
    with open(results) as fid:
        lines = fid.readlines()
        lines = [line.rstrip().split() for line in lines if line != '\n']
    for line in lines:
        image_name = line[0]
        rotation = quaternion.quaternion(float(line[1]), float(line[2]), float(line[3]), float(line[4]))
        translation = [float(line[5]), float(line[6]), float(line[7])]
        timestamp, sensor_id = inverse_records_camera[image_name]
        trajectories[timestamp, sensor_id] = kapture.PoseTransform(rotation, translation)
    kapture_query.trajectories = trajectories
    kapture_to_dir(outpath, kapture_query)
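
# Illustrative input (hypothetical content of the results file): one whitespace-separated
# line per localized image, the rotation quaternion (qw qx qy qz) followed by the
# translation (tx ty tz):
#
#   query/q001.jpg 1.0 0.0 0.0 0.0 0.5 0.0 -2.0
#
# convert_results_to_kapture('/data/query', '/data/results.txt', '/data/query_localized')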
def add_image_to_kapture(kdata_src, kdata_trg, img_name, pairs,
                         keypoints_type, descriptors_type, add_pose=False):
    timestamp_sensor_id_from_image_name = {
        img_name: (timestamp, sensor_id)
        for timestamp, sensor_id, img_name in kapture.flatten(kdata_src.records_camera)
    }
    timestamp, sensor_id = timestamp_sensor_id_from_image_name[img_name]
    kdata_trg.sensors[sensor_id] = kdata_src.sensors[sensor_id]
    kdata_trg.records_camera[timestamp, sensor_id] = img_name
    kdata_trg.keypoints[keypoints_type].add(img_name)
    if kdata_trg.descriptors is not None and descriptors_type in kdata_trg.descriptors:
        kdata_trg.descriptors[descriptors_type].add(img_name)
    if add_pose:
        kdata_trg.trajectories[timestamp, sensor_id] = kdata_src.trajectories[timestamp, sensor_id]
    if len(pairs) != 0:
        if kdata_trg.matches is None:
            kdata_trg.matches = {}
        kdata_trg.matches[keypoints_type] = kapture.Matches()
        for i in pairs:
            if i in kdata_src.matches[keypoints_type]:
                kdata_trg.matches[keypoints_type].add(i[0], i[1])
        kdata_trg.matches[keypoints_type].normalize()
    return kdata_trg
def test_non_dict(self):
    test_values = 5
    expected_list = [(test_values,)]
    actual_list = list(kapture.flatten(test_values, is_sorted=False))
    actual_list_sorted = list(kapture.flatten(test_values, is_sorted=True))
    self.assertEqual(actual_list, expected_list)
    self.assertEqual(actual_list_sorted, expected_list)

    test_values = [5.0, 7.3, 6.02]
    expected_list = [(5.0,), (7.3,), (6.02,)]
    expected_list_sorted = [(5.0,), (6.02,), (7.3,)]
    actual_list = list(kapture.flatten(test_values, is_sorted=False))
    actual_list_sorted = list(kapture.flatten(test_values, is_sorted=True))
    self.assertEqual(actual_list, expected_list)
    self.assertEqual(actual_list_sorted, expected_list_sorted)

    test_values = set([5.0, 7.3, 6.02])
    expected_list_sorted = [(5.0,), (6.02,), (7.3,)]
    actual_list_sorted = list(kapture.flatten(test_values, is_sorted=True))
    self.assertEqual(actual_list_sorted, expected_list_sorted)

    test_values = kapture.RecordWifi(3, 4, 7, 'c')
    expected_list = [(test_values,)]
    actual_list = list(kapture.flatten(test_values, is_sorted=False))
    actual_list_sorted = list(kapture.flatten(test_values, is_sorted=True))
    self.assertEqual(actual_list, expected_list)
    self.assertEqual(actual_list_sorted, expected_list)