def test_init(self):
    """A fresh Sensors is empty; entries can be added and queried by id."""
    sensors = kapture.Sensors()
    self.assertEqual(0, len(sensors))
    unknown_sensor = kapture.Sensor('unknown', [])
    sensors['cam0'] = unknown_sensor
    self.assertEqual(1, len(sensors))
    self.assertIn('cam0', sensors)
    self.assertIn('unknown', sensors['cam0'].sensor_type)
def test_kapture_format_version_from_disk(self):
    """The format version read back from disk is the current one."""
    kapture_data = kapture.Kapture()
    kapture_data.sensors = kapture.Sensors()
    csv.kapture_to_dir(self._tempdir.name, kapture_data)
    stored_version = csv.kapture_format_version(self._tempdir.name)
    self.assertEqual(csv.current_format_version(), stored_version,
                     "We have the current version")
def test_kapture_write(self):
    """kapture_to_dir writes a file for each defined part, and only those."""
    kdata = kapture.Kapture()
    sensors_dir = path.join(self._tempdir.name, 'sensors')
    # same check order as the part definitions below
    part_filepaths = [path.join(sensors_dir, name)
                      for name in ('sensors.txt', 'trajectories.txt', 'rigs.txt')]
    # nothing defined yet: no file must be written
    csv.kapture_to_dir(self._tempdir.name, kdata)
    for part_filepath in part_filepaths:
        self.assertFalse(path.exists(part_filepath))
    # define the parts: the corresponding files must appear
    kdata.sensors = kapture.Sensors()
    kdata.trajectories = kapture.Trajectories()
    kdata.rigs = kapture.Rigs()
    csv.kapture_to_dir(self._tempdir.name, kdata)
    for part_filepath in part_filepaths:
        self.assertTrue(path.exists(part_filepath))
def test_kapture_write_read(self):
    """A kapture with empty parts survives a write/read round trip without error."""
    kdata_expected = kapture.Kapture()
    kdata_expected.sensors = kapture.Sensors()
    kdata_expected.trajectories = kapture.Trajectories()
    kdata_expected.rigs = kapture.Rigs()
    # write then read back from the same directory
    csv.kapture_to_dir(self._tempdir.name, kdata_expected)
    kdata_actual = csv.kapture_from_dir(self._tempdir.name)
def get_cameras_from_database(database: COLMAPDatabase) -> kapture.Sensors:
    """
    Creates kapture sensors from the colmap database.

    :param database: colmap database
    :return: kapture sensors
    """
    logger.info('parsing cameras ...')
    sensors = kapture.Sensors()
    query = 'SELECT camera_id, model, width, height, params, prior_focal_length FROM cameras;'
    for camera_id, model_id, width, height, params, prior_focal_length in database.execute(query):
        if model_id not in CAMERA_MODEL_NAMES:
            logger.warning(f'unable to convert colmap camera model ({model_id}) for camera {camera_id}.')
            # use 0 as default
            model_id = 0
        kapture_camera_id = get_camera_kapture_id_from_colmap_id(camera_id)
        # By setting the prior_focal_length flag to 0 or 1,
        # you can give a hint whether the reconstruction algorithm should trust the focal length value.
        # decode the params blob and prepend image size: [w, h, <model params...>]
        full_params = [width, height] + blob_to_array(params, np.float64).tolist()
        sensors[kapture_camera_id] = kapture.Camera(CAMERA_MODEL_NAMES[model_id], full_params)
    return sensors
def import_extended_cmu_seasons_intrinsics(
        intrinsics_file_path: str) -> kapture.Sensors:
    """
    Read and convert intrinsics file

    Format: [Camera ID] [Distortion model] [image width] [image height]
            [fx] [fy] [cx] [cy] [k1] [k2] [p1] [p2]

    :param intrinsics_file_path: path to the CMU intrinsics file
    :return: kapture cameras
    """
    cameras = kapture.Sensors()
    with open(intrinsics_file_path) as fin:
        for raw_line in fin.readlines():
            # skip comment and empty lines
            if raw_line.startswith('#') or not raw_line.strip():
                continue
            # trim trailing EOL, split on whitespace, drop empty tokens
            tokens = [t for t in re.split(r'\s+', raw_line.rstrip("\n\r")) if t]
            camera_id, distortion_model, *camera_params = tokens
            cameras[camera_id] = kapture.Camera(distortion_model, list(camera_params))
    return cameras
def test_sensor_file_version(self):
    """The sensors CSV file header carries the current kapture format version."""
    sensors = kapture.Sensors()
    sensors['cam0'] = kapture.Camera(name='cam0',
                                     camera_type='SIMPLE_PINHOLE',
                                     camera_params=[640, 480, 100, 320, 240])
    csv.sensors_to_file(self._temp_filepath, sensors)
    stored_version = csv.get_version_from_csv_file(self._temp_filepath)
    self.assertEqual(csv.current_format_version(), stored_version,
                     "Version correctly stored")
def test_type_checking(self):
    """Sensors rejects non-string ids and non-Sensor values with TypeError."""
    sensors = kapture.Sensors()
    invalid_sensor_id = tuple('a', )
    valid_sensor_id = 'cam0'
    invalid_sensor = int(0)
    valid_sensor = kapture.Sensor('camera')
    # every combination with at least one invalid operand must raise
    with self.assertRaises(TypeError):
        sensors[valid_sensor_id] = invalid_sensor
    with self.assertRaises(TypeError):
        sensors[invalid_sensor_id] = valid_sensor
    with self.assertRaises(TypeError):
        sensors[invalid_sensor_id] = invalid_sensor
def import_image_folder(
        images_path: str,
        kapture_path: str,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.skip) -> None:
    """
    Imports the images of a folder to a kapture. This creates only images and cameras.

    :param images_path: path to directory containing the images.
    :param kapture_path: path to kapture root directory.
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    """
    os.makedirs(kapture_path, exist_ok=True)
    delete_existing_kapture_files(kapture_path, force_erase=force_overwrite_existing)

    cameras = kapture.Sensors()
    images = kapture.RecordsCamera()

    file_list = [os.path.relpath(os.path.join(dirpath, filename), images_path)
                 for dirpath, dirs, filenames in os.walk(images_path)
                 for filename in filenames]
    file_list = sorted(file_list)

    logger.info('starting conversion...')
    for n, filename in enumerate(file_list):
        # test if file is a valid image
        try:
            # lazy load
            with Image.open(path.join(images_path, filename)) as im:
                width, height = im.size
                model_params = [width, height]
        except (OSError, PIL.UnidentifiedImageError):
            # It is not a valid image: skip it
            # BUGFIX: the message was a placeholder-less f-string reading
            # "(unknown)"; report which file was actually skipped.
            logger.info(f'Skipping invalid image file {filename}')
            continue
        camera_id = f'sensor{n}'
        images[(n, camera_id)] = path_secure(filename)  # don't forget windows
        cameras[camera_id] = kapture.Camera(kapture.CameraType.UNKNOWN_CAMERA, model_params)

    # import (copy) image files.
    logger.info('import image files ...')
    filename_list = [f for _, _, f in kapture.flatten(images)]
    import_record_data_from_dir_auto(images_path, kapture_path, filename_list, images_import_method)

    # pack into kapture format
    imported_kapture = kapture.Kapture(sensors=cameras, records_camera=images)
    logger.info('writing imported data...')
    kapture_to_dir(kapture_path, imported_kapture)
def _import_cameras(silda_dir_path, snapshots, fallback_cam_model) -> kapture.Sensors:
    """Build kapture sensors for every camera id present in the snapshots.

    Projection parameters are hard coded (evaluated using colmap); only the
    principal point is recovered from the silda intrinsics files.
    """
    logger.info('Processing sensors ...')
    cameras = kapture.Sensors()
    # use hard coded intrinsics, evaluated using colmap:
    # 1 OPENCV_FISHEYE 1024 1024 393.299 394.815 512 512 -0.223483 0.117325 -0.0326138 0.00361082
    #                            fx, fy, cx, cy, omega
    # 1 FOV 1024 1024 300 300 512 512 0.899632
    all_cam_ids = sorted(set(cam_id for _, cam_id, _ in kapture.flatten(snapshots)))
    for cam_id in all_cam_ids:
        # pick an image for that cam id
        random_image_intrinsic = next(
            f'{timestamp}_{cam_id}.intrinsics'  # keep only filename (thats what silda expect)
            for timestamp, cid, filename in kapture.flatten(snapshots)
            if cid == cam_id)
        logger.debug(f'camera {cam_id} intrinsics : picking at random: ("{random_image_intrinsic}")')
        intrinsic_filepath = path.join(silda_dir_path, 'camera-intrinsics', random_image_intrinsic)
        logger.debug(f'loading file: "{intrinsic_filepath}"')
        silda_proj_params = np.loadtxt(intrinsic_filepath)
        # only retrieve principal point from intrinsics,
        # because the rest correspond to a fisheye model not available in colmap.
        principal_point = (silda_proj_params[0:2] * SILDA_IMAGE_SIZE).flatten().tolist()
        projection = fallback_cam_model
        if projection == 'OPENCV_FISHEYE':
            # fx, fy, cx, cy, k1, k2, k3, k4
            proj_params = ([393.299, 394.815]
                           + principal_point
                           + [-0.223483, 0.117325, -0.0326138, 0.00361082])
        elif projection == 'FOV':
            # use hard coded intrinsics from Torsten reconstruction, ie.
            # 217.294036, 217.214703, 512.000000, 507.897400, -0.769113
            # fx, fy, cx, cy, omega (principal point overridden by the one read above)
            proj_params = [217.294036, 217.214703] + principal_point + [-0.769113]
        else:
            raise ValueError('Only accepts OPENCV_FISHEYE, or FOV as projection model.')
        cameras[cam_id] = kapture.Camera(projection, SILDA_IMAGE_SIZE.tolist() + proj_params)
    return cameras
def test_init(self):
    """A Kapture built with sensors exposes them; the attribute is assignable and mutable."""
    cam0 = kapture.Sensor('camera', [])
    lidar0 = kapture.Sensor('lidar', [])
    sensors = kapture.Sensors()
    sensors['cam0'] = cam0
    kapture_data = kapture.Kapture(sensors=sensors)
    self.assertEqual(sensors, kapture_data.sensors)
    self.assertEqual(sensors['cam0'], kapture_data.sensors['cam0'])
    # read back, re-assign, then extend in place
    read_back = kapture_data.sensors
    self.assertIsInstance(read_back, kapture.Sensors)
    kapture_data.sensors = read_back
    kapture_data.sensors['lidar0'] = lidar0
def test_maupertuis_export(self):
    """Exporting to colmap then importing back must match the golden kapture."""
    # export/import and check
    colmap_db_filepath = path.join(self._temp_dirpath, 'colmap.db')
    colmap_txt_filepath = path.join(self._temp_dirpath, 'dense')
    export_colmap(kapture_dirpath=self._kapture_dirpath,
                  colmap_database_filepath=colmap_db_filepath,
                  colmap_reconstruction_dirpath=colmap_txt_filepath,
                  colmap_rig_filepath=None,
                  force_overwrite_existing=True)
    kapture_data = import_colmap(kapture_dirpath=self._temp_dirpath,
                                 colmap_database_filepath=colmap_db_filepath,
                                 colmap_reconstruction_dirpath=colmap_txt_filepath,
                                 colmap_images_dirpath=None,
                                 force_overwrite_existing=True,
                                 no_geometric_filtering=True)
    # check the numbers
    self.assertEqual(1, len(kapture_data.sensors))
    self.assertEqual(4, len(kapture_data.trajectories))
    self.assertEqual(4, len(kapture_data.records_camera))
    for absent_part in (kapture_data.records_lidar,
                        kapture_data.records_wifi,
                        kapture_data.records_gnss):
        self.assertIsNone(absent_part)
    self.assertEqual(4, len(kapture_data.keypoints))
    self.assertEqual(4, len(kapture_data.descriptors))
    self.assertEqual(6, len(kapture_data.matches))
    self.assertEqual(1039, len(kapture_data.points3d))
    self.assertEqual(1039, len(kapture_data.observations))
    # compare against golden kapture
    kapture_data_golden = kapture_from_dir(self._kapture_dirpath)
    # drop GPS, Wifi, Lidar: those parts do not survive the colmap round trip
    kapture_data.records_lidar = None
    kapture_data.records_wifi = None
    kapture_data_golden.records_gnss = None
    kapture_data_golden.sensors = kapture.Sensors({
        sensor_id: sensor
        for sensor_id, sensor in kapture_data_golden.sensors.items()
        if sensor.sensor_type == 'camera'})
    # compare
    self.assertTrue(kapture.algo.compare.equal_kapture(kapture_data, kapture_data_golden))
def sensors_from_file(filepath: str) -> kapture.Sensors:
    """
    Reads sensors from CSV file.

    :param filepath: input file path
    :return: sensors
    """
    sensors = kapture.Sensors()
    with open(filepath) as file:
        table = table_from_file(file)
        # each row: sensor_id, name, sensor_type, [sensor_params]+
        for sensor_id, name, sensor_type, *sensor_params in table:
            sensors[sensor_id] = kapture.create_sensor(sensor_type=sensor_type,
                                                       sensor_params=sensor_params,
                                                       name=name)
    return sensors
def test_sensors_write(self):
    """sensors_to_file writes the format header plus one CSV line per sensor."""
    sensors = kapture.Sensors()
    for sensor_id in ('cam0', 'cam1'):
        sensors[sensor_id] = kapture.Camera(name=sensor_id,
                                            camera_type='SIMPLE_PINHOLE',
                                            camera_params=[640, 480, 100, 320, 240])
    formatted_expected = '\n'.join([
        csv.KAPTURE_FORMAT_1,
        '# sensor_id, name, sensor_type, [sensor_params]+',
        'cam0, cam0, camera, SIMPLE_PINHOLE, 640, 480, 100, 320, 240',
        'cam1, cam1, camera, SIMPLE_PINHOLE, 640, 480, 100, 320, 240',
        ''])
    csv.sensors_to_file(self._temp_filepath, sensors)
    with open(self._temp_filepath, 'rt') as f:
        formatted_actual = f.read()
    self.assertEqual(formatted_actual, formatted_expected)
def test_maupertuis_import(self):
    """Import from colmap must reproduce the golden kapture and copy the images."""
    kapture_data = import_colmap(self._temp_dirpath,
                                 self._database_filepath,
                                 self._reconstruction_path,
                                 self._images_filepath,
                                 force_overwrite_existing=True,
                                 images_import_strategy=TransferAction.copy,
                                 no_geometric_filtering=True)
    # check the numbers
    self.assertEqual(1, len(kapture_data.sensors))
    self.assertEqual(4, len(kapture_data.trajectories))
    self.assertEqual(4, len(kapture_data.records_camera))
    for absent_part in (kapture_data.records_lidar,
                        kapture_data.records_wifi,
                        kapture_data.records_gnss):
        self.assertIsNone(absent_part)
    self.assertEqual(4, len(kapture_data.keypoints))
    self.assertEqual(4, len(kapture_data.descriptors))
    self.assertEqual(6, len(kapture_data.matches))
    self.assertEqual(1039, len(kapture_data.points3d))
    self.assertEqual(1039, len(kapture_data.observations))
    # compare against golden kapture
    kapture_data_golden = kapture_from_dir(self._kapture_dirpath)
    # drop GPS, Wifi, Lidar: those parts do not survive the colmap round trip
    kapture_data.records_lidar = None
    kapture_data.records_wifi = None
    kapture_data_golden.records_gnss = None
    kapture_data_golden.sensors = kapture.Sensors({
        sensor_id: sensor
        for sensor_id, sensor in kapture_data_golden.sensors.items()
        if sensor.sensor_type == 'camera'})
    # compare
    self.assertTrue(kapture.algo.compare.equal_kapture(kapture_data, kapture_data_golden))
    # Check images copy: every imported image record must exist on disk
    for _, _, name in kapture.flatten(kapture_data.records_camera):
        image_path = get_image_fullpath(self._temp_dirpath, name)
        self.assertTrue(path.isfile(image_path), f"image link {image_path}")
def test_as_dict(self):
    """as_dict drops None members by default, and keeps all 18 with keep_none=True."""
    kapture_data = kapture.Kapture()
    # test empty
    self.assertEqual(kapture_data.as_dict(), {})
    all_members = kapture_data.as_dict(keep_none=True)
    self.assertEqual(len(all_members), 18)
    self.assertTrue(all(member is None for member in all_members.values()))
    # test sensors only
    kapture_data.sensors = kapture.Sensors({'cam0': kapture.Sensor('camera', [])})
    defined_members = kapture_data.as_dict()
    self.assertEqual(len(defined_members), 1)
    self.assertEqual(defined_members, {'sensors': kapture_data.sensors})
    all_members = kapture_data.as_dict(keep_none=True)
    self.assertEqual(len(all_members), 18)
    self.assertEqual(all_members['sensors'], kapture_data.sensors)
    self.assertTrue(all(member is None
                        for name, member in all_members.items()
                        if name != 'sensors'))
def set_camera(self, id, name, cam: Camera):
    """Register `cam` as a FULL_OPENCV kapture camera and remember it as the default.

    Intrinsics are scaled by self.scale; the 8 distortion coefficients default
    to zero and are overwritten from cam.dist_coefs when available.
    """
    self.default_cam = ('%s' % id, name)
    if self.kapture.sensors is None:
        self.kapture.sensors = kt.Sensors()
    mx = cam.cam_mx
    sc = self.scale
    # w, h, fx, fy, cx, cy followed by 8 distortion coefficients
    params = [cam.width * sc, cam.height * sc,
              mx[0, 0] * sc, mx[1, 1] * sc,
              mx[0, 2] * sc, mx[1, 2] * sc]
    params += [0.] * 8
    if cam.dist_coefs is not None:
        for i, coef in enumerate(cam.dist_coefs):
            params[6 + i] = coef
    self.kapture.sensors[self.default_cam[0]] = kt.Camera(CameraType.FULL_OPENCV,
                                                          camera_params=params,
                                                          name=name)
def extract_gps_from_exif(kapture_data: kapture.Kapture, kapture_dirpath: str):
    """
    Extract GPS coordinates from kapture dataset, returns the new sensor and gnss records.
    Gnss timestamps and sensor ids are guessed from timestamps and camera_id from images.
    The GNSS sensor_id are built prefixing 'GPS_'<cam_id>, with cam_id the sensor_id of the
    corresponding camera.

    :param kapture_data: input kapture data, must contains sensors and records_camera.
    :param kapture_dirpath: input path to kapture directory.
    :return: gnss sensors and the corresponding gnss records
    """
    disable_tqdm = logger.getEffectiveLevel() != logging.INFO
    # one gps sensor per camera, all set to EPSG:4326
    gnss_kapture_sensors = kapture.Sensors()
    cam_to_gps_id = {}  # cam_id -> gps_id
    for cam_id, sensor in kapture_data.sensors.items():
        if sensor.sensor_type == 'camera':
            gps_id = 'GPS_' + cam_id
            cam_to_gps_id[cam_id] = gps_id
            gnss_kapture_sensors[gps_id] = kapture.Sensor(sensor_type='gnss',
                                                          sensor_params=['EPSG:4326'])

    image_filepaths = images_to_filepaths(kapture_data.records_camera,
                                          kapture_dirpath=kapture_dirpath)
    records_gnss = kapture.RecordsGnss()
    for timestamp, cam_id, image_name in tqdm(kapture.flatten(kapture_data.records_camera),
                                              disable=disable_tqdm):
        image_filepath = image_filepaths[image_name]
        logger.debug(f'extracting GPS tags from {image_filepath}')
        gps_id = cam_to_gps_id[cam_id]
        exif_data = read_exif(image_filepath)
        records_gnss[timestamp, gps_id] = convert_gps_to_kapture_record(exif_data)
    return gnss_kapture_sensors, records_gnss
def merge_sensors(
        sensors_list: List[Optional[kapture.Sensors]]) -> kapture.Sensors:
    """
    Merge several sensors lists. For sensor with the same identifier, keep only the first one.

    :param sensors_list: list of sensors
    :return: merge sensors
    """
    assert len(sensors_list) > 0
    merged_sensors = kapture.Sensors()
    for sensors in sensors_list:
        if sensors is None:
            continue
        for sensor_id, sensor in sensors.items():
            # first occurrence wins
            if sensor_id not in merged_sensors:
                merged_sensors[sensor_id] = sensor
    return merged_sensors
def sub_kapture_from_img_list(kdata, kdata_path, img_list, pairs):
    """
    Build a sub-kapture restricted to the given images and image pairs.

    :param kdata: input kapture data
    :param kdata_path: path to the kapture root directory (to locate match files)
    :param img_list: image names to keep
    :param pairs: image-name pairs to keep as matches (only when the match file exists on disk)
    :return: a new kapture with the filtered sensors, records, trajectories,
             keypoints, descriptors and matches
    """
    trajectories = kapture.Trajectories()
    sensors = kapture.Sensors()
    records = kapture.RecordsCamera()
    keypoints = kapture.Keypoints(kdata.keypoints._tname,
                                  kdata.keypoints._dtype,
                                  kdata.keypoints._dsize)
    # descriptors are optional
    # FIX: compare to None with `is not None` (PEP 8), not `!= None`
    if kdata.descriptors is not None:
        descriptors = kapture.Descriptors(kdata.descriptors._tname,
                                          kdata.descriptors._dtype,
                                          kdata.descriptors._dsize)
    else:
        descriptors = None
    matches = kapture.Matches()

    # reverse index: image name -> (timestamp, sensor_id)
    timestamp_sensor_id_from_image_name = {
        img_name: (timestamp, sensor_id)
        for timestamp, sensor_id, img_name in kapture.flatten(kdata.records_camera)}
    for img in img_list:
        timestamp, sensor_id = timestamp_sensor_id_from_image_name[img]
        pose = kdata.trajectories[timestamp][sensor_id]
        sensors[sensor_id] = kdata.sensors[sensor_id]
        records[timestamp, sensor_id] = img
        trajectories[timestamp, sensor_id] = pose
        keypoints.add(img)
        if kdata.descriptors is not None:
            descriptors.add(img)

    # keep only pairs whose match file actually exists on disk
    for i in pairs:
        image_matches_filepath = get_matches_fullpath((i[0], i[1]), kdata_path)
        if os.path.exists(image_matches_filepath):
            matches.add(i[0], i[1])
    matches.normalize()

    return kapture.Kapture(sensors=sensors, trajectories=trajectories,
                           records_camera=records, descriptors=descriptors,
                           keypoints=keypoints, matches=matches)
def merge_sensors(sensors_list: List[Optional[kapture.Sensors]],
                  sensor_mappings: List[Dict[str, str]]) -> kapture.Sensors:
    """
    Merge several sensors list into one list with new identifiers.

    :param sensors_list: list of sensors definitions to merge
    :param sensor_mappings: mapping of the sensor identifiers to their new identifiers
    :return: merged sensors definitions
    """
    assert len(sensors_list) > 0
    assert len(sensors_list) == len(sensor_mappings)
    merged_sensors = kapture.Sensors()
    for sensors, mapping in zip(sensors_list, sensor_mappings):
        if sensors is None:
            continue
        # re-key every sensor through its mapping
        for sensor_id, sensor in sensors.items():
            merged_sensors[mapping[sensor_id]] = sensor
    return merged_sensors
def setUp(self):
    """Resolve paths to the Aachen sample data and build the golden bundler sensors."""
    samples_folder = path.abspath(path.join(path.dirname(__file__), '../samples/'))
    self.aachen_folder = path.join(samples_folder, 'Aachen-Day-Night')
    self.aachen_models_folder = path.join(self.aachen_folder, '3D-models')
    self.images_folder = path.join(self.aachen_folder, 'images_upright')
    self.bundler_sensors = kapture.Sensors()
    # RADIAL params: w, h, f, cx, cy, k1, k2
    radial_params_per_sensor = {
        'sensor0': [1600, 1067, 1.084590000e+03, 800, 533.5, 0.000000000e+00, 6.894198313e-08],
        'sensor1': [1200, 1600, 1.556980000e+03, 600, 800, 0.000000000e+00, 3.565154420e-08],
        'sensor2': [1600, 1067, 1.103400000e+03, 800, 533.5, 0.000000000e+00, 6.527248534e-08],
    }
    for sensor_id, params in radial_params_per_sensor.items():
        self.bundler_sensors[sensor_id] = kapture.Camera(kapture.CameraType.RADIAL, params)
def sub_kapture_from_img_list(kdata, img_list, pairs, keypoints_type, descriptors_type):
    """
    Build a sub-kapture restricted to the given images and image pairs.

    :param kdata: input kapture data
    :param img_list: image names to keep
    :param pairs: image-name pairs to keep as matches (when present in kdata.matches)
    :param keypoints_type: name of the keypoints subfolder to use
    :param descriptors_type: name of the descriptors subfolder to use
    :return: a new kapture with the filtered sensors, records, trajectories,
             keypoints, descriptors and matches
    """
    trajectories = kapture.Trajectories()
    sensors = kapture.Sensors()
    records = kapture.RecordsCamera()
    keypoints = kapture.Keypoints(kdata.keypoints[keypoints_type].type_name,
                                  kdata.keypoints[keypoints_type].dtype,
                                  kdata.keypoints[keypoints_type].dsize)
    # descriptors are optional: only kept when the requested type is available
    if kdata.descriptors is not None and descriptors_type in kdata.descriptors:
        descriptors = kapture.Descriptors(kdata.descriptors[descriptors_type].type_name,
                                          kdata.descriptors[descriptors_type].dtype,
                                          kdata.descriptors[descriptors_type].dsize,
                                          kdata.descriptors[descriptors_type].keypoints_type,
                                          kdata.descriptors[descriptors_type].metric_type)
    else:
        descriptors = None
    matches = kapture.Matches()

    # reverse index: image name -> (timestamp, sensor_id)
    timestamp_sensor_id_from_image_name = {
        img_name: (timestamp, sensor_id)
        for timestamp, sensor_id, img_name in kapture.flatten(kdata.records_camera)}
    for img in img_list:
        timestamp, sensor_id = timestamp_sensor_id_from_image_name[img]
        sensors[sensor_id] = kdata.sensors[sensor_id]
        records[timestamp, sensor_id] = img
        # trajectories are optional per image
        if (timestamp, sensor_id) in kdata.trajectories:
            pose = kdata.trajectories[timestamp][sensor_id]
            trajectories[timestamp, sensor_id] = pose
        keypoints.add(img)
        # BUGFIX: guard on the local `descriptors`, which is None when
        # descriptors_type is absent from kdata.descriptors; the old guard
        # (`kdata.descriptors is not None`) crashed with AttributeError
        # in that case.
        if descriptors is not None:
            descriptors.add(img)
    for i in pairs:
        if i in kdata.matches[keypoints_type]:
            matches.add(i[0], i[1])
    matches.normalize()
    return kapture.Kapture(sensors=sensors, trajectories=trajectories,
                           records_camera=records,
                           descriptors={descriptors_type: descriptors},
                           keypoints={keypoints_type: keypoints},
                           matches={keypoints_type: matches})
def import_robotcar_cameras(intrinsics_dir_path: str) -> kapture.Sensors:
    """
    Read and convert intrinsics files

    :param intrinsics_dir_path: path to the directory holding the intrinsics files
    :return: kapture.cameras
    """
    cameras = kapture.Sensors()
    for root, dirs, files in os.walk(intrinsics_dir_path):
        for intrinsic_filename in files:
            # file names look like '<camera_id>_...'
            (camera_id, _) = intrinsic_filename.split('_')
            # NOTE(review): files are joined to the top directory, not to `root`;
            # assumes all intrinsics files live directly in intrinsics_dir_path — confirm.
            # FIX: use a with-statement so the file is closed even on error
            # (the old open()/close() pair leaked the handle on exception).
            with open(path.join(intrinsics_dir_path, intrinsic_filename), 'r') as intrinsic_file:
                (_, fx) = intrinsic_file.readline().split()
                (_, fy) = intrinsic_file.readline().split()
                (_, cx) = intrinsic_file.readline().split()
                (_, cy) = intrinsic_file.readline().split()
            # w, h, fx, fy, cx, cy
            model = kapture.CameraType.PINHOLE
            model_params = [1024, 1024, fx, fy, cx, cy]
            cameras[camera_id] = kapture.Camera(model, model_params)
    return cameras
def import_from_colmap_cameras_txt(colmap_cameras_filepath: str) -> kapture.Sensors:
    """
    Imports Sensors from colmap cameras.txt

    :param colmap_cameras_filepath: input path to colmap cameras.txt file
    :return: kapture sensors
    """
    sensors = kapture.Sensors()
    # cameras[cam_id] = camera
    with open(colmap_cameras_filepath, 'r') as cameras_file:
        lines = cameras_file.readlines()
    # eliminate comments, then split each line by space and or comma
    data_lines = (line for line in lines if not line.startswith('#'))
    tokenized_lines = (re.findall(colmap_reconstruction_split_pattern, line.rstrip())
                       for line in data_lines)
    for fields in tokenized_lines:
        camera_id = get_camera_kapture_id_from_colmap_id(int(fields[0]))
        camera_type = str(fields[1])
        image_size = [str(s) for s in fields[2:4]]
        projection_params = [str(f) for f in fields[4:]]
        sensors[camera_id] = kapture.Camera(camera_type, image_size + projection_params)
    return sensors
def import_robotcar_colmap_location(robotcar_path: str,
                                    colmap_reconstruction_fullpath: path,
                                    kapture_path: str,
                                    rigs: kapture.Rigs,
                                    skip_reconstruction: bool) -> kapture.Kapture:
    """
    Import robotcar data for one location from colmap reconstruction

    :param robotcar_path: path to the robotcar top directory
    :param colmap_reconstruction_fullpath: path to the colmap reconstruction directory
    :param kapture_path: path to the kapture top directory
    :param rigs: kapture rigs to modify
    :param skip_reconstruction: if True, will not add the reconstruction
    :return: a kapture object
    """
    # First, import Colmap reconstruction for given location
    kapture_data = import_colmap(
        kapture_dirpath=kapture_path,
        colmap_reconstruction_dirpath=colmap_reconstruction_fullpath,
        colmap_images_dirpath=path.join(robotcar_path, "images"),
        skip_reconstruction=skip_reconstruction,
        images_import_strategy=TransferAction.skip)  # since filenames are incorrect

    # Post processing:
    # - use correct names for cameras
    # - model was built with PNG files, but we have JPG
    # - recover proper timestamps
    # - recover rig

    # Fix sensors.txt: rename the colmap camera ids to the robotcar camera names
    camera_mapping = {
        'cam_00001': 'left',
        'cam_00002': 'rear',
        'cam_00003': 'right'
    }
    new_cameras = kapture.Sensors()
    for cam_id in kapture_data.sensors:
        new_cameras[camera_mapping[cam_id]] = kapture_data.sensors[cam_id]
    kapture_data.sensors = new_cameras

    if not skip_reconstruction:
        # Fix keypoints
        # Need to rename .png.kpt to .jpg.kpt files and that's all
        for root, dirs, files in os.walk(kapture_path):
            for file in files:
                if file.endswith('.png.kpt'):
                    os.rename(
                        path.join(root, file),
                        path.join(root, file.replace(".png.kpt", ".jpg.kpt")))

        # observations.txt: png -> jpg
        new_observations = kapture.Observations()
        for point3d_idx in kapture_data.observations:
            for image_path, keypoint_id in kapture_data.observations[
                    point3d_idx]:
                new_observations.add(point3d_idx,
                                     image_path.replace(".png", ".jpg"),
                                     int(keypoint_id))
        kapture_data.observations = new_observations

    # records_camera.txt
    # timestamps, png->jpg: the real timestamp is encoded in the png filename
    new_records_camera = kapture.RecordsCamera()
    records_camera_pattern = re.compile(r'.*/(?P<timestamp>\d+)\.png')
    # ts_mapping: colmap (fake) timestamp -> real timestamp parsed from the filename
    ts_mapping = {}
    for ts, shot in kapture_data.records_camera.items():
        for cam_id, image_path in shot.items():
            matches = records_camera_pattern.match(image_path)
            if not matches:
                # image path does not carry a timestamp: dropped from the records
                continue
            matches = matches.groupdict()
            timestamp = int(matches['timestamp'])
            ts_mapping[ts] = timestamp
            new_path = image_path.replace(".png", ".jpg")
            new_records_camera[timestamp, camera_mapping[cam_id]] = new_path
    kapture_data.records_camera = new_records_camera

    # trajectories.txt
    new_trajectories = kapture.Trajectories()
    # First recover timestamps and camera names
    for ts, sensor_id in sorted(kapture_data.trajectories.key_pairs()):
        new_trajectories[
            ts_mapping[ts],
            camera_mapping[sensor_id]] = kapture_data.trajectories[ts, sensor_id]
    kapture_data.trajectories = new_trajectories
    kapture_data.rigs = rigs
    return kapture_data
def import_7scenes(d7scenes_path: str,
                   kapture_dir_path: str,
                   force_overwrite_existing: bool = False,
                   images_import_method: TransferAction = TransferAction.skip,
                   partition: Optional[str] = None
                   ) -> None:
    """
    Imports RGB-D Dataset 7-Scenes dataset and save them as kapture.

    :param d7scenes_path: path to the 7scenes sequence root path
    :param kapture_dir_path: path to kapture top directory
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    :param partition: if specified = 'mapping' or 'query'. Requires d7scenes_path/TestSplit.txt
                      or TrainSplit.txt to exists.
    """
    os.makedirs(kapture_dir_path, exist_ok=True)
    delete_existing_kapture_files(kapture_dir_path, force_erase=force_overwrite_existing)

    logger.info('loading all content ...')
    # files look like: [<sequence>/]frame-<frame_id:6 digits>.<suffix>.<ext>
    d7s_filename_re = re.compile(
        r'((?P<sequence>.+)/)?frame-(?P<frame_id>\d{6})\.(?P<suffix>\w*)\.(?P<ext>\w*)')
    # populate all relevant files
    d7s_filenames = (path_secure(path.relpath(path.join(dp, fn), d7scenes_path))
                     for dp, _, fs in os.walk(d7scenes_path) for fn in fs)

    logger.info('populating 7-scenes files ...')
    # keep only files matching the pattern, with their parsed attributes
    d7s_filenames = {filename: d7s_filename_re.search(filename).groupdict()
                     for filename in sorted(d7s_filenames)
                     if d7s_filename_re.search(filename)}

    # reorg as shot[seq, id] = {color: , depth: , pose: , ...}
    shots = {}
    for timestamp, (filename, file_attribs) in enumerate(d7s_filenames.items()):
        shot_id = (file_attribs.get('sequence'), file_attribs['frame_id'])
        shots.setdefault(shot_id, {})[file_attribs['suffix']] = filename

    # fake timestamps: one per shot, in sorted-filename order
    for timestamp, shot_id in enumerate(shots):
        shots[shot_id]['timestamp'] = timestamp

    # if given, filter partition
    if partition is not None:
        # read the authors split file
        partition_filepath = path.join(d7scenes_path, PARTITION_FILENAMES[partition])
        if not path.isfile(partition_filepath):
            raise FileNotFoundError(f'partition file is missing: {partition_filepath}.')

        with open(partition_filepath, 'rt') as file:
            # split file lines look like 'sequenceN'; convert to 'seq-NN'
            split_sequences = [f'seq-{int(seq.strip()[len("sequence"):]):02}'
                               for seq in file.readlines()]
        assert len(split_sequences) > 0
        # filter out
        shots = {(seq, frame): shot
                 for (seq, frame), shot in shots.items()
                 if seq in split_sequences}

    if len(shots) == 0:
        raise FileNotFoundError('no file found: make sure the path to 7scenes sequence is valid.')

    # eg. shots['seq-01', '000000'] =
    #       {
    #           'color': 'seq-01/frame-000000.color.jpg',
    #           'depth': 'seq-01/frame-000000.depth.png',
    #           'pose': 'seq-01/frame-000000.pose.txt',
    #           'timestamp': 0}

    # images + depth maps
    logger.info('populating image and depth maps files ...')
    snapshots = kapture.RecordsCamera()
    depth_maps = kapture.RecordsDepth()
    for shot in shots.values():
        snapshots[shot['timestamp'], RGB_SENSOR_ID] = shot['color']
        kapture_depth_map_filename = shot['depth'][:-len('.png')]  # kapture depth files are not png
        depth_maps[shot['timestamp'], DEPTH_SENSOR_ID] = kapture_depth_map_filename
        kapture_registered_depth_map_filename = shot['depth'][:-len('.png')] + '.reg'  # kapture depth files are not png
        depth_maps[shot['timestamp'], REG_DEPTH_SENSOR_ID] = kapture_registered_depth_map_filename

    # poses
    logger.info('import poses files ...')
    trajectories = kapture.Trajectories()
    for shot in shots.values():
        pose_filepath = path.join(d7scenes_path, shot['pose'])
        pose_mat = np.loadtxt(pose_filepath)  # camera-to-world, 4×4 matrix in homogeneous coordinates
        rotation_mat = pose_mat[0:3, 0:3]
        position_vec = pose_mat[0:3, 3]
        rotation_quat = quaternion.from_rotation_matrix(rotation_mat)
        pose_world_from_cam = kapture.PoseTransform(r=rotation_quat, t=position_vec)
        # kapture trajectories store camera-from-world poses
        pose_cam_from_world = pose_world_from_cam.inverse()
        trajectories[shot['timestamp'], RGBD_SENSOR_ID] = pose_cam_from_world

    # sensors
    """
    From authors: The RGB and depth camera have not been calibrated and we can’t provide calibration parameters at the
    moment. The recorded frames correspond to the raw, uncalibrated camera images. In the KinectFusion pipeline we used
    the following default intrinsics for the depth camera: Principle point (320,240), Focal length (585,585).
    ----
    We use the extr. kinect camera parameters from
    https://projet.liris.cnrs.fr/voir/activities-dataset/kinect-calibration.html.
    """
    sensors = kapture.Sensors()
    # camera_type = kapture.CameraType.OPENCV
    # camera_params = [640, 480, 5.2161910696979987e+02, 5.2132946256749767e+02, 3.1755491910920682e+02,
    #                  2.5921654718027673e+02, 2.5673002693536984e-01, -9.3976085633794137e-01,
    #                  -1.8605549188751580e-03, -2.2232238578189420e-03]  # w, h, f, cx, cy, k1, k2, p1, p2, k3
    camera_type = kapture.CameraType.SIMPLE_PINHOLE
    # camera_params = [640, 480, 5.2161910696979987e+02, 5.2132946256749767e+02, 3.1755491910920682e+02,
    #                  2.5921654718027673e+02]  # w, h, fx, fy, cx, cy
    camera_params = [640, 480, 525, 320, 240]  # w, h, f, cx, cy
    sensors[RGB_SENSOR_ID] = kapture.Camera(
        name=RGB_SENSOR_ID,
        camera_type=camera_type,
        camera_params=camera_params
    )
    # depth_camera_type = kapture.CameraType.OPENCV
    # depth_camera_params = [640, 480, 5.8818670481438744e+02, 5.8724220649505514e+02, 3.1076280589210484e+02,
    #                        2.2887144980135292e+02, -1.8932947734719333e-01, 1.1358015104098631e+00,
    #                        -4.4260345347128536e-03, -5.4869578635708153e-03,
    #                        -2.2460143607712921e+00]  # w, h, f, cx, cy, k1, k2, p1, p2, k3
    depth_camera_type = kapture.CameraType.SIMPLE_PINHOLE
    # depth_camera_params = [640, 480, 5.8818670481438744e+02, 5.8724220649505514e+02, 3.1076280589210484e+02,
    #                        2.2887144980135292e+02]  # w, h, fx, fy, cx, cy
    depth_camera_params = [640, 480, 585, 320, 240]  # w, h, f, cx, cy
    sensors[DEPTH_SENSOR_ID] = kapture.Camera(
        name=DEPTH_SENSOR_ID,
        camera_type=depth_camera_type,
        camera_params=depth_camera_params,
        sensor_type='depth'
    )
    # the registered depth shares the RGB camera intrinsics (it is resampled into the RGB frame)
    sensors[REG_DEPTH_SENSOR_ID] = kapture.Camera(
        name=REG_DEPTH_SENSOR_ID,
        camera_type=depth_camera_type,
        camera_params=camera_params,
        sensor_type='depth'
    )

    # bind camera and depth sensor into a rig
    # R, T: extrinsic transform between the kinect RGB and depth sensors (see link above)
    R = np.array([[9.9996518012567637e-01, 2.6765126468950343e-03, -7.9041012313000904e-03],
                  [-2.7409311281316700e-03, 9.9996302803027592e-01, -8.1504520778013286e-03],
                  [7.8819942130445332e-03, 8.1718328771890631e-03, 9.9993554558014031e-01]])
    T = np.array([-2.5558943178152542e-02, 1.0109636268061706e-04, 2.0318321729487039e-03])
    Rt = np.vstack((np.hstack((R, T.reshape(3, 1))), np.array([0, 0, 0, 1])))
    logger.info('building rig with camera and depth sensor ...')
    rigs = kapture.Rigs()
    rigs[RGBD_SENSOR_ID, RGB_SENSOR_ID] = kapture.PoseTransform(quaternion.from_rotation_matrix(R), T)
    rigs[RGBD_SENSOR_ID, REG_DEPTH_SENSOR_ID] = kapture.PoseTransform(quaternion.from_rotation_matrix(R), T)
    rigs[RGBD_SENSOR_ID, DEPTH_SENSOR_ID] = kapture.PoseTransform()

    # import (copy) image files.
    logger.info('copying image files ...')
    image_filenames = [f for _, _, f in kapture.flatten(snapshots)]
    import_record_data_from_dir_auto(d7scenes_path, kapture_dir_path, image_filenames, images_import_method)

    # import (copy) depth map files.
    logger.info('converting depth files ...')
    depth_map_filenames = kapture.io.records.records_to_filepaths(depth_maps, kapture_dir_path)
    hide_progress = logger.getEffectiveLevel() > logging.INFO
    for depth_map_filename, depth_map_filepath_kapture in tqdm(depth_map_filenames.items(),
                                                               disable=hide_progress):
        if '.reg' in depth_map_filename:
            # .reg entries are generated below from their source depth map
            continue
        depth_map_filepath_7scenes = path.join(d7scenes_path, depth_map_filename + '.png')
        depth_map = np.array(Image.open(depth_map_filepath_7scenes))
        # change invalid depth from 65535 to 0
        depth_map[depth_map == 65535] = 0
        # depth maps is in mm in 7scenes, convert it to meters
        depth_map = depth_map.astype(np.float32) * 1.0e-3
        kapture.io.records.records_depth_to_file(depth_map_filepath_kapture, depth_map)
        # register depth to rgb
        reg_depth_map = register_depth(get_K(depth_camera_type, depth_camera_params),
                                       get_K(camera_type, camera_params),
                                       Rt, depth_map, camera_params[0], camera_params[1])
        kapture.io.records.records_depth_to_file(depth_map_filepath_kapture + '.reg', reg_depth_map)

    # pack into kapture format
    imported_kapture = kapture.Kapture(
        records_camera=snapshots,
        records_depth=depth_maps,
        rigs=rigs,
        trajectories=trajectories,
        sensors=sensors)
    logger.info('writing imported data ...')
    kapture_to_dir(kapture_dir_path, imported_kapture)
def colmap_localize_from_loaded_data(kapture_data: kapture.Kapture,
                                     kapture_path: str,
                                     tar_handlers: Optional[TarCollection],
                                     colmap_path: str,
                                     input_database_path: str,
                                     input_reconstruction_path: str,
                                     colmap_binary: str,
                                     keypoints_type: Optional[str],
                                     use_colmap_matches_importer: bool,
                                     image_registrator_options: List[str],
                                     skip_list: List[str],
                                     force: bool) -> None:
    """
    Localize images on a colmap model with the kapture data.

    The pipeline: (1) import missing cameras/images/keypoints/matches from the
    kapture data into a copy of the map's colmap database, (2) run geometric
    verification, (3) run the colmap image registrator against the input
    reconstruction, (4) optionally export the result to text.

    :param kapture_data: kapture data to use
    :param kapture_path: path to the kapture to use
    :param tar_handlers: collection of preloaded tar archives
    :param colmap_path: path to the colmap build
    :param input_database_path: path to the map colmap.db
    :param input_reconstruction_path: path to the map reconstruction folder
    :param colmap_binary: path to the colmap binary executable
    :param keypoints_type: type of keypoints, name of the keypoints subfolder
    :param use_colmap_matches_importer: when True, delegate two-view geometry to colmap matches_importer
    :param image_registrator_options: options for the image registrator
    :param skip_list: list of steps to skip
    :param force: Silently overwrite kapture files if already exists.
    """
    os.makedirs(colmap_path, exist_ok=True)

    if not (kapture_data.records_camera and kapture_data.sensors and kapture_data.keypoints and kapture_data.matches):
        raise ValueError('records_camera, sensors, keypoints, matches are mandatory')
    # localization recomputes poses: any trajectories in the input are discarded
    if kapture_data.trajectories:
        logger.warning("Input data contains trajectories: they will be ignored")
        kapture_data.trajectories.clear()
    else:
        kapture_data.trajectories = kapture.Trajectories()

    # COLMAP does not fully support rigs.
    if kapture_data.rigs is not None and kapture_data.trajectories is not None:
        # make sure, rigs are not used in trajectories.
        logger.info('remove rigs notation.')
        rigs_remove_inplace(kapture_data.trajectories, kapture_data.rigs)
        kapture_data.rigs.clear()

    # Prepare output
    # Set fixed name for COLMAP database
    colmap_db_path = path.join(colmap_path, 'colmap.db')
    image_list_path = path.join(colmap_path, 'images.list')
    reconstruction_path = path.join(colmap_path, "reconstruction")
    if 'delete_existing' not in skip_list:
        safe_remove_file(colmap_db_path, force)
        safe_remove_file(image_list_path, force)
        safe_remove_any_path(reconstruction_path, force)
    os.makedirs(reconstruction_path, exist_ok=True)

    # Copy colmap db to output (keep an existing one: it may already contain imported data)
    if not os.path.exists(colmap_db_path):
        shutil.copy(input_database_path, colmap_db_path)

    # find correspondences between the colmap db and the kapture data
    # images_all: image_path -> (timestamp, kapture camera id)
    images_all = {image_path: (ts, cam_id)
                  for ts, shot in kapture_data.records_camera.items()
                  for cam_id, image_path in shot.items()}
    colmap_db = COLMAPDatabase.connect(colmap_db_path)
    colmap_image_ids = database_extra.get_colmap_image_ids_from_db(colmap_db)
    colmap_images = database_extra.get_images_from_database(colmap_db)
    colmap_db.close()

    # dict ( kapture_camera -> colmap_camera_id )
    colmap_camera_ids = {images_all[image_path][1]: colmap_cam_id
                         for image_path, colmap_cam_id in colmap_images
                         if image_path in images_all}
    # only images absent from the colmap db need to be imported
    images_to_add = {image_path: value
                     for image_path, value in images_all.items()
                     if image_path not in colmap_image_ids}
    flatten_images_to_add = [(ts, kapture_cam_id, image_path)
                             for image_path, (ts, kapture_cam_id) in images_to_add.items()]

    if 'import_to_db' not in skip_list:
        logger.info("Step 1: Add precomputed keypoints and matches to colmap db")
        if keypoints_type is None:
            keypoints_type = try_get_only_key_from_collection(kapture_data.keypoints)
        assert keypoints_type is not None
        assert keypoints_type in kapture_data.keypoints
        assert keypoints_type in kapture_data.matches

        # cameras referenced by new images but not yet in the colmap db
        cameras_to_add = kapture.Sensors()
        for _, (_, kapture_cam_id) in images_to_add.items():
            if kapture_cam_id not in colmap_camera_ids:
                kapture_cam = kapture_data.sensors[kapture_cam_id]
                cameras_to_add[kapture_cam_id] = kapture_cam
        colmap_db = COLMAPDatabase.connect(colmap_db_path)
        colmap_added_camera_ids = database_extra.add_cameras_to_database(cameras_to_add, colmap_db)
        colmap_camera_ids.update(colmap_added_camera_ids)

        colmap_added_image_ids = database_extra.add_images_to_database_from_flatten(
            colmap_db, flatten_images_to_add, kapture_data.trajectories, colmap_camera_ids)
        colmap_image_ids.update(colmap_added_image_ids)

        colmap_image_ids_reversed = {v: k for k, v in colmap_image_ids.items()}  # colmap_id : name

        # add new features
        # only insert keypoints for images that do not have them in the db yet
        colmap_keypoints = database_extra.get_keypoints_set_from_database(colmap_db, colmap_image_ids_reversed)
        keypoints_all = kapture_data.keypoints[keypoints_type]
        keypoints_to_add = {name for name in keypoints_all if name not in colmap_keypoints}
        keypoints_to_add = kapture.Keypoints(keypoints_all.type_name,
                                             keypoints_all.dtype,
                                             keypoints_all.dsize,
                                             keypoints_to_add)
        database_extra.add_keypoints_to_database(colmap_db, keypoints_to_add,
                                                 keypoints_type, kapture_path,
                                                 tar_handlers,
                                                 colmap_image_ids)

        # add new matches
        colmap_matches = kapture.Matches(
            database_extra.get_matches_set_from_database(colmap_db, colmap_image_ids_reversed))
        # normalize so kapture pairs and db pairs compare with the same ordering
        colmap_matches.normalize()
        matches_all = kapture_data.matches[keypoints_type]
        matches_to_add = kapture.Matches({pair for pair in matches_all if pair not in colmap_matches})
        # two-view geometry is exported here only if colmap matches_importer will not compute it
        database_extra.add_matches_to_database(colmap_db, matches_to_add,
                                               keypoints_type, kapture_path,
                                               tar_handlers,
                                               colmap_image_ids,
                                               export_two_view_geometry=not use_colmap_matches_importer)
        colmap_db.close()

    if use_colmap_matches_importer:
        logger.info('Step 2: Run geometric verification')
        logger.debug('running colmap matches_importer...')
        if keypoints_type is None:
            keypoints_type = try_get_only_key_from_collection(kapture_data.matches)
        assert keypoints_type is not None
        assert keypoints_type in kapture_data.matches
        # compute two view geometry
        colmap_lib.run_matches_importer_from_kapture_matches(
            colmap_binary,
            colmap_use_cpu=True,
            colmap_gpu_index=None,
            colmap_db_path=colmap_db_path,
            kapture_matches=kapture_data.matches[keypoints_type],
            force=force)
    else:
        logger.info('Step 2: Run geometric verification - skipped')

    if 'image_registrator' not in skip_list:
        logger.info("Step 3: Run image_registrator")
        # run image_registrator
        colmap_lib.run_image_registrator(
            colmap_binary,
            colmap_db_path,
            input_reconstruction_path,
            reconstruction_path,
            image_registrator_options
        )

    # run model_converter
    if 'model_converter' not in skip_list:
        logger.info("Step 4: Export reconstruction results to txt")
        colmap_lib.run_model_converter(
            colmap_binary,
            reconstruction_path,
            reconstruction_path
        )
def import_openmvg_cameras(input_json) -> kapture.Sensors:  # noqa: C901
    """
    Build kapture sensors from the 'intrinsics' section of an openMVG sfm_data json.

    :param input_json: deserialized openMVG sfm_data json content
    :return: kapture sensors (cameras), keyed by the openMVG intrinsic key as a string
    """
    sensors = kapture.Sensors()
    if not input_json.get(JSON_KEY.INTRINSICS):
        return sensors
    logger.info('Importing intrinsics')
    # cereal polymorphic serialization: a type name appears once, then is referenced by id
    known_type_names = {}
    for intrinsic in input_json[JSON_KEY.INTRINSICS]:
        intrinsic_value = intrinsic[JSON_KEY.VALUE]
        if JSON_KEY.POLYMORPHIC_NAME in intrinsic_value:
            # first occurrence of this camera type: remember its name for later ids
            polymorphic_id = intrinsic_value[JSON_KEY.POLYMORPHIC_ID] & GET_ID_MASK
            known_type_names[polymorphic_id] = intrinsic_value[JSON_KEY.POLYMORPHIC_NAME]
            logger.debug("New camera_type: " + known_type_names[polymorphic_id])
        else:
            if JSON_KEY.POLYMORPHIC_ID not in intrinsic_value:
                raise ValueError(
                    f'{JSON_KEY.POLYMORPHIC_ID} is missing (intrinsics)')
            polymorphic_id = intrinsic_value[JSON_KEY.POLYMORPHIC_ID]

        if polymorphic_id not in known_type_names:
            raise ValueError(f'Unknown polymorphic_id {polymorphic_id}')

        camera_model = CameraModel(known_type_names[polymorphic_id])
        data = intrinsic_value[JSON_KEY.PTR_WRAPPER][JSON_KEY.DATA]

        if camera_model == CameraModel.pinhole:
            # w, h, f, cx, cy
            camera = kapture.Camera(kapture.CameraType.SIMPLE_PINHOLE, [
                int(data[JSON_KEY.WIDTH]),
                int(data[JSON_KEY.HEIGHT]),
                data[JSON_KEY.FOCAL_LENGTH],
                data[JSON_KEY.PRINCIPAL_POINT][0],
                data[JSON_KEY.PRINCIPAL_POINT][1],
            ])
        elif camera_model == CameraModel.pinhole_radial_k1:
            # w, h, f, cx, cy, k
            camera = kapture.Camera(kapture.CameraType.SIMPLE_RADIAL, [
                int(data[JSON_KEY.WIDTH]),
                int(data[JSON_KEY.HEIGHT]),
                data[JSON_KEY.FOCAL_LENGTH],
                data[JSON_KEY.PRINCIPAL_POINT][0],
                data[JSON_KEY.PRINCIPAL_POINT][1],
                data[JSON_KEY.DISTO_K1][0]
            ])
        elif camera_model == CameraModel.pinhole_radial_k3:
            # w, h, f, cx, cy, k1, k2, k3
            camera = kapture.Camera(kapture.CameraType.RADIAL, [
                int(data[JSON_KEY.WIDTH]),
                int(data[JSON_KEY.HEIGHT]),
                data[JSON_KEY.FOCAL_LENGTH],
                data[JSON_KEY.PRINCIPAL_POINT][0],
                data[JSON_KEY.PRINCIPAL_POINT][1],
                data[JSON_KEY.DISTO_K3][0],
                data[JSON_KEY.DISTO_K3][1]
            ])
            # data[DISTO_K3][2] ignored: radial model has two distortion params, openMVG's has three
        elif camera_model == CameraModel.pinhole_brown_t2:
            # w, h, f, cx, cy, k1, k2, k3, t1, t2
            v0 = data[JSON_KEY.VALUE0]
            disto = data[JSON_KEY.DISTO_T2]
            if float(disto[2]) != 0:
                # k3 is non-null: FULL_OPENCV is needed to carry it, otherwise OPENCV suffices
                # w, h, fx, fy, cx, cy, k1, k2, p1, p2, k3, k4, k5, k6
                camera = kapture.Camera(kapture.CameraType.FULL_OPENCV, [
                    int(v0[JSON_KEY.WIDTH]),
                    int(v0[JSON_KEY.HEIGHT]),
                    v0[JSON_KEY.FOCAL_LENGTH],
                    v0[JSON_KEY.FOCAL_LENGTH],
                    v0[JSON_KEY.PRINCIPAL_POINT][0],
                    v0[JSON_KEY.PRINCIPAL_POINT][1],
                    disto[0], disto[1], disto[3], disto[4], disto[2],
                    0, 0, 0
                ])
            else:
                # w, h, fx, fy, cx, cy, k1, k2, p1, p2
                camera = kapture.Camera(kapture.CameraType.OPENCV, [
                    int(v0[JSON_KEY.WIDTH]),
                    int(v0[JSON_KEY.HEIGHT]),
                    v0[JSON_KEY.FOCAL_LENGTH],
                    v0[JSON_KEY.FOCAL_LENGTH],
                    v0[JSON_KEY.PRINCIPAL_POINT][0],
                    v0[JSON_KEY.PRINCIPAL_POINT][1],
                    disto[0], disto[1], disto[3], disto[4]
                ])
        elif camera_model == CameraModel.fisheye:
            logger.warning(
                "OpenMVG fisheye models are not compatible with OpenCV."
                " Using SIMPLE_RADIAL_FISHEYE and forcing distortion to 0")
            # w, h, f, cx, cy, k
            v0 = data[JSON_KEY.VALUE0]
            camera = kapture.Camera(kapture.CameraType.SIMPLE_RADIAL_FISHEYE, [
                int(v0[JSON_KEY.WIDTH]),
                int(v0[JSON_KEY.HEIGHT]),
                v0[JSON_KEY.FOCAL_LENGTH],
                v0[JSON_KEY.PRINCIPAL_POINT][0],
                v0[JSON_KEY.PRINCIPAL_POINT][1],
                0
            ])
        else:
            raise ValueError(f'Camera model {camera_model} not supported')

        sensors[str(intrinsic[JSON_KEY.KEY])] = camera

    return sensors
def test_evaluation(self):
    """Check evaluate() position/rotation errors and fill_bins() counts on a toy trajectory set."""
    def inverse_pose(angle_deg, position):
        # pose built from an X-axis euler rotation + translation, stored inverted
        rot = quaternion.from_euler_angles(np.deg2rad(angle_deg), 0, 0)
        return kapture.PoseTransform(r=rot, t=position).inverse()

    pose_gt = inverse_pose(110.0, [1.658, 0, 0])
    pose_a = inverse_pose(111.0, [2.658, 0, 0])
    pose_b = inverse_pose(108.0, [1.758, 0, 0])
    pose_c = inverse_pose(10.0, [10.1, 0, 0])
    pose_d = inverse_pose(110.0, [2., 0, 0])
    # pose_e has no orientation; its translation is given directly in inverted form
    pose_e = kapture.PoseTransform(r=None, t=[-x for x in [6.658, 0, 0]])

    kdata = kapture.Kapture(sensors=kapture.Sensors(),
                            records_camera=kapture.RecordsCamera(),
                            trajectories=kapture.Trajectories())
    kdata.sensors['cam0'] = kapture.Camera(
        kapture.CameraType.UNKNOWN_CAMERA, [25, 13])
    for ts, image_name in enumerate(['a', 'b', 'c', 'd', 'e']):
        kdata.records_camera[(ts, 'cam0')] = image_name
    for ts, pose in enumerate([pose_a, pose_b, pose_c, pose_d]):
        kdata.trajectories[(ts, 'cam0')] = pose

    # kdata2 adds a position-only pose and an extra image with no pose at all
    kdata2 = copy.deepcopy(kdata)
    kdata2.trajectories[(4, 'cam0')] = pose_e
    kdata2.records_camera[(5, 'cam0')] = 'f'

    # ground truth: every image at the same reference pose
    kdata_gt = copy.deepcopy(kdata2)
    for ts in range(6):
        kdata_gt.trajectories[(ts, 'cam0')] = pose_gt

    kdata_list = [kdata, kdata2, kdata_gt]
    intersection = {'a', 'b', 'c', 'd', 'e'}

    # expected (name, position error, rotation error) for the fully-posed images
    expected_abcd = [('a', 1.0, 1.0),
                     ('b', 0.1, 2.0),
                     ('c', 8.442, 100.0),
                     ('d', 0.342, 0.0)]

    result1 = evaluate(kdata, kdata_gt, intersection)
    self.assertEqual(len(result1), 5)
    for entry, (name, pos_err, rot_err) in zip(result1, expected_abcd):
        self.assertEqual(entry[0], name)
        self.assertAlmostEqual(entry[1], pos_err)
        self.assertAlmostEqual(entry[2], rot_err)
    # image 'e' has no pose in kdata: both errors undefined
    self.assertEqual(result1[4][0], 'e')
    self.assertTrue(math.isnan(result1[4][1]))
    self.assertTrue(math.isnan(result1[4][2]))

    result2 = evaluate(kdata2, kdata_gt, intersection)
    self.assertEqual(len(result2), 5)
    for entry, (name, pos_err, rot_err) in zip(result2, expected_abcd):
        self.assertEqual(entry[0], name)
        self.assertAlmostEqual(entry[1], pos_err)
        self.assertAlmostEqual(entry[2], rot_err)
    # 'e' now has a position (error 5.0) but still no orientation
    self.assertEqual(result2[4][0], 'e')
    self.assertAlmostEqual(result2[4][1], 5.0)
    self.assertTrue(math.isnan(result2[4][2]))

    def check_bins(bins, expected):
        # expected: list of (position threshold, rotation threshold, count)
        self.assertEqual(len(bins), len(expected))
        for (pos_thr, rot_thr, count), (exp_pos, exp_rot, exp_count) in zip(bins, expected):
            self.assertEqual(pos_thr, exp_pos)
            if isinstance(exp_rot, float) and math.isnan(exp_rot):
                self.assertTrue(math.isnan(rot_thr))
            else:
                self.assertEqual(rot_thr, exp_rot)
            self.assertEqual(count, exp_count)

    check_bins(fill_bins(result1, [(0.9, 5), (10, 105)]),
               [(0.9, 5, 2), (10, 105, 4)])
    # same bins computed a second time must be identical
    check_bins(fill_bins(result1, [(0.9, 5), (10, 105)]),
               [(0.9, 5, 2), (10, 105, 4)])
    # NaN rotation threshold: only the position threshold filters
    check_bins(fill_bins(result2, [(0.9, math.nan), (10, math.nan)]),
               [(0.9, math.nan, 2), (10, math.nan, 5)])
    # negative rotation threshold behaves like "ignore rotation" as well
    check_bins(fill_bins(result2, [(0.9, -1), (10, -1)]),
               [(0.9, -1, 2), (10, -1, 5)])