Example 1
 def test_kapture_write_read(self):
     kdata_expected = kapture.Kapture()
     kdata_expected.sensors = kapture.Sensors()
     kdata_expected.trajectories = kapture.Trajectories()
     kdata_expected.rigs = kapture.Rigs()
     csv.kapture_to_dir(self._tempdir.name, kdata_expected)
     kdata_actual = csv.kapture_from_dir(self._tempdir.name)
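The same round trip can also be sketched outside a test class. A minimal sketch, assuming kapture is installed and kapture.io.csv provides the kapture_to_dir / kapture_from_dir pair used above:

import tempfile
import kapture
import kapture.io.csv as csv

with tempfile.TemporaryDirectory() as tmpdir:
    kdata = kapture.Kapture()
    kdata.sensors = kapture.Sensors()            # only defined parts are written
    kdata.trajectories = kapture.Trajectories()
    csv.kapture_to_dir(tmpdir, kdata)            # writes the sensors and trajectories csv files
    kdata_back = csv.kapture_from_dir(tmpdir)    # reads them back into a new Kapture object
    assert kdata_back.sensors is not None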
Example 2
    def test_init_wifi(self):
        records_wifi = kapture.RecordsWifi()
        timestamp0, timestamp1 = 0, 1
        device_id0, device_id1 = 'AC01324954_WIFI', 'AC01324955_WIFI'
        bssid, ssid = '68:72:51:80:52:df', 'M1X_PicoM2'
        rssi = -33
        freq = 2417
        scan_time_start, scan_time_end = 1555398770280, 1555398770290
        # assign

        wifi_data = kapture.RecordWifi({bssid: kapture.RecordWifiSignal(ssid=ssid, rssi=rssi, frequency=freq,
                                                                        scan_time_start=scan_time_start,
                                                                        scan_time_end=scan_time_end)})
        records_wifi[timestamp0, device_id0] = wifi_data
        kapture_data = kapture.Kapture(records_wifi=records_wifi)
        self.assertEqual(1, len(kapture_data.records_wifi.keys()))
        self.assertEqual(1, len(kapture_data.records_wifi.key_pairs()))
        self.assertIn(timestamp0, kapture_data.records_wifi)
        self.assertIn(device_id0, kapture_data.records_wifi[timestamp0])
        self.assertIn((timestamp0, device_id0), kapture_data.records_wifi)
        self.assertEqual(wifi_data, kapture_data.records_wifi[timestamp0, device_id0])
        kapture_data.records_wifi[timestamp1, device_id1] = wifi_data
        self.assertEqual(2, len(kapture_data.records_wifi.keys()))
        self.assertEqual(2, len(kapture_data.records_wifi.key_pairs()))
        kapture_data.records_wifi[timestamp0][device_id1] = wifi_data
        self.assertEqual(2, len(kapture_data.records_wifi.keys()))
        self.assertEqual(3, len(kapture_data.records_wifi.key_pairs()))
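The (timestamp, device_id) double index used above can also be traversed flat with kapture.flatten, as later examples do. A small sketch, assuming records_wifi was filled as in the test:

# iterate every (timestamp, device_id, record) triplet in sorted order
for timestamp, device_id, record in kapture.flatten(records_wifi, is_sorted=True):
    print(timestamp, device_id, record)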
Example 3
 def test_kapture_format_version_from_disk(self):
     kapture_data = kapture.Kapture()
     kapture_data.sensors = kapture.Sensors()
     csv.kapture_to_dir(self._tempdir.name, kapture_data)
     version = csv.kapture_format_version(self._tempdir.name)
     self.assertEqual(csv.current_format_version(), version,
                      "We have the current version")
Example 4
 def test_init_wifi(self):
     records_wifi = kapture.RecordsWifi()
     timestamp0, timestamp1 = 0, 1
     device_id0, device_id1 = 'AC01324954_WIFI', 'AC01324955_WIFI'
     bssid = '68:72:51:80:52:df'
     rssi = -33
     freq = 2417
     scan_time = 1555398770280
     visible_name = 'M1X_PicoM2'
     # assign
     wifi_data = {}
     wifi_data[bssid] = kapture.RecordWifi(rssi, freq, scan_time,
                                           visible_name)
     records_wifi[timestamp0, device_id0] = wifi_data
     kapture_data = kapture.Kapture(records_wifi=records_wifi)
     self.assertEqual(1, len(kapture_data.records_wifi.keys()))
     self.assertEqual(1, len(kapture_data.records_wifi.key_pairs()))
     self.assertIn(timestamp0, kapture_data.records_wifi)
     self.assertIn(device_id0, kapture_data.records_wifi[timestamp0])
     self.assertIn((timestamp0, device_id0), kapture_data.records_wifi)
     self.assertEqual(wifi_data, kapture_data.records_wifi[timestamp0,
                                                           device_id0])
     kapture_data.records_wifi[timestamp1, device_id1] = wifi_data
     self.assertEqual(2, len(kapture_data.records_wifi.keys()))
     self.assertEqual(2, len(kapture_data.records_wifi.key_pairs()))
     kapture_data.records_wifi[timestamp0][device_id1] = wifi_data
     self.assertEqual(2, len(kapture_data.records_wifi.keys()))
     self.assertEqual(3, len(kapture_data.records_wifi.key_pairs()))
Example 5
def generate_priors_for_reconstruction(
        kapture_data: kapture.Kapture, database: COLMAPDatabase,
        path_to_priors_for_reconstruction: str) -> None:
    """
    Generate priors for a reconstruction using cameras, images and trajectories.

    :param kapture_data: kapture data
    :param database: colmap database.
    :param path_to_priors_for_reconstruction: path to the priors file
    """
    colmap_camera_ids = get_colmap_camera_ids_from_db(
        database, kapture_data.records_camera)
    colmap_image_ids = get_colmap_image_ids_from_db(database)
    kapture_data_copy = kapture.Kapture(
        sensors=kapture_data.sensors,
        records_camera=kapture_data.records_camera,
        trajectories=kapture_data.trajectories,
        rigs=kapture_data.rigs)
    # in priors, do not copy keypoints, points3d
    export_to_colmap_txt(
        path_to_priors_for_reconstruction,
        kapture_data_copy,
        "",  # kapture_data_copy do not have binaries so path is irrelevant
        colmap_camera_ids,
        colmap_image_ids)
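A hypothetical call of the helper above, assuming a COLMAP database already populated from the same kapture data (file names are placeholders):

# open the existing colmap database and write the priors next to it
database = COLMAPDatabase.connect('colmap.db')
generate_priors_for_reconstruction(kapture_data, database, 'priors_for_reconstruction')
database.close()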
Example 6
    def test_kapture_write(self):
        kdata = kapture.Kapture()

        # test it is not writing files for undefined parts
        csv.kapture_to_dir(self._tempdir.name, kdata)
        self.assertFalse(
            path.exists(path.join(self._tempdir.name, 'sensors',
                                  'sensors.txt')))
        self.assertFalse(
            path.exists(
                path.join(self._tempdir.name, 'sensors', 'trajectories.txt')))
        self.assertFalse(
            path.exists(path.join(self._tempdir.name, 'sensors', 'rigs.txt')))

        # test it is actually writing files for parts
        kdata.sensors = kapture.Sensors()
        kdata.trajectories = kapture.Trajectories()
        kdata.rigs = kapture.Rigs()
        csv.kapture_to_dir(self._tempdir.name, kdata)
        self.assertTrue(
            path.exists(path.join(self._tempdir.name, 'sensors',
                                  'sensors.txt')))
        self.assertTrue(
            path.exists(
                path.join(self._tempdir.name, 'sensors', 'trajectories.txt')))
        self.assertTrue(
            path.exists(path.join(self._tempdir.name, 'sensors', 'rigs.txt')))
Example 7
    def test_init_camera(self):
        timestamp0, timestamp1 = 0, 1
        device_id0, device_id1 = 'cam0', 'cam1'
        records_camera = kapture.RecordsCamera()
        records_camera[timestamp0, device_id0] = 'cam0/image000.jpg'
        kapture_data = kapture.Kapture(records_camera=records_camera)
        self.assertEqual(1, len(kapture_data.records_camera.keys()))
        self.assertEqual(1, len(kapture_data.records_camera.key_pairs()))
        self.assertIn(timestamp0, kapture_data.records_camera)
        self.assertIn(device_id0, kapture_data.records_camera[timestamp0])
        self.assertIn((timestamp0, device_id0), kapture_data.records_camera)
        self.assertEqual('cam0/image000.jpg',
                         kapture_data.records_camera[timestamp0, device_id0])
        records_camera[timestamp1, device_id0] = 'cam0/image001.jpg'
        self.assertEqual(2, len(kapture_data.records_camera.keys()))
        self.assertEqual(2, len(kapture_data.records_camera.key_pairs()))
        kapture_data.records_camera[timestamp0][
            device_id1] = 'cam1/image000.jpg'
        self.assertEqual(2, len(kapture_data.records_camera.keys()))
        self.assertEqual(3, len(kapture_data.records_camera.key_pairs()))
        records_camera[timestamp1][device_id1] = 'cam1/image001.jpg'
        self.assertEqual(2, len(kapture_data.records_camera.keys()))
        self.assertEqual(4, len(kapture_data.records_camera.key_pairs()))
        self.assertEqual('cam0/image000.jpg',
                         kapture_data.records_camera[timestamp0, device_id0])
        self.assertEqual('cam1/image000.jpg',
                         kapture_data.records_camera[timestamp0, device_id1])
        self.assertEqual('cam0/image001.jpg',
                         kapture_data.records_camera[timestamp1, device_id0])
        self.assertEqual('cam1/image001.jpg',
                         kapture_data.records_camera[timestamp1, device_id1])

        self.assertNotIn((timestamp1, 'cam2'), kapture_data.records_camera)
        self.assertNotIn((2, device_id0), kapture_data.records_camera)
Example 8
def import_colmap_database(colmap_database_filepath: str,
                           kapture_dir_path: str,
                           no_geometric_filtering: bool = False,
                           skip_reconstruction: bool = False,
                           keypoint_name: str = 'SIFT',
                           descriptor_name: str = 'SIFT') -> kapture.Kapture:
    """
    Converts colmap database file to kapture data.
    If kapture_dir_path is given, it creates keypoints, descriptors, matches files (if any).

    :param colmap_database_filepath: path to colmap database file.
    :param kapture_dir_path: path to kapture directory. Is used to store keypoints, descriptors and matches files.
                            If not given (None), it is equivalent to skip_reconstruction == True.
    :param no_geometric_filtering:
    :param keypoint_name: name of the keypoints detector (by default, in colmap, it is SIFT, but can be imported)
    :param descriptor_name: name of the keypoints descriptor (by default, in colmap, it is SIFT, but can be imported)
    :param skip_reconstruction: skip the import of the kapture/reconstruction part,
                            ie. Keypoints, Descriptors, Matches.
    :return: kapture object
    """
    kapture_data = kapture.Kapture()

    logger.debug(f'loading colmap database {colmap_database_filepath}')
    db = COLMAPDatabase.connect(colmap_database_filepath)

    # Generate cameras
    logger.debug('parsing cameras in database.')
    kapture_data.sensors = get_cameras_from_database(db)

    # Images, Trajectories
    logger.debug('parsing images and trajectories in database.')
    kapture_data.records_camera, kapture_data.trajectories = get_images_and_trajectories_from_database(
        db)

    if kapture_dir_path is not None and not skip_reconstruction:
        os.makedirs(kapture_dir_path, exist_ok=True)

        # keypoints
        logger.debug('parsing keypoints in database...')
        kapture_data.keypoints = get_keypoints_from_database(
            db, kapture_data.records_camera, kapture_dir_path, keypoint_name)

        # descriptors
        logger.debug('parsing descriptors in database...')
        kapture_data.descriptors = get_descriptors_from_database(
            db, kapture_data.records_camera, kapture_dir_path, descriptor_name)

        # matches
        logger.debug('parsing matches in database...')
        kapture_data.matches = get_matches_from_database(
            db, kapture_data.records_camera, kapture_dir_path,
            no_geometric_filtering)

    db.close()
    return kapture_data
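A hypothetical call of the importer above, keeping only cameras, images and trajectories (paths are placeholders; kapture_to_dir comes from kapture.io.csv):

from kapture.io.csv import kapture_to_dir

# skip_reconstruction=True leaves keypoints, descriptors and matches out
kapture_data = import_colmap_database('colmap.db', '/path/to/kapture',
                                      skip_reconstruction=True)
kapture_to_dir('/path/to/kapture', kapture_data)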
Example 9
def import_image_folder(
        images_path: str,
        kapture_path: str,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.skip) -> None:
    """
    Imports the images of a folder to a kapture. This creates only images and cameras.

    :param images_path: path to directory containing the images.
    :param kapture_path: path to kapture root directory.
    :param force_overwrite_existing: Silently overwrite kapture files if they already exist.
    :param images_import_method: choose how to import actual image files.
    """
    os.makedirs(kapture_path, exist_ok=True)
    delete_existing_kapture_files(kapture_path,
                                  force_erase=force_overwrite_existing)

    cameras = kapture.Sensors()
    images = kapture.RecordsCamera()

    file_list = [
        os.path.relpath(os.path.join(dirpath, filename), images_path)
        for dirpath, dirs, filenames in os.walk(images_path)
        for filename in filenames
    ]
    file_list = sorted(file_list)

    logger.info('starting conversion...')
    for n, filename in enumerate(file_list):
        # test if file is a valid image
        try:
            # lazy load
            with Image.open(path.join(images_path, filename)) as im:
                width, height = im.size
                model_params = [width, height]
        except (OSError, PIL.UnidentifiedImageError):
            # It is not a valid image: skip it
            logger.info(f'Skipping invalid image file {filename}')
            continue

        camera_id = f'sensor{n}'
        images[(n, camera_id)] = path_secure(filename)  # don't forget windows
        cameras[camera_id] = kapture.Camera(kapture.CameraType.UNKNOWN_CAMERA,
                                            model_params)

    # import (copy) image files.
    logger.info('import image files ...')
    filename_list = [f for _, _, f in kapture.flatten(images)]
    import_record_data_from_dir_auto(images_path, kapture_path, filename_list,
                                     images_import_method)

    # pack into kapture format
    imported_kapture = kapture.Kapture(sensors=cameras, records_camera=images)
    logger.info('writing imported data...')
    kapture_to_dir(kapture_path, imported_kapture)
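For instance, importing a plain folder of photos could look like this (a sketch; paths are placeholders, and the default TransferAction.skip leaves the image files where they are):

# creates the sensors and records_camera files under /data/kapture
import_image_folder('/data/images', '/data/kapture', force_overwrite_existing=True)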
Example 10
def import_colmap_from_reconstruction_files(reconstruction_dirpath: str,
                                            kapture_dirpath: Optional[str],
                                            skip: Set[Type[Union[kapture.Keypoints,
                                                                 kapture.Points3d,
                                                                 kapture.Observations]]]
                                            ) -> kapture.Kapture:
    """
    Converts colmap reconstruction files to kapture data.
    If kapture_dirpath is given, keypoints files are created, and potentially their observations.

    :param reconstruction_dirpath:
    :param kapture_dirpath: path to kapture directory. Is used to store keypoints files.
                            If not given (None), keypoints are automatically skipped.
    :param skip: types to skip independently: Keypoints, Points3d or Observations.
                Note that Points3d and Observations are stored in the same file, so both must be skipped to avoid reading it.
    :return: kapture object
    """
    logger.debug(f'loading colmap reconstruction from:\n\t"{reconstruction_dirpath}"')
    if skip:
        logger.debug(f'loading colmap reconstruction skipping {", ".join(s.__name__ for s in skip)}')

    kapture_data = kapture.Kapture()
    reconstruction_file_paths = (path.join(reconstruction_dirpath, filename)
                                 for filename in ['cameras.txt', 'images.txt', 'points3D.txt'])
    colmap_cameras_filepath, colmap_images_filepath, colmap_points3d_filepath = reconstruction_file_paths

    proceed_keypoints = kapture.Keypoints not in skip and kapture_dirpath is not None
    proceed_points3d = kapture.Points3d not in skip and path.exists(colmap_points3d_filepath)
    proceed_observations = kapture.Observations not in skip and path.exists(colmap_points3d_filepath)

    if path.exists(colmap_cameras_filepath):
        logging.debug(f'parsing cameras from:\n\t"{path.basename(colmap_cameras_filepath)}"')
        kapture_data.sensors = import_from_colmap_cameras_txt(colmap_cameras_filepath)

    if path.exists(colmap_images_filepath):
        logging.debug(f'loading images from:\n\t"{path.basename(colmap_images_filepath)}"')
        kapture_dirpath_for_keypoints = kapture_dirpath if proceed_keypoints else None
        images, trajectories, keypoints = import_from_colmap_images_txt(
            colmap_images_filepath, kapture_dirpath_for_keypoints)

        kapture_data.records_camera = images
        kapture_data.trajectories = trajectories
        kapture_data.keypoints = keypoints

    if proceed_points3d or proceed_observations:
        assert kapture_data.records_camera is not None
        image_id_2_names = {ts: image_name
                            for ts, cam_id, image_name in kapture.flatten(kapture_data.records_camera, True)}
        logger.debug(f'parsing 3d points and observations from:\n\t"{path.basename(colmap_points3d_filepath)}"')
        points3d, observations = import_from_colmap_points3d_txt(colmap_points3d_filepath, image_id_2_names)
        kapture_data.points3d = points3d if proceed_points3d else None
        kapture_data.observations = observations if proceed_observations else None

    return kapture_data
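A hypothetical call that reads only cameras, images and trajectories from a COLMAP text model, skipping the heavier parts (paths are placeholders):

# passing None as kapture_dirpath makes the importer skip keypoints automatically
kapture_data = import_colmap_from_reconstruction_files(
    '/path/to/colmap/model', None,
    skip={kapture.Points3d, kapture.Observations})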
Example 11
 def test_init(self):
     lidar0 = kapture.Sensor('lidar', [])
     cam0 = kapture.Sensor('camera', [])
     sensors = kapture.Sensors()
     sensors['cam0'] = cam0
     kapture_data = kapture.Kapture(sensors=sensors)
     self.assertEqual(sensors, kapture_data.sensors)
     self.assertEqual(sensors['cam0'], kapture_data.sensors['cam0'])
     # assign
     sensors = kapture_data.sensors
     self.assertIsInstance(sensors, kapture.Sensors)
     kapture_data.sensors = sensors
     kapture_data.sensors['lidar0'] = lidar0
Example 12
    def test_init_camera(self):
        timestamp0, timestamp1 = 0, 1
        device_id0, device_id1 = 'cam0', 'cam1'
        record_cam0_image0 = 'cam0/image000.jpg'
        record_cam0_image1 = 'cam0/image001.jpg'
        record_cam1_image0 = 'cam1/image000.jpg'
        record_cam1_image1 = 'cam1/image001.jpg'
        # Test insertions
        records_camera = kapture.RecordsCamera()
        records_camera[timestamp0, device_id0] = record_cam0_image0
        kapture_data = kapture.Kapture(records_camera=records_camera)
        self.assertEqual(1, len(kapture_data.records_camera.keys()))
        self.assertEqual(1, len(kapture_data.records_camera.key_pairs()))
        self.assertIn(timestamp0, kapture_data.records_camera)
        self.assertIn(device_id0, kapture_data.records_camera[timestamp0])
        self.assertIn((timestamp0, device_id0), kapture_data.records_camera)
        self.assertEqual(record_cam0_image0,
                         kapture_data.records_camera[timestamp0, device_id0])
        records_camera[timestamp1, device_id0] = record_cam0_image1
        self.assertEqual(2, len(kapture_data.records_camera.keys()))
        self.assertEqual(2, len(kapture_data.records_camera.key_pairs()))
        kapture_data.records_camera[timestamp0][
            device_id1] = record_cam1_image0
        self.assertEqual(2, len(kapture_data.records_camera.keys()))
        self.assertEqual(3, len(kapture_data.records_camera.key_pairs()))
        records_camera[timestamp1][device_id1] = record_cam1_image1
        self.assertEqual(2, len(kapture_data.records_camera.keys()))
        self.assertEqual(4, len(kapture_data.records_camera.key_pairs()))
        self.assertEqual(record_cam0_image0,
                         kapture_data.records_camera[timestamp0, device_id0])
        self.assertEqual(record_cam1_image0,
                         kapture_data.records_camera[timestamp0, device_id1])
        self.assertEqual(record_cam0_image1,
                         kapture_data.records_camera[timestamp1, device_id0])
        self.assertEqual(record_cam1_image1,
                         kapture_data.records_camera[timestamp1, device_id1])

        self.assertNotIn((timestamp1, 'cam2'), kapture_data.records_camera)
        self.assertNotIn((2, device_id0), kapture_data.records_camera)
        self.assertEqual(kapture_data.records_camera.sensors_ids,
                         {device_id0, device_id1})

        # Test deletion
        del kapture_data.records_camera[(timestamp0, device_id0)]
        self.assertEqual(2, len(kapture_data.records_camera.keys()))
        self.assertEqual(3, len(kapture_data.records_camera.key_pairs()))
        del kapture_data.records_camera[(timestamp0, device_id1)]
        self.assertEqual(1, len(kapture_data.records_camera.keys()))
        self.assertEqual(2, len(kapture_data.records_camera.key_pairs()))
Example 13
    def save_to_kapture(self, trajectory_rig_id: Optional[str] = None) -> None:
        """
        Save the data in kapture format.

        :param trajectory_rig_id: the rig identifier of the trajectory points
        """
        # Convert pose info to trajectories
        if len(self.poses_info) > 0 and trajectory_rig_id is None:
            raise ValueError("Must provide rig identifier for trajectory")
        trajectories = kapture.Trajectories() if len(
            self.poses_info) > 0 else None
        for pose_info in self.poses_info:
            t = pose_info.timestamp.to_nsec()
            ros_translation = pose_info.pose6d.position
            translation = [
                ros_translation.x, ros_translation.y, ros_translation.z
            ]
            ros_rotation = pose_info.pose6d.orientation
            rotation = np.quaternion(ros_rotation.w, ros_rotation.x,
                                     ros_rotation.y, ros_rotation.z)
            # Transform the pose from the ROS body coordinate system defined here
            # https://www.ros.org/reps/rep-0103.html#axis-orientation
            # to the Kapture coordinate system

            # ros pose seems to be the inverse of the extrinsic matrix
            # i.e. world position and rig orientation with respect to the world axis
            pose6d = kapture.PoseTransform.compose([
                pose_kapture_from_ros,
                kapture.PoseTransform(rotation, translation).inverse(),
                pose_ros_from_kapture
            ])
            trajectories[(t, trajectory_rig_id)] = pose6d
        self.logger.info(f'Saving {len(list(flatten(trajectories)))} poses')
        # Convert image info to kapture image
        records_camera = kapture.RecordsCamera()
        for image_info in self.images_info:
            t = image_info.timestamp.to_nsec()
            records_camera[(t, image_info.camera_name)] = image_info.filename
        self.logger.info(
            f'Saving {len(list(flatten(records_camera)))} camera records')

        kapture_data = kapture.Kapture(rigs=self._rigs,
                                       sensors=self._sensors,
                                       records_camera=records_camera,
                                       trajectories=trajectories)
        self.logger.info(f'Saving to kapture {self._kapture_path}')
        kcsv.kapture_to_dir(self._kapture_path, kapture_data)
        self.logger.info('Done')
Example 14
    def test_as_dict(self):
        kapture_data = kapture.Kapture()
        # test empty
        members = kapture_data.as_dict()
        self.assertEqual(members, {})
        members = kapture_data.as_dict(keep_none=True)
        self.assertEqual(len(members), 18)
        self.assertTrue(all(member is None for member in members.values()))

        # test sensors only
        kapture_data.sensors = kapture.Sensors({'cam0': kapture.Sensor('camera', [])})
        members = kapture_data.as_dict()
        self.assertEqual(len(members), 1)
        self.assertEqual(members, {'sensors': kapture_data.sensors})
        members = kapture_data.as_dict(keep_none=True)
        self.assertEqual(len(members), 18)
        self.assertEqual(members['sensors'], kapture_data.sensors)
        self.assertTrue(all(member is None for name, member in members.items() if name != 'sensors'))
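Beyond the test, as_dict() gives a convenient way to loop over only the parts that are actually defined. A small sketch, assuming kapture_data was built as above:

# prints the name and type of every defined part, e.g. sensors -> Sensors
for name, part in kapture_data.as_dict().items():
    print(name, type(part).__name__)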
Example 15
def import_openmvg_sfm_data_json(
        sfm_data_json: Dict[str, Union[str, Dict]],
        kapture_images_path: str,
        image_action=TransferAction.skip) -> kapture.Kapture:
    """
    Imports an openMVG sfm_data json structure to a kapture object.
    Also copy, move or link the images files if necessary.

    :param sfm_data_json: the openmvg JSON parsed as a dictionary
    :param kapture_images_path: top directory to create the kapture images tree
    :param image_action: action to apply on images: link, copy, move or do nothing.
    :return: the constructed kapture object
    """

    data_root_path: str = ''

    if sfm_data_json[JSON_KEY.ROOT_PATH]:
        data_root_path = sfm_data_json[JSON_KEY.ROOT_PATH]
    elif image_action == TransferAction.skip:
        logger.warning('No root_path in sfm_data.')
    else:  # It is needed to execute an action with the image file
        raise ValueError(
            f"Missing root_path to do image action '{image_action.name}'")
    openmvg_images_dir = path.basename(data_root_path)

    # Imports all the data from the json file to kapture objects
    kapture_cameras = import_openmvg_cameras(sfm_data_json)
    device_identifiers = {int: str}  # Pose id -> device id
    timestamp_for_pose = {int: int}  # Pose id -> timestamp
    # Imports the images as records_camera, and also fills in the device_identifiers and timestamp_for_pose dictionaries
    records_camera = import_openmvg_images(sfm_data_json, image_action,
                                           kapture_images_path,
                                           openmvg_images_dir, data_root_path,
                                           device_identifiers,
                                           timestamp_for_pose)
    trajectories = import_openmvg_trajectories(sfm_data_json,
                                               device_identifiers,
                                               timestamp_for_pose)

    kapture_data = kapture.Kapture(sensors=kapture_cameras,
                                   records_camera=records_camera,
                                   trajectories=trajectories)
    return kapture_data
Example 16
def sub_kapture_from_img_list(kdata, kdata_path, img_list, pairs):
    trajectories = kapture.Trajectories()
    sensors = kapture.Sensors()
    records = kapture.RecordsCamera()
    keypoints = kapture.Keypoints(kdata.keypoints._tname,
                                  kdata.keypoints._dtype,
                                  kdata.keypoints._dsize)
    if kdata.descriptors is not None:
        descriptors = kapture.Descriptors(kdata.descriptors._tname,
                                          kdata.descriptors._dtype,
                                          kdata.descriptors._dsize)
    else:
        descriptors = None
    matches = kapture.Matches()

    timestamp_sensor_id_from_image_name = {
        img_name: (timestamp, sensor_id)
        for timestamp, sensor_id, img_name in kapture.flatten(
            kdata.records_camera)
    }
    for img in img_list:
        timestamp, sensor_id = timestamp_sensor_id_from_image_name[img]
        pose = kdata.trajectories[timestamp][sensor_id]
        sensors[sensor_id] = kdata.sensors[sensor_id]
        records[timestamp, sensor_id] = img
        trajectories[timestamp, sensor_id] = pose
        keypoints.add(img)
        if kdata.descriptors is not None:
            descriptors.add(img)

    for i in pairs:
        image_matches_filepath = get_matches_fullpath((i[0], i[1]), kdata_path)
        if os.path.exists(image_matches_filepath):
            matches.add(i[0], i[1])
    matches.normalize()

    return kapture.Kapture(sensors=sensors,
                           trajectories=trajectories,
                           records_camera=records,
                           descriptors=descriptors,
                           keypoints=keypoints,
                           matches=matches)
Example 17
def sub_kapture_from_img_list(kdata, img_list, pairs, keypoints_type, descriptors_type):
    trajectories = kapture.Trajectories()
    sensors = kapture.Sensors()
    records = kapture.RecordsCamera()
    keypoints = kapture.Keypoints(kdata.keypoints[keypoints_type].type_name,
                                  kdata.keypoints[keypoints_type].dtype,
                                  kdata.keypoints[keypoints_type].dsize)
    if kdata.descriptors is not None and descriptors_type in kdata.descriptors:
        descriptors = kapture.Descriptors(kdata.descriptors[descriptors_type].type_name,
                                          kdata.descriptors[descriptors_type].dtype,
                                          kdata.descriptors[descriptors_type].dsize,
                                          kdata.descriptors[descriptors_type].keypoints_type,
                                          kdata.descriptors[descriptors_type].metric_type)
    else:
        descriptors = None
    matches = kapture.Matches()

    timestamp_sensor_id_from_image_name = {img_name: (timestamp, sensor_id) for timestamp, sensor_id, img_name in
                                           kapture.flatten(kdata.records_camera)}
    for img in img_list:
        timestamp, sensor_id = timestamp_sensor_id_from_image_name[img]
        sensors[sensor_id] = kdata.sensors[sensor_id]
        records[timestamp, sensor_id] = img
        if (timestamp, sensor_id) in kdata.trajectories:
            pose = kdata.trajectories[timestamp][sensor_id]
            trajectories[timestamp, sensor_id] = pose
        keypoints.add(img)
        if kdata.descriptors is not None:
            descriptors.add(img)

    for i in pairs:
        if i in kdata.matches[keypoints_type]:
            matches.add(i[0], i[1])
    matches.normalize()

    return kapture.Kapture(sensors=sensors, trajectories=trajectories, records_camera=records,
                           descriptors={descriptors_type: descriptors},
                           keypoints={keypoints_type: keypoints},
                           matches={keypoints_type: matches})
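A hypothetical call of this variant, assuming kdata already holds 'SIFT' keypoints, descriptors and matches for the listed images (image names are placeholders):

sub_kdata = sub_kapture_from_img_list(
    kdata,
    img_list=['cam0/image000.jpg', 'cam0/image001.jpg'],
    pairs=[('cam0/image000.jpg', 'cam0/image001.jpg')],
    keypoints_type='SIFT',
    descriptors_type='SIFT')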
Example 18
    def __init__(self,
                 path,
                 reset=False,
                 img_format=IMG_FORMAT_JPG,
                 min_pt3d_obs=3,
                 min_pt3d_ratio=0.2,
                 jpg_qlt=95,
                 scale=1.0):
        self.path = path
        self.default_cam = ('1', self._IMAGE_FOLDER)
        self.default_kp_type = 'gftt'
        self.img_format = img_format
        self.min_pt3d_obs = min_pt3d_obs
        self.min_pt3d_ratio = min_pt3d_ratio
        self.jpg_qlt = jpg_qlt
        self.scale = scale

        if os.path.exists(self.path):
            self.kapture = kapture_from_dir(self.path)
            if reset:
                shutil.rmtree(self.path)

        if not os.path.exists(self.path):
            self.kapture = kt.Kapture()
Example 19
def pycolmap_localize_from_loaded_data(
        kapture_data: kapture.Kapture, kapture_path: str,
        tar_handlers: TarCollection, kapture_query_data: kapture.Kapture,
        output_path: str, pairsfile_path: str, max_error: float,
        min_inlier_ratio: float, min_num_iterations: int,
        max_num_iterations: int, confidence: float,
        keypoints_type: Optional[str],
        duplicate_strategy: DuplicateCorrespondencesStrategy,
        rerank_strategy: RerankCorrespondencesStrategy,
        write_detailed_report: bool, force: bool) -> None:
    """
    Localize images using pycolmap.

    :param kapture_data: loaded kapture data (incl. points3d)
    :param kapture_path: path to the kapture to use
    :param tar_handlers: collection of pre-opened tar archives
    :param kapture_query_data: loaded query kapture data (query images)
    :param output_path: path to write the localization results
    :param pairsfile_path: pairs to use
    :param max_error: RANSAC inlier threshold in pixel
    :param min_inlier_ratio: abs_pose_options.ransac_options.min_inlier_ratio
    :param min_num_iterations: abs_pose_options.ransac_options.min_num_trials
    :param max_num_iterations: abs_pose_options.ransac_options.max_num_trials
    :param confidence: abs_pose_options.ransac_options.confidence
    :param keypoints_type: types of keypoints (and observations) to use
    :param duplicate_strategy: strategy to handle duplicate correspondences (either kpt_id and/or pt3d_id)
    :param rerank_strategy: strategy to reorder pairs before handling duplicate correspondences
    :param write_detailed_report: if True, write a json file with inliers, reprojection error for each query
    :param force: Silently overwrite kapture files if they already exist
    """
    assert has_pycolmap
    if not (kapture_data.records_camera and kapture_data.sensors
            and kapture_data.keypoints and kapture_data.matches
            and kapture_data.points3d and kapture_data.observations):
        raise ValueError('records_camera, sensors, keypoints, matches, '
                         'points3d, observations are mandatory for map+query')

    if not (kapture_query_data.records_camera and kapture_query_data.sensors):
        raise ValueError('records_camera, sensors are mandatory for query')

    if keypoints_type is None:
        keypoints_type = try_get_only_key_from_collection(
            kapture_data.keypoints)
    assert keypoints_type is not None
    assert keypoints_type in kapture_data.keypoints
    assert keypoints_type in kapture_data.matches

    if kapture_data.rigs is not None and kapture_data.trajectories is not None:
        # make sure, rigs are not used in trajectories.
        logger.info('remove rigs notation.')
        rigs_remove_inplace(kapture_data.trajectories, kapture_data.rigs)
        kapture_data.rigs.clear()

    if kapture_query_data.trajectories is not None:
        logger.warning(
            "Input query data contains trajectories: they will be ignored")
        kapture_query_data.trajectories.clear()

    os.umask(0o002)
    os.makedirs(output_path, exist_ok=True)
    delete_existing_kapture_files(output_path, force_erase=force)

    # load pairsfile
    pairs = {}
    with open(pairsfile_path, 'r') as fid:
        table = kapture.io.csv.table_from_file(fid)
        for img_query, img_map, _ in table:
            if img_query not in pairs:
                pairs[img_query] = []
            pairs[img_query].append(img_map)

    kapture_data.matches[keypoints_type].normalize()
    keypoints_filepaths = keypoints_to_filepaths(
        kapture_data.keypoints[keypoints_type], keypoints_type, kapture_path,
        tar_handlers)
    obs_for_keypoints_type = {
        point_id: per_keypoints_type_subdict[keypoints_type]
        for point_id, per_keypoints_type_subdict in
        kapture_data.observations.items()
        if keypoints_type in per_keypoints_type_subdict
    }
    point_id_from_obs = {
        (img_name, kp_id): point_id
        for point_id in obs_for_keypoints_type.keys()
        for img_name, kp_id in obs_for_keypoints_type[point_id]
    }
    query_images = [(timestamp, sensor_id, image_name)
                    for timestamp, sensor_id, image_name in kapture.flatten(
                        kapture_query_data.records_camera)]

    # kapture for localized images + pose
    trajectories = kapture.Trajectories()
    for timestamp, sensor_id, image_name in tqdm(
            query_images,
            disable=logging.getLogger().level >= logging.CRITICAL):
        if image_name not in pairs:
            continue
        # N number of correspondences
        # points2D - Nx2 array with pixel coordinates
        # points3D - Nx3 array with world coordinates
        points2D = []
        points3D = []
        keypoints_filepath = keypoints_filepaths[image_name]
        kapture_keypoints_query = image_keypoints_from_file(
            filepath=keypoints_filepath,
            dsize=kapture_data.keypoints[keypoints_type].dsize,
            dtype=kapture_data.keypoints[keypoints_type].dtype)
        query_cam = kapture_query_data.sensors[sensor_id]
        assert isinstance(query_cam, kapture.Camera)

        col_cam_id, width, height, params, _ = get_colmap_camera(query_cam)
        cfg = {
            'model': CAMERA_MODEL_NAME_ID[col_cam_id][0],
            'width': int(width),
            'height': int(height),
            'params': params
        }

        points2D, _, points3D, stats = get_correspondences(
            kapture_data, keypoints_type, kapture_path, tar_handlers,
            image_name, pairs[image_name], point_id_from_obs,
            kapture_keypoints_query, None, duplicate_strategy, rerank_strategy)

        # compute absolute pose
        # inlier_threshold - RANSAC inlier threshold in pixels
        # answer - dictionary containing the RANSAC output
        ret = pycolmap.absolute_pose_estimation(points2D, points3D, cfg,
                                                max_error, min_inlier_ratio,
                                                min_num_iterations,
                                                max_num_iterations, confidence)
        # add pose to output kapture
        if ret['success'] and ret['num_inliers'] > 0:
            pose = kapture.PoseTransform(ret['qvec'], ret['tvec'])
            if write_detailed_report:
                num_2dpoints = len(points2D)
                points2D_final, K, distortion = get_camera_matrix_from_kapture(
                    np.array(points2D, dtype=np.float64), query_cam)
                points2D_final = list(points2D_final.reshape(
                    (num_2dpoints, 2)))
                inliers = np.where(ret['inliers'])[0].tolist()
                reprojection_error = compute_reprojection_error(
                    pose, ret['num_inliers'], inliers, points2D_final,
                    points3D, K, distortion)
                cache = {
                    "num_correspondences": len(points3D),
                    "num_inliers": inliers,
                    "inliers": ret['inliers'],
                    "reprojection_error": reprojection_error,
                    "stats": stats
                }
                cache_path = os.path.join(output_path,
                                          f'pycolmap_cache/{image_name}.json')
                save_to_json(cache, cache_path)
            trajectories[timestamp, sensor_id] = pose

    kapture_data_localized = kapture.Kapture(
        sensors=kapture_query_data.sensors,
        trajectories=trajectories,
        records_camera=kapture_query_data.records_camera,
        rigs=kapture_query_data.rigs)
    kapture.io.csv.kapture_to_dir(output_path, kapture_data_localized)
Example 20
def import_robotcar_seasons(
        robotcar_path: str,
        kapture_path: str,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.skip,
        skip_reconstruction: bool = False,
        rig_collapse: bool = False,
        use_colmap_intrinsics: bool = False,
        import_v1: bool = False) -> None:
    """
    Read the RobotCar Seasons data and create several kaptures with training and query data.
    :param robotcar_path: path to the robotcar top directory
    :param kapture_path: path to the kapture top directory
    :param force_overwrite_existing: Silently overwrite kapture files if they already exist.
    :param images_import_method: choose how to import actual image files.
    :param skip_reconstruction: if True, will skip the reconstruction part from the training data
    :param rig_collapse: if True, will collapse the rig
    """

    kapture_path = path.join(kapture_path, "base")
    os.makedirs(kapture_path, exist_ok=True)

    cameras = import_robotcar_cameras(path.join(robotcar_path, 'intrinsics'))
    rigs = import_robotcar_rig(path.join(robotcar_path, 'extrinsics'))

    logger.info("Importing test data")
    # Test data
    image_pattern = re.compile(
        r'(?P<condition>.+)/(?P<camera>\w+)/(?P<timestamp>\d+)\.jpg')
    queries_path = path.join(robotcar_path, '3D-models', 'individual',
                             'queries_per_location')
    kapture_imported_query = {}
    for root, dirs, files in os.walk(queries_path):
        for query_file in files:
            records_camera = kapture.RecordsCamera()
            # Get list of query images
            with open(path.join(queries_path, query_file)) as f:
                for line in f:
                    matches = image_pattern.match(line)
                    image_path = line.strip()
                    if not matches:
                        logger.warning(f"Error matching line in {image_path}")
                        continue
                    matches = matches.groupdict()
                    timestamp = int(matches['timestamp'])
                    camera = str(matches['camera'])
                    condition = str(matches['condition'])
                    records_camera[timestamp, camera] = image_path

                (query_name, _) = query_file.split('.')
                kapture_test = kapture.Kapture(sensors=cameras,
                                               rigs=rigs,
                                               records_camera=records_camera)
                kapture_imported_query[int(
                    query_name.split('_')[-1])] = kapture_test

    # Reference map data
    logger.info("Importing reference map")
    colmap_reconstructions_path = path.join(robotcar_path, '3D-models',
                                            'individual',
                                            'colmap_reconstructions')
    kapture_imported_mapping = {}
    for root, dirs, files in os.walk(colmap_reconstructions_path):
        for colmap_reconstruction in dirs:
            (loc_id, _) = colmap_reconstruction.split('_')
            kapture_reconstruction_dir = path.join(kapture_path,
                                                   f"{int(loc_id):02d}",
                                                   "mapping")
            delete_existing_kapture_files(kapture_reconstruction_dir,
                                          force_erase=force_overwrite_existing)
            logger.info(f'Converting reconstruction {loc_id} to kapture  ...')
            kapture_reconstruction_data = import_robotcar_colmap_location(
                robotcar_path,
                path.join(colmap_reconstructions_path, colmap_reconstruction),
                kapture_reconstruction_dir, rigs, skip_reconstruction)
            # replace intrinsics with the ones found in the text files
            if not use_colmap_intrinsics:
                kapture_reconstruction_data.sensors = cameras
            kapture_imported_mapping[int(loc_id)] = kapture_reconstruction_data

    if not import_v1:
        queries_per_location = {
            image_name: (ts, cam_id, loc_id)
            for loc_id, kdata_test in kapture_imported_query.items() for ts,
            cam_id, image_name in kapture.flatten(kdata_test.records_camera)
        }
        kapture_imported_training = {}  # stores kapture for each submap
        # read robotcar_v2_train.txt
        v2_train_data = read_robotcar_v2_train(robotcar_path)
        for image_name, pose in v2_train_data.items():
            ts, cam_id, loc_id = queries_per_location[image_name]
            assert cam_id == 'rear'
            # create kapture object for submap if it doesn't exist
            if loc_id not in kapture_imported_training:
                kapture_loc_id = kapture.Kapture(sensors=cameras, rigs=rigs)
                kapture_loc_id.records_camera = kapture.RecordsCamera()
                kapture_loc_id.trajectories = kapture.Trajectories()
                kapture_imported_training[loc_id] = kapture_loc_id
            kapture_imported_training[loc_id].records_camera[
                ts, cam_id] = image_name
            kapture_imported_training[loc_id].trajectories[ts, cam_id] = pose
            matches = image_pattern.match(image_name)
            if not matches:
                logger.warning(f"Error matching line in {image_name}")
                continue
            matches = matches.groupdict()
            condition = str(matches['condition'])
            timestamp = str(matches['timestamp'])
            camera = str(matches['camera'])
            # added left and right images in records_camera
            left_image_name = condition + '/' + 'left' + '/' + timestamp + '.jpg'
            right_image_name = condition + '/' + 'right' + '/' + timestamp + '.jpg'
            kapture_imported_training[loc_id].records_camera[
                ts, 'left'] = left_image_name
            kapture_imported_training[loc_id].records_camera[
                ts, 'right'] = right_image_name

            # remove entries from query
            del kapture_imported_query[loc_id].records_camera[ts][cam_id]
            del kapture_imported_query[loc_id].records_camera[ts]['left']
            del kapture_imported_query[loc_id].records_camera[ts]['right']
            del kapture_imported_query[loc_id].records_camera[ts]

        # all remaining query images are kept; reading robotcar_v2_test.txt is not necessary

    # apply rig collapse
    if rig_collapse:
        logger.info('replacing camera poses with rig poses.')
        for kdata_mapping in kapture_imported_mapping.values():
            kapture.rigs_recover_inplace(kdata_mapping.trajectories, rigs,
                                         'rear')
        for kdata_training in kapture_imported_training.values():
            kapture.rigs_recover_inplace(kdata_training.trajectories, rigs,
                                         'rear')

    # IO operations
    robotcar_image_path = path.join(robotcar_path, "images")
    for loc_id, kdata_query in kapture_imported_query.items():
        loc_id_str = f"{loc_id:02d}"
        logger.info(f'writing test data: {loc_id_str}')
        kapture_test_dir = path.join(kapture_path, loc_id_str, "query")
        delete_existing_kapture_files(kapture_test_dir,
                                      force_erase=force_overwrite_existing)
        if not kdata_query.records_camera:  # all images were removed
            continue
        kapture_to_dir(kapture_test_dir, kdata_query)
        query_images = [
            f for _, _, f in kapture.flatten(kdata_query.records_camera)
        ]
        import_record_data_from_dir_auto(robotcar_image_path, kapture_test_dir,
                                         query_images, images_import_method)

    for loc_id, kdata_mapping in kapture_imported_mapping.items():
        loc_id_str = f"{loc_id:02d}"
        logger.info(f'writing mapping data: {loc_id_str}')
        kapture_reconstruction_dir = path.join(kapture_path, f"{loc_id:02d}",
                                               "mapping")
        delete_existing_kapture_files(kapture_reconstruction_dir,
                                      force_erase=force_overwrite_existing)
        kapture_to_dir(kapture_reconstruction_dir, kdata_mapping)
        mapping_images = [
            f for _, _, f in kapture.flatten(kdata_mapping.records_camera)
        ]
        import_record_data_from_dir_auto(robotcar_image_path,
                                         kapture_reconstruction_dir,
                                         mapping_images, images_import_method)

    for loc_id, kdata_training in kapture_imported_training.items():
        loc_id_str = f"{loc_id:02d}"
        logger.info(f'writing training data: {loc_id_str}')
        kapture_training_dir = path.join(kapture_path, f"{loc_id:02d}",
                                         "training")
        delete_existing_kapture_files(kapture_training_dir,
                                      force_erase=force_overwrite_existing)
        kapture_to_dir(kapture_training_dir, kdata_training)
        mapping_images = [
            f for _, _, f in kapture.flatten(kdata_training.records_camera)
        ]
        import_record_data_from_dir_auto(robotcar_image_path,
                                         kapture_training_dir, mapping_images,
                                         images_import_method)
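A hypothetical top-level call (paths are placeholders):

import_robotcar_seasons('/data/RobotCar-Seasons', '/data/kapture/robotcar',
                        force_overwrite_existing=True,
                        rig_collapse=True)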
Example 21
def merge_keep_ids(
        kapture_list: List[
            kapture.
            Kapture],  # noqa: C901: function a bit long but not too complex
        skip_list: List[Type],
        data_paths: List[str],
        kapture_path: str,
        images_import_method: TransferAction) -> kapture.Kapture:
    """
    Merge multiple kapture while keeping ids (sensor_id) identical in merged and inputs.

    :param kapture_list: list of kapture to merge.
    :param skip_list: optional types not to merge. sensors and rigs are unskippable
    :param data_paths: list of path to root path directory in same order as mentioned in kapture_list.
    :param kapture_path: directory root path to the merged kapture.
    :param images_import_method: method to transfer image files
    :return: merged kapture
    """
    merged_kapture = kapture.Kapture()

    # get the union of all sensors
    new_sensors = merge_sensors(
        [every_kapture.sensors for every_kapture in kapture_list])
    # if merge_sensors returned an empty object, keep merged_kapture.sensors to None
    merged_kapture.sensors = get_new_if_not_empty(new_sensors,
                                                  merged_kapture.sensors)

    # get the union of all rigs
    new_rigs = merge_rigs(
        [every_kapture.rigs for every_kapture in kapture_list])
    # if merge_rigs returned an empty object, keep merged_kapture.rigs to None
    merged_kapture.rigs = get_new_if_not_empty(new_rigs, merged_kapture.rigs)

    # all fields below can be skipped with skip_list
    # we do not assign the properties when the merge evaluates to false; we keep them as None
    if kapture.Trajectories not in skip_list:
        new_trajectories = merge_trajectories(
            [every_kapture.trajectories for every_kapture in kapture_list])
        merged_kapture.trajectories = get_new_if_not_empty(
            new_trajectories, merged_kapture.trajectories)

    if kapture.RecordsCamera not in skip_list:
        new_records_camera = merge_records_camera(
            [every_kapture.records_camera for every_kapture in kapture_list])
        merged_kapture.records_camera = get_new_if_not_empty(
            new_records_camera, merged_kapture.records_camera)

        merge_records_data(
            [[
                image_name for _, _, image_name in kapture.flatten(
                    every_kapture.records_camera)
            ] if every_kapture.records_camera is not None else []
             for every_kapture in kapture_list], [
                 get_image_fullpath(data_path, image_filename=None)
                 for data_path in data_paths
             ], kapture_path, images_import_method)

    if kapture.RecordsLidar not in skip_list:
        new_records_lidar = merge_records_lidar(
            [every_kapture.records_lidar for every_kapture in kapture_list])
        merged_kapture.records_lidar = get_new_if_not_empty(
            new_records_lidar, merged_kapture.records_lidar)
    if kapture.RecordsWifi not in skip_list:
        new_records_wifi = merge_records_wifi(
            [every_kapture.records_wifi for every_kapture in kapture_list])
        merged_kapture.records_wifi = get_new_if_not_empty(
            new_records_wifi, merged_kapture.records_wifi)
    if kapture.RecordsBluetooth not in skip_list:
        new_records_bluetooth = merge_records_bluetooth([
            every_kapture.records_bluetooth for every_kapture in kapture_list
        ])
        merged_kapture.records_bluetooth = get_new_if_not_empty(
            new_records_bluetooth, merged_kapture.records_bluetooth)
    if kapture.RecordsGnss not in skip_list:
        new_records_gnss = merge_records_gnss(
            [every_kapture.records_gnss for every_kapture in kapture_list])
        merged_kapture.records_gnss = get_new_if_not_empty(
            new_records_gnss, merged_kapture.records_gnss)
    if kapture.RecordsAccelerometer not in skip_list:
        new_records_accelerometer = merge_records_accelerometer([
            every_kapture.records_accelerometer
            for every_kapture in kapture_list
        ])
        merged_kapture.records_accelerometer = get_new_if_not_empty(
            new_records_accelerometer, merged_kapture.records_accelerometer)
    if kapture.RecordsGyroscope not in skip_list:
        new_records_gyroscope = merge_records_gyroscope([
            every_kapture.records_gyroscope for every_kapture in kapture_list
        ])
        merged_kapture.records_gyroscope = get_new_if_not_empty(
            new_records_gyroscope, merged_kapture.records_gyroscope)
    if kapture.RecordsMagnetic not in skip_list:
        new_records_magnetic = merge_records_magnetic(
            [every_kapture.records_magnetic for every_kapture in kapture_list])
        merged_kapture.records_magnetic = get_new_if_not_empty(
            new_records_magnetic, merged_kapture.records_magnetic)

    # for the reconstruction, except points and observations, the files are copied with shutil.copy
    # if kapture_path evaluates to False, all copies will be skipped (but classes will be filled normally)
    if kapture.Keypoints not in skip_list:
        keypoints = [every_kapture.keypoints for every_kapture in kapture_list]
        keypoints_not_none = [k for k in keypoints if k is not None]
        if len(keypoints_not_none) > 0:
            new_keypoints = merge_keypoints(keypoints, data_paths,
                                            kapture_path)
            merged_kapture.keypoints = get_new_if_not_empty(
                new_keypoints, merged_kapture.keypoints)
    if kapture.Descriptors not in skip_list:
        descriptors = [
            every_kapture.descriptors for every_kapture in kapture_list
        ]
        descriptors_not_none = [k for k in descriptors if k is not None]
        if len(descriptors_not_none) > 0:
            new_descriptors = merge_descriptors(descriptors, data_paths,
                                                kapture_path)
            merged_kapture.descriptors = get_new_if_not_empty(
                new_descriptors, merged_kapture.descriptors)
    if kapture.GlobalFeatures not in skip_list:
        global_features = [
            every_kapture.global_features for every_kapture in kapture_list
        ]
        global_features_not_none = [
            k for k in global_features if k is not None
        ]
        if len(global_features_not_none) > 0:
            new_global_features = merge_global_features(
                global_features, data_paths, kapture_path)
            merged_kapture.global_features = get_new_if_not_empty(
                new_global_features, merged_kapture.global_features)
    if kapture.Matches not in skip_list:
        matches = [every_kapture.matches for every_kapture in kapture_list]
        new_matches = merge_matches(matches, data_paths, kapture_path)
        merged_kapture.matches = get_new_if_not_empty(new_matches,
                                                      merged_kapture.matches)

    if kapture.Points3d not in skip_list and kapture.Observations not in skip_list:
        points_and_obs = [(every_kapture.points3d, every_kapture.observations)
                          for every_kapture in kapture_list]
        new_points, new_observations = merge_points3d_and_observations(
            points_and_obs)
        merged_kapture.points3d = get_new_if_not_empty(new_points,
                                                       merged_kapture.points3d)
        merged_kapture.observations = get_new_if_not_empty(
            new_observations, merged_kapture.observations)
    elif kapture.Points3d not in skip_list:
        points = [every_kapture.points3d for every_kapture in kapture_list]
        new_points = merge_points3d(points)
        merged_kapture.points3d = get_new_if_not_empty(new_points,
                                                       merged_kapture.points3d)
    return merged_kapture
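A hypothetical merge of two datasets that share sensor ids (paths are placeholders; TransferAction is assumed to come from kapture.io.records, as used by the importers above):

from kapture.io.records import TransferAction  # assumption: same enum as in the importers

merged = merge_keep_ids([kdata_a, kdata_b],
                        skip_list=[kapture.Matches],      # e.g. leave matches out
                        data_paths=['/data/a', '/data/b'],
                        kapture_path='/data/merged',
                        images_import_method=TransferAction.skip)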
Example 22
def import_7scenes(d7scenes_path: str,
                   kapture_dir_path: str,
                   force_overwrite_existing: bool = False,
                   images_import_method: TransferAction = TransferAction.skip,
                   partition: Optional[str] = None
                   ) -> None:
    """
    Imports RGB-D Dataset 7-Scenes dataset and save them as kapture.

    :param d7scenes_path: path to the 7scenes sequence root path
    :param kapture_dir_path: path to kapture top directory
    :param force_overwrite_existing: Silently overwrite kapture files if they already exist.
    :param images_import_method: choose how to import actual image files.
    :param partition: if specified, either 'mapping' or 'query'. Requires d7scenes_path/TestSplit.txt or TrainSplit.txt
                    to exist.
    """
    os.makedirs(kapture_dir_path, exist_ok=True)
    delete_existing_kapture_files(kapture_dir_path, force_erase=force_overwrite_existing)

    logger.info('loading all content ...')

    d7s_filename_re = re.compile(r'((?P<sequence>.+)/)?frame-(?P<frame_id>\d{6})\.(?P<suffix>\w*)\.(?P<ext>\w*)')

    # populate all relevant files
    d7s_filenames = (path_secure(path.relpath(path.join(dp, fn), d7scenes_path))
                     for dp, _, fs in os.walk(d7scenes_path) for fn in fs)

    logger.info('populating 7-scenes files ...')
    d7s_filenames = {filename: d7s_filename_re.search(filename).groupdict()
                     for filename in sorted(d7s_filenames)
                     if d7s_filename_re.search(filename)}

    # reorg as shot[seq, id] = {color: , depth: , pose: , ...}
    shots = {}
    for timestamp, (filename, file_attribs) in enumerate(d7s_filenames.items()):
        shot_id = (file_attribs.get('sequence'), file_attribs['frame_id'])
        shots.setdefault(shot_id, {})[file_attribs['suffix']] = filename

    # fake timestamps
    for timestamp, shot_id in enumerate(shots):
        shots[shot_id]['timestamp'] = timestamp

    # if given, filter partition
    if partition is not None:
        # read the authors split file
        partition_filepath = path.join(d7scenes_path, PARTITION_FILENAMES[partition])
        if not path.isfile(partition_filepath):
            raise FileNotFoundError(f'partition file is missing: {partition_filepath}.')
        with open(partition_filepath, 'rt') as file:
            split_sequences = [f'seq-{int(seq.strip()[len("sequence"):]):02}' for seq in file.readlines()]
        assert len(split_sequences) > 0
        # filter out
        shots = {(seq, frame): shot
                 for (seq, frame), shot in shots.items()
                 if seq in split_sequences}

    if len(shots) == 0:
        raise FileNotFoundError('no file found: make sure the path to 7scenes sequence is valid.')

    # eg. shots['seq-01', '000000'] =
    #       {
    #           'color': 'seq-01/frame-000000.color.jpg',
    #           'depth': 'seq-01/frame-000000.depth.png',
    #           'pose': 'seq-01/frame-000000.pose.txt',
    #           'timestamp': 0}

    # images + depth maps
    logger.info('populating image and depth maps files ...')
    snapshots = kapture.RecordsCamera()
    depth_maps = kapture.RecordsDepth()
    for shot in shots.values():
        snapshots[shot['timestamp'], RGB_SENSOR_ID] = shot['color']
        kapture_depth_map_filename = shot['depth'][:-len('.png')]  # kapture depth files are not png
        depth_maps[shot['timestamp'], DEPTH_SENSOR_ID] = kapture_depth_map_filename
        kapture_registered_depth_map_filename = shot['depth'][:-len('.png')] + '.reg'  # kapture depth files are not png
        depth_maps[shot['timestamp'], REG_DEPTH_SENSOR_ID] = kapture_registered_depth_map_filename

    # poses
    logger.info('import poses files ...')
    trajectories = kapture.Trajectories()
    for shot in shots.values():
        pose_filepath = path.join(d7scenes_path, shot['pose'])
        pose_mat = np.loadtxt(pose_filepath)  # camera-to-world, 4×4 matrix in homogeneous coordinates
        rotation_mat = pose_mat[0:3, 0:3]
        position_vec = pose_mat[0:3, 3]
        rotation_quat = quaternion.from_rotation_matrix(rotation_mat)
        pose_world_from_cam = kapture.PoseTransform(r=rotation_quat, t=position_vec)
        pose_cam_from_world = pose_world_from_cam.inverse()
        trajectories[shot['timestamp'], RGBD_SENSOR_ID] = pose_cam_from_world

    # sensors
    """
    From authors: The RGB and depth camera have not been calibrated and we can’t provide calibration parameters at the
    moment. The recorded frames correspond to the raw, uncalibrated camera images. In the KinectFusion pipeline we used
    the following default intrinsics for the depth camera: Principle point (320,240), Focal length (585,585).    
    ----
    We use the extrinsic Kinect camera parameters from https://projet.liris.cnrs.fr/voir/activities-dataset/kinect-calibration.html.
    """
    sensors = kapture.Sensors()
    # camera_type = kapture.CameraType.OPENCV
    # camera_params = [640, 480, 5.2161910696979987e+02, 5.2132946256749767e+02, 3.1755491910920682e+02, 2.5921654718027673e+02,
    #                  2.5673002693536984e-01, -9.3976085633794137e-01, -1.8605549188751580e-03, -2.2232238578189420e-03]  # w, h, f, cx, cy, k1, k2, p1, p2, k3
    camera_type = kapture.CameraType.SIMPLE_PINHOLE
    # camera_params = [640, 480, 5.2161910696979987e+02, 5.2132946256749767e+02, 3.1755491910920682e+02, 2.5921654718027673e+02]  # w, h, fx, fy, cx, cy
    camera_params = [640, 480, 525, 320, 240]  # w, h, f, cx, cy
    sensors[RGB_SENSOR_ID] = kapture.Camera(
        name=RGB_SENSOR_ID,
        camera_type=camera_type,
        camera_params=camera_params
    )
    # depth_camera_type = kapture.CameraType.OPENCV
    # depth_camera_params = [640, 480, 5.8818670481438744e+02, 5.8724220649505514e+02, 3.1076280589210484e+02, 2.2887144980135292e+02,
    #                        -1.8932947734719333e-01, 1.1358015104098631e+00, -4.4260345347128536e-03, -5.4869578635708153e-03, -2.2460143607712921e+00] # w, h, f, cx, cy, k1, k2, p1, p2, k3
    depth_camera_type = kapture.CameraType.SIMPLE_PINHOLE
    # depth_camera_params = [640, 480, 5.8818670481438744e+02, 5.8724220649505514e+02, 3.1076280589210484e+02, 2.2887144980135292e+02] # w, h, fx, fy, cx, cy
    depth_camera_params = [640, 480, 585, 320, 240]  # w, h, f, cx, cy
    sensors[DEPTH_SENSOR_ID] = kapture.Camera(
        name=DEPTH_SENSOR_ID,
        camera_type=depth_camera_type,
        camera_params=depth_camera_params,
        sensor_type='depth'
    )
    sensors[REG_DEPTH_SENSOR_ID] = kapture.Camera(
        name=REG_DEPTH_SENSOR_ID,
        camera_type=depth_camera_type,
        camera_params=camera_params,
        sensor_type='depth'
    )

    # bind camera and depth sensor into a rig
    R = np.array([[9.9996518012567637e-01, 2.6765126468950343e-03, -7.9041012313000904e-03],
                  [-2.7409311281316700e-03, 9.9996302803027592e-01, -8.1504520778013286e-03],
                  [7.8819942130445332e-03, 8.1718328771890631e-03, 9.9993554558014031e-01]])
    T = np.array([-2.5558943178152542e-02, 1.0109636268061706e-04, 2.0318321729487039e-03])
    Rt = np.vstack((np.hstack((R, T.reshape(3, 1))), np.array([0, 0, 0, 1])))
    logger.info('building rig with camera and depth sensor ...')
    rigs = kapture.Rigs()
    rigs[RGBD_SENSOR_ID, RGB_SENSOR_ID] = kapture.PoseTransform(quaternion.from_rotation_matrix(R), T)
    rigs[RGBD_SENSOR_ID, REG_DEPTH_SENSOR_ID] = kapture.PoseTransform(quaternion.from_rotation_matrix(R), T)
    rigs[RGBD_SENSOR_ID, DEPTH_SENSOR_ID] = kapture.PoseTransform()

    # import (copy) image files.
    logger.info('copying image files ...')
    image_filenames = [f for _, _, f in kapture.flatten(snapshots)]
    import_record_data_from_dir_auto(d7scenes_path, kapture_dir_path, image_filenames, images_import_method)

    # import (copy) depth map files.
    logger.info('converting depth files ...')
    depth_map_filenames = kapture.io.records.records_to_filepaths(depth_maps, kapture_dir_path)
    hide_progress = logger.getEffectiveLevel() > logging.INFO
    for depth_map_filename, depth_map_filepath_kapture in tqdm(depth_map_filenames.items(), disable=hide_progress):
        if '.reg' in depth_map_filename:
            continue
        depth_map_filepath_7scenes = path.join(d7scenes_path, depth_map_filename + '.png')
        depth_map = np.array(Image.open(depth_map_filepath_7scenes))
        # change invalid depth from 65535 to 0
        depth_map[depth_map == 65535] = 0
        # depth maps are in mm in 7-Scenes; convert to meters
        depth_map = depth_map.astype(np.float32) * 1.0e-3
        kapture.io.records.records_depth_to_file(depth_map_filepath_kapture, depth_map)
        # register depth to rgb
        reg_depth_map = register_depth(get_K(depth_camera_type, depth_camera_params), get_K(camera_type, camera_params),
                                       Rt, depth_map, camera_params[0], camera_params[1])
        kapture.io.records.records_depth_to_file(depth_map_filepath_kapture + '.reg', reg_depth_map)

    # pack into kapture format
    imported_kapture = kapture.Kapture(
        records_camera=snapshots,
        records_depth=depth_maps,
        rigs=rigs,
        trajectories=trajectories,
        sensors=sensors)

    logger.info('writing imported data ...')
    kapture_to_dir(kapture_dir_path, imported_kapture)
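
A minimal usage sketch for the importer above; the dataset and output paths are hypothetical, and the TransferAction import path is an assumption, so adapt both to where the function above actually lives.

# Usage sketch for import_7scenes (hypothetical paths, assumed import paths).
from kapture.io.records import TransferAction

import_7scenes(
    d7scenes_path='/datasets/7scenes/chess',              # hypothetical dataset path
    kapture_dir_path='/kaptures/7scenes/chess/mapping',   # hypothetical output path
    force_overwrite_existing=True,
    images_import_method=TransferAction.copy,             # or TransferAction.skip to only convert metadata
    partition='mapping')                                  # keep only the training split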
Ejemplo n.º 23
0
def run_colmap_gv_from_loaded_data(kapture_none_matches: kapture.Kapture,
                                   kapture_colmap_matches: kapture.Kapture,
                                   kapture_none_matches_dirpath: str,
                                   kapture_colmap_matches_dirpath: str,
                                   tar_handlers_none_matches: Optional[TarCollection],
                                   tar_handlers_colmap_matches: Optional[TarCollection],
                                   colmap_binary: str,
                                   keypoints_type: Optional[str],
                                   skip_list: List[str],
                                   force: bool):
    logger.info('run_colmap_gv...')
    if not (kapture_none_matches.records_camera and kapture_none_matches.sensors and
            kapture_none_matches.keypoints and kapture_none_matches.matches):
        raise ValueError('records_camera, sensors, keypoints, matches are mandatory')

    # COLMAP does not fully support rigs.
    if kapture_none_matches.rigs is not None and kapture_none_matches.trajectories is not None:
        # make sure, rigs are not used in trajectories.
        logger.info('remove rigs notation.')
        rigs_remove_inplace(kapture_none_matches.trajectories, kapture_none_matches.rigs)

    # Set fixed name for COLMAP database
    colmap_db_path = os.path.join(kapture_colmap_matches_dirpath, 'colmap.db')
    if 'delete_existing' not in skip_list:
        safe_remove_file(colmap_db_path, force)

    if keypoints_type is None:
        keypoints_type = try_get_only_key_from_collection(kapture_none_matches.matches)
    assert keypoints_type is not None
    assert keypoints_type in kapture_none_matches.keypoints
    assert keypoints_type in kapture_none_matches.matches

    if 'matches_importer' not in skip_list:
        logger.debug('compute matches difference.')
        if kapture_colmap_matches.matches is not None and keypoints_type in kapture_colmap_matches.matches:
            colmap_matches = kapture_colmap_matches.matches[keypoints_type]
        else:
            colmap_matches = kapture.Matches()
        matches_to_verify = kapture.Matches(kapture_none_matches.matches[keypoints_type].difference(colmap_matches))
        kapture_data_to_export = kapture.Kapture(sensors=kapture_none_matches.sensors,
                                                 trajectories=kapture_none_matches.trajectories,
                                                 records_camera=kapture_none_matches.records_camera,
                                                 keypoints={
                                                     keypoints_type: kapture_none_matches.keypoints[keypoints_type]
                                                 },
                                                 matches={
                                                     keypoints_type: matches_to_verify
                                                 })
        # creates a new database with matches
        logger.debug('export matches difference to db.')
        colmap_db = COLMAPDatabase.connect(colmap_db_path)
        database_extra.kapture_to_colmap(kapture_data_to_export, kapture_none_matches_dirpath,
                                         tar_handlers_none_matches,
                                         colmap_db,
                                         keypoints_type,
                                         None,
                                         export_two_view_geometry=False)
        # close db before running colmap processes in order to avoid locks
        colmap_db.close()

        logger.debug('run matches_importer command.')
        colmap_lib.run_matches_importer_from_kapture_matches(
            colmap_binary,
            colmap_use_cpu=True,
            colmap_gpu_index=None,
            colmap_db_path=colmap_db_path,
            kapture_matches=matches_to_verify,
            force=force
        )

    if 'import' not in skip_list:
        logger.debug('import verified matches.')
        os.umask(0o002)
        colmap_db = COLMAPDatabase.connect(colmap_db_path)
        kapture_data = kapture.Kapture()
        kapture_data.records_camera, _ = get_images_and_trajectories_from_database(colmap_db)
        kapture_data.matches = {
            keypoints_type: get_matches_from_database(colmap_db, kapture_data.records_camera,
                                                      kapture_colmap_matches_dirpath,
                                                      tar_handlers_colmap_matches,
                                                      keypoints_type,
                                                      no_geometric_filtering=False)
        }
        colmap_db.close()

        if kapture_colmap_matches.matches is None:
            kapture_colmap_matches.matches = {}
        if keypoints_type not in kapture_colmap_matches.matches:
            kapture_colmap_matches.matches[keypoints_type] = kapture.Matches()
        kapture_colmap_matches.matches[keypoints_type].update(kapture_data.matches[keypoints_type])

    if 'delete_db' not in skip_list:
        logger.debug('delete intermediate colmap db.')
        os.remove(colmap_db_path)
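
For completeness, a usage sketch of the geometric-verification helper above; every path, the COLMAP binary location, and the way the kaptures are loaded are assumptions, so treat this as a sketch rather than the canonical pipeline call.

# Usage sketch (hypothetical paths; run_colmap_gv_from_loaded_data is assumed to be in scope).
from kapture.io.csv import kapture_from_dir

none_matches_dir = '/kaptures/query_mapping_raw_matches'         # matches before verification
colmap_matches_dir = '/kaptures/query_mapping_verified_matches'  # matches after verification
run_colmap_gv_from_loaded_data(
    kapture_none_matches=kapture_from_dir(none_matches_dir),
    kapture_colmap_matches=kapture_from_dir(colmap_matches_dir),
    kapture_none_matches_dirpath=none_matches_dir,
    kapture_colmap_matches_dirpath=colmap_matches_dir,
    tar_handlers_none_matches=None,
    tar_handlers_colmap_matches=None,
    colmap_binary='colmap',      # COLMAP executable, assumed to be on the PATH
    keypoints_type=None,         # auto-detected when a single keypoints type is present
    skip_list=[],
    force=True)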
Ejemplo n.º 24
0
def import_extended_cmu_seasons(
        cmu_path: str,
        top_kaptures_path: str,
        slice_range: List[int],
        import_all_files: bool = False,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.skip) -> None:
    """
    Import extended CMU data to kapture. Will make training and query kaptures for every CMU slice.

    :param cmu_path: path to the top directory of the CMU dataset files
    :param top_kaptures_path: top directory for the kaptures to create
    :param slice_range: range of CMU slices to import
    :param import_all_files: if True, will import all files
    :param force_overwrite_existing: silently overwrite kapture files if they already exist.
    :param images_import_method: choose how to import actual image files.
    """

    os.makedirs(top_kaptures_path, exist_ok=True)

    cameras = import_extended_cmu_seasons_intrinsics(
        path.join(cmu_path, 'intrinsics.txt'))

    for slice_n in slice_range:
        # prepare paths
        slice_path = os.path.join(cmu_path, f'slice{slice_n}')
        training_images_path = os.path.join(slice_path, 'database')
        query_images_path = os.path.join(slice_path, 'query')
        gt_trajectories_path = os.path.join(
            slice_path, f'ground-truth-database-images-slice{slice_n}.txt')
        query_image_list = os.path.join(slice_path,
                                        f'test-images-slice{slice_n}.txt')
        query_gt_path = os.path.join(slice_path, 'camera-poses')
        query_gt_list = [
            os.path.join(query_gt_path, f) for f in os.listdir(query_gt_path)
        ]

        # Import training images
        kapture_training_path = path.join(top_kaptures_path, f'slice{slice_n}',
                                          "mapping")
        delete_existing_kapture_files(kapture_training_path,
                                      force_overwrite_existing)
        training_records_camera, training_trajectories = import_extended_cmu_seasons_images(
            gt_trajectories_path)
        training_kapture = kapture.Kapture(
            sensors=cameras,
            records_camera=training_records_camera,
            trajectories=training_trajectories)
        if import_all_files:
            _add_images_from_folder(training_images_path, training_kapture)
        kapture_to_dir(kapture_training_path, training_kapture)
        # finally import images
        if images_import_method != TransferAction.skip:
            filename_list = [
                f
                for _, _, f in kapture.flatten(training_kapture.records_camera)
            ]
            logger.info(f'importing {len(filename_list)} image files ...')
            import_record_data_from_dir_auto(training_images_path,
                                             kapture_training_path,
                                             filename_list,
                                             images_import_method)
        # Import query images
        kapture_query_path = path.join(top_kaptures_path, f'slice{slice_n}',
                                       "query")
        delete_existing_kapture_files(kapture_query_path,
                                      force_erase=force_overwrite_existing)
        query_records_camera, query_trajectories = import_extended_cmu_seasons_images(
            query_image_list)
        query_kapture = kapture.Kapture(sensors=cameras,
                                        records_camera=query_records_camera,
                                        trajectories=query_trajectories)

        # import query gt when possible
        query_gt_kapture = []
        for query_gt_path in query_gt_list:
            query_gt_records_camera, query_gt_trajectories = import_extended_cmu_seasons_images(
                query_gt_path)
            query_gt_kapture.append(
                kapture.Kapture(sensors=cameras,
                                records_camera=query_gt_records_camera,
                                trajectories=query_gt_trajectories))
        data_to_merge = [query_kapture] + query_gt_kapture
        query_kapture = merge_keep_ids(
            data_to_merge,
            skip_list=[],
            data_paths=["" for _ in range(len(data_to_merge))],
            kapture_path="",
            images_import_method=TransferAction.skip)
        if import_all_files:
            _add_images_from_folder(query_images_path, query_kapture)
        kapture_to_dir(kapture_query_path, query_kapture)
        # finally import images
        if images_import_method != TransferAction.skip:
            filename_list = [
                f for _, _, f in kapture.flatten(query_kapture.records_camera)
            ]
            logger.info(f'importing {len(filename_list)} image files ...')
            import_record_data_from_dir_auto(query_images_path,
                                             kapture_query_path, filename_list,
                                             images_import_method)
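
A short usage sketch of the CMU importer above; the paths are hypothetical and the TransferAction import path is an assumption, while the slice range simply follows the slice{n} folder naming used in the code.

# Usage sketch for import_extended_cmu_seasons (hypothetical paths, assumed imports).
from kapture.io.records import TransferAction

import_extended_cmu_seasons(
    cmu_path='/datasets/Extended-CMU-Seasons',   # hypothetical dataset root containing slice2, slice3, ...
    top_kaptures_path='/kaptures/cmu',
    slice_range=list(range(2, 9)),               # import slices 2 to 8
    import_all_files=False,
    force_overwrite_existing=True,
    images_import_method=TransferAction.skip)    # only write kapture metadata, do not copy images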
Ejemplo n.º 25
0
def import_7scenes(d7scenes_path: str,
                   kapture_dir_path: str,
                   force_overwrite_existing: bool = False,
                   images_import_method: TransferAction = TransferAction.skip,
                   partition: Optional[str] = None
                   ) -> None:
    """
    Imports the RGB-D 7-Scenes dataset and saves it as kapture.

    :param d7scenes_path: path to the root of the 7-Scenes sequence
    :param kapture_dir_path: path to the kapture top directory
    :param force_overwrite_existing: silently overwrite kapture files if they already exist.
    :param images_import_method: choose how to import actual image files.
    :param partition: if specified, either 'mapping' or 'query'. Requires d7scenes_path/TestSplit.txt or
                    TrainSplit.txt to exist.
    """
    os.makedirs(kapture_dir_path, exist_ok=True)
    delete_existing_kapture_files(kapture_dir_path, force_erase=force_overwrite_existing)

    logger.info('loading all content ...')

    d7s_filename_re = re.compile(r'((?P<sequence>.+)/)?frame-(?P<frame_id>\d{6})\.(?P<suffix>\w*)\.(?P<ext>\w*)')

    # populate all relevant files
    d7s_filenames = (path_secure(path.relpath(path.join(dp, fn), d7scenes_path))
                     for dp, _, fs in os.walk(d7scenes_path) for fn in fs)

    logger.info('populating 7-scenes files ...')
    d7s_filenames = {filename: d7s_filename_re.search(filename).groupdict()
                     for filename in sorted(d7s_filenames)
                     if d7s_filename_re.search(filename)}

    # reorg as shot[seq, id] = {color: , depth: , pose: , ...}
    shots = {}
    for filename, file_attribs in d7s_filenames.items():
        shot_id = (file_attribs.get('sequence'), file_attribs['frame_id'])
        shots.setdefault(shot_id, {})[file_attribs['suffix']] = filename

    # fake timestamps
    for timestamp, shot_id in enumerate(shots):
        shots[shot_id]['timestamp'] = timestamp

    # if given, filter partition
    if partition is not None:
        # read the authors split file
        partition_filepath = path.join(d7scenes_path, PARTITION_FILENAMES[partition])
        if not path.isfile(partition_filepath):
            raise FileNotFoundError(f'partition file is missing: {partition_filepath}.')
        with open(partition_filepath, 'rt') as file:
            split_sequences = [f'seq-{int(seq.strip()[len("sequence"):]):02}' for seq in file.readlines()]
        assert len(split_sequences) > 0
        # filter out
        shots = {(seq, frame): shot
                 for (seq, frame), shot in shots.items()
                 if seq in split_sequences}

    if len(shots) == 0:
        raise FileNotFoundError('no file found: make sure the path to 7scenes sequence is valid.')

    # eg. shots['seq-01', '000000'] =
    #       {
    #           'color': 'seq-01/frame-000000.color.jpg',
    #           'depth': 'seq-01/frame-000000.depth.png',
    #           'pose': 'seq-01/frame-000000.pose.txt',
    #           'timestamp': 0}

    # images + depth maps
    logger.info('populating image and depth maps files ...')
    snapshots = kapture.RecordsCamera()
    depth_maps = kapture.RecordsDepth()
    for shot in shots.values():
        snapshots[shot['timestamp'], RGB_SENSOR_ID] = shot['color']
        kapture_depth_map_filename = shot['depth'][:-len('.png')]  # kapture depth files are not png
        depth_maps[shot['timestamp'], DEPTH_SENSOR_ID] = kapture_depth_map_filename

    # poses
    logger.info('import poses files ...')
    trajectories = kapture.Trajectories()
    for shot in shots.values():
        pose_filepath = path.join(d7scenes_path, shot['pose'])
        pose_mat = np.loadtxt(pose_filepath)  # camera-to-world, 4×4 matrix in homogeneous coordinates
        rotation_mat = pose_mat[0:3, 0:3]
        position_vec = pose_mat[0:3, 3]
        rotation_quat = quaternion.from_rotation_matrix(rotation_mat)
        pose_world_from_cam = kapture.PoseTransform(r=rotation_quat, t=position_vec)
        pose_cam_from_world = pose_world_from_cam.inverse()
        trajectories[shot['timestamp'], RGBD_SENSOR_ID] = pose_cam_from_world

    # sensors
    """
    From authors: The RGB and depth camera have not been calibrated and we can’t provide calibration parameters at the
    moment. The recorded frames correspond to the raw, uncalibrated camera images. In the KinectFusion pipeline we used
    the following default intrinsics for the depth camera: Principle point (320,240), Focal length (585,585).
    """
    sensors = kapture.Sensors()
    camera_type = kapture.CameraType.SIMPLE_PINHOLE
    camera_params = [640, 480, 585, 320, 240]  # w, h, f, cx, cy
    sensors[RGB_SENSOR_ID] = kapture.Camera(
        name=RGB_SENSOR_ID,
        camera_type=camera_type,
        camera_params=camera_params
    )
    sensors[DEPTH_SENSOR_ID] = kapture.Camera(
        name=DEPTH_SENSOR_ID,
        camera_type=camera_type,
        camera_params=camera_params,
        sensor_type='depth'
    )

    # bind camera and depth sensor into a rig
    logger.info('building rig with camera and depth sensor ...')
    rigs = kapture.Rigs()
    rigs[RGBD_SENSOR_ID, RGB_SENSOR_ID] = kapture.PoseTransform()
    rigs[RGBD_SENSOR_ID, DEPTH_SENSOR_ID] = kapture.PoseTransform()

    # import (copy) image files.
    logger.info('copying image files ...')
    image_filenames = [f for _, _, f in kapture.flatten(snapshots)]
    import_record_data_from_dir_auto(d7scenes_path, kapture_dir_path, image_filenames, images_import_method)

    # import (copy) depth map files.
    logger.info('converting depth files ...')
    depth_map_filenames = kapture.io.records.records_to_filepaths(depth_maps, kapture_dir_path)
    hide_progress = logger.getEffectiveLevel() > logging.INFO
    for depth_map_filename, depth_map_filepath_kapture in tqdm(depth_map_filenames.items(), disable=hide_progress):
        depth_map_filepath_7scenes = path.join(d7scenes_path, depth_map_filename + '.png')
        depth_map = np.array(Image.open(depth_map_filepath_7scenes))
        # change invalid depth from 65535 to 0
        depth_map[depth_map == 65535] = 0
        # depth maps are in mm in 7-Scenes; convert to meters
        depth_map = depth_map.astype(np.float32) * 1.0e-3
        kapture.io.records.records_depth_to_file(depth_map_filepath_kapture, depth_map)

    # pack into kapture format
    imported_kapture = kapture.Kapture(
        records_camera=snapshots,
        records_depth=depth_maps,
        rigs=rigs,
        trajectories=trajectories,
        sensors=sensors)

    logger.info('writing imported data ...')
    kapture_to_dir(kapture_dir_path, imported_kapture)
Ejemplo n.º 26
0
def import_robotcar_seasons(
        robotcar_path: str,  # noqa: C901: function a bit long but not too complex
        kapture_path: str,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.skip,
        import_feature_db: bool = False,
        skip_reconstruction: bool = False,
        rig_collapse: bool = False,
        use_colmap_intrinsics: bool = False,
        import_v1: bool = False) -> None:
    """
    Reads the RobotCar Seasons data and creates several kaptures with training and query data.

    :param robotcar_path: path to the RobotCar Seasons top directory
    :param kapture_path: path to the kapture top directory
    :param force_overwrite_existing: silently overwrite kapture files if they already exist.
    :param images_import_method: choose how to import actual image files.
    :param import_feature_db: if True, will import the features from the overcast-reference COLMAP database
    :param skip_reconstruction: if True, will skip importing the reconstruction part of the training data
    :param rig_collapse: if True, will replace individual camera poses with rig ('rear') poses in the trajectories
    :param use_colmap_intrinsics: if True, will keep the intrinsics found in the COLMAP reconstructions instead of
                                  the ones from the intrinsics text files
    :param import_v1: if True, will import the RobotCar Seasons v1 data only (skip the v2 training poses)
    """

    os.makedirs(kapture_path, exist_ok=True)

    cameras = import_robotcar_cameras(path.join(robotcar_path, 'intrinsics'))
    rigs = import_robotcar_rig(path.join(robotcar_path, 'extrinsics'))

    logger.info("Importing test data")
    # Test data
    image_pattern = re.compile(
        r'(?P<condition>.+)/(?P<camera>\w+)/(?P<timestamp>\d+)\.jpg')
    queries_path = path.join(robotcar_path, '3D-models', 'individual',
                             'queries_per_location')
    kapture_imported_query = {}
    for root, dirs, files in os.walk(queries_path):
        for query_file in files:
            records_camera = kapture.RecordsCamera()
            # Get list of query images
            with open(path.join(queries_path, query_file)) as f:
                for line in f:
                    matches = image_pattern.match(line)
                    image_path = line.strip()
                    if not matches:
                        logger.warning(f"Error matching line in {image_path}")
                        continue
                    matches = matches.groupdict()
                    timestamp = int(matches['timestamp'])
                    camera = str(matches['camera'])
                    # condition = str(matches['condition']) : not used ?
                    records_camera[timestamp, camera] = image_path

                (query_name, _) = query_file.split('.')
                kapture_test = kapture.Kapture(sensors=cameras,
                                               rigs=rigs,
                                               records_camera=records_camera)
                kapture_imported_query[int(
                    query_name.split('_')[-1])] = kapture_test

    # Training data
    logger.info("Importing training data")
    colmap_reconstructions_path = path.join(robotcar_path, '3D-models',
                                            'individual',
                                            'colmap_reconstructions')
    kapture_imported_training = {}
    for root, dirs, files in os.walk(colmap_reconstructions_path):
        for colmap_reconstruction in dirs:
            (loc_id, _) = colmap_reconstruction.split('_')
            kapture_reconstruction_dir = path.join(kapture_path,
                                                   f"{int(loc_id):02d}",
                                                   "mapping")
            delete_existing_kapture_files(kapture_reconstruction_dir,
                                          force_overwrite_existing)
            logger.info(f'Converting reconstruction {loc_id} to kapture ...')
            kapture_reconstruction_data = import_robotcar_colmap_location(
                robotcar_path,
                path.join(colmap_reconstructions_path, colmap_reconstruction),
                kapture_reconstruction_dir, rigs, skip_reconstruction)
            # replace intrinsics with the ones found in the text files
            if not use_colmap_intrinsics:
                kapture_reconstruction_data.sensors = cameras
            kapture_imported_training[int(
                loc_id)] = kapture_reconstruction_data

    if not import_v1:
        _import_robotcar_v2_train(robotcar_path, kapture_imported_query,
                                  kapture_imported_training, image_pattern)

    # apply rig collapse
    if rig_collapse:
        logger.info('replacing camera poses with rig poses.')
        for kapture_mapping in kapture_imported_training.values():
            kapture.rigs_recover_inplace(kapture_mapping.trajectories, rigs,
                                         ['rear'])

    # IO operations
    robotcar_image_path = path.join(robotcar_path, "images")
    for loc_id, kapture_query in kapture_imported_query.items():
        loc_id_str = f"{loc_id:02d}"
        logger.info(f'writing test data: {loc_id_str}')
        kapture_test_dir = path.join(kapture_path, loc_id_str, "query")
        delete_existing_kapture_files(kapture_test_dir,
                                      force_overwrite_existing)
        if not kapture_query.records_camera:  # all images were removed
            continue
        kapture_to_dir(kapture_test_dir, kapture_query)
        query_images = [
            f for _, _, f in kapture.flatten(kapture_query.records_camera)
        ]
        import_record_data_from_dir_auto(robotcar_image_path, kapture_test_dir,
                                         query_images, images_import_method)

    for loc_id, kapture_mapping in kapture_imported_training.items():
        loc_id_str = f"{loc_id:02d}"
        logger.info(f'writing mapping data: {loc_id_str}')
        kapture_reconstruction_dir = path.join(kapture_path, f"{loc_id:02d}",
                                               "mapping")
        kapture_to_dir(kapture_reconstruction_dir, kapture_mapping)
        mapping_images = [
            f for _, _, f in kapture.flatten(kapture_mapping.records_camera)
        ]
        import_record_data_from_dir_auto(robotcar_image_path,
                                         kapture_reconstruction_dir,
                                         mapping_images, images_import_method)

    if import_feature_db:
        _import_colmap_overcast_reference(robotcar_path, kapture_path,
                                          force_overwrite_existing)
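
A usage sketch of the RobotCar Seasons importer above; paths are hypothetical and the TransferAction import path is an assumption.

# Usage sketch for import_robotcar_seasons (hypothetical paths, assumed imports).
from kapture.io.records import TransferAction

import_robotcar_seasons(
    robotcar_path='/datasets/RobotCar-Seasons',  # hypothetical root with intrinsics/, extrinsics/, 3D-models/, images/
    kapture_path='/kaptures/robotcar',
    force_overwrite_existing=True,
    images_import_method=TransferAction.skip,
    import_feature_db=False,
    skip_reconstruction=False,
    rig_collapse=True,            # write rig ('rear') poses instead of per-camera poses
    use_colmap_intrinsics=False,
    import_v1=False)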
Ejemplo n.º 27
0
    def test_evaluation(self):
        position = [1.658, 0, 0]
        position_a = [2.658, 0, 0]
        position_b = [1.758, 0, 0]
        position_c = [10.1, 0, 0]
        position_d = [2., 0, 0]
        position_e = [6.658, 0, 0]

        rotation = quaternion.from_euler_angles(np.deg2rad(110.0), 0, 0)
        rotation_a = quaternion.from_euler_angles(np.deg2rad(111.0), 0, 0)
        rotation_b = quaternion.from_euler_angles(np.deg2rad(108.0), 0, 0)
        rotation_c = quaternion.from_euler_angles(np.deg2rad(10.0), 0, 0)
        rotation_d = quaternion.from_euler_angles(np.deg2rad(110.0), 0, 0)

        pose_gt = kapture.PoseTransform(r=rotation, t=position).inverse()
        pose_a = kapture.PoseTransform(r=rotation_a, t=position_a).inverse()
        pose_b = kapture.PoseTransform(r=rotation_b, t=position_b).inverse()
        pose_c = kapture.PoseTransform(r=rotation_c, t=position_c).inverse()
        pose_d = kapture.PoseTransform(r=rotation_d, t=position_d).inverse()
        pose_e = kapture.PoseTransform(r=None, t=[-x for x in position_e])

        kdata = kapture.Kapture(sensors=kapture.Sensors(),
                                records_camera=kapture.RecordsCamera(),
                                trajectories=kapture.Trajectories())
        kdata.sensors['cam0'] = kapture.Camera(
            kapture.CameraType.UNKNOWN_CAMERA, [25, 13])
        kdata.records_camera[(0, 'cam0')] = 'a'
        kdata.records_camera[(1, 'cam0')] = 'b'
        kdata.records_camera[(2, 'cam0')] = 'c'
        kdata.records_camera[(3, 'cam0')] = 'd'
        kdata.records_camera[(4, 'cam0')] = 'e'

        kdata.trajectories[(0, 'cam0')] = pose_a
        kdata.trajectories[(1, 'cam0')] = pose_b
        kdata.trajectories[(2, 'cam0')] = pose_c
        kdata.trajectories[(3, 'cam0')] = pose_d

        kdata2 = copy.deepcopy(kdata)
        kdata2.trajectories[(4, 'cam0')] = pose_e
        kdata2.records_camera[(5, 'cam0')] = 'f'

        kdata_gt = copy.deepcopy(kdata2)
        kdata_gt.trajectories[(0, 'cam0')] = pose_gt
        kdata_gt.trajectories[(1, 'cam0')] = pose_gt
        kdata_gt.trajectories[(2, 'cam0')] = pose_gt
        kdata_gt.trajectories[(3, 'cam0')] = pose_gt
        kdata_gt.trajectories[(4, 'cam0')] = pose_gt
        kdata_gt.trajectories[(5, 'cam0')] = pose_gt

        kdata_list = [kdata, kdata2, kdata_gt]
        intersection = {'a', 'b', 'c', 'd', 'e'}

        result1 = evaluate(kdata, kdata_gt, intersection)
        self.assertEqual(len(result1), 5)
        self.assertEqual(result1[0][0], 'a')
        self.assertAlmostEqual(result1[0][1], 1.0)
        self.assertAlmostEqual(result1[0][2], 1.0)
        self.assertEqual(result1[1][0], 'b')
        self.assertAlmostEqual(result1[1][1], 0.1)
        self.assertAlmostEqual(result1[1][2], 2.0)
        self.assertEqual(result1[2][0], 'c')
        self.assertAlmostEqual(result1[2][1], 8.442)
        self.assertAlmostEqual(result1[2][2], 100.0)
        self.assertEqual(result1[3][0], 'd')
        self.assertAlmostEqual(result1[3][1], 0.342)
        self.assertAlmostEqual(result1[3][2], 0.0)
        self.assertEqual(result1[4][0], 'e')
        self.assertTrue(math.isnan(result1[4][1]))
        self.assertTrue(math.isnan(result1[4][2]))

        result2 = evaluate(kdata2, kdata_gt, intersection)
        self.assertEqual(len(result2), 5)
        self.assertEqual(result2[0][0], 'a')
        self.assertAlmostEqual(result2[0][1], 1.0)
        self.assertAlmostEqual(result2[0][2], 1.0)
        self.assertEqual(result2[1][0], 'b')
        self.assertAlmostEqual(result2[1][1], 0.1)
        self.assertAlmostEqual(result2[1][2], 2.0)
        self.assertEqual(result2[2][0], 'c')
        self.assertAlmostEqual(result2[2][1], 8.442)
        self.assertAlmostEqual(result2[2][2], 100.0)
        self.assertEqual(result2[3][0], 'd')
        self.assertAlmostEqual(result2[3][1], 0.342)
        self.assertAlmostEqual(result2[3][2], 0.0)
        self.assertEqual(result2[4][0], 'e')
        self.assertAlmostEqual(result2[4][1], 5.0)
        self.assertTrue(math.isnan(result2[4][2]))

        bins1 = fill_bins(result1, [(0.9, 5), (10, 105)])
        self.assertEqual(len(bins1), 2)
        self.assertEqual(bins1[0][0], 0.9)
        self.assertEqual(bins1[0][1], 5)
        self.assertEqual(bins1[0][2], 2)
        self.assertEqual(bins1[1][0], 10)
        self.assertEqual(bins1[1][1], 105)
        self.assertEqual(bins1[1][2], 4)

        bins2 = fill_bins(result1, [(0.9, 5), (10, 105)])
        self.assertEqual(len(bins2), 2)
        self.assertEqual(bins2[0][0], 0.9)
        self.assertEqual(bins2[0][1], 5)
        self.assertEqual(bins2[0][2], 2)
        self.assertEqual(bins2[1][0], 10)
        self.assertEqual(bins2[1][1], 105)
        self.assertEqual(bins2[1][2], 4)

        bins3 = fill_bins(result2, [(0.9, math.nan), (10, math.nan)])
        self.assertEqual(len(bins3), 2)
        self.assertEqual(bins3[0][0], 0.9)
        self.assertTrue(math.isnan(bins3[0][1]))
        self.assertEqual(bins3[0][2], 2)
        self.assertEqual(bins3[1][0], 10)
        self.assertTrue(math.isnan(bins3[1][1]))
        self.assertEqual(bins3[1][2], 5)

        bins4 = fill_bins(result2, [(0.9, -1), (10, -1)])
        self.assertEqual(len(bins4), 2)
        self.assertEqual(bins4[0][0], 0.9)
        self.assertEqual(bins4[0][1], -1)
        self.assertEqual(bins4[0][2], 2)
        self.assertEqual(bins4[1][0], 10)
        self.assertEqual(bins4[1][1], -1)
        self.assertEqual(bins4[1][2], 5)
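
The values asserted above can be checked by hand from the poses defined at the start of the test; a quick sketch of that arithmetic (all positions lie on the x axis and all rotations are about the same axis, so the errors reduce to simple differences):

import numpy as np

# position errors = distance between estimated and ground-truth camera centers
print(np.linalg.norm(np.array([2.658, 0, 0]) - np.array([1.658, 0, 0])))  # pose_a: 1.0 m
print(np.linalg.norm(np.array([1.758, 0, 0]) - np.array([1.658, 0, 0])))  # pose_b: 0.1 m
print(np.linalg.norm(np.array([10.1, 0, 0]) - np.array([1.658, 0, 0])))   # pose_c: 8.442 m
print(np.linalg.norm(np.array([2.0, 0, 0]) - np.array([1.658, 0, 0])))    # pose_d: 0.342 m
# rotation errors = difference of the Euler angles used to build the quaternions
print(abs(111.0 - 110.0), abs(108.0 - 110.0), abs(10.0 - 110.0), abs(110.0 - 110.0))  # 1, 2, 100, 0 degrees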
Ejemplo n.º 28
0
def kapture_from_dir(
        kapture_dirpath: str,
        matches_pairsfile_path: Optional[str] = None,
        skip_list: List[Type[Union[kapture.Rigs,
                                   kapture.Trajectories,
                                   kapture.RecordsCamera,
                                   kapture.RecordsLidar,
                                   kapture.RecordsWifi,
                                   kapture.RecordsGnss,
                                   kapture.Keypoints,
                                   kapture.Descriptors,
                                   kapture.GlobalFeatures,
                                   kapture.Matches,
                                   kapture.Points3d,
                                   kapture.Observations]]] = []
) -> kapture.Kapture:
    """
    Reads and returns kapture data from a directory.

    :param kapture_dirpath: kapture directory root path
    :param matches_pairsfile_path: text file in CSV format, where each line is image_name1, image_name2, score
    :param skip_list: Input option for expert only. Skip the load of specified parts.
    :return: kapture data read
    """
    if not path.isdir(kapture_dirpath):
        raise FileNotFoundError(f'No kapture directory {kapture_dirpath}')
    csv_filepaths = {dtype: path.join(kapture_dirpath, filename)
                     for dtype, filename in CSV_FILENAMES.items()}
    data_dirpaths = {dtype: path.join(kapture_dirpath, dir_name)
                     for dtype, dir_name in kapture.io.features.FEATURES_DATA_DIRNAMES.items()}

    # keep only the types not in skip_list whose csv file or data directory exists
    kapture_data_paths = {**data_dirpaths, **csv_filepaths}  # make sure files take precedence over dirs
    kapture_loadable_data = {
        kapture_type
        for kapture_type in KAPTURE_LOADABLE_TYPES
        if kapture_type not in skip_list and path.exists(kapture_data_paths[kapture_type])
    }

    kapture_data = kapture.Kapture()
    # sensors
    sensor_ids = None
    sensors_file_path = csv_filepaths[kapture.Sensors]
    if sensors_file_path:
        logger.debug(f'loading sensors {sensors_file_path} ...')
        kapture_data.__version__ = get_version_from_csv_file(sensors_file_path)
        kapture_data.sensors = sensors_from_file(sensors_file_path)
        sensor_ids = set(kapture_data.sensors.keys()) if kapture_data.sensors is not None else set()

    if sensor_ids is None:
        # no need to continue, everything else depends on sensors
        raise FileNotFoundError(f'File {sensors_file_path} is missing or empty in {kapture_dirpath}')

    # rigs
    if kapture.Rigs in kapture_loadable_data:
        rigs_file_path = csv_filepaths[kapture.Rigs]
        logger.debug(f'loading rigs {rigs_file_path} ...')
        assert sensor_ids is not None
        kapture_data.rigs = rigs_from_file(rigs_file_path, sensor_ids)
        # update sensor_ids with rig_id
        sensor_ids.update(kapture_data.rigs.keys())

    # trajectories
    if kapture.Trajectories in kapture_loadable_data:
        trajectories_file_path = csv_filepaths[kapture.Trajectories]
        logger.debug(f'loading trajectories {trajectories_file_path} ...')
        assert sensor_ids is not None
        kapture_data.trajectories = trajectories_from_file(trajectories_file_path, sensor_ids)

    # records camera
    if kapture.RecordsCamera in kapture_loadable_data:
        records_camera_file_path = csv_filepaths[kapture.RecordsCamera]
        logger.debug(f'loading images {records_camera_file_path} ...')
        assert kapture_data.sensors is not None
        camera_sensor_ids = set([sensor_id
                                 for sensor_id in kapture_data.sensors.keys()
                                 if kapture_data.sensors[sensor_id].sensor_type == 'camera'])
        kapture_data.records_camera = records_camera_from_file(csv_filepaths[kapture.RecordsCamera], camera_sensor_ids)

    # records lidar
    if kapture.RecordsLidar in kapture_loadable_data:
        records_lidar_file_path = csv_filepaths[kapture.RecordsLidar]
        logger.debug(f'loading lidar {records_lidar_file_path} ...')
        assert kapture_data.sensors is not None
        lidar_sensor_ids = set([sensor_id
                                for sensor_id in kapture_data.sensors.keys()
                                if kapture_data.sensors[sensor_id].sensor_type == 'lidar'])
        assert lidar_sensor_ids is not None
        kapture_data.records_lidar = records_lidar_from_file(records_lidar_file_path, lidar_sensor_ids)

    # records Wifi
    if kapture.RecordsWifi in kapture_loadable_data:
        records_wifi_file_path = csv_filepaths[kapture.RecordsWifi]
        logger.debug(f'loading wifi {records_wifi_file_path} ...')
        assert kapture_data.sensors is not None
        wifi_sensor_ids = set([sensor_id
                               for sensor_id in kapture_data.sensors.keys()
                               if kapture_data.sensors[sensor_id].sensor_type == 'wifi'])
        assert wifi_sensor_ids is not None
        kapture_data.records_wifi = records_wifi_from_file(records_wifi_file_path, wifi_sensor_ids)

    # records GNSS
    if kapture.RecordsGnss in kapture_loadable_data:
        records_gnss_file_path = csv_filepaths[kapture.RecordsGnss]
        logger.debug(f'loading GNSS {records_gnss_file_path} ...')
        assert kapture_data.sensors is not None
        epsg_codes = {sensor_id: sensor.sensor_params[0]
                      for sensor_id, sensor in kapture_data.sensors.items()
                      if sensor.sensor_type == 'gnss'}
        if len(epsg_codes) > 0:
            kapture_data.records_gnss = records_gnss_from_file(records_gnss_file_path, epsg_codes)
        else:
            logger.warning('no declared GNSS sensors: all GNSS data will be ignored')

    # features
    image_filenames = set(image_name
                          for _, _, image_name in
                          kapture.flatten(kapture_data.records_camera)) \
        if kapture_data.records_camera is not None else set()

    # keypoints
    if kapture.Keypoints in kapture_loadable_data:
        logger.debug(f'loading keypoints {data_dirpaths[kapture.Keypoints]} ...')
        assert kapture_data.records_camera is not None
        kapture_data.keypoints = keypoints_from_dir(kapture_dirpath, image_filenames)

    # descriptors
    if kapture.Descriptors in kapture_loadable_data:
        logger.debug(f'loading descriptors {data_dirpaths[kapture.Descriptors]} ...')
        assert kapture_data.records_camera is not None
        kapture_data.descriptors = descriptors_from_dir(kapture_dirpath, image_filenames)

    # global_features
    if kapture.GlobalFeatures in kapture_loadable_data:
        logger.debug(f'loading global features {data_dirpaths[kapture.GlobalFeatures]} ...')
        assert kapture_data.records_camera is not None
        kapture_data.global_features = global_features_from_dir(kapture_dirpath, image_filenames)

    # matches
    if kapture.Matches in kapture_loadable_data:
        logger.debug(f'loading matches {data_dirpaths[kapture.Matches]} ...')
        assert kapture_data.records_camera is not None
        kapture_data.matches = matches_from_dir(kapture_dirpath, image_filenames, matches_pairsfile_path)

    # points3d
    if kapture.Points3d in kapture_loadable_data:
        points3d_file_path = csv_filepaths[kapture.Points3d]
        logger.debug(f'loading points 3d {points3d_file_path} ...')
        kapture_data.points3d = points3d_from_file(points3d_file_path)

    # observations
    if kapture.Observations in kapture_loadable_data:
        observations_file_path = csv_filepaths[kapture.Observations]
        logger.debug(f'loading observations {observations_file_path} ...')
        assert kapture_data.keypoints is not None
        assert kapture_data.points3d is not None
        kapture_data.observations = observations_from_file(observations_file_path, kapture_data.keypoints)

    return kapture_data
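
A usage sketch of the loader above: read a kapture directory while skipping the heavier reconstruction data. The directory path is hypothetical; the kapture.io.csv import mirrors how the loader is used in the other examples.

import kapture
from kapture.io.csv import kapture_from_dir

# Load everything except matches and observations (hypothetical kapture directory).
kapture_data = kapture_from_dir('/kaptures/my_dataset/mapping',
                                matches_pairsfile_path=None,
                                skip_list=[kapture.Matches, kapture.Observations])
if kapture_data.records_camera is not None:
    n_images = len(list(kapture.flatten(kapture_data.records_camera)))
    print(f'{n_images} images loaded')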
Ejemplo n.º 29
0
def merge_remap(kapture_list: List[kapture.Kapture], skip_list: List[Type],
                data_paths: List[str], kapture_path: str,
                images_import_method: TransferAction) -> kapture.Kapture:
    """
    Merge multiple kaptures while remapping the ids (sensor_id, rig_id) to new ones in the merged kapture.

    :param kapture_list: list of kaptures to merge.
    :param skip_list: optional types not to merge; sensors and rigs cannot be skipped.
    :param data_paths: list of root path directories, in the same order as kapture_list.
    :param kapture_path: directory root path of the merged kapture.
    :param images_import_method: choose how to import actual image files.
    :return: the merged kapture
    """
    merged_kapture = kapture.Kapture()

    # find new sensor ids / rig ids
    sensor_offset = 0
    rigs_offset = 0
    sensors_mapping = []
    rigs_mapping = []
    for every_kapture in kapture_list:
        if every_kapture.sensors is not None:
            sensors_mapping.append(
                get_sensors_mapping(every_kapture.sensors, sensor_offset))
            sensor_offset += len(every_kapture.sensors)
        else:
            sensors_mapping.append({})

        if every_kapture.rigs is not None:
            rigs_mapping.append(
                get_rigs_mapping(every_kapture.rigs, rigs_offset))
            rigs_offset += len(every_kapture.rigs)
        else:
            rigs_mapping.append({})

    # concatenate all sensors with the remapped ids
    new_sensors = merge_sensors(
        [a_kapture.sensors for a_kapture in kapture_list], sensors_mapping)
    if new_sensors:  # if merge_sensors returned an empty object, keep merged_kapture.sensors to None
        merged_kapture.sensors = new_sensors

    # concatenate all rigs with the remapped ids
    new_rigs = merge_rigs([a_kapture.rigs for a_kapture in kapture_list],
                          rigs_mapping, sensors_mapping)
    if new_rigs:  # if merge_rigs returned an empty object, keep merged_kapture.rigs to None
        merged_kapture.rigs = new_rigs

    # all fields below can be skipped with skip_list
    # we do not assign the properties when the merge evaluates to false; we keep them as None
    if kapture.Trajectories not in skip_list:
        new_trajectories = merge_trajectories(
            [a_kapture.trajectories for a_kapture in kapture_list],
            rigs_mapping, sensors_mapping)
        if new_trajectories:
            merged_kapture.trajectories = new_trajectories

    if kapture.RecordsCamera not in skip_list:
        new_records_camera = merge_records_camera(
            [a_kapture.records_camera for a_kapture in kapture_list],
            sensors_mapping)
        if new_records_camera:
            merged_kapture.records_camera = new_records_camera

        merge_records_data(
            [[
                image_name for _, _, image_name in kapture.flatten(
                    every_kapture.records_camera)
            ] if every_kapture.records_camera is not None else []
             for every_kapture in kapture_list], [
                 get_image_fullpath(data_path, image_filename=None)
                 for data_path in data_paths
             ], kapture_path, images_import_method)
    if kapture.RecordsLidar not in skip_list:
        new_records_lidar = merge_records_lidar(
            [a_kapture.records_lidar for a_kapture in kapture_list],
            sensors_mapping)
        if new_records_lidar:
            merged_kapture.records_lidar = new_records_lidar
    if kapture.RecordsWifi not in skip_list:
        new_records_wifi = merge_records_wifi(
            [a_kapture.records_wifi for a_kapture in kapture_list],
            sensors_mapping)
        if new_records_wifi:
            merged_kapture.records_wifi = new_records_wifi
    if kapture.RecordsGnss not in skip_list:
        new_records_gnss = merge_records_gnss(
            [a_kapture.records_gnss for a_kapture in kapture_list],
            sensors_mapping)
        if new_records_gnss:
            merged_kapture.records_gnss = new_records_gnss

    # for the reconstruction, except points and observations, the files are copied with shutil.copy
    # if kapture_path evaluates to False, all copies will be skipped (but classes will be filled normally)
    if kapture.Keypoints not in skip_list:
        keypoints = [a_kapture.keypoints for a_kapture in kapture_list]
        keypoints_not_none = [k for k in keypoints if k is not None]
        if len(keypoints_not_none) > 0:
            new_keypoints = merge_keypoints(keypoints, data_paths,
                                            kapture_path)
            if new_keypoints:
                merged_kapture.keypoints = new_keypoints
    if kapture.Descriptors not in skip_list:
        descriptors = [a_kapture.descriptors for a_kapture in kapture_list]
        descriptors_not_none = [k for k in descriptors if k is not None]
        if len(descriptors_not_none) > 0:
            new_descriptors = merge_descriptors(descriptors, data_paths,
                                                kapture_path)
            if new_descriptors:
                merged_kapture.descriptors = new_descriptors
    if kapture.GlobalFeatures not in skip_list:
        global_features = [
            a_kapture.global_features for a_kapture in kapture_list
        ]
        global_features_not_none = [
            k for k in global_features if k is not None
        ]
        if len(global_features_not_none) > 0:
            new_global_features = merge_global_features(
                global_features, data_paths, kapture_path)
            if new_global_features:
                merged_kapture.global_features = new_global_features
    if kapture.Matches not in skip_list:
        matches = [a_kapture.matches for a_kapture in kapture_list]
        new_matches = merge_matches(matches, data_paths, kapture_path)
        if new_matches:
            merged_kapture.matches = new_matches

    if kapture.Points3d not in skip_list and kapture.Observations not in skip_list:
        points_and_obs = [(a_kapture.points3d, a_kapture.observations)
                          for a_kapture in kapture_list]
        new_points, new_observations = merge_points3d_and_observations(
            points_and_obs)
        if new_points:
            merged_kapture.points3d = new_points
        if new_observations:
            merged_kapture.observations = new_observations
    elif kapture.Points3d not in skip_list:
        points = [a_kapture.points3d for a_kapture in kapture_list]
        new_points = merge_points3d(points)
        if new_points:
            merged_kapture.points3d = new_points
    return merged_kapture
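
A usage sketch of merge_remap, merging two kapture directories into a third one with freshly remapped sensor and rig ids; the paths are hypothetical and the csv/records import paths follow the other examples.

import kapture
from kapture.io.csv import kapture_from_dir, kapture_to_dir
from kapture.io.records import TransferAction

input_paths = ['/kaptures/session_a', '/kaptures/session_b']  # hypothetical inputs
merged_path = '/kaptures/merged'
kapture_list = [kapture_from_dir(p) for p in input_paths]
merged = merge_remap(kapture_list,
                     skip_list=[kapture.Matches],              # example: do not merge matches
                     data_paths=input_paths,
                     kapture_path=merged_path,
                     images_import_method=TransferAction.copy)
kapture_to_dir(merged_path, merged)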
Ejemplo n.º 30
0
def pyransaclib_localize_from_loaded_data(
        kapture_data: kapture.Kapture, kapture_path: str,
        tar_handlers: TarCollection, kapture_query_data: kapture.Kapture,
        output_path: str, pairsfile_path: str, inlier_threshold: float,
        number_lo_steps: int, min_num_iterations: int, max_num_iterations: int,
        refine_poses: bool, keypoints_type: Optional[str],
        duplicate_strategy: DuplicateCorrespondencesStrategy,
        rerank_strategy: RerankCorrespondencesStrategy,
        write_detailed_report: bool, force: bool) -> None:
    """
    Localize images using pyransaclib.

    :param kapture_data: loaded kapture mapping data (incl. points3d)
    :param kapture_path: path to the kapture to use
    :param tar_handlers: collection of pre-opened tar archives
    :param kapture_query_data: loaded kapture query data (query images)
    :param output_path: path to write the localization results
    :param pairsfile_path: pairs to use
    :param inlier_threshold: RANSAC inlier threshold in pixels
    :param number_lo_steps: number of local optimization iterations in LO-MSAC. Use 0 to use MSAC
    :param min_num_iterations: minimum number of RANSAC loops
    :param max_num_iterations: maximum number of RANSAC loops
    :param refine_poses: if True, refine poses with pycolmap
    :param keypoints_type: type of keypoints (and observations) to use
    :param duplicate_strategy: strategy used to handle duplicated correspondences
    :param rerank_strategy: strategy used to rerank correspondences
    :param write_detailed_report: if True, write a per-image json report under output_path
    :param force: silently overwrite output files if they already exist.
    """
    assert has_pyransaclib
    if refine_poses:
        assert has_pycolmap
    if not (kapture_data.records_camera and kapture_data.sensors
            and kapture_data.keypoints and kapture_data.matches
            and kapture_data.points3d and kapture_data.observations):
        raise ValueError('records_camera, sensors, keypoints, matches, '
                         'points3d, observations are mandatory for map+query')

    if not (kapture_query_data.records_camera and kapture_query_data.sensors):
        raise ValueError('records_camera, sensors are mandatory for query')

    if keypoints_type is None:
        keypoints_type = try_get_only_key_from_collection(
            kapture_data.keypoints)
    assert keypoints_type is not None
    assert keypoints_type in kapture_data.keypoints
    assert keypoints_type in kapture_data.matches

    if kapture_data.rigs is not None and kapture_data.trajectories is not None:
        # make sure, rigs are not used in trajectories.
        logger.info('remove rigs notation.')
        rigs_remove_inplace(kapture_data.trajectories, kapture_data.rigs)
        kapture_data.rigs.clear()

    if kapture_query_data.trajectories is not None:
        logger.warning(
            "Input query data contains trajectories: they will be ignored")
        kapture_query_data.trajectories.clear()

    os.umask(0o002)
    os.makedirs(output_path, exist_ok=True)
    delete_existing_kapture_files(output_path, force_erase=force)

    # load pairsfile
    pairs = {}
    with open(pairsfile_path, 'r') as fid:
        table = kapture.io.csv.table_from_file(fid)
        for img_query, img_map, _ in table:
            if img_query not in pairs:
                pairs[img_query] = []
            pairs[img_query].append(img_map)

    kapture_data.matches[keypoints_type].normalize()
    keypoints_filepaths = keypoints_to_filepaths(
        kapture_data.keypoints[keypoints_type], keypoints_type, kapture_path,
        tar_handlers)
    obs_for_keypoints_type = {
        point_id: per_keypoints_type_subdict[keypoints_type]
        for point_id, per_keypoints_type_subdict in
        kapture_data.observations.items()
        if keypoints_type in per_keypoints_type_subdict
    }
    point_id_from_obs = {
        (img_name, kp_id): point_id
        for point_id in obs_for_keypoints_type.keys()
        for img_name, kp_id in obs_for_keypoints_type[point_id]
    }
    query_images = [(timestamp, sensor_id, image_name)
                    for timestamp, sensor_id, image_name in kapture.flatten(
                        kapture_query_data.records_camera)]

    # kapture for localized images + pose
    trajectories = kapture.Trajectories()
    progress_bar = tqdm(total=len(query_images),
                        disable=logging.getLogger().level >= logging.CRITICAL)
    for timestamp, sensor_id, image_name in query_images:
        if image_name not in pairs:
            continue
        keypoints_filepath = keypoints_filepaths[image_name]
        kapture_keypoints_query = image_keypoints_from_file(
            filepath=keypoints_filepath,
            dsize=kapture_data.keypoints[keypoints_type].dsize,
            dtype=kapture_data.keypoints[keypoints_type].dtype)
        query_cam = kapture_query_data.sensors[sensor_id]
        assert isinstance(query_cam, kapture.Camera)
        num_keypoints = kapture_keypoints_query.shape[0]
        kapture_keypoints_query, K, distortion = get_camera_matrix_from_kapture(
            kapture_keypoints_query, query_cam)
        kapture_keypoints_query = kapture_keypoints_query.reshape(
            (num_keypoints, 2))

        cv2_keypoints_query = np.copy(kapture_keypoints_query)
        if np.count_nonzero(distortion) > 0:
            epsilon = np.finfo(np.float64).eps
            stop_criteria = (cv2.TERM_CRITERIA_MAX_ITER +
                             cv2.TERM_CRITERIA_EPS, 500, epsilon)
            cv2_keypoints_query = cv2.undistortPointsIter(
                cv2_keypoints_query,
                K,
                distortion,
                R=None,
                P=K,
                criteria=stop_criteria)
        cv2_keypoints_query = cv2_keypoints_query.reshape((num_keypoints, 2))
        # center keypoints on the principal point:
        # pyransaclib expects 2D points expressed relative to (cx, cy)
        cv2_keypoints_query[:, 0] -= K[0, 2]
        cv2_keypoints_query[:, 1] -= K[1, 2]

        kpts_query = kapture_keypoints_query if (
            refine_poses or write_detailed_report) else None
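        # gather 2D-3D correspondences: match the query keypoints against the
        # paired map images and resolve matched keypoints to 3D points via the observations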
        points2D, points2D_undistorted, points3D, stats = get_correspondences(
            kapture_data, keypoints_type, kapture_path, tar_handlers,
            image_name, pairs[image_name], point_id_from_obs, kpts_query,
            cv2_keypoints_query, duplicate_strategy, rerank_strategy)
        # compute the absolute pose with (LO-)RANSAC
        # inlier_threshold - RANSAC inlier threshold in pixels
        # ret - dictionary with the RANSAC output: 'success', 'qvec', 'tvec',
        #       'num_inliers' and 'inliers'
        ret = pyransaclib.ransaclib_localization(image_name, K[0, 0], K[1, 1],
                                                 points2D_undistorted,
                                                 points3D, inlier_threshold,
                                                 number_lo_steps,
                                                 min_num_iterations,
                                                 max_num_iterations)

        # add pose to output kapture
        if ret['success'] and ret['num_inliers'] > 0:
            pose = kapture.PoseTransform(ret['qvec'], ret['tvec'])

            if refine_poses:
                inlier_mask = np.zeros((len(points2D), ), dtype=bool)
                inlier_mask[ret['inliers']] = True
                inlier_mask = inlier_mask.tolist()
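                # rebuild a COLMAP-style camera description so that pycolmap
                # can refine the pose on the RANSAC inliers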
                col_cam_id, width, height, params, _ = get_colmap_camera(
                    query_cam)
                cfg = {
                    'model': CAMERA_MODEL_NAME_ID[col_cam_id][0],
                    'width': int(width),
                    'height': int(height),
                    'params': params
                }
                ret_refine = pycolmap.pose_refinement(pose.t_raw, pose.r_raw,
                                                      points2D, points3D,
                                                      inlier_mask, cfg)
                if ret_refine['success']:
                    pose = kapture.PoseTransform(ret_refine['qvec'],
                                                 ret_refine['tvec'])
                    logger.debug(
                        f'{image_name} refinement success, new pose: {pose}')

            if write_detailed_report:
                reprojection_error = compute_reprojection_error(
                    pose, ret['num_inliers'], ret['inliers'], points2D,
                    points3D, K, distortion)
                cache = {
                    "num_correspondences": len(points3D),
                    "num_inliers": ret['num_inliers'],
                    "inliers": ret['inliers'],
                    "reprojection_error": reprojection_error,
                    "stats": stats
                }
                cache_path = os.path.join(
                    output_path, f'pyransaclib_cache/{image_name}.json')
                save_to_json(cache, cache_path)
            trajectories[timestamp, sensor_id] = pose

        progress_bar.update(1)
    progress_bar.close()
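    # assemble the output kapture: query sensors, records and rigs, plus the
    # trajectories estimated above for the localized images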

    kapture_data_localized = kapture.Kapture(
        sensors=kapture_query_data.sensors,
        trajectories=trajectories,
        records_camera=kapture_query_data.records_camera,
        rigs=kapture_query_data.rigs)
    kapture.io.csv.kapture_to_dir(output_path, kapture_data_localized)