Example #1
    def test_type_checking(self):
        records_camera = kapture.RecordsCamera()
        valid_ts, valid_id, valid_record = 0, 'cam0', 'cam0/image0.jpg'
        invalid_ts, invalid_id, invalid_record = '0', float(0), kapture.PoseTransform()
        self.assertRaises(TypeError, records_camera.__setitem__,
                          (invalid_ts, valid_id), valid_record)
        self.assertRaises(TypeError, records_camera.__setitem__,
                          (valid_ts, invalid_id), valid_record)
        self.assertRaises(TypeError, records_camera.__setitem__,
                          (valid_ts, valid_id), invalid_record)
        self.assertRaises(TypeError, records_camera.__setitem__,
                          (invalid_ts, invalid_id), invalid_record)

        self.assertRaises(TypeError, records_camera.__setitem__, invalid_ts,
                          {valid_id: valid_record})
        self.assertRaises(TypeError, records_camera.__setitem__, valid_ts,
                          {invalid_id: valid_record})
        self.assertRaises(TypeError, records_camera.__setitem__, valid_ts,
                          {valid_id: invalid_record})
        self.assertRaises(TypeError, records_camera.__setitem__, invalid_ts,
                          valid_record)

        self.assertRaises(TypeError, records_camera.__contains__, invalid_ts,
                          valid_id)
        self.assertRaises(TypeError, records_camera.__contains__, valid_ts,
                          invalid_id)
        self.assertRaises(TypeError, records_camera.__contains__, invalid_ts,
                          invalid_id)

        self.assertRaises(TypeError, records_camera.__delitem__, invalid_ts)
        self.assertRaises(TypeError, records_camera.__delitem__,
                          (valid_ts, invalid_id))
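
The checks above pin down the RecordsCamera contract: keys are (int timestamp, str sensor_id) pairs and values are image path strings; anything else raises TypeError. A minimal sketch of that contract outside the test harness (values are illustrative):

import kapture

records = kapture.RecordsCamera()
records[0, 'cam0'] = 'cam0/image0.jpg'        # valid: (int, str) -> str
try:
    records['0', 'cam0'] = 'cam0/image0.jpg'  # invalid: timestamp must be an int
except TypeError:
    pass                                      # rejected, as the assertions above expect
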
Example #2
    def test_init_camera(self):
        timestamp0, timestamp1 = 0, 1
        device_id0, device_id1 = 'cam0', 'cam1'
        records_camera = kapture.RecordsCamera()
        records_camera[timestamp0, device_id0] = 'cam0/image000.jpg'
        kapture_data = kapture.Kapture(records_camera=records_camera)
        self.assertEqual(1, len(kapture_data.records_camera.keys()))
        self.assertEqual(1, len(kapture_data.records_camera.key_pairs()))
        self.assertIn(timestamp0, kapture_data.records_camera)
        self.assertIn(device_id0, kapture_data.records_camera[timestamp0])
        self.assertIn((timestamp0, device_id0), kapture_data.records_camera)
        self.assertEqual('cam0/image000.jpg',
                         kapture_data.records_camera[timestamp0, device_id0])
        records_camera[timestamp1, device_id0] = 'cam0/image001.jpg'
        self.assertEqual(2, len(kapture_data.records_camera.keys()))
        self.assertEqual(2, len(kapture_data.records_camera.key_pairs()))
        kapture_data.records_camera[timestamp0][
            device_id1] = 'cam1/image000.jpg'
        self.assertEqual(2, len(kapture_data.records_camera.keys()))
        self.assertEqual(3, len(kapture_data.records_camera.key_pairs()))
        records_camera[timestamp1][device_id1] = 'cam1/image001.jpg'
        self.assertEqual(2, len(kapture_data.records_camera.keys()))
        self.assertEqual(4, len(kapture_data.records_camera.key_pairs()))
        self.assertEqual('cam0/image000.jpg',
                         kapture_data.records_camera[timestamp0, device_id0])
        self.assertEqual('cam1/image000.jpg',
                         kapture_data.records_camera[timestamp0, device_id1])
        self.assertEqual('cam0/image001.jpg',
                         kapture_data.records_camera[timestamp1, device_id0])
        self.assertEqual('cam1/image001.jpg',
                         kapture_data.records_camera[timestamp1, device_id1])

        self.assertNotIn((timestamp1, 'cam2'), kapture_data.records_camera)
        self.assertNotIn((2, device_id0), kapture_data.records_camera)
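
The counts asserted above come from the two-level layout of RecordsCamera: keys() iterates timestamps, key_pairs() flattens to (timestamp, sensor_id) tuples, and indexing by a single timestamp returns the per-sensor mapping. A short sketch of that distinction, built the same way as in the test:

import kapture

records = kapture.RecordsCamera()
records[0, 'cam0'] = 'cam0/image000.jpg'
records[0, 'cam1'] = 'cam1/image000.jpg'
records[1, 'cam0'] = 'cam0/image001.jpg'
print(len(records.keys()))        # 2 timestamps (0 and 1)
print(len(records.key_pairs()))   # 3 (timestamp, sensor_id) pairs
print(records[0]['cam1'])         # 'cam1/image000.jpg'
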
Example #3
    def test_export(self):
        # the rig
        rigs = kapture.Rigs()
        rigs['rig0', get_camera_kapture_id_from_colmap_id(0)] = kapture.PoseTransform()
        rigs['rig0', get_camera_kapture_id_from_colmap_id(1)] = kapture.PoseTransform()
        # the records
        records_camera = kapture.RecordsCamera()
        records_camera[0, get_camera_kapture_id_from_colmap_id(0)] = 'camL/0000.jpg'
        records_camera[0, get_camera_kapture_id_from_colmap_id(1)] = 'camR/0000.jpg'
        records_camera[1, get_camera_kapture_id_from_colmap_id(0)] = 'camL/0001.jpg'
        records_camera[1, get_camera_kapture_id_from_colmap_id(1)] = 'camR/0001.jpg'
        # expect
        expected_rigs = [{
            "cameras": [
                {
                    "camera_id": 0,
                    "image_prefix": "camL"
                },
                {
                    "camera_id": 1,
                    "image_prefix": "camR"
                }
            ],
            "ref_camera_id": 0
        }]

        colmap_camera_ids = {get_camera_kapture_id_from_colmap_id(i): i for i in range(2)}
        colmap_rigs = export_colmap_rig_json(rigs, records_camera, colmap_camera_ids)
        self.assertEqual(colmap_rigs, expected_rigs)
Example #4
    def test_import(self):
        colmap_rigs = [{
            "cameras": [{
                "camera_id": 0,
                "image_prefix": "camL"
            }, {
                "camera_id": 1,
                "image_prefix": "camR"
            }],
            "ref_camera_id":
            0
        }]

        rigs_kapture, reconstructed_images, reconstructed_trajectories = import_colmap_rig_json(
            rigs_colmap=colmap_rigs)
        self.assertEqual([('rig0', get_camera_kapture_id_from_colmap_id(0)),
                          ('rig0', get_camera_kapture_id_from_colmap_id(1))],
                         rigs_kapture.key_pairs())
        self.assertIsNone(reconstructed_images)
        self.assertIsNone(reconstructed_trajectories)

        # the records
        images = kapture.RecordsCamera()
        images[0, get_camera_kapture_id_from_colmap_id(0)] = 'camL/0000.jpg'
        images[1, get_camera_kapture_id_from_colmap_id(1)] = 'camR/0000.jpg'
        images[2, get_camera_kapture_id_from_colmap_id(0)] = 'camL/0001.jpg'
        images[3, get_camera_kapture_id_from_colmap_id(1)] = 'camR/0001.jpg'
        rigs_kapture, reconstructed_images, reconstructed_trajectories = import_colmap_rig_json(
            rigs_colmap=colmap_rigs, images=images)
        # check timestamps has been recovered.
        self.assertEqual([(0, get_camera_kapture_id_from_colmap_id(0)),
                          (0, get_camera_kapture_id_from_colmap_id(1)),
                          (1, get_camera_kapture_id_from_colmap_id(0)),
                          (1, get_camera_kapture_id_from_colmap_id(1))],
                         reconstructed_images.key_pairs())

        # trajectories
        trajectories = kapture.Trajectories()
        trajectories[0, get_camera_kapture_id_from_colmap_id(0)] = kapture.PoseTransform()
        trajectories[1, get_camera_kapture_id_from_colmap_id(1)] = kapture.PoseTransform()
        trajectories[2, get_camera_kapture_id_from_colmap_id(0)] = kapture.PoseTransform()
        trajectories[3, get_camera_kapture_id_from_colmap_id(1)] = kapture.PoseTransform()
        rigs_kapture, reconstructed_images, reconstructed_trajectories = import_colmap_rig_json(
            rigs_colmap=colmap_rigs, images=images, trajectories=trajectories)
        self.assertEqual([(0, get_camera_kapture_id_from_colmap_id(0)),
                          (0, get_camera_kapture_id_from_colmap_id(1)),
                          (1, get_camera_kapture_id_from_colmap_id(0)),
                          (1, get_camera_kapture_id_from_colmap_id(1))],
                         reconstructed_trajectories.key_pairs())
Example #5
def import_extended_cmu_seasons_images(
    image_list_file_path: str
) -> Tuple[kapture.RecordsCamera, kapture.Trajectories]:
    """
    Read image list, name.jpg or name.jpg qw qx qy qz cx cy cz

    :param image_list_file_path: path to the image list file
    :return: kapture images and trajectories
    """

    records_camera = kapture.RecordsCamera()
    trajectories = kapture.Trajectories()

    # name.jpg qw qx qy qz cx cy cz
    # or
    # name.jpg
    with open(image_list_file_path) as fin:
        table = fin.readlines()
        # remove comment lines
        table = (line for line in table if not line.startswith('#'))
        # remove empty lines
        table = (line for line in table if line.strip())
        # trim trailing EOL
        table = (line.rstrip("\n\r") for line in table)
        # split space
        table = (re.split(r'\s+', line) for line in table)
        # remove empty split
        table = ([s for s in line if s] for line in table)

    image_pattern = re.compile(ECMU_IMAGE_PATTERN)
    for line in table:
        image_name = line[0]
        timestamp, camera_id = _parse_image_name(image_name, image_pattern)
        if camera_id is None or timestamp is None:
            continue

        records_camera[(timestamp, camera_id)] = image_name
        if len(line) > 1:  # also contains trajectory
            qw, qx, qy, qz, cx, cy, cz = line[1:]
            quaternion_array = float_array_or_none([qw, qx, qy, qz])
            assert quaternion_array is not None
            center_array = float_array_or_none([cx, cy, cz])
            assert center_array is not None
            rotation = quaternion.from_float_array(quaternion_array)
            # C = -R^T * t -> t = -R * C
            translation = np.matmul(
                quaternion.as_rotation_matrix(rotation),
                -1 * np.array(center_array, dtype=np.float64))
            pose = kapture.PoseTransform(r=rotation, t=translation)
            trajectories[(timestamp, camera_id)] = pose

    # if no trajectories were added (query), prefer None
    if not trajectories:
        trajectories = None

    return records_camera, trajectories
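
The trajectory branch above converts a camera center C (as stored in the Extended CMU Seasons list) into the translation t of a world-to-camera pose with t = -R * C, as the inline comment states. A standalone sketch of just that conversion, with made-up numbers:

import numpy as np
import quaternion  # numpy-quaternion, same module as used above

qw, qx, qy, qz = 1.0, 0.0, 0.0, 0.0   # identity rotation (illustrative)
center = np.array([1.0, 2.0, 3.0])    # camera center C in world coordinates (illustrative)
rotation = quaternion.from_float_array([qw, qx, qy, qz])
translation = np.matmul(quaternion.as_rotation_matrix(rotation), -center)  # t = -R * C
# pose = kapture.PoseTransform(r=rotation, t=translation), as in the loop above
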
Example #6
def import_image_folder(
        images_path: str,
        kapture_path: str,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.skip) -> None:
    """
    Imports the images of a folder to a kapture. This creates only images and cameras.

    :param images_path: path to directory containing the images.
    :param kapture_path: path to kapture root directory.
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    """
    os.makedirs(kapture_path, exist_ok=True)
    delete_existing_kapture_files(kapture_path,
                                  force_erase=force_overwrite_existing)

    cameras = kapture.Sensors()
    images = kapture.RecordsCamera()

    file_list = [
        os.path.relpath(os.path.join(dirpath, filename), images_path)
        for dirpath, dirs, filenames in os.walk(images_path)
        for filename in filenames
    ]
    file_list = sorted(file_list)

    logger.info('starting conversion...')
    for n, filename in enumerate(file_list):
        # test if file is a valid image
        try:
            # lazy load
            with Image.open(path.join(images_path, filename)) as im:
                width, height = im.size
                model_params = [width, height]
        except (OSError, PIL.UnidentifiedImageError):
            # It is not a valid image: skip it
            logger.info(f'Skipping invalid image file {filename}')
            continue

        camera_id = f'sensor{n}'
        images[(n, camera_id)] = path_secure(filename)  # don't forget windows
        cameras[camera_id] = kapture.Camera(kapture.CameraType.UNKNOWN_CAMERA,
                                            model_params)

    # import (copy) image files.
    logger.info('import image files ...')
    filename_list = [f for _, _, f in kapture.flatten(images)]
    import_record_data_from_dir_auto(images_path, kapture_path, filename_list,
                                     images_import_method)

    # pack into kapture format
    imported_kapture = kapture.Kapture(sensors=cameras, records_camera=images)
    logger.info('writing imported data...')
    kapture_to_dir(kapture_path, imported_kapture)
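
A hypothetical invocation of the importer above; the paths are placeholders and TransferAction.copy is one of the transfer modes already used in these examples:

import_image_folder(images_path='/data/my_images',            # folder of plain image files (placeholder)
                    kapture_path='/data/my_images_kapture',   # output kapture root (placeholder)
                    force_overwrite_existing=True,
                    images_import_method=TransferAction.copy)
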
Example #7
    def test_init_camera(self):
        timestamp0, timestamp1 = 0, 1
        device_id0, device_id1 = 'cam0', 'cam1'
        record_cam0_image0 = 'cam0/image000.jpg'
        record_cam0_image1 = 'cam0/image001.jpg'
        record_cam1_image0 = 'cam1/image000.jpg'
        record_cam1_image1 = 'cam1/image001.jpg'
        # Test insertions
        records_camera = kapture.RecordsCamera()
        records_camera[timestamp0, device_id0] = record_cam0_image0
        kapture_data = kapture.Kapture(records_camera=records_camera)
        self.assertEqual(1, len(kapture_data.records_camera.keys()))
        self.assertEqual(1, len(kapture_data.records_camera.key_pairs()))
        self.assertIn(timestamp0, kapture_data.records_camera)
        self.assertIn(device_id0, kapture_data.records_camera[timestamp0])
        self.assertIn((timestamp0, device_id0), kapture_data.records_camera)
        self.assertEqual(record_cam0_image0,
                         kapture_data.records_camera[timestamp0, device_id0])
        records_camera[timestamp1, device_id0] = record_cam0_image1
        self.assertEqual(2, len(kapture_data.records_camera.keys()))
        self.assertEqual(2, len(kapture_data.records_camera.key_pairs()))
        kapture_data.records_camera[timestamp0][
            device_id1] = record_cam1_image0
        self.assertEqual(2, len(kapture_data.records_camera.keys()))
        self.assertEqual(3, len(kapture_data.records_camera.key_pairs()))
        records_camera[timestamp1][device_id1] = record_cam1_image1
        self.assertEqual(2, len(kapture_data.records_camera.keys()))
        self.assertEqual(4, len(kapture_data.records_camera.key_pairs()))
        self.assertEqual(record_cam0_image0,
                         kapture_data.records_camera[timestamp0, device_id0])
        self.assertEqual(record_cam1_image0,
                         kapture_data.records_camera[timestamp0, device_id1])
        self.assertEqual(record_cam0_image1,
                         kapture_data.records_camera[timestamp1, device_id0])
        self.assertEqual(record_cam1_image1,
                         kapture_data.records_camera[timestamp1, device_id1])

        self.assertNotIn((timestamp1, 'cam2'), kapture_data.records_camera)
        self.assertNotIn((2, device_id0), kapture_data.records_camera)
        self.assertEqual(kapture_data.records_camera.sensors_ids,
                         {device_id0, device_id1})

        # Test deletion
        del kapture_data.records_camera[(timestamp0, device_id0)]
        self.assertEqual(2, len(kapture_data.records_camera.keys()))
        self.assertEqual(3, len(kapture_data.records_camera.key_pairs()))
        del kapture_data.records_camera[(timestamp0, device_id1)]
        self.assertEqual(1, len(kapture_data.records_camera.keys()))
        self.assertEqual(2, len(kapture_data.records_camera.key_pairs()))
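
The deletion assertions above imply that removing the last record of a timestamp also drops the timestamp entry itself. A minimal sketch of that behaviour, assuming the same API:

import kapture

records = kapture.RecordsCamera()
records[0, 'cam0'] = 'cam0/image000.jpg'
records[0, 'cam1'] = 'cam1/image000.jpg'
del records[(0, 'cam0')]
print(len(records.keys()))   # 1: timestamp 0 still holds the 'cam1' record
del records[(0, 'cam1')]
print(len(records.keys()))   # 0: the now-empty timestamp is removed
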
Example #8
    def save_to_kapture(self, trajectory_rig_id: Optional[str] = None) -> None:
        """
        Save the data in kapture format.

        :param trajectory_rig_id: the rig identifier of the trajectory points
        """
        # Convert pose info to trajectories
        if len(self.poses_info) > 0 and trajectory_rig_id is None:
            raise ValueError("Must provide rig identifier for trajectory")
        trajectories = kapture.Trajectories() if len(self.poses_info) > 0 else None
        for pose_info in self.poses_info:
            t = pose_info.timestamp.to_nsec()
            ros_translation = pose_info.pose6d.position
            translation = [
                ros_translation.x, ros_translation.y, ros_translation.z
            ]
            ros_rotation = pose_info.pose6d.orientation
            rotation = np.quaternion(ros_rotation.w, ros_rotation.x,
                                     ros_rotation.y, ros_rotation.z)
            # Transform the pose from the ROS body coordinate system defined here
            # https://www.ros.org/reps/rep-0103.html#axis-orientation
            # to the Kapture coordinate system

            # ros pose seems to be the inverse of the extrinsic matrix
            # i.e world position and rig orientation with respect to the world axis
            pose6d = kapture.PoseTransform.compose([
                pose_kapture_from_ros,
                kapture.PoseTransform(rotation, translation).inverse(),
                pose_ros_from_kapture
            ])
            trajectories[(t, trajectory_rig_id)] = pose6d
        self.logger.info(f'Saving {len(list(flatten(trajectories)))} poses')
        # Convert image info to kapture image
        records_camera = kapture.RecordsCamera()
        for image_info in self.images_info:
            t = image_info.timestamp.to_nsec()
            records_camera[(t, image_info.camera_name)] = image_info.filename
        self.logger.info(
            f'Saving {len(list(flatten(records_camera)))} camera records')

        kapture_data = kapture.Kapture(rigs=self._rigs,
                                       sensors=self._sensors,
                                       records_camera=records_camera,
                                       trajectories=trajectories)
        self.logger.info(f'Saving to kapture {self._kapture_path}')
        kcsv.kapture_to_dir(self._kapture_path, kapture_data)
        self.logger.info('Done')
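
The coordinate-system handling above is a conjugation: the ROS pose (world position and rig orientation) is inverted into a rig-from-world transform and then wrapped between two fixed change-of-basis poses. A reduced sketch with identity placeholders for those change-of-basis poses (the placeholders are assumptions, not the values used above):

import kapture

pose_kapture_from_ros = kapture.PoseTransform()   # placeholder change of basis
pose_ros_from_kapture = kapture.PoseTransform()   # placeholder change of basis
pose_world_from_rig = kapture.PoseTransform(r=[1.0, 0.0, 0.0, 0.0], t=[1.0, 2.0, 3.0])  # illustrative ROS pose
pose6d = kapture.PoseTransform.compose([
    pose_kapture_from_ros,
    pose_world_from_rig.inverse(),   # kapture trajectories store rig-from-world
    pose_ros_from_kapture,
])
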
Example #9
def _import_images(input_json, image_action, kapture_images_path,
                   openmvg_images_dir, root_path, device_identifiers,
                   timestamp_for_pose):
    records_camera = kapture.RecordsCamera()
    if input_json.get(VIEWS):
        views = input_json[VIEWS]
        if image_action == TransferAction.root_link:
            # Do a unique images directory link
            # kapture/<records_dir>/openmvg_top_images_directory -> openmvg_root_path
            kapture_records_path = get_image_fullpath(kapture_images_path)
            os.makedirs(kapture_records_path, exist_ok=True)
            os.symlink(root_path,
                       path.join(kapture_records_path, openmvg_images_dir))
        logger.info(f'Importing {len(views)} images')
        # Progress bar only in debug or info level
        if image_action != TransferAction.skip and image_action != TransferAction.root_link \
                and logger.getEffectiveLevel() <= logging.INFO:
            progress_bar = tqdm(total=len(views))
        else:
            progress_bar = None
        for view in views:
            input_data = view[VALUE][PTR_WRAPPER][DATA]
            pose_id = input_data[ID_POSE]
            # Both values should be the same (?)
            if input_data[ID_VIEW]:
                timestamp = input_data[ID_VIEW]
            else:
                timestamp = view[KEY]
            device_id = str(input_data[ID_INTRINSIC])  # device_id must be a string for kapture
            device_identifiers[pose_id] = device_id
            timestamp_for_pose[pose_id] = timestamp

            kapture_filename = _import_image_file(input_data,
                                                  openmvg_images_dir,
                                                  root_path,
                                                  kapture_images_path,
                                                  image_action)

            progress_bar and progress_bar.update(1)

            key = (timestamp, device_id)  # tuple of int,str
            records_camera[key] = path_secure(kapture_filename)
        progress_bar and progress_bar.close()
    return records_camera
Example #10
def sub_kapture_from_img_list(kdata, kdata_path, img_list, pairs):
    trajectories = kapture.Trajectories()
    sensors = kapture.Sensors()
    records = kapture.RecordsCamera()
    keypoints = kapture.Keypoints(kdata.keypoints._tname,
                                  kdata.keypoints._dtype,
                                  kdata.keypoints._dsize)
    if kdata.descriptors is not None:
        descriptors = kapture.Descriptors(kdata.descriptors._tname,
                                          kdata.descriptors._dtype,
                                          kdata.descriptors._dsize)
    else:
        descriptors = None
    matches = kapture.Matches()

    timestamp_sensor_id_from_image_name = {
        img_name: (timestamp, sensor_id)
        for timestamp, sensor_id, img_name in kapture.flatten(
            kdata.records_camera)
    }
    for img in img_list:
        timestamp, sensor_id = timestamp_sensor_id_from_image_name[img]
        pose = kdata.trajectories[timestamp][sensor_id]
        sensors[sensor_id] = kdata.sensors[sensor_id]
        records[timestamp, sensor_id] = img
        trajectories[timestamp, sensor_id] = pose
        keypoints.add(img)
        if kdata.descriptors is not None:
            descriptors.add(img)

    for i in pairs:
        image_matches_filepath = get_matches_fullpath((i[0], i[1]), kdata_path)
        if os.path.exists(image_matches_filepath):
            matches.add(i[0], i[1])
    matches.normalize()

    return kapture.Kapture(sensors=sensors,
                           trajectories=trajectories,
                           records_camera=records,
                           descriptors=descriptors,
                           keypoints=keypoints,
                           matches=matches)
Example #11
File: csv.py Project: zebrajack/kapture
def records_camera_from_file(filepath: str, camera_ids: Optional[Set[str]] = None) -> kapture.RecordsCamera:
    """
    Reads records_camera from CSV file.

    :param filepath: input file path
    :param camera_ids: input set of valid camera device ids.
                        If the records_camera contains unknown devices, they will be ignored.
                        If not given, all cameras are loaded.
    :return: camera records
    """
    records_camera = kapture.RecordsCamera()
    with open(filepath) as file:
        table = table_from_file(file)
        # timestamp, device_id, image_path
        for timestamp, device_id, image_path in table:
            if camera_ids is not None and device_id not in camera_ids:
                # just ignore
                continue
            records_camera[(int(timestamp), str(device_id))] = image_path
    return records_camera
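
The reader above expects one "timestamp, device_id, image_path" row per record and casts the first two fields itself. A hypothetical call with a camera filter; the file path and content are placeholders:

# records_camera.txt rows (illustrative):
#   0, cam0, cam0/image000.jpg
#   1, cam0, cam0/image001.jpg
#   1, cam1, cam1/image001.jpg   <- dropped when camera_ids={'cam0'}
records_camera = records_camera_from_file('/some/kapture/records_camera.txt',
                                          camera_ids={'cam0'})
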
Example #12
def merge_records_camera(
    records_camera_list: List[Optional[kapture.RecordsCamera]]
) -> kapture.RecordsCamera:
    """
    Merge several camera records lists. For camera record with the same timestamp and sensor identifier,
     keep only the first one.

    :param records_camera_list: list of camera records
    :return: merged camera records
    """
    assert len(records_camera_list) > 0

    merged_records_camera = kapture.RecordsCamera()
    for records_camera in records_camera_list:
        if records_camera is None:
            continue
        for timestamp, sensor_id, filename in kapture.flatten(records_camera):
            if (timestamp, sensor_id) in merged_records_camera:
                continue
            merged_records_camera[(timestamp, sensor_id)] = filename
    return merged_records_camera
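
The keep-the-first rule from the docstring shows up with two overlapping inputs; a minimal sketch assuming the same helper:

import kapture

a = kapture.RecordsCamera()
a[0, 'cam0'] = 'a/image000.jpg'
b = kapture.RecordsCamera()
b[0, 'cam0'] = 'b/image000.jpg'   # same (timestamp, sensor) key as in a
b[1, 'cam0'] = 'b/image001.jpg'
merged = merge_records_camera([a, b])
print(merged[0, 'cam0'])          # 'a/image000.jpg' -- the first occurrence wins
print(merged[1, 'cam0'])          # 'b/image001.jpg'
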
Example #13
def sub_kapture_from_img_list(kdata, img_list, pairs, keypoints_type, descriptors_type):
    trajectories = kapture.Trajectories()
    sensors = kapture.Sensors()
    records = kapture.RecordsCamera()
    keypoints = kapture.Keypoints(kdata.keypoints[keypoints_type].type_name,
                                  kdata.keypoints[keypoints_type].dtype,
                                  kdata.keypoints[keypoints_type].dsize)
    if kdata.descriptors is not None and descriptors_type in kdata.descriptors:
        descriptors = kapture.Descriptors(kdata.descriptors[descriptors_type].type_name,
                                          kdata.descriptors[descriptors_type].dtype,
                                          kdata.descriptors[descriptors_type].dsize,
                                          kdata.descriptors[descriptors_type].keypoints_type,
                                          kdata.descriptors[descriptors_type].metric_type)
    else:
        descriptors = None
    matches = kapture.Matches()

    timestamp_sensor_id_from_image_name = {img_name: (timestamp, sensor_id) for timestamp, sensor_id, img_name in
                                           kapture.flatten(kdata.records_camera)}
    for img in img_list:
        timestamp, sensor_id = timestamp_sensor_id_from_image_name[img]
        sensors[sensor_id] = kdata.sensors[sensor_id]
        records[timestamp, sensor_id] = img
        if (timestamp, sensor_id) in kdata.trajectories:
            pose = kdata.trajectories[timestamp][sensor_id]
            trajectories[timestamp, sensor_id] = pose
        keypoints.add(img)
        if kdata.descriptors is not None:
            descriptors.add(img)

    for i in pairs:
        if i in kdata.matches[keypoints_type]:
            matches.add(i[0], i[1])
    matches.normalize()

    return kapture.Kapture(sensors=sensors, trajectories=trajectories, records_camera=records,
                           descriptors={descriptors_type: descriptors},
                           keypoints={keypoints_type: keypoints},
                           matches={keypoints_type: matches})
Example #14
def get_images_and_trajectories_from_database(
    database: COLMAPDatabase
) -> Tuple[kapture.RecordsCamera, kapture.Trajectories]:
    """
    Creates records_camera and trajectories from colmap images table
    In trajectories, timestamps are made up from colmap image id.

    :param database: colmap database
    :return: kapture records_camera and trajectories
    """
    logger.info('parsing images ...')
    kapture_images = kapture.RecordsCamera()
    kapture_trajectories = kapture.Trajectories()
    hide_progressbar = logger.getEffectiveLevel() > logging.INFO
    for image_id, name, camera_id, prior_qw, prior_qx, prior_qy, prior_qz, prior_tx, prior_ty, prior_tz \
            in tqdm(database.execute('SELECT image_id, name, camera_id, '
                                     'prior_qw, prior_qx, prior_qy, prior_qz, '
                                     'prior_tx, prior_ty, prior_tz  FROM images;'
                                     ), disable=hide_progressbar):
        # images
        timestamp = int(image_id)
        camera_id = get_camera_kapture_id_from_colmap_id(camera_id)
        kapture_images[timestamp, camera_id] = name
        # trajectories
        prior_q = [prior_qw, prior_qx, prior_qy, prior_qz]
        prior_t = [prior_tx, prior_ty, prior_tz]
        # do not register the pose part if its invalid.
        is_undefined = all(v is None for v in prior_q + prior_t)
        if is_undefined:
            # just ignore
            continue
        prior_pose = kapture.PoseTransform(prior_q, prior_t)
        kapture_trajectories[timestamp, camera_id] = prior_pose

    if len(kapture_trajectories) == 0:
        # if there is no pose at all, just don't bother.
        kapture_trajectories = None

    return kapture_images, kapture_trajectories
Example #15
def merge_records_camera(
        records_camera_list: List[Optional[kapture.RecordsCamera]],
        sensor_mappings: List[Dict[str, str]]) -> kapture.RecordsCamera:
    """
    Merge several camera records list into one list with new identifiers for the sensors.

    :param records_camera_list: list of camera records to merge
    :param sensor_mappings: mapping of the sensor identifiers to their new identifiers
    :return: merged camera records
    """
    assert len(records_camera_list) > 0
    assert len(records_camera_list) == len(sensor_mappings)

    merged_records_camera = kapture.RecordsCamera()
    for records_camera, sensor_mapping in zip(records_camera_list,
                                              sensor_mappings):
        if records_camera is None:
            continue
        for timestamp, sensor_id, filename in kapture.flatten(records_camera):
            new_sensor_id = sensor_mapping[sensor_id]
            merged_records_camera[(timestamp, new_sensor_id)] = filename
    return merged_records_camera
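
Unlike the first merge_records_camera above, this variant renames sensors with a per-input mapping, so records that would otherwise collide end up under distinct identifiers. A minimal sketch with made-up ids:

import kapture

a = kapture.RecordsCamera()
a[0, 'cam0'] = 'a/image000.jpg'
b = kapture.RecordsCamera()
b[0, 'cam0'] = 'b/image000.jpg'
merged = merge_records_camera([a, b], [{'cam0': 'cam0'}, {'cam0': 'cam1'}])
print(sorted(merged.key_pairs()))  # [(0, 'cam0'), (0, 'cam1')] -- no collision after renaming
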
Example #16
def import_robotcar_colmap_location(
        robotcar_path: str, colmap_reconstruction_fullpath: path,
        kapture_path: str, rigs: kapture.Rigs,
        skip_reconstruction: bool) -> kapture.Kapture:
    """
    Import robotcar data for one location from colmap reconstruction
    :param robotcar_path: path to the robotcar top directory
    :param colmap_reconstruction_fullpath: path to the colmap reconstruction directory
    :param kapture_path: path to the kapture top directory
    :param rigs: kapture rigs to modify
    :param skip_reconstruction: if True, will not add the reconstruction
    :return: a kapture object
    """

    # First, import Colmap reconstruction for given location
    kapture_data = import_colmap(
        kapture_dirpath=kapture_path,
        colmap_reconstruction_dirpath=colmap_reconstruction_fullpath,
        colmap_images_dirpath=path.join(robotcar_path, "images"),
        skip_reconstruction=skip_reconstruction,
        images_import_strategy=TransferAction.skip
    )  # since filenames are incorrect

    # Post processing:
    # - use correct names for cameras
    # - model was built with PNG files, but we have JPG
    # - recover proper timestamps
    # - recover rig

    # Fix sensors.txt
    camera_mapping = {
        'cam_00001': 'left',
        'cam_00002': 'rear',
        'cam_00003': 'right'
    }
    new_cameras = kapture.Sensors()
    for cam_id in kapture_data.sensors:
        new_cameras[camera_mapping[cam_id]] = kapture_data.sensors[cam_id]
    kapture_data.sensors = new_cameras

    if not skip_reconstruction:
        # Fix keypoints
        # Need to rename .png.kpt to .jpg.kpt files and that's all
        for root, dirs, files in os.walk(kapture_path):
            for file in files:
                if file.endswith('.png.kpt'):
                    os.rename(
                        path.join(root, file),
                        path.join(root, file.replace(".png.kpt", ".jpg.kpt")))

        # observations.txt: png -> jpg
        new_observations = kapture.Observations()
        for point3d_idx in kapture_data.observations:
            for image_path, keypoint_id in kapture_data.observations[
                    point3d_idx]:
                new_observations.add(point3d_idx,
                                     image_path.replace(".png", ".jpg"),
                                     int(keypoint_id))
        kapture_data.observations = new_observations

    # records_camera.txt
    # timestamps, png->jpg
    new_records_camera = kapture.RecordsCamera()
    records_camera_pattern = re.compile(r'.*/(?P<timestamp>\d+)\.png')
    ts_mapping = {}
    for ts, shot in kapture_data.records_camera.items():
        for cam_id, image_path in shot.items():
            matches = records_camera_pattern.match(image_path)
            if not matches:
                continue
            matches = matches.groupdict()
            timestamp = int(matches['timestamp'])
            ts_mapping[ts] = timestamp
            new_path = image_path.replace(".png", ".jpg")
            new_records_camera[timestamp, camera_mapping[cam_id]] = new_path
    kapture_data.records_camera = new_records_camera

    # trajectories.txt
    new_trajectories = kapture.Trajectories()
    # First recover timestamps and camera names
    for ts, sensor_id in sorted(kapture_data.trajectories.key_pairs()):
        new_trajectories[ts_mapping[ts], camera_mapping[sensor_id]] = \
            kapture_data.trajectories[ts, sensor_id]

    kapture_data.trajectories = new_trajectories
    kapture_data.rigs = rigs

    return kapture_data
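
The records fix above recovers the real capture timestamp from the PNG filename with a regular expression and switches the extension to JPG. A standalone sketch of that extraction (the path is illustrative):

import re

records_camera_pattern = re.compile(r'.*/(?P<timestamp>\d+)\.png')
image_path = 'rear/1417176515547872.png'            # illustrative RobotCar-style path
matches = records_camera_pattern.match(image_path)
timestamp = int(matches.groupdict()['timestamp'])   # 1417176515547872
new_path = image_path.replace('.png', '.jpg')       # 'rear/1417176515547872.jpg'
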
Example #17
def import_7scenes(d7scenes_path: str,
                   kapture_dir_path: str,
                   force_overwrite_existing: bool = False,
                   images_import_method: TransferAction = TransferAction.skip,
                   partition: Optional[str] = None
                   ) -> None:
    """
    Imports RGB-D Dataset 7-Scenes dataset and save them as kapture.

    :param d7scenes_path: path to the 7scenes sequence root path
    :param kapture_dir_path: path to kapture top directory
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    :param partition: if specified = 'mapping' or 'query'. Requires d7scenes_path/TestSplit.txt or TrainSplit.txt
                    to exists.
    """
    os.makedirs(kapture_dir_path, exist_ok=True)
    delete_existing_kapture_files(kapture_dir_path, force_erase=force_overwrite_existing)

    logger.info('loading all content ...')

    d7s_filename_re = re.compile(r'((?P<sequence>.+)/)?frame-(?P<frame_id>\d{6})\.(?P<suffix>\w*)\.(?P<ext>\w*)')

    # populate all relevant files
    d7s_filenames = (path_secure(path.relpath(path.join(dp, fn), d7scenes_path))
                     for dp, _, fs in os.walk(d7scenes_path) for fn in fs)

    logger.info('populating 7-scenes files ...')
    d7s_filenames = {filename: d7s_filename_re.search(filename).groupdict()
                     for filename in sorted(d7s_filenames)
                     if d7s_filename_re.search(filename)}

    # reorg as shot[seq, id] = {color: , depth: , pose: , ...}
    shots = {}
    for timestamp, (filename, file_attribs) in enumerate(d7s_filenames.items()):
        shot_id = (file_attribs.get('sequence'), file_attribs['frame_id'])
        shots.setdefault(shot_id, {})[file_attribs['suffix']] = filename

    # fake timestamps
    for timestamp, shot_id in enumerate(shots):
        shots[shot_id]['timestamp'] = timestamp

    # if given, filter partition
    if partition is not None:
        # read the authors split file
        partition_filepath = path.join(d7scenes_path, PARTITION_FILENAMES[partition])
        if not path.isfile(partition_filepath):
            raise FileNotFoundError(f'partition file is missing: {partition_filepath}.')
        with open(partition_filepath, 'rt') as file:
            split_sequences = [f'seq-{int(seq.strip()[len("sequence"):]):02}' for seq in file.readlines()]
        assert len(split_sequences) > 0
        # filter out
        shots = {(seq, frame): shot
                 for (seq, frame), shot in shots.items()
                 if seq in split_sequences}

    if len(shots) == 0:
        raise FileNotFoundError('no file found: make sure the path to 7scenes sequence is valid.')

    # eg. shots['seq-01', '000000'] =
    #       {
    #           'color': 'seq-01/frame-000000.color.jpg',
    #           'depth': 'seq-01/frame-000000.depth.png',
    #           'pose': 'seq-01/frame-000000.pose.txt',
    #           'timestamp': 0}

    # images + depth maps
    logger.info('populating image and depth maps files ...')
    snapshots = kapture.RecordsCamera()
    depth_maps = kapture.RecordsDepth()
    for shot in shots.values():
        snapshots[shot['timestamp'], RGB_SENSOR_ID] = shot['color']
        kapture_depth_map_filename = shot['depth'][:-len('.png')]  # kapture depth files are not png
        depth_maps[shot['timestamp'], DEPTH_SENSOR_ID] = kapture_depth_map_filename
        kapture_registered_depth_map_filename = shot['depth'][:-len('.png')] + '.reg'  # kapture depth files are not png
        depth_maps[shot['timestamp'], REG_DEPTH_SENSOR_ID] = kapture_registered_depth_map_filename

    # poses
    logger.info('import poses files ...')
    trajectories = kapture.Trajectories()
    for shot in shots.values():
        pose_filepath = path.join(d7scenes_path, shot['pose'])
        pose_mat = np.loadtxt(pose_filepath)  # camera-to-world, 4×4 matrix in homogeneous coordinates
        rotation_mat = pose_mat[0:3, 0:3]
        position_vec = pose_mat[0:3, 3]
        rotation_quat = quaternion.from_rotation_matrix(rotation_mat)
        pose_world_from_cam = kapture.PoseTransform(r=rotation_quat, t=position_vec)
        pose_cam_from_world = pose_world_from_cam.inverse()
        trajectories[shot['timestamp'], RGBD_SENSOR_ID] = pose_cam_from_world

    # sensors
    """
    From authors: The RGB and depth camera have not been calibrated and we can’t provide calibration parameters at the
    moment. The recorded frames correspond to the raw, uncalibrated camera images. In the KinectFusion pipeline we used
    the following default intrinsics for the depth camera: Principle point (320,240), Focal length (585,585).    
    ----
    We use the extr. kinect camera parameters from https://projet.liris.cnrs.fr/voir/activities-dataset/kinect-calibration.html. 
    """
    sensors = kapture.Sensors()
    # camera_type = kapture.CameraType.OPENCV
    # camera_params = [640, 480, 5.2161910696979987e+02, 5.2132946256749767e+02, 3.1755491910920682e+02, 2.5921654718027673e+02,
    #                  2.5673002693536984e-01, -9.3976085633794137e-01, -1.8605549188751580e-03, -2.2232238578189420e-03]  # w, h, f, cx, cy, k1, k2, p1, p2, k3
    camera_type = kapture.CameraType.SIMPLE_PINHOLE
    # camera_params = [640, 480, 5.2161910696979987e+02, 5.2132946256749767e+02, 3.1755491910920682e+02, 2.5921654718027673e+02]  # w, h, fx, fy, cx, cy
    camera_params = [640, 480, 525, 320, 240]  # w, h, f, cx, cy
    sensors[RGB_SENSOR_ID] = kapture.Camera(
        name=RGB_SENSOR_ID,
        camera_type=camera_type,
        camera_params=camera_params
    )
    # depth_camera_type = kapture.CameraType.OPENCV
    # depth_camera_params = [640, 480, 5.8818670481438744e+02, 5.8724220649505514e+02, 3.1076280589210484e+02, 2.2887144980135292e+02,
    #                        -1.8932947734719333e-01, 1.1358015104098631e+00, -4.4260345347128536e-03, -5.4869578635708153e-03, -2.2460143607712921e+00] # w, h, f, cx, cy, k1, k2, p1, p2, k3
    depth_camera_type = kapture.CameraType.SIMPLE_PINHOLE
    # depth_camera_params = [640, 480, 5.8818670481438744e+02, 5.8724220649505514e+02, 3.1076280589210484e+02, 2.2887144980135292e+02] # w, h, fx, fy, cx, cy
    depth_camera_params = [640, 480, 585, 320, 240]  # w, h, f, cx, cy
    sensors[DEPTH_SENSOR_ID] = kapture.Camera(
        name=DEPTH_SENSOR_ID,
        camera_type=depth_camera_type,
        camera_params=depth_camera_params,
        sensor_type='depth'
    )
    sensors[REG_DEPTH_SENSOR_ID] = kapture.Camera(
        name=REG_DEPTH_SENSOR_ID,
        camera_type=depth_camera_type,
        camera_params=camera_params,
        sensor_type='depth'
    )

    # bind camera and depth sensor into a rig
    R = np.array([[9.9996518012567637e-01, 2.6765126468950343e-03, -7.9041012313000904e-03],
                  [-2.7409311281316700e-03, 9.9996302803027592e-01, -8.1504520778013286e-03],
                  [7.8819942130445332e-03, 8.1718328771890631e-03, 9.9993554558014031e-01]])
    T = np.array([-2.5558943178152542e-02, 1.0109636268061706e-04, 2.0318321729487039e-03])
    Rt = np.vstack((np.hstack((R, T.reshape(3, 1))), np.array([0, 0, 0, 1])))
    logger.info('building rig with camera and depth sensor ...')
    rigs = kapture.Rigs()
    rigs[RGBD_SENSOR_ID, RGB_SENSOR_ID] = kapture.PoseTransform(quaternion.from_rotation_matrix(R), T)
    rigs[RGBD_SENSOR_ID, REG_DEPTH_SENSOR_ID] = kapture.PoseTransform(quaternion.from_rotation_matrix(R), T)
    rigs[RGBD_SENSOR_ID, DEPTH_SENSOR_ID] = kapture.PoseTransform()

    # import (copy) image files.
    logger.info('copying image files ...')
    image_filenames = [f for _, _, f in kapture.flatten(snapshots)]
    import_record_data_from_dir_auto(d7scenes_path, kapture_dir_path, image_filenames, images_import_method)

    # import (copy) depth map files.
    logger.info('converting depth files ...')
    depth_map_filenames = kapture.io.records.records_to_filepaths(depth_maps, kapture_dir_path)
    hide_progress = logger.getEffectiveLevel() > logging.INFO
    for depth_map_filename, depth_map_filepath_kapture in tqdm(depth_map_filenames.items(), disable=hide_progress):
        if '.reg' in depth_map_filename:
            continue
        depth_map_filepath_7scenes = path.join(d7scenes_path, depth_map_filename + '.png')
        depth_map = np.array(Image.open(depth_map_filepath_7scenes))
        # change invalid depth from 65535 to 0
        depth_map[depth_map == 65535] = 0
        # depth maps is in mm in 7scenes, convert it to meters
        depth_map = depth_map.astype(np.float32) * 1.0e-3
        kapture.io.records.records_depth_to_file(depth_map_filepath_kapture, depth_map)
        # register depth to rgb
        reg_depth_map = register_depth(get_K(depth_camera_type, depth_camera_params), get_K(camera_type, camera_params),
                                       Rt, depth_map, camera_params[0], camera_params[1])
        kapture.io.records.records_depth_to_file(depth_map_filepath_kapture + '.reg', reg_depth_map)

    # pack into kapture format
    imported_kapture = kapture.Kapture(
        records_camera=snapshots,
        records_depth=depth_maps,
        rigs=rigs,
        trajectories=trajectories,
        sensors=sensors)

    logger.info('writing imported data ...')
    kapture_to_dir(kapture_dir_path, imported_kapture)
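
The depth conversion in the loop above is two array operations: mask the 7-Scenes invalid value (65535) to 0 and convert millimetres to metres. A standalone sketch on a synthetic map:

import numpy as np

depth_map = np.array([[1000, 65535], [2500, 0]], dtype=np.uint16)  # synthetic depth in millimetres
depth_map[depth_map == 65535] = 0                  # 65535 marks invalid depth in 7-Scenes
depth_map = depth_map.astype(np.float32) * 1.0e-3  # millimetres -> metres
# depth_map is now [[1.0, 0.0], [2.5, 0.0]]
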
Example #18
def import_colmap_rig_json(
    rigs_colmap: list,
    images: Optional[kapture.RecordsCamera] = None,
    trajectories: Optional[kapture.Trajectories] = None
) -> Tuple[kapture.Rigs, kapture.RecordsCamera,
           Optional[kapture.Trajectories]]:
    """
    Build a kapture rig from colmap json file.

    :param rigs_colmap: colmap data describing the rig.
    :param images: input/output camera recordings: timestamps are modified to match
    :param trajectories: input/output trajectories: timestamps are modified to match
    :return: rigs, images and trajectories
    """
    rigs_kapture = kapture.Rigs()
    # camera_id (kapture) -> file prefix
    camera_prefixes = {}
    """ rigs_colmap
    [{
        "cameras": [
            {"camera_id": 1, "image_prefix": "leftraw/"},
            {"camera_id": 2, "image_prefix": "rightraw/"}
        ],
        "ref_camera_id": 1
    }]
    """

    for rig_idx_colmap, rig_colmap in enumerate(rigs_colmap):
        rig_id_kapture = f'rig{rig_idx_colmap}'  # make up a rig ID from its idx in colmap.
        for cam_colmap in rig_colmap['cameras']:
            # colmap_cam_id -> kapture_cam_id
            camera_id_colmap = cam_colmap['camera_id']
            camera_id_kapture = get_camera_kapture_id_from_colmap_id(
                camera_id_colmap)
            camera_prefixes[camera_id_kapture] = cam_colmap['image_prefix']
            # colmap does not store rig geometry, but only the fact there is one.
            pose_unknown = kapture.PoseTransform(r=None, t=None)
            rigs_kapture[rig_id_kapture, camera_id_kapture] = pose_unknown

    reconstructed_images = None
    reconstructed_trajectories = None
    if images:
        # image_filepath => (prefix, suffix)
        filepath_to_split_fix = {}
        # if there are images, modify their timestamps to match
        # first pass: gather actual timestamps from suffix
        # camera_suffixes = set()
        for timestamp, camera_id_kapture, image_filepath in kapture.flatten(
                images):
            if camera_id_kapture not in camera_prefixes:
                raise KeyError(
                    'unknown camera_id {}'.format(camera_id_kapture))
            camera_prefix = camera_prefixes[camera_id_kapture]
            if not image_filepath.startswith(camera_prefix):
                raise ValueError('inconsistent camera name')
            filepath_to_split_fix[image_filepath] = (
                image_filepath[0:len(camera_prefix)],
                image_filepath[len(camera_prefix):])

        suffixes = sorted(set(suf
                              for _, suf in filepath_to_split_fix.values()))
        suffix_to_timestamp = {
            suffix: idx
            for idx, suffix in enumerate(suffixes)
        }
        idx_to_timestamp = {
            colmap_idx: suffix_to_timestamp[filepath_to_split_fix[filepath][1]]
            for colmap_idx, _, filepath in kapture.flatten(images)
        }

        # second pass: reconstruct images with timestamp (frame number) instead of colmap idx
        reconstructed_images = kapture.RecordsCamera()
        for colmap_idx, camera_id_kapture, image_filepath in kapture.flatten(
                images):
            timestamp = idx_to_timestamp[colmap_idx]
            reconstructed_images[timestamp, camera_id_kapture] = image_filepath

        # third pass: [optional] reconstruct trajectories
        if trajectories:
            reconstructed_trajectories = kapture.Trajectories()
            for colmap_idx, camera_id_kapture, pose in kapture.flatten(
                    trajectories):
                timestamp = idx_to_timestamp[colmap_idx]
                reconstructed_trajectories[timestamp, camera_id_kapture] = pose

    return rigs_kapture, reconstructed_images, reconstructed_trajectories
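
The three passes above reduce to: strip each image path's per-camera prefix, sort the remaining suffixes, and use each suffix's rank as the shared timestamp, so both cameras of a frame end up on the same timestamp. A hand-worked miniature of that mapping in plain Python (file names are made up):

camera_prefixes = {'cam0': 'camL/', 'cam1': 'camR/'}
images = {1: ('cam0', 'camL/0000.jpg'),     # colmap idx -> (kapture camera, image path)
          2: ('cam1', 'camR/0000.jpg'),
          3: ('cam0', 'camL/0001.jpg')}
suffixes = sorted({path[len(camera_prefixes[cam]):] for cam, path in images.values()})
suffix_to_timestamp = {suffix: idx for idx, suffix in enumerate(suffixes)}
# '0000.jpg' -> timestamp 0, '0001.jpg' -> timestamp 1
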
Example #19
    def test_evaluation(self):
        position = [1.658, 0, 0]
        position_a = [2.658, 0, 0]
        position_b = [1.758, 0, 0]
        position_c = [10.1, 0, 0]
        position_d = [2., 0, 0]
        position_e = [6.658, 0, 0]

        rotation = quaternion.from_euler_angles(np.deg2rad(110.0), 0, 0)
        rotation_a = quaternion.from_euler_angles(np.deg2rad(111.0), 0, 0)
        rotation_b = quaternion.from_euler_angles(np.deg2rad(108.0), 0, 0)
        rotation_c = quaternion.from_euler_angles(np.deg2rad(10.0), 0, 0)
        rotation_d = quaternion.from_euler_angles(np.deg2rad(110.0), 0, 0)

        pose_gt = kapture.PoseTransform(r=rotation, t=position).inverse()
        pose_a = kapture.PoseTransform(r=rotation_a, t=position_a).inverse()
        pose_b = kapture.PoseTransform(r=rotation_b, t=position_b).inverse()
        pose_c = kapture.PoseTransform(r=rotation_c, t=position_c).inverse()
        pose_d = kapture.PoseTransform(r=rotation_d, t=position_d).inverse()
        pose_e = kapture.PoseTransform(r=None, t=[-x for x in position_e])

        kdata = kapture.Kapture(sensors=kapture.Sensors(),
                                records_camera=kapture.RecordsCamera(),
                                trajectories=kapture.Trajectories())
        kdata.sensors['cam0'] = kapture.Camera(
            kapture.CameraType.UNKNOWN_CAMERA, [25, 13])
        kdata.records_camera[(0, 'cam0')] = 'a'
        kdata.records_camera[(1, 'cam0')] = 'b'
        kdata.records_camera[(2, 'cam0')] = 'c'
        kdata.records_camera[(3, 'cam0')] = 'd'
        kdata.records_camera[(4, 'cam0')] = 'e'

        kdata.trajectories[(0, 'cam0')] = pose_a
        kdata.trajectories[(1, 'cam0')] = pose_b
        kdata.trajectories[(2, 'cam0')] = pose_c
        kdata.trajectories[(3, 'cam0')] = pose_d

        kdata2 = copy.deepcopy(kdata)
        kdata2.trajectories[(4, 'cam0')] = pose_e
        kdata2.records_camera[(5, 'cam0')] = 'f'

        kdata_gt = copy.deepcopy(kdata2)
        kdata_gt.trajectories[(0, 'cam0')] = pose_gt
        kdata_gt.trajectories[(1, 'cam0')] = pose_gt
        kdata_gt.trajectories[(2, 'cam0')] = pose_gt
        kdata_gt.trajectories[(3, 'cam0')] = pose_gt
        kdata_gt.trajectories[(4, 'cam0')] = pose_gt
        kdata_gt.trajectories[(5, 'cam0')] = pose_gt

        kdata_list = [kdata, kdata2, kdata_gt]
        intersection = {'a', 'b', 'c', 'd', 'e'}

        result1 = evaluate(kdata, kdata_gt, intersection)
        self.assertEqual(len(result1), 5)
        self.assertEqual(result1[0][0], 'a')
        self.assertAlmostEqual(result1[0][1], 1.0)
        self.assertAlmostEqual(result1[0][2], 1.0)
        self.assertEqual(result1[1][0], 'b')
        self.assertAlmostEqual(result1[1][1], 0.1)
        self.assertAlmostEqual(result1[1][2], 2.0)
        self.assertEqual(result1[2][0], 'c')
        self.assertAlmostEqual(result1[2][1], 8.442)
        self.assertAlmostEqual(result1[2][2], 100.0)
        self.assertEqual(result1[3][0], 'd')
        self.assertAlmostEqual(result1[3][1], 0.342)
        self.assertAlmostEqual(result1[3][2], 0.0)
        self.assertEqual(result1[4][0], 'e')
        self.assertTrue(math.isnan(result1[4][1]))
        self.assertTrue(math.isnan(result1[4][2]))

        result2 = evaluate(kdata2, kdata_gt, intersection)
        self.assertEqual(len(result2), 5)
        self.assertEqual(result2[0][0], 'a')
        self.assertAlmostEqual(result2[0][1], 1.0)
        self.assertAlmostEqual(result2[0][2], 1.0)
        self.assertEqual(result2[1][0], 'b')
        self.assertAlmostEqual(result2[1][1], 0.1)
        self.assertAlmostEqual(result2[1][2], 2.0)
        self.assertEqual(result2[2][0], 'c')
        self.assertAlmostEqual(result2[2][1], 8.442)
        self.assertAlmostEqual(result2[2][2], 100.0)
        self.assertEqual(result2[3][0], 'd')
        self.assertAlmostEqual(result2[3][1], 0.342)
        self.assertAlmostEqual(result2[3][2], 0.0)
        self.assertEqual(result2[4][0], 'e')
        self.assertAlmostEqual(result2[4][1], 5.0)
        self.assertTrue(math.isnan(result2[4][2]))

        bins1 = fill_bins(result1, [(0.9, 5), (10, 105)])
        self.assertEqual(len(bins1), 2)
        self.assertEqual(bins1[0][0], 0.9)
        self.assertEqual(bins1[0][1], 5)
        self.assertEqual(bins1[0][2], 2)
        self.assertEqual(bins1[1][0], 10)
        self.assertEqual(bins1[1][1], 105)
        self.assertEqual(bins1[1][2], 4)

        bins2 = fill_bins(result1, [(0.9, 5), (10, 105)])
        self.assertEqual(len(bins2), 2)
        self.assertEqual(bins2[0][0], 0.9)
        self.assertEqual(bins2[0][1], 5)
        self.assertEqual(bins2[0][2], 2)
        self.assertEqual(bins2[1][0], 10)
        self.assertEqual(bins2[1][1], 105)
        self.assertEqual(bins2[1][2], 4)

        bins3 = fill_bins(result2, [(0.9, math.nan), (10, math.nan)])
        self.assertEqual(len(bins3), 2)
        self.assertEqual(bins3[0][0], 0.9)
        self.assertTrue(math.isnan(bins3[0][1]))
        self.assertEqual(bins3[0][2], 2)
        self.assertEqual(bins3[1][0], 10)
        self.assertTrue(math.isnan(bins3[1][1]))
        self.assertEqual(bins3[1][2], 5)

        bins4 = fill_bins(result2, [(0.9, -1), (10, -1)])
        self.assertEqual(len(bins4), 2)
        self.assertEqual(bins4[0][0], 0.9)
        self.assertEqual(bins4[0][1], -1)
        self.assertEqual(bins4[0][2], 2)
        self.assertEqual(bins4[1][0], 10)
        self.assertEqual(bins4[1][1], -1)
        self.assertEqual(bins4[1][2], 5)
Example #20
def import_opensfm(
        opensfm_root_dir: str,
        kapture_root_dir: str,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.copy) -> None:
    """
    Convert an openSfM structure to a kapture on disk. Also copy, move or link the images files if necessary.

    :param opensfm_root_dir: the openSfM top directory
    :param kapture_root_dir: top directory of kapture created
    :param force_overwrite_existing: if true, will remove existing kapture data without prompting the user
    :param images_import_method: action to apply on images: link, copy, move or do nothing.
    :return: the constructed kapture object
    """
    disable_tqdm = logger.getEffectiveLevel() != logging.INFO
    # load reconstruction
    opensfm_reconstruction_filepath = path.join(opensfm_root_dir,
                                                'reconstruction.json')
    with open(opensfm_reconstruction_filepath, 'rt') as f:
        opensfm_reconstruction = json.load(f)
    # remove the single list @ root
    opensfm_reconstruction = opensfm_reconstruction[0]

    # prepare space for output
    os.makedirs(kapture_root_dir, exist_ok=True)
    delete_existing_kapture_files(kapture_root_dir,
                                  force_erase=force_overwrite_existing)

    # import cameras
    kapture_sensors = kapture.Sensors()
    assert 'cameras' in opensfm_reconstruction
    # import cameras
    for osfm_camera_id, osfm_camera in opensfm_reconstruction['cameras'].items():
        camera = import_camera(osfm_camera, name=osfm_camera_id)
        kapture_sensors[osfm_camera_id] = camera

    # import shots
    logger.info('importing images and trajectories ...')
    kapture_images = kapture.RecordsCamera()
    kapture_trajectories = kapture.Trajectories()
    opensfm_image_dir_path = path.join(opensfm_root_dir, 'images')
    assert 'shots' in opensfm_reconstruction
    # used later to retrieve the timestamp of an image.
    image_timestamps, image_sensors = {}, {}
    for timestamp, (image_filename, shot) in enumerate(
            opensfm_reconstruction['shots'].items()):
        sensor_id = shot['camera']
        image_timestamps[image_filename] = timestamp
        image_sensors[image_filename] = sensor_id
        # in OpenSfm, (sensor, timestamp) is not unique.
        rotation_vector = shot['rotation']
        q = quaternion.from_rotation_vector(rotation_vector)
        translation = shot['translation']
        # capture_time = shot['capture_time'] # may be invalid
        # gps_position = shot['gps_position']
        kapture_images[timestamp, sensor_id] = image_filename
        kapture_trajectories[timestamp, sensor_id] = kapture.PoseTransform(r=q, t=translation)

    # copy image files
    filename_list = [f for _, _, f in kapture.flatten(kapture_images)]
    import_record_data_from_dir_auto(
        source_record_dirpath=opensfm_image_dir_path,
        destination_kapture_dirpath=kapture_root_dir,
        filename_list=filename_list,
        copy_strategy=images_import_method)

    # Imports Gnss
    kapture_gnss = _import_gnss(opensfm_root_dir, kapture_sensors,
                                image_sensors, image_timestamps, disable_tqdm)
    # Imports descriptors, keypoints and matches
    kapture_descriptors, kapture_keypoints, kapture_matches = _import_features_and_matches(
        opensfm_root_dir, kapture_root_dir, disable_tqdm)

    # import 3-D points
    if 'points' in opensfm_reconstruction:
        logger.info('importing points 3-D')
        opensfm_points = opensfm_reconstruction['points']
        points_data = []
        for point_id in sorted(opensfm_points):
            point_data = opensfm_points[point_id]
            point_data = point_data['coordinates'] + point_data['color']
            points_data.append(point_data)
        kapture_points = kapture.Points3d(points_data)
    else:
        kapture_points = None

    # saving kapture csv files
    logger.info('saving kapture files')
    kapture_data = kapture.Kapture(sensors=kapture_sensors,
                                   records_camera=kapture_images,
                                   records_gnss=kapture_gnss,
                                   trajectories=kapture_trajectories,
                                   keypoints=kapture_keypoints,
                                   descriptors=kapture_descriptors,
                                   matches=kapture_matches,
                                   points3d=kapture_points)
    kapture_to_dir(kapture_root_dir, kapture_data)
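
The point import above builds each row as 3-D coordinates followed by an RGB colour before handing the list to kapture.Points3d. A minimal sketch with one made-up point:

import kapture

points_data = [[1.0, 2.0, 3.0] + [255, 0, 0]]   # x, y, z, r, g, b (illustrative values)
kapture_points = kapture.Points3d(points_data)
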
Example #21
    def add_frames(self, frames: List[Frame], points3d: List[Keypoint]):
        k = self.kapture

        if k.records_camera is None:
            k.records_camera = kt.RecordsCamera()
        if k.trajectories is None:
            k.trajectories = kt.Trajectories()
        if k.keypoints is None:
            k.keypoints = {
                self.default_kp_type:
                kt.Keypoints(self.default_kp_type, np.float32, 2)
            }
        if k.points3d is None:
            k.points3d = kt.Points3d()
        if k.observations is None:
            k.observations = kt.Observations()

        def check_kp(kp):
            return not kp.bad_qlt and kp.inlier_count > self.min_pt3d_obs and kp.inlier_count / kp.total_count > self.min_pt3d_ratio

        kp_ids, pts3d = zip(*[(kp.id, kp.pt3d) for kp in points3d
                              if check_kp(kp)])
        I = np.argsort(kp_ids)
        pt3d_ids = dict(zip(np.array(kp_ids)[I], np.arange(len(I))))
        pt3d_arr = np.array(pts3d)[I, :]
        k.points3d = kt.Points3d(
            np.concatenate((pt3d_arr, np.ones_like(pt3d_arr) * 128), axis=1))

        for f in frames:
            if not f.pose.post:
                continue

            id = f.frame_num
            img = f.orig_image
            img_file = os.path.join(self.default_cam[1],
                                    'frame%06d.%s' % (id, self.img_format))
            img_fullpath = get_record_fullpath(self.path, img_file)
            os.makedirs(os.path.dirname(img_fullpath), exist_ok=True)

            if not np.isclose(self.scale, 1.0):
                img = cv2.resize(img,
                                 None,
                                 fx=self.scale,
                                 fy=self.scale,
                                 interpolation=cv2.INTER_AREA)
            if self.img_format == self.IMG_FORMAT_PNG:
                cv2.imwrite(img_fullpath, img,
                            (cv2.IMWRITE_PNG_COMPRESSION, 9))
            elif self.img_format == self.IMG_FORMAT_JPG:
                cv2.imwrite(img_fullpath, img,
                            (cv2.IMWRITE_JPEG_QUALITY, self.jpg_qlt))
            else:
                assert False, 'Invalid image format: %s' % (self.img_format, )

            record_id = (id, self.default_cam[0])
            k.records_camera[record_id] = img_file

            pose = f.pose.post
            k.trajectories[record_id] = kt.PoseTransform(
                r=pose.quat.components, t=pose.loc)
            k.keypoints[self.default_kp_type].add(img_file)

            uvs = np.zeros((len(f.kps_uv), 2), np.float32)
            i = 0
            for kp_id, uv in f.kps_uv.items():
                if kp_id in pt3d_ids:
                    k.observations.add(int(pt3d_ids[kp_id]),
                                       self.default_kp_type, img_file, i)
                    uvs[i, :] = uv / f.img_sc * self.scale
                    i += 1

            image_keypoints_to_file(
                get_keypoints_fullpath(self.default_kp_type, self.path,
                                       img_file), uvs[:i, :])
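
The bookkeeping above maps arbitrary keypoint ids onto dense 3-D point row indices via np.argsort. A minimal standalone sketch of that trick, using made-up ids and coordinates (not taken from the original class):

import numpy as np

kp_ids = [42, 7, 19]                      # arbitrary, non-contiguous keypoint ids
pts3d = np.random.rand(3, 3)              # one xyz row per keypoint, same order as kp_ids
order = np.argsort(kp_ids)                # row order sorted by id: 7, 19, 42
pt3d_ids = dict(zip(np.array(kp_ids)[order], np.arange(len(order))))
pt3d_arr = np.array(pts3d)[order, :]
assert pt3d_ids[7] == 0 and pt3d_ids[42] == 2
# pt3d_arr[pt3d_ids[k]] is the 3-D point registered under keypoint id k
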
Example #22
def import_robotcar_seasons(
        robotcar_path: str,  # noqa: C901: function a bit long but not too complex
        kapture_path: str,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.skip,
        import_feature_db: bool = False,
        skip_reconstruction: bool = False,
        rig_collapse: bool = False,
        use_colmap_intrinsics: bool = False,
        import_v1: bool = False) -> None:
    """
    Reads the RobotCar Seasons data and creates several kaptures with training and query data.

    :param robotcar_path: path to the robotcar top directory
    :param kapture_path: path to the kapture top directory
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    :param import_feature_db: if True, will import the features from the database
    :param skip_reconstruction: if True, will skip the reconstruction part from the training data
    :param rig_collapse: if True, will collapse the rig
    :param use_colmap_intrinsics: if True, will use the colmap intrinsics
    :param import_v1: if True, will use the version 1 of the format
    """

    os.makedirs(kapture_path, exist_ok=True)

    cameras = import_robotcar_cameras(path.join(robotcar_path, 'intrinsics'))
    rigs = import_robotcar_rig(path.join(robotcar_path, 'extrinsics'))

    logger.info("Importing test data")
    # Test data
    image_pattern = re.compile(
        r'(?P<condition>.+)/(?P<camera>\w+)/(?P<timestamp>\d+)\.jpg')
    queries_path = path.join(robotcar_path, '3D-models', 'individual',
                             'queries_per_location')
    kapture_imported_query = {}
    for root, dirs, files in os.walk(queries_path):
        for query_file in files:
            records_camera = kapture.RecordsCamera()
            # Get list of query images
            with open(path.join(queries_path, query_file)) as f:
                for line in f:
                    matches = image_pattern.match(line)
                    image_path = line.strip()
                    if not matches:
                        logger.warning(f"Error matching line in {image_path}")
                        continue
                    matches = matches.groupdict()
                    timestamp = int(matches['timestamp'])
                    camera = str(matches['camera'])
                    # condition = str(matches['condition']) : not used ?
                    records_camera[timestamp, camera] = image_path

                (query_name, _) = query_file.split('.')
                kapture_test = kapture.Kapture(sensors=cameras,
                                               rigs=rigs,
                                               records_camera=records_camera)
                kapture_imported_query[int(
                    query_name.split('_')[-1])] = kapture_test

    # Training data
    logger.info("Importing training data")
    colmap_reconstructions_path = path.join(robotcar_path, '3D-models',
                                            'individual',
                                            'colmap_reconstructions')
    kapture_imported_training = {}
    for root, dirs, files in os.walk(colmap_reconstructions_path):
        for colmap_reconstruction in dirs:
            (loc_id, _) = colmap_reconstruction.split('_')
            kapture_reconstruction_dir = path.join(kapture_path,
                                                   f"{int(loc_id):02d}",
                                                   "mapping")
            delete_existing_kapture_files(kapture_reconstruction_dir,
                                          force_overwrite_existing)
            logger.info(f'Converting reconstruction {loc_id} to kapture  ...')
            kapture_reconstruction_data = import_robotcar_colmap_location(
                robotcar_path,
                path.join(colmap_reconstructions_path, colmap_reconstruction),
                kapture_reconstruction_dir, rigs, skip_reconstruction)
            # replace intrinsics with the ones found in the text files
            if not use_colmap_intrinsics:
                kapture_reconstruction_data.sensors = cameras
            kapture_imported_training[int(
                loc_id)] = kapture_reconstruction_data

    if not import_v1:
        _import_robotcar_v2_train(robotcar_path, kapture_imported_query,
                                  kapture_imported_training, image_pattern)

    # apply rig collapse
    if rig_collapse:
        logger.info('replacing camera poses with rig poses.')
        for kapture_mapping in kapture_imported_training.values():
            kapture.rigs_recover_inplace(kapture_mapping.trajectories, rigs,
                                         ['rear'])

    # IO operations
    robotcar_image_path = path.join(robotcar_path, "images")
    for loc_id, kapture_query in kapture_imported_query.items():
        loc_id_str = f"{loc_id:02d}"
        logger.info(f'writing test data: {loc_id_str}')
        kapture_test_dir = path.join(kapture_path, loc_id_str, "query")
        delete_existing_kapture_files(kapture_test_dir,
                                      force_overwrite_existing)
        if not kapture_query.records_camera:  # all images were removed
            continue
        kapture_to_dir(kapture_test_dir, kapture_query)
        query_images = [
            f for _, _, f in kapture.flatten(kapture_query.records_camera)
        ]
        import_record_data_from_dir_auto(robotcar_image_path, kapture_test_dir,
                                         query_images, images_import_method)

    for loc_id, kapture_mapping in kapture_imported_training.items():
        loc_id_str = f"{loc_id:02d}"
        logger.info(f'writing mapping data: {loc_id_str}')
        kapture_reconstruction_dir = path.join(kapture_path, f"{loc_id:02d}",
                                               "mapping")
        kapture_to_dir(kapture_reconstruction_dir, kapture_mapping)
        mapping_images = [
            f for _, _, f in kapture.flatten(kapture_mapping.records_camera)
        ]
        import_record_data_from_dir_auto(robotcar_image_path,
                                         kapture_reconstruction_dir,
                                         mapping_images, images_import_method)

    if import_feature_db:
        _import_colmap_overcast_reference(robotcar_path, kapture_path,
                                          force_overwrite_existing)
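
The query files above are parsed with image_pattern, which pulls condition, camera and timestamp out of RobotCar-style relative paths. A quick standalone check on a made-up path (the path itself is hypothetical):

import re

image_pattern = re.compile(r'(?P<condition>.+)/(?P<camera>\w+)/(?P<timestamp>\d+)\.jpg')
match = image_pattern.match('overcast-summer/rear/1432293785987174.jpg')  # hypothetical entry
assert match is not None
print(match.groupdict())
# {'condition': 'overcast-summer', 'camera': 'rear', 'timestamp': '1432293785987174'}
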
Example #23
def import_from_colmap_images_txt(colmap_images_filepath: str,
                                  kapture_dirpath: Optional[str] = None
                                  ) -> Tuple[kapture.RecordsCamera, kapture.Trajectories, Optional[kapture.Keypoints]]:
    """
    Imports RecordsCamera, Trajectories and Keypoints from colmap images.txt

    :param colmap_images_filepath: path to colmap images.txt file
    :param kapture_dirpath: path to kapture root path.
                            If not given (None), keypoints are not created.
    :return: kapture images, trajectories and keypoints
    """

    # colmap images file format is :
    # Image list with two lines of data per image:
    #   IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME
    #   POINTS2D[] as (X, Y, POINT3D_ID)

    images = kapture.RecordsCamera()
    trajectories = kapture.Trajectories()
    keypoints = None
    image_names = []  # first pass
    # first pass: IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME
    # to images and trajectories
    with open(colmap_images_filepath, 'r') as colmap_images_file:
        lines = colmap_images_file.readlines()
        lines = (line for line in lines if not line.startswith('#'))  # eliminate comments
        lines = (line for i, line in enumerate(lines) if (i % 2) == 0)  # keep only even-indexed lines (pose lines)
        # split by space and or comma
        lines = (re.findall(colmap_reconstruction_split_pattern, line.rstrip())
                 for line in lines)  # split fields
        # but make sure not to split spaces in file names
        lines = (line[0:9] + [' '.join(line[9:])] for line in lines)
        for fields in lines:
            # IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME
            timestamp = int(fields[0])  # use image ID as timestamp
            q = [float(v) for v in fields[1:5]]
            t = [float(v) for v in fields[5:8]]
            pose = kapture.PoseTransform(q, t)
            camera_id = get_camera_kapture_id_from_colmap_id(int(fields[8]))
            image_name = fields[9]
            images[timestamp, camera_id] = image_name
            trajectories[timestamp, camera_id] = pose
            image_names.append(image_name)

    # second pass: keypoints, observations and points 3d
    if kapture_dirpath is not None:
        # second pass: POINTS2D[] as (X, Y, POINT3D_ID)
        image_names_with_keypoints = set()
        # observations = kapture.Observations()
        with open(colmap_images_filepath, 'r') as colmap_images_file:
            lines = colmap_images_file.readlines()
            lines = (line for line in lines if not line.startswith('#'))  # eliminate comments
            lines = (line for i, line in enumerate(lines) if (i % 2) == 1)  # keep only odd-indexed lines (2D points lines)
            # split by space and or comma
            lines = (re.findall(colmap_reconstruction_split_pattern, line.rstrip())
                     for line in lines)  # split fields
            for image_name, fields in zip(image_names, lines):
                image_keypoints_colmap = np.array(fields).reshape((-1, 3))[:, 0:2].astype(np.float32)
                # register as keypoints if there is at least one
                if image_keypoints_colmap.shape[0] > 0:
                    keypoints_filepath = kapture.io.features.get_keypoints_fullpath(kapture_dirpath, image_name)
                    kapture.io.features.image_keypoints_to_file(keypoints_filepath, image_keypoints_colmap)
                    image_names_with_keypoints.add(image_name)
                    # TODO: observations

        if image_names_with_keypoints:
            keypoints = kapture.Keypoints('SIFT', np.float32, 2, image_names_with_keypoints)

    return images, trajectories, keypoints
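
The two-pass parsing above relies on colmap's images.txt layout: each image contributes a pose line followed by a keypoints line. Below is a self-contained sketch of the even/odd split and the file-name join on a synthetic fragment; the split pattern is only an assumption standing in for colmap_reconstruction_split_pattern, which is defined elsewhere in the module:

import re
import numpy as np

colmap_reconstruction_split_pattern = r'[^,\s]+'  # assumption: split on spaces and commas
synthetic = [
    '# comment line\n',
    '1 1.0 0.0 0.0 0.0 0.1 0.2 0.3 1 room/image 001.jpg\n',  # IMAGE_ID .. CAMERA_ID NAME
    '10.5 20.5 7 30.0 40.0 -1\n',                            # POINTS2D as (X, Y, POINT3D_ID)
]
lines = [line for line in synthetic if not line.startswith('#')]
pose_lines = [line for i, line in enumerate(lines) if i % 2 == 0]
kpt_lines = [line for i, line in enumerate(lines) if i % 2 == 1]
fields = re.findall(colmap_reconstruction_split_pattern, pose_lines[0].rstrip())
fields = fields[0:9] + [' '.join(fields[9:])]  # keep spaces inside the file name
print(fields[9])  # 'room/image 001.jpg'
keypoints_xy = np.array(re.findall(colmap_reconstruction_split_pattern,
                                   kpt_lines[0].rstrip())).reshape((-1, 3))[:, 0:2].astype(np.float32)
print(keypoints_xy.shape)  # (2, 2)
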
Example #24
def import_image_list(images_list_filenames: List[str],
                      images_dirpath: str,
                      kapture_path: str,
                      force_overwrite_existing: bool = False,
                      images_import_method: TransferAction = TransferAction.skip) -> None:
    """
    Imports the list of images to a kapture. This creates only images and cameras.

    :param images_list_filenames: list of text files containing image file names
    :param images_dirpath: path to images directory.
    :param kapture_path: path to kapture root directory.
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    """
    assert isinstance(images_list_filenames, list)
    os.makedirs(kapture_path, exist_ok=True)
    delete_existing_kapture_files(kapture_path, force_erase=force_overwrite_existing)

    cameras = kapture.Sensors()
    images = kapture.RecordsCamera()

    offset = 0
    logger.info('starting conversion...')
    for images_list_filename in images_list_filenames:
        logger.info(f'loading {images_list_filename}')
        with open(images_list_filename) as file:
            images_list = file.readlines()
            # remove end line char and empty lines
            images_list = [line.rstrip() for line in images_list if line != '\n']

            for i in range(0, len(images_list)):
                line = images_list[i].split()
                image_file_name = line[0]
                if len(line) > 1:
                    model = line[1]
                    model_params = line[2:]
                else:
                    model = kapture.CameraType.UNKNOWN_CAMERA.value
                    try:
                        # lazy open
                        with Image.open(path.join(images_dirpath, image_file_name)) as im:
                            width, height = im.size
                            model_params = [width, height]
                    except (OSError, PIL.UnidentifiedImageError):
                        # It is not a valid image: skip it
                        logger.info(f'Skipping invalid image file {image_file_name}')
                        continue

                camera_id = f'sensor{i + offset}'
                cameras[camera_id] = kapture.Camera(model, model_params)
                images[(i + offset, camera_id)] = image_file_name
            offset += len(images_list)

    # import (copy) image files.
    logger.info('import image files ...')
    filename_list = [f for _, _, f in kapture.flatten(images)]
    import_record_data_from_dir_auto(images_dirpath, kapture_path, filename_list, images_import_method)

    # pack into kapture format
    imported_kapture = kapture.Kapture(sensors=cameras, records_camera=images)
    logger.info('writing imported data...')
    kapture_to_dir(kapture_path, imported_kapture)
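
A hypothetical invocation of import_image_list; all paths below are placeholders, and TransferAction.link_absolute is one of the transfer strategies already referenced in these examples:

import_image_list(images_list_filenames=['./lists/images.txt'],
                  images_dirpath='./raw_images',
                  kapture_path='./kapture_dataset',
                  force_overwrite_existing=True,
                  images_import_method=TransferAction.link_absolute)
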
Example #25
def import_7scenes(d7scenes_path: str,
                   kapture_dir_path: str,
                   force_overwrite_existing: bool = False,
                   images_import_method: TransferAction = TransferAction.skip,
                   partition: Optional[str] = None
                   ) -> None:
    """
    Imports the RGB-D 7-Scenes dataset and saves it as kapture.

    :param d7scenes_path: path to the 7scenes sequence root path
    :param kapture_dir_path: path to kapture top directory
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    :param partition: if specified, either 'mapping' or 'query'. Requires d7scenes_path/TestSplit.txt or TrainSplit.txt
                    to exist.
    """
    os.makedirs(kapture_dir_path, exist_ok=True)
    delete_existing_kapture_files(kapture_dir_path, force_erase=force_overwrite_existing)

    logger.info('loading all content ...')

    d7s_filename_re = re.compile(r'((?P<sequence>.+)/)?frame-(?P<frame_id>\d{6})\.(?P<suffix>\w*)\.(?P<ext>\w*)')

    # populate all relevant files
    d7s_filenames = (path_secure(path.relpath(path.join(dp, fn), d7scenes_path))
                     for dp, _, fs in os.walk(d7scenes_path) for fn in fs)

    logger.info('populating 7-scenes files ...')
    d7s_filenames = {filename: d7s_filename_re.search(filename).groupdict()
                     for filename in sorted(d7s_filenames)
                     if d7s_filename_re.search(filename)}

    # reorg as shot[seq, id] = {color: , depth: , pose: , ...}
    shots = {}
    for filename, file_attribs in d7s_filenames.items():
        shot_id = (file_attribs.get('sequence'), file_attribs['frame_id'])
        shots.setdefault(shot_id, {})[file_attribs['suffix']] = filename

    # fake timestamps
    for timestamp, shot_id in enumerate(shots):
        shots[shot_id]['timestamp'] = timestamp

    # if given, filter partition
    if partition is not None:
        # read the authors split file
        partition_filepath = path.join(d7scenes_path, PARTITION_FILENAMES[partition])
        if not path.isfile(partition_filepath):
            raise FileNotFoundError(f'partition file is missing: {partition_filepath}.')
        with open(partition_filepath, 'rt') as file:
            split_sequences = [f'seq-{int(seq.strip()[len("sequence"):]):02}' for seq in file.readlines()]
        assert len(split_sequences) > 0
        # filter out
        shots = {(seq, frame): shot
                 for (seq, frame), shot in shots.items()
                 if seq in split_sequences}

    if len(shots) == 0:
        raise FileNotFoundError('no file found: make sure the path to 7scenes sequence is valid.')

    # eg. shots['seq-01', '000000'] =
    #       {
    #           'color': 'seq-01/frame-000000.color.jpg',
    #           'depth': 'seq-01/frame-000000.depth.png',
    #           'pose': 'seq-01/frame-000000.pose.txt',
    #           'timestamp': 0}

    # images + depth maps
    logger.info('populating image and depth maps files ...')
    snapshots = kapture.RecordsCamera()
    depth_maps = kapture.RecordsDepth()
    for shot in shots.values():
        snapshots[shot['timestamp'], RGB_SENSOR_ID] = shot['color']
        kapture_depth_map_filename = shot['depth'][:-len('.png')]  # kapture depth files are not png
        depth_maps[shot['timestamp'], DEPTH_SENSOR_ID] = kapture_depth_map_filename

    # poses
    logger.info('import poses files ...')
    trajectories = kapture.Trajectories()
    for shot in shots.values():
        pose_filepath = path.join(d7scenes_path, shot['pose'])
        pose_mat = np.loadtxt(pose_filepath)  # camera-to-world, 4×4 matrix in homogeneous coordinates
        rotation_mat = pose_mat[0:3, 0:3]
        position_vec = pose_mat[0:3, 3]
        rotation_quat = quaternion.from_rotation_matrix(rotation_mat)
        pose_world_from_cam = kapture.PoseTransform(r=rotation_quat, t=position_vec)
        pose_cam_from_world = pose_world_from_cam.inverse()
        trajectories[shot['timestamp'], RGBD_SENSOR_ID] = pose_cam_from_world

    # sensors
    """
    From authors: The RGB and depth camera have not been calibrated and we can’t provide calibration parameters at the
    moment. The recorded frames correspond to the raw, uncalibrated camera images. In the KinectFusion pipeline we used
    the following default intrinsics for the depth camera: Principle point (320,240), Focal length (585,585).
    """
    sensors = kapture.Sensors()
    camera_type = kapture.CameraType.SIMPLE_PINHOLE
    camera_params = [640, 480, 585, 320, 240]  # w, h, f, cx, cy
    sensors[RGB_SENSOR_ID] = kapture.Camera(
        name=RGB_SENSOR_ID,
        camera_type=camera_type,
        camera_params=camera_params
    )
    sensors[DEPTH_SENSOR_ID] = kapture.Camera(
        name=DEPTH_SENSOR_ID,
        camera_type=camera_type,
        camera_params=camera_params,
        sensor_type='depth'
    )

    # bind camera and depth sensor into a rig
    logger.info('building rig with camera and depth sensor ...')
    rigs = kapture.Rigs()
    rigs[RGBD_SENSOR_ID, RGB_SENSOR_ID] = kapture.PoseTransform()
    rigs[RGBD_SENSOR_ID, DEPTH_SENSOR_ID] = kapture.PoseTransform()

    # import (copy) image files.
    logger.info('copying image files ...')
    image_filenames = [f for _, _, f in kapture.flatten(snapshots)]
    import_record_data_from_dir_auto(d7scenes_path, kapture_dir_path, image_filenames, images_import_method)

    # import (copy) depth map files.
    logger.info('converting depth files ...')
    depth_map_filenames = kapture.io.records.records_to_filepaths(depth_maps, kapture_dir_path)
    hide_progress = logger.getEffectiveLevel() > logging.INFO
    for depth_map_filename, depth_map_filepath_kapture in tqdm(depth_map_filenames.items(), disable=hide_progress):
        depth_map_filepath_7scenes = path.join(d7scenes_path, depth_map_filename + '.png')
        depth_map = np.array(Image.open(depth_map_filepath_7scenes))
        # change invalid depth from 65535 to 0
        depth_map[depth_map == 65535] = 0
        # depth maps is in mm in 7scenes, convert it to meters
        depth_map = depth_map.astype(np.float32) * 1.0e-3
        kapture.io.records.records_depth_to_file(depth_map_filepath_kapture, depth_map)

    # pack into kapture format
    imported_kapture = kapture.Kapture(
        records_camera=snapshots,
        records_depth=depth_maps,
        rigs=rigs,
        trajectories=trajectories,
        sensors=sensors)

    logger.info('writing imported data ...')
    kapture_to_dir(kapture_dir_path, imported_kapture)
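
The pose loop above turns the camera-to-world matrices shipped with 7-Scenes into the camera-from-world poses stored in kapture trajectories. A plain-numpy sketch of that inversion on a made-up pose, independent of the kapture API:

import numpy as np

pose_world_from_cam = np.eye(4)                # made-up camera-to-world pose
pose_world_from_cam[0:3, 3] = [1.0, 2.0, 3.0]  # camera centre in world coordinates
R = pose_world_from_cam[0:3, 0:3]
t = pose_world_from_cam[0:3, 3]
pose_cam_from_world = np.eye(4)
pose_cam_from_world[0:3, 0:3] = R.T            # inverse rotation
pose_cam_from_world[0:3, 3] = -R.T @ t         # inverse translation
assert np.allclose(pose_cam_from_world @ pose_world_from_cam, np.eye(4))
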
Example #26
def import_silda(
    silda_dir_path: str,
    destination_kapture_dir_path: str,
    fallback_cam_model: str = 'FOV',
    do_split_cams: bool = False,
    corpus: Optional[str] = None,
    replace_pose_rig: bool = False,
    force_overwrite_existing: bool = False,
    images_import_strategy: TransferAction = TransferAction.link_absolute
) -> None:
    """
    Imports data from the silda dataset.

    :param silda_dir_path: path to the silda top directory
    :param destination_kapture_dir_path: output path to the kapture directory.
    :param fallback_cam_model: camera model to fallback when necessary
    :param do_split_cams: If true, re-organises and renames the image files to split apart cameras.
    :param corpus: the list of corpus to be imported, among 'mapping', 'query'.
    :param replace_pose_rig: if True, replaces poses of individual cameras with poses of the rig.
    :param force_overwrite_existing: if true, Silently overwrite kapture files if already exists.
    :param images_import_strategy: how to copy image files.
    """

    # sanity check
    silda_dir_path = path_secure(path.abspath(silda_dir_path))
    destination_kapture_dir_path = path_secure(
        path.abspath(destination_kapture_dir_path))
    if TransferAction.root_link == images_import_strategy and do_split_cams:
        raise ValueError(
            'cannot only link the images directory while also splitting cameras.')
    hide_progress_bars = logger.getEffectiveLevel() >= logging.INFO

    # prepare output directory
    kapture.io.structure.delete_existing_kapture_files(
        destination_kapture_dir_path, force_overwrite_existing)
    os.makedirs(destination_kapture_dir_path, exist_ok=True)

    # images ###########################################################################################################
    logger.info('Processing images ...')
    # silda-images
    #   ...
    #   ├── 1445_0.png
    #   ├── 1445_1.png
    #   ...
    silda_images_root_path = path.join(silda_dir_path, 'silda-images')
    # list all png files (it's PNG in silda) using a generator.
    if corpus is not None:
        assert corpus in SILDA_CORPUS_SPLIT_FILENAMES
        # if corpus specified, filter by those which directory name match corpus.
        logger.debug(f'only importing {corpus} part.')
        corpus_file_path = path.join(silda_dir_path,
                                     SILDA_CORPUS_SPLIT_FILENAMES[corpus])
        with open(corpus_file_path, 'rt') as corpus_file:
            corpus_filenames = corpus_file.readlines()
            image_filenames_original = sorted(filename.strip()
                                              for filename in corpus_filenames)
    else:
        image_filenames_original = sorted(
            filename for dir_path, sd, fs in os.walk(silda_images_root_path)
            for filename in fs if filename.endswith('.png'))

    image_filenames_kapture = []
    snapshots = kapture.RecordsCamera()
    image_name_to_ids = {}  # '1445_0.png' -> (1445, 0)
    for image_filename_original in tqdm(image_filenames_original,
                                        disable=hide_progress_bars):
        # retrieve info from image filename
        name_parts_match = SILDA_IMAGE_NAME_PATTERN.match(
            image_filename_original)
        assert name_parts_match is not None
        shot_info: Dict[str, Any]
        shot_info = name_parts_match.groupdict()
        shot_info['timestamp'] = int(shot_info['timestamp'])  # cast to int to avoid type warnings
        # eg. file_info = {'filename': '1445_0.png', 'timestamp': 1445, 'cam_id': '0'}
        # build the destination path of the image inside the kapture directory
        if do_split_cams:
            # re-organise images with subfolders per corpus/camera/timestamp.png
            kapture_image_filename = path.join(
                shot_info['cam_id'],
                '{:04d}.png'.format(shot_info['timestamp']))
        else:
            # keep the original file hierarchy
            kapture_image_filename = image_filename_original

        image_filenames_kapture.append(kapture_image_filename)
        snapshots[shot_info['timestamp'],
                  shot_info['cam_id']] = kapture_image_filename
        image_name_to_ids[shot_info['filename']] = (shot_info['timestamp'],
                                                    shot_info['cam_id'])

    assert len(image_filenames_kapture) == len(image_filenames_original)
    # intrinsics #######################################################################################################
    cameras = _import_cameras(silda_dir_path, snapshots, fallback_cam_model)

    # extrinsics #######################################################################################################
    trajectories = _import_trajectories(silda_dir_path, image_name_to_ids,
                                        hide_progress_bars)

    # rigs
    rigs = _make_rigs(replace_pose_rig, trajectories)

    # pack it all together
    kapture_data = kapture.Kapture(sensors=cameras,
                                   records_camera=snapshots,
                                   trajectories=trajectories,
                                   rigs=rigs)

    logger.info('saving to Kapture  ...')
    kapture.io.csv.kapture_to_dir(destination_kapture_dir_path, kapture_data)

    # finally import images
    if images_import_strategy != TransferAction.skip:
        # importing image files
        logger.info(f'importing {len(image_filenames_original)} images ...')
        assert len(image_filenames_original) == len(image_filenames_kapture)
        image_file_paths_original = [
            path.join(silda_images_root_path, image_filename_original)
            for image_filename_original in image_filenames_original
        ]
        image_file_paths_kapture = [
            get_image_fullpath(destination_kapture_dir_path,
                               image_filename_kapture)
            for image_filename_kapture in image_filenames_kapture
        ]
        transfer_files_from_dir(image_file_paths_original,
                                image_file_paths_kapture,
                                images_import_strategy)
    logger.info('done.')
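
Image names such as '1445_0.png' are decomposed by SILDA_IMAGE_NAME_PATTERN into a timestamp and a camera id. The pattern below is only a stand-in written for illustration (the real one is defined in the importer module), but it reproduces the groupdict shape the code above expects:

import re

SILDA_IMAGE_NAME_PATTERN = re.compile(r'(?P<filename>(?P<timestamp>\d+)_(?P<cam_id>\d+)\.png)')  # stand-in pattern
info = SILDA_IMAGE_NAME_PATTERN.match('1445_0.png').groupdict()
info['timestamp'] = int(info['timestamp'])
print(info)  # {'filename': '1445_0.png', 'timestamp': 1445, 'cam_id': '0'}
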
def import_bundler(
        bundler_path: str,
        image_list_path: str,
        image_dir_path: str,
        kapture_dir_path: str,
        ignore_trajectories: bool,
        add_reconstruction: bool,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.skip) -> None:
    """
    Imports bundler data and saves it as kapture.

    :param bundler_path: path to the bundler model file
    :param image_list_path: path to the file containing the list of image names
    :param image_dir_path: input path to bundler image directory.
    :param kapture_dir_path: path to kapture top directory
    :param ignore_trajectories: if True, will not import the trajectories
    :param add_reconstruction: if True, will create 3D points and observations
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    """
    os.makedirs(kapture_dir_path, exist_ok=True)
    delete_existing_kapture_files(kapture_dir_path,
                                  force_erase=force_overwrite_existing)

    logger.info('loading all content...')
    # if there is a filter list, parse it
    with open(image_list_path) as file:
        file_content = file.readlines()
    # remove end line char and empty lines
    image_list = [line.rstrip() for line in file_content if line != '\n']

    with open(bundler_path) as file:
        bundler_content = file.readlines()
    # remove end line char and empty lines
    bundler_content = [
        line.rstrip() for line in bundler_content if line != '\n'
    ]
    assert bundler_content[0] == "# Bundle file v0.3"
    # <num_cameras> <num_points>
    line_1 = bundler_content[1].split()
    number_of_cameras = int(line_1[0])
    number_of_points = int(line_1[1])
    offset = 2
    number_of_lines_per_camera = 5  # 1 camera + 3 rotation + 1 translation

    cameras = kapture.Sensors()
    images = kapture.RecordsCamera()
    trajectories = kapture.Trajectories() if not ignore_trajectories else None
    points3d = [] if add_reconstruction else None
    keypoints = kapture.Keypoints('sift', np.float32,
                                  2) if add_reconstruction else None
    observations = kapture.Observations() if add_reconstruction else None
    image_mapping = []  # bundler camera_id -> (name, width, height)
    for i in range(0, number_of_cameras):
        start_index = i * number_of_lines_per_camera + offset
        file_name = image_list[i]

        # process camera info
        line_camera = bundler_content[start_index].split()
        focal_length = float(line_camera[0])
        k1 = float(line_camera[1])
        k2 = float(line_camera[2])

        # lazy open
        with Image.open(path.join(image_dir_path, file_name)) as im:
            width, height = im.size

        image_mapping.append((file_name, width, height))
        camera = kapture.Camera(
            MODEL,
            [width, height, focal_length, width / 2, height / 2, k1, k2])
        camera_id = f'sensor{i}'
        cameras[camera_id] = camera

        # process extrinsics
        rotation_matrix = [[float(v) for v in line.split()]
                           for line in bundler_content[start_index +
                                                       1:start_index + 4]]

        quaternion_wxyz = quaternion.from_rotation_matrix(rotation_matrix)
        translation = np.array(
            [float(v) for v in bundler_content[start_index + 4].split()])
        pose = kapture.PoseTransform(quaternion_wxyz, translation)

        # The Bundler model uses a coordinate system that differs from the *computer vision camera
        #  coordinate system*. More specifically, they use the camera coordinate system typically used
        #  in *computer graphics*. In this camera coordinate system, the camera is looking down the
        #  `-z`-axis, with the `x`-axis pointing to the right and the `y`-axis pointing upwards.
        # rotation Pi around the x axis to get the *computer vision camera
        #  coordinate system*
        rotation_around_x = quaternion.quaternion(0.0, 1.0, 0.0, 0.0)
        transformation = kapture.PoseTransform(rotation_around_x,
                                               np.array([0, 0, 0]))

        images[(i, camera_id)] = file_name
        if trajectories is not None:
            # transformation.inverse() is equal to transformation (rotation around -Pi or Pi around X is the same)
            trajectories[(i, camera_id)] = kapture.PoseTransform.compose(
                [transformation, pose, transformation])

    if points3d is not None and number_of_points > 0:
        assert keypoints is not None
        assert observations is not None
        offset += number_of_cameras * number_of_lines_per_camera
        number_of_lines_per_point = 3  # position color viewlist

        # (image_name, bundler_keypoint_id ) -> keypoint_id
        known_keypoints = {}
        local_keypoints = {}

        for i in range(0, number_of_points):
            start_index = i * number_of_lines_per_point + offset
            position = [float(v) for v in bundler_content[start_index].split()]
            # apply transformation
            position = [position[0], -position[1], -position[2]]
            color = [
                float(v) for v in bundler_content[start_index + 1].split()
            ]

            # <view list>: length of the list + [<camera> <key> <x> <y>]
            # x, y origin is the center of the image
            view_list = bundler_content[start_index + 2].split()
            number_of_observations = int(view_list[0])

            for j in range(number_of_observations):
                camera_id = int(view_list[1 + 4 * j + 0])
                keypoint_id = int(view_list[1 + 4 * j + 1])
                x = float(view_list[1 + 4 * j + 2])
                y = float(view_list[1 + 4 * j + 3])

                file_name, width, height = image_mapping[camera_id]
                # put (0,0) in upper left corner
                x += (width / 2)
                y += (height / 2)

                # init local_keypoints if needed
                if file_name not in local_keypoints:
                    local_keypoints[file_name] = []
                # do not add the same keypoint twice
                if (file_name, keypoint_id) not in known_keypoints:
                    # in the kapture format, keypoint id is different. Note that it starts from 0
                    known_keypoints[(file_name, keypoint_id)] = len(
                        local_keypoints[file_name])
                    local_keypoints[file_name].append([x, y])
                keypoint_idx = known_keypoints[(file_name, keypoint_id)]
                observations.add(i, file_name, keypoint_idx)
            points3d.append(position + color)
        points3d = np.array(points3d)

        # finally, convert local_keypoints to np.ndarray and add them to the global keypoints variable
        keypoints = kapture.Keypoints('sift', np.float32, 2)
        for image_filename, keypoints_array in local_keypoints.items():
            keypoints_np_array = np.array(keypoints_array).astype(np.float32)
            keypoints_out_path = kapture.io.features.get_keypoints_fullpath(
                kapture_dir_path, image_filename)
            kapture.io.features.image_keypoints_to_file(
                keypoints_out_path, keypoints_np_array)
            keypoints.add(image_filename)

    if points3d is not None:
        points3d = kapture.Points3d(points3d)

    # import (copy) image files.
    logger.info('import image files ...')
    filename_list = [f for _, _, f in kapture.flatten(images)]
    import_record_data_from_dir_auto(image_dir_path, kapture_dir_path,
                                     filename_list, images_import_method)

    # pack into kapture format
    imported_kapture = kapture.Kapture(sensors=cameras,
                                       records_camera=images,
                                       trajectories=trajectories,
                                       points3d=points3d,
                                       keypoints=keypoints,
                                       observations=observations)
    logger.info('writing imported data...')
    kapture_to_dir(kapture_dir_path, imported_kapture)
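
The coordinate fix above conjugates each Bundler pose with a Pi rotation around the x axis. A small sketch checking the two properties the comments rely on, namely that this rotation flips y and z and that it is its own inverse (assuming the numpy-quaternion package, which the importer already uses):

import numpy as np
import quaternion  # numpy-quaternion, same package used by the importer

rotation_around_x = quaternion.quaternion(0.0, 1.0, 0.0, 0.0)  # w, x, y, z: Pi around x
R = quaternion.as_rotation_matrix(rotation_around_x)
print(np.round(R))                    # diag(1, -1, -1): keeps x, flips y and z
assert np.allclose(R @ R, np.eye(3))  # applying it twice is the identity, so it equals its inverse
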
def import_robotcar_seasons(
        robotcar_path: str,
        kapture_path: str,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.skip,
        skip_reconstruction: bool = False,
        rig_collapse: bool = False,
        use_colmap_intrinsics: bool = False,
        import_v1: bool = False) -> None:
    """
    Reads the RobotCar Seasons data and creates several kaptures with training and query data.
    :param robotcar_path: path to the robotcar top directory
    :param kapture_path: path to the kapture top directory
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    :param skip_reconstruction: if True, will skip the reconstruction part from the training data
    :param rig_collapse: if True, will collapse the rig
    :param use_colmap_intrinsics: if True, will use the colmap intrinsics
    :param import_v1: if True, will use the version 1 of the format
    """

    kapture_path = path.join(kapture_path, "base")
    os.makedirs(kapture_path, exist_ok=True)

    cameras = import_robotcar_cameras(path.join(robotcar_path, 'intrinsics'))
    rigs = import_robotcar_rig(path.join(robotcar_path, 'extrinsics'))

    logger.info("Importing test data")
    # Test data
    image_pattern = re.compile(
        r'(?P<condition>.+)/(?P<camera>\w+)/(?P<timestamp>\d+)\.jpg')
    queries_path = path.join(robotcar_path, '3D-models', 'individual',
                             'queries_per_location')
    kapture_imported_query = {}
    for root, dirs, files in os.walk(queries_path):
        for query_file in files:
            records_camera = kapture.RecordsCamera()
            # Get list of query images
            with open(path.join(queries_path, query_file)) as f:
                for line in f:
                    matches = image_pattern.match(line)
                    image_path = line.strip()
                    if not matches:
                        logger.warning(f"Error matching line in {image_path}")
                        continue
                    matches = matches.groupdict()
                    timestamp = int(matches['timestamp'])
                    camera = str(matches['camera'])
                    condition = str(matches['condition'])
                    records_camera[timestamp, camera] = image_path

                (query_name, _) = query_file.split('.')
                kapture_test = kapture.Kapture(sensors=cameras,
                                               rigs=rigs,
                                               records_camera=records_camera)
                kapture_imported_query[int(
                    query_name.split('_')[-1])] = kapture_test

    # Reference map data
    logger.info("Importing reference map")
    colmap_reconstructions_path = path.join(robotcar_path, '3D-models',
                                            'individual',
                                            'colmap_reconstructions')
    kapture_imported_mapping = {}
    for root, dirs, files in os.walk(colmap_reconstructions_path):
        for colmap_reconstruction in dirs:
            (loc_id, _) = colmap_reconstruction.split('_')
            kapture_reconstruction_dir = path.join(kapture_path,
                                                   f"{int(loc_id):02d}",
                                                   "mapping")
            delete_existing_kapture_files(kapture_reconstruction_dir,
                                          force_erase=force_overwrite_existing)
            logger.info(f'Converting reconstruction {loc_id} to kapture  ...')
            kapture_reconstruction_data = import_robotcar_colmap_location(
                robotcar_path,
                path.join(colmap_reconstructions_path, colmap_reconstruction),
                kapture_reconstruction_dir, rigs, skip_reconstruction)
            # replace intrinsics with the ones found in the text files
            if not use_colmap_intrinsics:
                kapture_reconstruction_data.sensors = cameras
            kapture_imported_mapping[int(loc_id)] = kapture_reconstruction_data

    if not import_v1:
        queries_per_location = {
            image_name: (ts, cam_id, loc_id)
            for loc_id, kdata_test in kapture_imported_query.items() for ts,
            cam_id, image_name in kapture.flatten(kdata_test.records_camera)
        }
        kapture_imported_training = {}  # stores kapture for each submap
        # read robotcar_v2_train.txt
        v2_train_data = read_robotcar_v2_train(robotcar_path)
        for image_name, pose in v2_train_data.items():
            ts, cam_id, loc_id = queries_per_location[image_name]
            assert cam_id == 'rear'
            # create kapture object for submap if it doesn't exist
            if loc_id not in kapture_imported_training:
                kapture_loc_id = kapture.Kapture(sensors=cameras, rigs=rigs)
                kapture_loc_id.records_camera = kapture.RecordsCamera()
                kapture_loc_id.trajectories = kapture.Trajectories()
                kapture_imported_training[loc_id] = kapture_loc_id
            kapture_imported_training[loc_id].records_camera[
                ts, cam_id] = image_name
            kapture_imported_training[loc_id].trajectories[ts, cam_id] = pose
            matches = image_pattern.match(image_name)
            if not matches:
                logger.warning(f"Error matching line in {image_name}")
                continue
            matches = matches.groupdict()
            condition = str(matches['condition'])
            timestamp = str(matches['timestamp'])
            camera = str(matches['camera'])
            # add left and right images to records_camera
            left_image_name = condition + '/' + 'left' + '/' + timestamp + '.jpg'
            right_image_name = condition + '/' + 'right' + '/' + timestamp + '.jpg'
            kapture_imported_training[loc_id].records_camera[
                ts, 'left'] = left_image_name
            kapture_imported_training[loc_id].records_camera[
                ts, 'right'] = right_image_name

            # remove entries from query
            del kapture_imported_query[loc_id].records_camera[ts][cam_id]
            del kapture_imported_query[loc_id].records_camera[ts]['left']
            del kapture_imported_query[loc_id].records_camera[ts]['right']
            del kapture_imported_query[loc_id].records_camera[ts]

        # all remaining query images are kept; reading robotcar_v2_test.txt is not necessary

    # apply rig collapse
    if rig_collapse:
        logger.info('replacing camera poses with rig poses.')
        for kdata_mapping in kapture_imported_mapping.values():
            kapture.rigs_recover_inplace(kdata_mapping.trajectories, rigs,
                                         'rear')
        for kdata_training in kapture_imported_training.values():
            kapture.rigs_recover_inplace(kdata_training.trajectories, rigs,
                                         'rear')

    # IO operations
    robotcar_image_path = path.join(robotcar_path, "images")
    for loc_id, kdata_query in kapture_imported_query.items():
        loc_id_str = f"{loc_id:02d}"
        logger.info(f'writing test data: {loc_id_str}')
        kapture_test_dir = path.join(kapture_path, loc_id_str, "query")
        delete_existing_kapture_files(kapture_test_dir,
                                      force_erase=force_overwrite_existing)
        if not kdata_query.records_camera:  # all images were removed
            continue
        kapture_to_dir(kapture_test_dir, kdata_query)
        query_images = [
            f for _, _, f in kapture.flatten(kdata_query.records_camera)
        ]
        import_record_data_from_dir_auto(robotcar_image_path, kapture_test_dir,
                                         query_images, images_import_method)

    for loc_id, kdata_mapping in kapture_imported_mapping.items():
        loc_id_str = f"{loc_id:02d}"
        logger.info(f'writing mapping data: {loc_id_str}')
        kapture_reconstruction_dir = path.join(kapture_path, f"{loc_id:02d}",
                                               "mapping")
        delete_existing_kapture_files(kapture_reconstruction_dir,
                                      force_erase=force_overwrite_existing)
        kapture_to_dir(kapture_reconstruction_dir, kdata_mapping)
        mapping_images = [
            f for _, _, f in kapture.flatten(kdata_mapping.records_camera)
        ]
        import_record_data_from_dir_auto(robotcar_image_path,
                                         kapture_reconstruction_dir,
                                         mapping_images, images_import_method)

    for loc_id, kdata_training in kapture_imported_training.items():
        loc_id_str = f"{loc_id:02d}"
        logger.info(f'writing training data: {loc_id_str}')
        kapture_training_dir = path.join(kapture_path, f"{loc_id:02d}",
                                         "training")
        delete_existing_kapture_files(kapture_training_dir,
                                      force_erase=force_overwrite_existing)
        kapture_to_dir(kapture_training_dir, kdata_training)
        mapping_images = [
            f for _, _, f in kapture.flatten(kdata_training.records_camera)
        ]
        import_record_data_from_dir_auto(robotcar_image_path,
                                         kapture_training_dir, mapping_images,
                                         images_import_method)
def import_virtual_gallery(input_root_path: str,
                           configuration: str,
                           light_range: List[int],
                           loop_range: List[int],
                           camera_range: List[int],
                           occlusion_range: List[int],
                           as_rig: bool,
                           images_import_method: TransferAction,
                           kapture_path: str,
                           force_overwrite_existing: bool = False) -> None:
    """
    Creates a kapture with a virtual gallery.

    :param input_root_path: root path of virtual gallery
    :param configuration: training, testing or all (both)
    :param light_range: list of lights to include
    :param loop_range: list of training loops to include
    :param camera_range: list of training cameras to include
    :param occlusion_range: list of testing occlusion levels to include
    :param as_rig: in training trajectories, writes the position of the rig instead of individual cameras
    :param images_import_method: choose how to import actual image files.
    :param kapture_path: path to kapture top directory
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    """
    # Check for existing files
    os.makedirs(kapture_path, exist_ok=True)
    delete_existing_kapture_files(kapture_path, force_overwrite_existing)

    offset = 0
    cameras = kapture.Sensors()
    images = kapture.RecordsCamera()
    trajectories = kapture.Trajectories()
    rigs = kapture.Rigs()

    # Process all training data
    if configuration == "training" or configuration == "all":
        logger.info("Reading training files")
        camera_range_set = set(camera_range)
        training_intrinsics = import_training_intrinsics(input_root_path, light_range, loop_range, camera_range_set)
        training_extrinsics = import_training_extrinsics(input_root_path, light_range, loop_range, camera_range_set)

        convert_training_intrinsics(training_intrinsics, cameras)
        convert_training_extrinsics(offset, training_extrinsics, images, trajectories, as_rig)
        rigs.update(training_rig_config)

        offset += len(training_extrinsics)
    # Process all testing data
    if configuration == "testing" or configuration == "all":
        logger.info("Reading testing files")
        testing_intrinsics = import_testing_intrinsics(input_root_path, light_range, occlusion_range)
        testing_extrinsics = import_testing_extrinsics(input_root_path, light_range, occlusion_range)

        convert_testing_intrinsics(testing_intrinsics, cameras)
        convert_testing_extrinsics(offset, testing_extrinsics, images, trajectories)

        offset += len(testing_extrinsics)

    logger.info("Writing imported data to disk")
    kapture_data = kapture.Kapture(sensors=cameras, records_camera=images, trajectories=trajectories, rigs=rigs or None)
    # import images
    image_list = [name for _, _, name in kapture.flatten(kapture_data.records_camera)]
    import_record_data_from_dir_auto(input_root_path, kapture_path, image_list, images_import_method)
    kapture_to_dir(kapture_path, kapture_data)
def compute_distance_pairs(mapping_path: str, query_path: Optional[str],
                           output_path: str, topk: int, block_size: int,
                           min_distance: float, max_distance: float,
                           max_angle: float, keep_rejected: bool):
    """
    Compute image pairs from camera pose distance, and write the result to a text file.
    """
    skip_heavy = [
        kapture.RecordsLidar, kapture.RecordsWifi, kapture.Keypoints,
        kapture.Descriptors, kapture.GlobalFeatures, kapture.Matches,
        kapture.Points3d, kapture.Observations
    ]

    logger.info(f'compute_distance_pairs. loading mapping: {mapping_path}')
    kdata = kapture_from_dir(mapping_path, skip_list=skip_heavy)
    assert kdata.sensors is not None
    assert kdata.records_camera is not None
    assert kdata.trajectories is not None

    if query_path is None or mapping_path == query_path:
        logger.info('computing mapping pairs from distance...')
        kdata_query = None
    else:
        logger.info('computing query pairs from distance...')
        kdata_query = kapture_from_dir(query_path, skip_list=skip_heavy)
        assert kdata_query.sensors is not None
        assert kdata_query.records_camera is not None
        assert kdata_query.trajectories is not None

    os.umask(0o002)
    p = pathlib.Path(output_path)
    os.makedirs(str(p.parent.resolve()), exist_ok=True)

    with open(output_path, 'w') as fid:
        if kdata_query is None:
            kdata_query = kdata
        if kdata_query.rigs is not None:
            assert kdata_query.trajectories is not None  # for ide
            kapture.rigs_remove_inplace(kdata_query.trajectories,
                                        kdata_query.rigs)
        records_camera_list = sorted(kapture.flatten(kdata_query.records_camera),
                                     key=lambda x: x[2])
        number_of_iteration = math.ceil(len(records_camera_list) / block_size)
        table_to_file(fid, [], header='# query_image, map_image, score')
        for i in tqdm(range(number_of_iteration),
                      disable=logging.getLogger().level >= logging.CRITICAL):
            sliced_records = kapture.RecordsCamera()
            for ts, sensor_id, img_name in records_camera_list[i * block_size:(i + 1) * block_size]:
                if (ts, sensor_id) not in kdata_query.trajectories:
                    continue
                sliced_records[(ts, sensor_id)] = img_name
            kdata_slice_query = kapture.Kapture(
                sensors=kdata_query.sensors,
                records_camera=sliced_records,
                trajectories=kdata_query.trajectories)
            image_pairs = get_pairs_distance(kdata, kdata_slice_query, topk,
                                             min_distance, max_distance,
                                             max_angle, keep_rejected)
            table_to_file(fid, image_pairs)
    logger.info('all done')
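
The pairing loop above walks the records block_size images at a time. A tiny sketch of that block arithmetic on a made-up list, showing that every record lands in exactly one block:

import math

records = list(range(10))   # placeholder for records_camera_list
block_size = 4
number_of_iteration = math.ceil(len(records) / block_size)
blocks = [records[i * block_size:(i + 1) * block_size] for i in range(number_of_iteration)]
print(blocks)  # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]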