예제 #1
0
    def test_keypoints_read_from_files(self):
        """Keypoints are discovered from .kpt files on disk when no image list is given."""
        expected_image_ids = {f'cam{cam_idx}/{ts:05d}.jpg'
                              for cam_idx in range(2)
                              for ts in range(2)}
        # write a fake keypoints type descriptor
        config_path = path.join(self._keypoints_dirpath, 'keypoints.txt')
        os.makedirs(path.dirname(config_path), exist_ok=True)
        with open(config_path, 'wt') as config_file:
            config_file.write('SIFT, float, 4')
        # write one dummy keypoints file per image
        for image_id in expected_image_ids:
            kpt_path = path_secure(path.join(self._keypoints_dirpath, image_id + '.kpt'))
            os.makedirs(path.dirname(kpt_path), exist_ok=True)
            with open(kpt_path, 'wt') as kpt_file:
                kpt_file.write(' ')

        # lock and load
        keypoints = csv.keypoints_from_dir(self._kapture_dirpath, None)

        # check: all 4 files found, type read from the descriptor
        self.assertEqual('SIFT', keypoints.type_name)
        self.assertEqual(4, len(keypoints))
        keypoints_filepaths = kapture.io.features.keypoints_to_filepaths(keypoints, self._kapture_dirpath)
        expected_image_names = {path_secure(os.path.join(f'cam{c}', f'{t:05d}.jpg'))
                                for c in (0, 1) for t in (0, 1)}
        expected_feature_paths = {
            path_secure(os.path.join(self._kapture_dirpath, 'reconstruction',
                                     'keypoints', f'cam{c}', f'{t:05d}.jpg.kpt'))
            for c in (0, 1) for t in (0, 1)}

        self.assertEqual(expected_image_names, set(keypoints_filepaths))
        self.assertEqual(expected_feature_paths, set(keypoints_filepaths.values()))
예제 #2
0
def transfer_files_from_dir_link(source_filepath_list: Iterable[str],
                                 destination_filepath_list: Iterable[str],
                                 force_overwrite: bool = False,
                                 do_relative_link: bool = False) -> None:
    """
    Transfer every files by linking given from the source list to destination list.
    The matching between source and kapture files are explicitly given.

    :param source_filepath_list: input list of source files. Uses guess_filepaths to obtains it from filenames.
    :param destination_filepath_list: input list of destination files (in kapture tree).
    :param force_overwrite: if True, overwrite destination file.
    :param do_relative_link: if True, do relative links else absolute links.
    """
    # only show progress when logging at INFO level or lower
    hide_progress_bar = logger.getEffectiveLevel() > logging.INFO
    pairs = zip(source_filepath_list, destination_filepath_list)
    for source_path, dest_path in tqdm(pairs, disable=hide_progress_bar):
        # work on normalized absolute paths
        source_path = path_secure(path.abspath(source_path))
        dest_path = path_secure(path.abspath(dest_path))
        os.makedirs(path.dirname(dest_path), exist_ok=True)
        # lexists also detects dangling symlinks that exists() would miss
        if force_overwrite and path.lexists(dest_path):
            os.remove(dest_path)
        try:  # on windows, symlink requires some privileges, and may crash if not
            if do_relative_link:
                source_path = path.relpath(source_path, path.dirname(dest_path))
            os.symlink(source_path, dest_path)
        except OSError as e:
            logger.critical(
                'unable to create symlink on image directory, due to privilege restrictions.'
            )
            raise e
예제 #3
0
def make_fake_filenames(root_path: str, post_fix=''):
    """
    Create a small fake file tree under root_path and return the relative filenames.

    Layout: {a,b}/{00..02}/{00..02}, each file containing its own full path.
    post_fix is appended to the on-disk filename only, not to the returned names.
    """
    two_digit_ids = [f'{i:02d}' for i in range(3)]
    filenames = [path_secure(path.join(top_dir, sub_dir, leaf))
                 for top_dir in ['a', 'b']
                 for sub_dir in two_digit_ids
                 for leaf in two_digit_ids]
    for filename in filenames:
        filepath = path_secure(path.join(root_path, filename + post_fix))
        os.makedirs(path.dirname(filepath), exist_ok=True)
        with open(filepath, 'w') as fake_file:
            # write the path itself so file contents are unique and checkable
            fake_file.write(filepath)
    return filenames
예제 #4
0
    def test_keypoints_read_from_images(self):
        """Load keypoints with an explicit image list: empty before the .kpt files
        exist, fully populated after, and keypoints_check_dir tracks file presence."""
        # Create: 2 cameras x 2 timestamps = 4 image ids
        images_ids = set(f'cam{cam}/{timestamp:05d}.jpg' for cam in range(2)
                         for timestamp in range(2))
        keypoints_config_filepath = path.join(self._keypoints_dirpath,
                                              'keypoints.txt')
        os.makedirs(path.dirname(keypoints_config_filepath), exist_ok=True)
        with open(keypoints_config_filepath, 'wt') as f:
            f.write('SIFT, float, 4')

        # lock and load
        keypoints = csv.keypoints_from_dir(self._kapture_dirpath, images_ids)

        # check its empty: no .kpt file exists yet, so none of the ids are kept
        self.assertEqual('SIFT', keypoints.type_name)
        self.assertEqual(0, len(keypoints))

        # an empty keypoints set is trivially consistent with the directory
        valid = kapture.io.features.keypoints_check_dir(
            keypoints, self._kapture_dirpath)
        self.assertTrue(valid)

        # create actual files (empty content: only existence matters here)
        for images_id in images_ids:
            keypoint_filepath = path.join(self._keypoints_dirpath,
                                          images_id + '.kpt')
            os.makedirs(path.dirname(keypoint_filepath), exist_ok=True)
            with open(keypoint_filepath, 'wt') as f:
                f.write('')

        # lock and load again: now all 4 ids are picked up
        keypoints = csv.keypoints_from_dir(self._kapture_dirpath, images_ids)
        self.assertEqual('SIFT', keypoints.type_name)
        self.assertEqual(4, len(keypoints))

        keypoints_filepaths = kapture.io.features.keypoints_to_filepaths(
            keypoints, self._kapture_dirpath)
        image_filenames_expected = {
            f'cam{ci}/{ts:05d}.jpg'
            for ci in [0, 1] for ts in [0, 1]
        }
        feature_filepaths_expected = {
            path_secure(
                f'{self._kapture_dirpath}/reconstruction/keypoints/cam{ci}/{ts:05d}.jpg.kpt'
            )
            for ci in [0, 1] for ts in [0, 1]
        }

        self.assertEqual(image_filenames_expected, set(keypoints_filepaths))
        self.assertEqual(feature_filepaths_expected,
                         set(keypoints_filepaths.values()))

        valid = kapture.io.features.keypoints_check_dir(
            keypoints, self._kapture_dirpath)
        self.assertTrue(valid)

        # destroy files and check: a single missing file invalidates the directory
        os.remove(path.join(self._keypoints_dirpath, 'cam0/00000.jpg.kpt'))
        valid = kapture.io.features.keypoints_check_dir(
            keypoints, self._kapture_dirpath)
        self.assertFalse(valid)
예제 #5
0
def guess_filepaths_from_filenames(
        dirpath: str,
        filenames: Iterable[str]
):
    """ returns a generator that prepend the directory path to the given filenames."""
    for record_filename in filenames:
        yield path_secure(path.join(dirpath, record_filename))
예제 #6
0
    def test_link_rel(self):
        """Transfer by relative symlink: stored link targets are relative, and
        resolve back to the original absolute source files."""
        source_filepaths = [
            path_secure(path.join(self._source_dirpath, filename))
            for filename in self._filenames
        ]
        destination_filepaths = [
            kapture.io.records.get_image_fullpath(self._dest_dirpath, filename)
            for filename in self._filenames
        ]
        kapture.io.records.transfer_files_from_dir_link(source_filepaths,
                                                        destination_filepaths,
                                                        do_relative_link=True)

        for destination_filepath, source_filepath in zip(
                destination_filepaths, source_filepaths):
            self.assertTrue(path.islink(destination_filepath))
            # the raw link target is relative, so it must differ from the
            # absolute source path
            self.assertNotEqual(source_filepath,
                                os.readlink(destination_filepath))
            # resolving the relative target against the link's own directory
            # must give back the absolute source path
            resolved_path = path.normpath(
                path.join(path.dirname(destination_filepath),
                          os.readlink(destination_filepath)))
            self.assertEqual(source_filepath, resolved_path)

        # linking must leave the source files untouched
        for source_filepath in source_filepaths:
            self.assertTrue(path.isfile(source_filepath))
예제 #7
0
def delete_existing_kapture_files(dirpath: str,
                                  force_erase: bool,
                                  only: Optional[List[type]] = None,
                                  skip: Optional[List[type]] = None):
    """
    Deletes all existing files / directories at dirpath that corresponds to kapture data.
    do not use only and skip at the same time.

    :param dirpath: root kapture directory to clean up.
    :param force_erase: do not ask user confirmation.
    :param only: can be used to select files / directories to be removed.
    :param skip: can be used to select files / directories to be kept.
    :return:
    """
    assert only is None or isinstance(only, list)
    assert skip is None or isinstance(skip, list)

    dirpath = path_secure(dirpath)
    # keep a data type when no filter is given, or it is selected by 'only',
    # or it is not excluded by 'skip'
    csv_filepaths = [
        path.join(dirpath, filename)
        for dtype, filename in CSV_FILENAMES.items()
        if (not only and not skip) or (only and dtype in only) or (
            skip and dtype not in skip)
    ]
    features_dirpaths = [
        path.join(dirpath, dirname)
        for dtype, dirname in FEATURES_DATA_DIRNAMES.items()
        if (not only and not skip) or (only and dtype in only) or (
            skip and dtype not in skip)
    ]
    # BUGFIX: join with dirpath like the other path lists; the bare dirname was
    # checked (and deleted) relative to the current working directory instead of
    # the kapture tree, and broke path.relpath below.
    records_dirpaths = [path.join(dirpath, RECORD_DATA_DIRNAME)]
    # remove existing_files files (start with deepest/longest paths to avoid to delete files before dirs).
    existing_paths = list(
        reversed(
            sorted({
                pathval
                for pathval in csv_filepaths + features_dirpaths +
                records_dirpaths if path.isfile(pathval) or path.isdir(pathval)
            })))
    # if any
    if existing_paths:
        existing_paths_as_string = ', '.join(f'"{path.relpath(p, dirpath)}"'
                                             for p in existing_paths)
        # ask for permission (unless force_erase)
        to_delete = (force_erase or (input(
            f'{existing_paths_as_string} already in "{dirpath}".'
            ' Delete ? [y/N]').lower() == 'y'))

        # delete all or quit
        if to_delete:
            getLogger().info('deleting already'
                             f' existing {existing_paths_as_string}')
            for pathval in existing_paths:
                # remove symlinks and files directly, directories recursively
                if path.islink(pathval) or path.isfile(pathval):
                    os.remove(pathval)
                else:
                    rmtree(pathval)
        else:
            raise ValueError(f'{existing_paths_as_string} already exist')
예제 #8
0
 def test_get_image_fullpath(self):
     """get_image_fullpath places the image under the kapture root and keeps its name."""
     image_name = "my_image.jpg"
     image_path = kapture.io.records.get_image_fullpath(
         self._kapture_path, image_name)
     # compare against the secured (normalized) root path
     self.assertTrue(image_path.startswith(path_secure(self._kapture_path)),
                     "Image path is under the kapture path")
     self.assertTrue(image_path.endswith(image_name),
                     "Image path end with the image name")
def import_image_folder(
        images_path: str,
        kapture_path: str,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.skip) -> None:
    """
    Imports the images of a folder to a kapture. This creates only images and cameras.

    :param images_path: path to directory containing the images.
    :param kapture_path: path to kapture root directory.
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    """
    os.makedirs(kapture_path, exist_ok=True)
    delete_existing_kapture_files(kapture_path,
                                  force_erase=force_overwrite_existing)

    cameras = kapture.Sensors()
    images = kapture.RecordsCamera()

    # collect every file under images_path, as paths relative to it
    file_list = [
        os.path.relpath(os.path.join(dirpath, filename), images_path)
        for dirpath, dirs, filenames in os.walk(images_path)
        for filename in filenames
    ]
    # deterministic order: timestamps / sensor ids depend on enumeration order
    file_list = sorted(file_list)

    logger.info('starting conversion...')
    for n, filename in enumerate(file_list):
        # test if file is a valid image
        try:
            # lazy load: Image.open reads only the header, enough for the size
            with Image.open(path.join(images_path, filename)) as im:
                width, height = im.size
                model_params = [width, height]
        except (OSError, PIL.UnidentifiedImageError):
            # It is not a valid image: skip it
            # BUGFIX: the f-string had no placeholder and logged a useless
            # '(unknown)'; report which file is being skipped.
            logger.info(f'Skipping invalid image file {filename}')
            continue

        # one UNKNOWN_CAMERA sensor per image, keyed by its enumeration index
        camera_id = f'sensor{n}'
        images[(n, camera_id)] = path_secure(filename)  # don't forget windows
        cameras[camera_id] = kapture.Camera(kapture.CameraType.UNKNOWN_CAMERA,
                                            model_params)

    # import (copy) image files.
    logger.info('import image files ...')
    filename_list = [f for _, _, f in kapture.flatten(images)]
    import_record_data_from_dir_auto(images_path, kapture_path, filename_list,
                                     images_import_method)

    # pack into kapture format
    imported_kapture = kapture.Kapture(sensors=cameras, records_camera=images)
    logger.info('writing imported data...')
    kapture_to_dir(kapture_path, imported_kapture)
예제 #10
0
def get_record_fullpath(
        kapture_dirpath: str = '',
        record_filename: Optional[str] = None) -> str:
    """
    Returns full path to subdirectory containing the binary files of the record type.
            Optionally, can give a the file name.
    :param kapture_dirpath: input path to kapture directory.
    :param record_filename: optional input record filename (eg. image filename).
    :return: the record full path
    """
    # None means "just the records directory itself"
    suffix = '' if record_filename is None else record_filename
    return path_secure(path.join(kapture_dirpath, RECORD_DATA_DIRNAME, suffix))
예제 #11
0
    def test_copy(self):
        """transfer_files_from_dir_copy copies every file and keeps the originals."""
        # BUGFIX: these were generator expressions; transfer_files_from_dir_copy
        # exhausted them, so both assertion loops below iterated zero times and
        # the test silently checked nothing. Use lists so the paths can be
        # iterated again after the transfer.
        origin_filepaths = [
            path_secure(path.join(self._source_dirpath, filename))
            for filename in self._filenames
        ]
        expected_filepaths = [
            kapture.io.records.get_image_fullpath(self._dest_dirpath, filename)
            for filename in self._filenames
        ]

        transfer_files_from_dir_copy(origin_filepaths, expected_filepaths)

        for expected_filepath in expected_filepaths:
            self.assertTrue(path.isfile(expected_filepath))
        # copy (not move): sources must still exist
        for origin_filepath in origin_filepaths:
            self.assertTrue(path.isfile(origin_filepath))
예제 #12
0
def get_features_fullpath(data_type: Any,
                          kapture_dirpath: str = '',
                          image_filename: Optional[str] = None) -> str:
    """
    Returns full path to subdirectory containing the binary files of the given type.
            Optionally, can give a the image file name, and add the feature file name (with proper extension).

    :param data_type:
    :param kapture_dirpath: input path to kapture directory.
    :param image_filename: optional input image filename (id).
    :return: Feature full path
    """
    subdir = FEATURES_DATA_DIRNAMES[data_type]
    # without an image filename, return the feature type's directory itself
    if image_filename:
        feature_filename = image_filename + FEATURE_FILE_EXTENSION[data_type]
    else:
        feature_filename = ''
    return path_secure(path.join(kapture_dirpath, subdir, feature_filename))
예제 #13
0
def records_to_filepaths(
        records: Union[kapture.RecordsCamera,
                       kapture.RecordsDepth,
                       kapture.RecordsWifi,
                       kapture.RecordsLidar,
                       kapture.RecordsGnss],
        kapture_dirpath: str
) -> Dict[str, str]:
    """
    Computes filepaths for records.

    :param records: records
    :param kapture_dirpath: top kapture directory path
    :return: records name to records file path dictionary
    """
    filepaths = {}
    # flatten yields (timestamp, sensor_id, filename); only the filename matters here
    for _, _, record_filename in kapture.flatten(records):
        filepaths[record_filename] = path_secure(
            path.join(kapture_dirpath, RECORD_DATA_DIRNAME, record_filename))
    return filepaths
예제 #14
0
def _import_images(input_json, image_action, kapture_images_path,
                   openmvg_images_dir, root_path, device_identifiers,
                   timestamp_for_pose):
    """
    Import the openMVG views into a kapture RecordsCamera, transferring image
    files according to image_action.

    :param input_json: openmvg sfm_data parsed as a dict; only VIEWS is read here.
    :param image_action: TransferAction controlling how image files are transferred.
    :param kapture_images_path: top directory of the kapture images tree.
    :param openmvg_images_dir: basename of the openmvg root, used as prefix directory.
    :param root_path: openmvg images root path (target of the root_link symlink).
    :param device_identifiers: output dict, filled with pose id -> device id.
    :param timestamp_for_pose: output dict, filled with pose id -> timestamp.
    :return: the populated kapture.RecordsCamera.
    """
    records_camera = kapture.RecordsCamera()
    if input_json.get(VIEWS):
        views = input_json[VIEWS]
        if image_action == TransferAction.root_link:
            # Do a unique images directory link
            # kapture/<records_dir>/openmvg_top_images_directory -> openmvg_root_path
            kapture_records_path = get_image_fullpath(kapture_images_path)
            os.makedirs(kapture_records_path, exist_ok=True)
            os.symlink(root_path,
                       path.join(kapture_records_path, openmvg_images_dir))
        logger.info(f'Importing {len(views)} images')
        # Progress bar only in debug or info level
        if image_action != TransferAction.skip and image_action != TransferAction.root_link \
                and logger.getEffectiveLevel() <= logging.INFO:
            progress_bar = tqdm(total=len(views))
        else:
            progress_bar = None
        for view in views:
            input_data = view[VALUE][PTR_WRAPPER][DATA]
            pose_id = input_data[ID_POSE]
            # All two values should be the same (?)
            # prefer the view id as timestamp; fall back to the view key when falsy
            if input_data[ID_VIEW]:
                timestamp = input_data[ID_VIEW]
            else:
                timestamp = view[KEY]
            device_id = str(input_data[ID_INTRINSIC]
                            )  # device_id must be a string for kapture
            device_identifiers[pose_id] = device_id
            timestamp_for_pose[pose_id] = timestamp

            # delegate the actual file transfer and path computation
            kapture_filename = _import_image_file(input_data,
                                                  openmvg_images_dir,
                                                  root_path,
                                                  kapture_images_path,
                                                  image_action)

            # progress_bar is None when skipped; short-circuit keeps this safe
            progress_bar and progress_bar.update(1)

            key = (timestamp, device_id)  # tuple of int,str
            records_camera[key] = path_secure(kapture_filename)
        progress_bar and progress_bar.close()
    return records_camera
예제 #15
0
def matching_pairs_from_dirpath(
        kapture_dirpath: str) -> Iterable[Tuple[str, str]]:
    """
    Read and build Matches from kapture directory tree.

    :param kapture_dirpath: top kapture directory path.
    :return: generator of (image_name1, image_name2) pairs found on disk.
    """
    matches_dirpath = get_matches_fullpath(None, kapture_dirpath)
    # list all files there is
    # filter only match files (the ones endings with .matches)
    matches_filenames = populate_files_in_dirpath(
        matches_dirpath, FEATURE_FILE_EXTENSION[kapture.Matches])

    # remove the extensions and cut
    # strip the trailing feature extension, then split on the pair separator
    # (followed by '/') to recover the two image names
    matching_pairs = (path_secure(matches_filename)
                      [:-len(FEATURE_FILE_EXTENSION[kapture.Matches])].split(
                          FEATURE_PAIR_PATH_SEPARATOR[kapture.Matches] + '/')
                      for matches_filename in matches_filenames)
    # keep only well-formed pairs (exactly two components after the split)
    matching_pairs = ((matches[0], matches[1]) for matches in matching_pairs
                      if len(matches) == 2)
    return matching_pairs
예제 #16
0
def openmvg_to_kapture(input_json: Dict[str, Union[str, Dict]],
                       kapture_images_path: str,
                       image_action=TransferAction.skip) -> kapture.Kapture:
    """
    Convert an openMVG structure to a kapture object. Also copy, move or link the images files if necessary.

    :param input_json: the openmvg JSON parsed as a dictionary
    :param kapture_images_path: top directory to create the kapture images tree
    :param image_action: action to apply on images: link, copy, move or do nothing.
    :return: the constructed kapture object
    """

    polymorphic_id_to_value = {}
    root_path: str = ''

    if input_json[ROOT_PATH]:
        root_path = input_json[ROOT_PATH]
    elif image_action == TransferAction.skip:
        logger.warning("No root_path in input file")
    else:  # It is needed to execute an action with the image file
        raise ValueError(
            f"Missing root_path to do image action '{image_action.name}'")
    openmvg_images_dir = path.basename(root_path)

    kapture_cameras = kapture.Sensors()
    if input_json.get(INTRINSICS):
        # BUGFIX: was an f-string with no placeholder
        logger.info('Importing intrinsics')
        for sensor in input_json[INTRINSICS]:
            value = sensor[VALUE]
            if POLYMORPHIC_NAME in value:
                # new type name: store it for next instances
                polymorphic_id = value[POLYMORPHIC_ID] & GET_ID_MASK
                polymorphic_id_to_value[polymorphic_id] = value[
                    POLYMORPHIC_NAME]
                logger.debug("New camera_type: " +
                             polymorphic_id_to_value[polymorphic_id])
            else:
                if POLYMORPHIC_ID not in value:
                    raise ValueError(
                        f'{POLYMORPHIC_ID} is missing (intrinsics)')
                polymorphic_id = value[POLYMORPHIC_ID]

            if polymorphic_id not in polymorphic_id_to_value:
                raise ValueError(f'Unknown polymorphic_id {polymorphic_id}')

            camera_model = CameraModel(polymorphic_id_to_value[polymorphic_id])
            camera_data = value[PTR_WRAPPER][DATA]

            if camera_model == CameraModel.pinhole:
                # w, h, f, cx, cy
                camera = kapture.Camera(kapture.CameraType.SIMPLE_PINHOLE, [
                    int(camera_data[WIDTH]),
                    int(camera_data[HEIGHT]),
                    camera_data[FOCAL_LENGTH],
                    camera_data[PRINCIPAL_POINT][0],
                    camera_data[PRINCIPAL_POINT][1],
                ])
            elif camera_model == CameraModel.pinhole_radial_k1:
                # w, h, f, cx, cy, k
                camera = kapture.Camera(kapture.CameraType.SIMPLE_RADIAL, [
                    int(camera_data[WIDTH]),
                    int(camera_data[HEIGHT]), camera_data[FOCAL_LENGTH],
                    camera_data[PRINCIPAL_POINT][0],
                    camera_data[PRINCIPAL_POINT][1], camera_data[DISTO_K1][0]
                ])
            elif camera_model == CameraModel.pinhole_radial_k3:
                # w, h, f, cx, cy, k1, k2, k3
                camera = kapture.Camera(kapture.CameraType.RADIAL, [
                    int(camera_data[WIDTH]),
                    int(camera_data[HEIGHT]), camera_data[FOCAL_LENGTH],
                    camera_data[PRINCIPAL_POINT][0],
                    camera_data[PRINCIPAL_POINT][1], camera_data[DISTO_K3][0],
                    camera_data[DISTO_K3][1]
                ])
                # camera_data["disto_k3"][2] ignored: radial model has two distortion param, while openMVG's has three
            elif camera_model == CameraModel.pinhole_brown_t2:
                # w, h, f, cx, cy, k1, k2, k3, t1, t2
                if float(camera_data[DISTO_T2][2]) != 0:
                    # if k3 not null, use FULL_OPENCV, otherwise OPENCV
                    # w, h, fx, fy, cx, cy, k1, k2, p1, p2, k3, k4, k5, k6
                    value0 = camera_data[VALUE0]
                    disto_t2 = camera_data[DISTO_T2]
                    camera = kapture.Camera(kapture.CameraType.FULL_OPENCV, [
                        int(value0[WIDTH]),
                        int(value0[HEIGHT]), value0[FOCAL_LENGTH],
                        value0[FOCAL_LENGTH], value0[PRINCIPAL_POINT][0],
                        value0[PRINCIPAL_POINT][1], disto_t2[0], disto_t2[1],
                        disto_t2[3], disto_t2[4], disto_t2[2], 0, 0, 0
                    ])
                else:
                    # w, h, fx, fy, cx, cy, k1, k2, p1, p2
                    value0 = camera_data[VALUE0]
                    disto_t2 = camera_data[DISTO_T2]
                    camera = kapture.Camera(kapture.CameraType.OPENCV, [
                        int(value0[WIDTH]),
                        int(value0[HEIGHT]), value0[FOCAL_LENGTH],
                        value0[FOCAL_LENGTH], value0[PRINCIPAL_POINT][0],
                        value0[PRINCIPAL_POINT][1], disto_t2[0], disto_t2[1],
                        disto_t2[3], disto_t2[4]
                    ])
            elif camera_model == CameraModel.fisheye:
                logger.warning(
                    "OpenMVG fisheye models are not compatible with OpenCV."
                    " Using SIMPLE_RADIAL_FISHEYE and forcing distortion to 0")
                # w, h, f, cx, cy, k
                value0 = camera_data[VALUE0]
                camera = kapture.Camera(
                    kapture.CameraType.SIMPLE_RADIAL_FISHEYE, [
                        int(value0[WIDTH]),
                        int(value0[HEIGHT]), value0[FOCAL_LENGTH],
                        value0[PRINCIPAL_POINT][0], value0[PRINCIPAL_POINT][1],
                        0
                    ])
            else:
                raise ValueError(f'Camera model {camera_model} not supported')

            kapture_cameras[str(sensor[KEY])] = camera

    # BUGFIX: were `{int: str}` / `{int: int}`, which created dicts containing
    # the *type objects* as an entry instead of declaring empty typed dicts.
    device_identifiers: Dict[int, str] = {}  # Pose id -> device id
    timestamp_for_pose: Dict[int, int] = {}  # Pose id -> timestamp
    records_camera = kapture.RecordsCamera()
    if input_json.get(VIEWS):
        views = input_json[VIEWS]
        if image_action == TransferAction.root_link:
            # Do a unique images directory link
            # kapture/<records_dir>/openmvg_top_images_directory -> openmvg_root_path
            kapture_records_path = get_image_fullpath(kapture_images_path)
            os.makedirs(kapture_records_path, exist_ok=True)
            os.symlink(root_path,
                       path.join(kapture_records_path, openmvg_images_dir))
        logger.info(f'Importing {len(views)} images')
        # Progress bar only in debug or info level
        if image_action != TransferAction.skip and image_action != TransferAction.root_link\
                and logger.getEffectiveLevel() <= logging.INFO:
            progress_bar = tqdm(total=len(views))
        else:
            progress_bar = None
        for view in views:
            input_data = view[VALUE][PTR_WRAPPER][DATA]
            pose_id = input_data[ID_POSE]
            # All two values should be the same (?)
            if input_data[ID_VIEW]:
                timestamp = input_data[ID_VIEW]
            else:
                timestamp = view[KEY]
            device_id = str(input_data[ID_INTRINSIC]
                            )  # device_id must be a string for kapture
            device_identifiers[pose_id] = device_id
            timestamp_for_pose[pose_id] = timestamp
            filename: str
            if input_data.get(LOCAL_PATH):
                filename = path.join(input_data[LOCAL_PATH],
                                     input_data[FILENAME])
            else:
                filename = input_data[FILENAME]
            if root_path:
                src_path = path.join(root_path, filename)
            else:
                src_path = filename

            # Add the common openmvg images directory in front of the filename
            kapture_filename = path.join(openmvg_images_dir, filename)
            if image_action != TransferAction.skip and image_action != TransferAction.root_link:
                dst_path = get_image_fullpath(kapture_images_path,
                                              kapture_filename)
                # Create destination directory if necessary
                dst_dir = path.dirname(dst_path)
                if not path.isdir(dst_dir):
                    os.makedirs(dst_dir, exist_ok=True)
                # Check if already exist
                if path.exists(dst_path):
                    os.unlink(dst_path)
                # Create file or link
                if image_action == TransferAction.copy:
                    shutil.copy2(src_path, dst_path)
                elif image_action == TransferAction.move:
                    shutil.move(src_path, dst_path)
                else:
                    # Individual link
                    if image_action == TransferAction.link_relative:
                        # Compute relative path
                        src_path = path.relpath(src_path, dst_dir)
                    os.symlink(src_path, dst_path)
                    # This might crash on Windows if the user executing this code has no admin privilege
                progress_bar and progress_bar.update(1)

            key = (timestamp, device_id)  # tuple of int,str
            records_camera[key] = path_secure(kapture_filename)
        progress_bar and progress_bar.close()

    trajectories = kapture.Trajectories()
    if input_json.get(EXTRINSICS):
        extrinsics = input_json[EXTRINSICS]
        logger.info(f'Importing {len(extrinsics)} extrinsics -> trajectories')
        for pose in extrinsics:
            pose_id = pose[KEY]
            center = pose[VALUE][CENTER]
            rotation = pose[VALUE][ROTATION]
            # kapture stores world-to-camera translation: t = -R * c
            kap_translation = -1 * np.matmul(rotation, center)
            kap_pose = kapture.PoseTransform(
                quaternion.from_rotation_matrix(rotation), kap_translation)
            timestamp = timestamp_for_pose.get(pose_id)
            if timestamp is None:
                logger.warning(f'Missing timestamp for extrinsic {pose_id}')
                continue
            device_id = device_identifiers.get(pose_id)
            if device_id is None:
                logger.warning(f'Missing device for extrinsic {pose_id}')
                continue
            trajectories[(timestamp,
                          device_id)] = kap_pose  # tuple of int,str -> 6D pose
    
    kapture_data = kapture.Kapture(sensors=kapture_cameras,
                                   records_camera=records_camera,
                                   trajectories=trajectories)
    return kapture_data
예제 #17
0
def import_7scenes(d7scenes_path: str,
                   kapture_dir_path: str,
                   force_overwrite_existing: bool = False,
                   images_import_method: TransferAction = TransferAction.skip,
                   partition: Optional[str] = None
                   ) -> None:
    """
    Imports RGB-D Dataset 7-Scenes dataset and save them as kapture.

    :param d7scenes_path: path to the 7scenes sequence root path
    :param kapture_dir_path: path to kapture top directory
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    :param partition: if specified = 'mapping' or 'query'. Requires d7scenes_path/TestSplit.txt or TrainSplit.txt
                    to exists.
    """
    os.makedirs(kapture_dir_path, exist_ok=True)
    delete_existing_kapture_files(kapture_dir_path, force_erase=force_overwrite_existing)

    logger.info('loading all content ...')

    # 7-scenes file names look like: seq-01/frame-000000.color.jpg
    d7s_filename_re = re.compile(r'((?P<sequence>.+)/)?frame-(?P<frame_id>\d{6})\.(?P<suffix>\w*)\.(?P<ext>\w*)')

    # populate all relevant files
    d7s_filenames = (path_secure(path.relpath(path.join(dp, fn), d7scenes_path))
                     for dp, _, fs in os.walk(d7scenes_path) for fn in fs)

    logger.info('populating 7-scenes files ...')
    # keep only the files matching the 7-scenes naming scheme, with their parsed name parts
    d7s_filenames = {filename: d7s_filename_re.search(filename).groupdict()
                     for filename in sorted(d7s_filenames)
                     if d7s_filename_re.search(filename)}

    # reorg as shot[seq, id] = {color: , depth: , pose: , ...}
    shots = {}
    for filename, file_attribs in d7s_filenames.items():
        shot_id = (file_attribs.get('sequence'), file_attribs['frame_id'])
        shots.setdefault(shot_id, {})[file_attribs['suffix']] = filename

    # fake timestamps: 7-scenes has no real clock, use the shot rank instead.
    for timestamp, shot_id in enumerate(shots):
        shots[shot_id]['timestamp'] = timestamp

    # if given, filter partition
    if partition is not None:
        # read the authors split file
        partition_filepath = path.join(d7scenes_path, PARTITION_FILENAMES[partition])
        if not path.isfile(partition_filepath):
            raise FileNotFoundError(f'partition file is missing: {partition_filepath}.')
        with open(partition_filepath, 'rt') as file:
            # split file lines look like "sequence1": convert to the "seq-01" directory naming
            split_sequences = [f'seq-{int(seq.strip()[len("sequence"):]):02}' for seq in file.readlines()]
        assert len(split_sequences) > 0
        # filter out
        shots = {(seq, frame): shot
                 for (seq, frame), shot in shots.items()
                 if seq in split_sequences}

    if len(shots) == 0:
        raise FileNotFoundError('no file found: make sure the path to 7scenes sequence is valid.')

    # eg. shots['seq-01', '000000'] =
    #       {
    #           'color': 'seq-01/frame-000000.color.jpg',
    #           'depth': 'seq-01/frame-000000.depth.png',
    #           'pose': 'seq-01/frame-000000.pose.txt',
    #           'timestamp': 0}

    # images + depth maps
    logger.info('populating image and depth maps files ...')
    snapshots = kapture.RecordsCamera()
    depth_maps = kapture.RecordsDepth()
    for shot in shots.values():
        snapshots[shot['timestamp'], RGB_SENSOR_ID] = shot['color']
        kapture_depth_map_filename = shot['depth'][:-len('.png')]  # kapture depth files are not png
        depth_maps[shot['timestamp'], DEPTH_SENSOR_ID] = kapture_depth_map_filename
        # a second, registered depth map (aligned to the RGB frame) is generated at import time below
        kapture_registered_depth_map_filename = shot['depth'][:-len('.png')] + '.reg'  # kapture depth files are not png
        depth_maps[shot['timestamp'], REG_DEPTH_SENSOR_ID] = kapture_registered_depth_map_filename

    # poses
    logger.info('import poses files ...')
    trajectories = kapture.Trajectories()
    for shot in shots.values():
        pose_filepath = path.join(d7scenes_path, shot['pose'])
        pose_mat = np.loadtxt(pose_filepath)  # camera-to-world, 4×4 matrix in homogeneous coordinates
        rotation_mat = pose_mat[0:3, 0:3]
        position_vec = pose_mat[0:3, 3]
        rotation_quat = quaternion.from_rotation_matrix(rotation_mat)
        pose_world_from_cam = kapture.PoseTransform(r=rotation_quat, t=position_vec)
        # kapture trajectories store camera-from-world poses: invert
        pose_cam_from_world = pose_world_from_cam.inverse()
        trajectories[shot['timestamp'], RGBD_SENSOR_ID] = pose_cam_from_world

    # sensors
    """
    From authors: The RGB and depth camera have not been calibrated and we can’t provide calibration parameters at the
    moment. The recorded frames correspond to the raw, uncalibrated camera images. In the KinectFusion pipeline we used
    the following default intrinsics for the depth camera: Principle point (320,240), Focal length (585,585).
    ----
    We use the extr. kinect camera parameters from https://projet.liris.cnrs.fr/voir/activities-dataset/kinect-calibration.html.
    """
    sensors = kapture.Sensors()
    # camera_type = kapture.CameraType.OPENCV
    # camera_params = [640, 480, 5.2161910696979987e+02, 5.2132946256749767e+02, 3.1755491910920682e+02, 2.5921654718027673e+02,
    #                  2.5673002693536984e-01, -9.3976085633794137e-01, -1.8605549188751580e-03, -2.2232238578189420e-03]  # w, h, f, cx, cy, k1, k2, p1, p2, k3
    camera_type = kapture.CameraType.SIMPLE_PINHOLE
    # camera_params = [640, 480, 5.2161910696979987e+02, 5.2132946256749767e+02, 3.1755491910920682e+02, 2.5921654718027673e+02]  # w, h, fx, fy, cx, cy
    camera_params = [640, 480, 525, 320, 240]  # w, h, f, cx, cy
    sensors[RGB_SENSOR_ID] = kapture.Camera(
        name=RGB_SENSOR_ID,
        camera_type=camera_type,
        camera_params=camera_params
    )
    # depth_camera_type = kapture.CameraType.OPENCV
    # depth_camera_params = [640, 480, 5.8818670481438744e+02, 5.8724220649505514e+02, 3.1076280589210484e+02, 2.2887144980135292e+02,
    #                        -1.8932947734719333e-01, 1.1358015104098631e+00, -4.4260345347128536e-03, -5.4869578635708153e-03, -2.2460143607712921e+00] # w, h, f, cx, cy, k1, k2, p1, p2, k3
    depth_camera_type = kapture.CameraType.SIMPLE_PINHOLE
    # depth_camera_params = [640, 480, 5.8818670481438744e+02, 5.8724220649505514e+02, 3.1076280589210484e+02, 2.2887144980135292e+02] # w, h, fx, fy, cx, cy
    depth_camera_params = [640, 480, 585, 320, 240]  # w, h, f, cx, cy
    sensors[DEPTH_SENSOR_ID] = kapture.Camera(
        name=DEPTH_SENSOR_ID,
        camera_type=depth_camera_type,
        camera_params=depth_camera_params,
        sensor_type='depth'
    )
    # the registered depth lives in the RGB camera frame, so it uses the RGB intrinsics
    sensors[REG_DEPTH_SENSOR_ID] = kapture.Camera(
        name=REG_DEPTH_SENSOR_ID,
        camera_type=depth_camera_type,
        camera_params=camera_params,
        sensor_type='depth'
    )

    # bind camera and depth sensor into a rig
    # extrinsic depth->RGB calibration (see the liris.cnrs.fr link above)
    R = np.array([[9.9996518012567637e-01, 2.6765126468950343e-03, -7.9041012313000904e-03],
                  [-2.7409311281316700e-03, 9.9996302803027592e-01, -8.1504520778013286e-03],
                  [7.8819942130445332e-03, 8.1718328771890631e-03, 9.9993554558014031e-01]])
    T = np.array([-2.5558943178152542e-02, 1.0109636268061706e-04, 2.0318321729487039e-03])
    Rt = np.vstack((np.hstack((R, T.reshape(3, 1))), np.array([0, 0, 0, 1])))
    logger.info('building rig with camera and depth sensor ...')
    rigs = kapture.Rigs()
    rigs[RGBD_SENSOR_ID, RGB_SENSOR_ID] = kapture.PoseTransform(quaternion.from_rotation_matrix(R), T)
    rigs[RGBD_SENSOR_ID, REG_DEPTH_SENSOR_ID] = kapture.PoseTransform(quaternion.from_rotation_matrix(R), T)
    rigs[RGBD_SENSOR_ID, DEPTH_SENSOR_ID] = kapture.PoseTransform()

    # import (copy) image files.
    logger.info('copying image files ...')
    image_filenames = [f for _, _, f in kapture.flatten(snapshots)]
    import_record_data_from_dir_auto(d7scenes_path, kapture_dir_path, image_filenames, images_import_method)

    # import (copy) depth map files.
    logger.info('converting depth files ...')
    depth_map_filenames = kapture.io.records.records_to_filepaths(depth_maps, kapture_dir_path)
    hide_progress = logger.getEffectiveLevel() > logging.INFO
    for depth_map_filename, depth_map_filepath_kapture in tqdm(depth_map_filenames.items(), disable=hide_progress):
        # '.reg' entries are produced below from their raw counterpart, never read from disk;
        # use endswith rather than a substring test to avoid false positives on paths containing '.reg'
        if depth_map_filename.endswith('.reg'):
            continue
        depth_map_filepath_7scenes = path.join(d7scenes_path, depth_map_filename + '.png')
        depth_map = np.array(Image.open(depth_map_filepath_7scenes))
        # change invalid depth from 65535 to 0
        depth_map[depth_map == 65535] = 0
        # depth maps is in mm in 7scenes, convert it to meters
        depth_map = depth_map.astype(np.float32) * 1.0e-3
        kapture.io.records.records_depth_to_file(depth_map_filepath_kapture, depth_map)
        # register depth to rgb
        reg_depth_map = register_depth(get_K(depth_camera_type, depth_camera_params), get_K(camera_type, camera_params),
                                       Rt, depth_map, camera_params[0], camera_params[1])
        kapture.io.records.records_depth_to_file(depth_map_filepath_kapture + '.reg', reg_depth_map)

    # pack into kapture format
    imported_kapture = kapture.Kapture(
        records_camera=snapshots,
        records_depth=depth_maps,
        rigs=rigs,
        trajectories=trajectories,
        sensors=sensors)

    logger.info('writing imported data ...')
    kapture_to_dir(kapture_dir_path, imported_kapture)
예제 #18
0
def import_7scenes(d7scenes_path: str,
                   kapture_dir_path: str,
                   force_overwrite_existing: bool = False,
                   images_import_method: TransferAction = TransferAction.skip,
                   partition: Optional[str] = None
                   ) -> None:
    """
    Imports RGB-D Dataset 7-Scenes dataset and save them as kapture.

    :param d7scenes_path: path to the 7scenes sequence root path
    :param kapture_dir_path: path to kapture top directory
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    :param partition: if specified = 'mapping' or 'query'. Requires d7scenes_path/TestSplit.txt or TrainSplit.txt
                    to exists.
    """
    os.makedirs(kapture_dir_path, exist_ok=True)
    delete_existing_kapture_files(kapture_dir_path, force_erase=force_overwrite_existing)

    logger.info('loading all content ...')

    # 7-scenes file names look like: seq-01/frame-000000.color.jpg
    d7s_filename_re = re.compile(r'((?P<sequence>.+)/)?frame-(?P<frame_id>\d{6})\.(?P<suffix>\w*)\.(?P<ext>\w*)')

    # populate all relevant files
    d7s_filenames = (path_secure(path.relpath(path.join(dp, fn), d7scenes_path))
                     for dp, _, fs in os.walk(d7scenes_path) for fn in fs)

    logger.info('populating 7-scenes files ...')
    # keep only the files matching the 7-scenes naming scheme, with their parsed name parts
    d7s_filenames = {filename: d7s_filename_re.search(filename).groupdict()
                     for filename in sorted(d7s_filenames)
                     if d7s_filename_re.search(filename)}

    # reorg as shot[seq, id] = {color: , depth: , pose: , ...}
    shots = {}
    for filename, file_attribs in d7s_filenames.items():
        shot_id = (file_attribs.get('sequence'), file_attribs['frame_id'])
        shots.setdefault(shot_id, {})[file_attribs['suffix']] = filename

    # fake timestamps: 7-scenes has no real clock, use the shot rank instead.
    for timestamp, shot_id in enumerate(shots):
        shots[shot_id]['timestamp'] = timestamp

    # if given, filter partition
    if partition is not None:
        # read the authors split file
        partition_filepath = path.join(d7scenes_path, PARTITION_FILENAMES[partition])
        if not path.isfile(partition_filepath):
            raise FileNotFoundError(f'partition file is missing: {partition_filepath}.')
        with open(partition_filepath, 'rt') as file:
            # split file lines look like "sequence1": convert to the "seq-01" directory naming
            split_sequences = [f'seq-{int(seq.strip()[len("sequence"):]):02}' for seq in file.readlines()]
        assert len(split_sequences) > 0
        # filter out
        shots = {(seq, frame): shot
                 for (seq, frame), shot in shots.items()
                 if seq in split_sequences}

    if len(shots) == 0:
        raise FileNotFoundError('no file found: make sure the path to 7scenes sequence is valid.')

    # eg. shots['seq-01', '000000'] =
    #       {
    #           'color': 'seq-01/frame-000000.color.jpg',
    #           'depth': 'seq-01/frame-000000.depth.png',
    #           'pose': 'seq-01/frame-000000.pose.txt',
    #           'timestamp': 0}

    # images + depth maps
    logger.info('populating image and depth maps files ...')
    snapshots = kapture.RecordsCamera()
    depth_maps = kapture.RecordsDepth()
    for shot in shots.values():
        snapshots[shot['timestamp'], RGB_SENSOR_ID] = shot['color']
        kapture_depth_map_filename = shot['depth'][:-len('.png')]  # kapture depth files are not png
        depth_maps[shot['timestamp'], DEPTH_SENSOR_ID] = kapture_depth_map_filename

    # poses
    logger.info('import poses files ...')
    trajectories = kapture.Trajectories()
    for shot in shots.values():
        pose_filepath = path.join(d7scenes_path, shot['pose'])
        pose_mat = np.loadtxt(pose_filepath)  # camera-to-world, 4×4 matrix in homogeneous coordinates
        rotation_mat = pose_mat[0:3, 0:3]
        position_vec = pose_mat[0:3, 3]
        rotation_quat = quaternion.from_rotation_matrix(rotation_mat)
        pose_world_from_cam = kapture.PoseTransform(r=rotation_quat, t=position_vec)
        # kapture trajectories store camera-from-world poses: invert
        pose_cam_from_world = pose_world_from_cam.inverse()
        trajectories[shot['timestamp'], RGBD_SENSOR_ID] = pose_cam_from_world

    # sensors
    """
    From authors: The RGB and depth camera have not been calibrated and we can’t provide calibration parameters at the
    moment. The recorded frames correspond to the raw, uncalibrated camera images. In the KinectFusion pipeline we used
    the following default intrinsics for the depth camera: Principle point (320,240), Focal length (585,585).
    """
    sensors = kapture.Sensors()
    camera_type = kapture.CameraType.SIMPLE_PINHOLE
    camera_params = [640, 480, 585, 320, 240]  # w, h, f, cx, cy
    sensors[RGB_SENSOR_ID] = kapture.Camera(
        name=RGB_SENSOR_ID,
        camera_type=camera_type,
        camera_params=camera_params
    )
    sensors[DEPTH_SENSOR_ID] = kapture.Camera(
        name=DEPTH_SENSOR_ID,
        camera_type=camera_type,
        camera_params=camera_params,
        sensor_type='depth'
    )

    # bind camera and depth sensor into a rig
    logger.info('building rig with camera and depth sensor ...')
    rigs = kapture.Rigs()
    rigs[RGBD_SENSOR_ID, RGB_SENSOR_ID] = kapture.PoseTransform()
    rigs[RGBD_SENSOR_ID, DEPTH_SENSOR_ID] = kapture.PoseTransform()

    # import (copy) image files.
    logger.info('copying image files ...')
    image_filenames = [f for _, _, f in kapture.flatten(snapshots)]
    import_record_data_from_dir_auto(d7scenes_path, kapture_dir_path, image_filenames, images_import_method)

    # import (copy) depth map files.
    logger.info('converting depth files ...')
    depth_map_filenames = kapture.io.records.records_to_filepaths(depth_maps, kapture_dir_path)
    hide_progress = logger.getEffectiveLevel() > logging.INFO
    for depth_map_filename, depth_map_filepath_kapture in tqdm(depth_map_filenames.items(), disable=hide_progress):
        depth_map_filepath_7scenes = path.join(d7scenes_path, depth_map_filename + '.png')
        depth_map = np.array(Image.open(depth_map_filepath_7scenes))
        # change invalid depth from 65535 to 0
        depth_map[depth_map == 65535] = 0
        # depth maps is in mm in 7scenes, convert it to meters
        depth_map = depth_map.astype(np.float32) * 1.0e-3
        kapture.io.records.records_depth_to_file(depth_map_filepath_kapture, depth_map)

    # pack into kapture format
    imported_kapture = kapture.Kapture(
        records_camera=snapshots,
        records_depth=depth_maps,
        rigs=rigs,
        trajectories=trajectories,
        sensors=sensors)

    logger.info('writing imported data ...')
    kapture_to_dir(kapture_dir_path, imported_kapture)
예제 #19
0
def import_silda(
    silda_dir_path: str,
    destination_kapture_dir_path: str,
    fallback_cam_model: str = 'FOV',
    do_split_cams: bool = False,
    corpus: Optional[str] = None,
    replace_pose_rig: bool = False,
    force_overwrite_existing: bool = False,
    images_import_strategy: TransferAction = TransferAction.link_absolute
) -> None:
    """
    Imports data from silda dataset.

    :param silda_dir_path: path to the silda top directory
    :param destination_kapture_dir_path: output path to the kapture directory where imported data are written.
    :param fallback_cam_model: camera model to fallback when necessary
    :param do_split_cams: If true, re-organises and renames the image files to split apart cameras.
    :param corpus: the list of corpus to be imported, among 'mapping', 'query'.
    :param replace_pose_rig: if True, replaces poses of individual cameras with poses of the rig.
    :param force_overwrite_existing: if true, Silently overwrite kapture files if already exists.
    :param images_import_strategy: how to copy image files.
    """

    # sanity check
    silda_dir_path = path_secure(path.abspath(silda_dir_path))
    destination_kapture_dir_path = path_secure(
        path.abspath(destination_kapture_dir_path))
    if TransferAction.root_link == images_import_strategy and do_split_cams:
        raise ValueError(
            'impossible to only link images directory and applying split cam.')
    hide_progress_bars = logger.getEffectiveLevel() >= logging.INFO

    # prepare output directory
    kapture.io.structure.delete_existing_kapture_files(
        destination_kapture_dir_path, force_overwrite_existing)
    os.makedirs(destination_kapture_dir_path, exist_ok=True)

    # images ###########################################################################################################
    logger.info('Processing images ...')
    # silda-images
    #   ...
    #   ├── 1445_0.png
    #   ├── 1445_1.png
    #   ...
    silda_images_root_path = path.join(silda_dir_path, 'silda-images')
    # list all png files (its PNG in silda) using a generator.
    if corpus is not None:
        assert corpus in SILDA_CORPUS_SPLIT_FILENAMES
        # if corpus specified, filter by those which directory name match corpus.
        logger.debug(f'only importing {corpus} part.')
        corpus_file_path = path.join(silda_dir_path,
                                     SILDA_CORPUS_SPLIT_FILENAMES[corpus])
        with open(corpus_file_path, 'rt') as corpus_file:
            corpus_filenames = corpus_file.readlines()
            image_filenames_original = sorted(filename.strip()
                                              for filename in corpus_filenames)
    else:
        image_filenames_original = sorted(
            filename for dir_path, sd, fs in os.walk(silda_images_root_path)
            for filename in fs if filename.endswith('.png'))

    image_filenames_kapture = []
    snapshots = kapture.RecordsCamera()
    image_name_to_ids = {}  # '1445_0.png' -> (1445, 0)
    for image_filename_original in tqdm(image_filenames_original,
                                        disable=hide_progress_bars):
        # retrieve info from image filename
        name_parts_match = SILDA_IMAGE_NAME_PATTERN.match(
            image_filename_original)
        assert name_parts_match is not None
        shot_info: Dict[str, Any]
        shot_info = name_parts_match.groupdict()
        shot_info['timestamp'] = int(
            shot_info['timestamp']
        )  # To avoid warnings about type of the value
        # eg. file_info = {'filename': '1445_0.png', 'timestamp': 1445, 'cam_id': '0'}
        # create a path of the image into NLE dir
        if do_split_cams:
            # re-organise images with subfolders per corpus/camera/timestamp.png
            kapture_image_filename = path.join(
                shot_info['cam_id'],
                '{:04d}.png'.format(shot_info['timestamp']))
        else:
            # keep the original file hierarchy
            kapture_image_filename = image_filename_original

        image_filenames_kapture.append(kapture_image_filename)
        snapshots[shot_info['timestamp'],
                  shot_info['cam_id']] = kapture_image_filename
        image_name_to_ids[shot_info['filename']] = (shot_info['timestamp'],
                                                    shot_info['cam_id'])

    assert len(image_filenames_kapture) == len(image_filenames_original)
    # intrinsics #######################################################################################################
    cameras = _import_cameras(silda_dir_path, snapshots, fallback_cam_model)

    # extrinsics #######################################################################################################
    trajectories = _import_trajectories(silda_dir_path, image_name_to_ids,
                                        hide_progress_bars)

    # rigs
    rigs = _make_rigs(replace_pose_rig, trajectories)

    # pack it all together
    kapture_data = kapture.Kapture(sensors=cameras,
                                   records_camera=snapshots,
                                   trajectories=trajectories,
                                   rigs=rigs)

    logger.info('saving to Kapture  ...')
    kapture.io.csv.kapture_to_dir(destination_kapture_dir_path, kapture_data)

    # finally import images
    if images_import_strategy != TransferAction.skip:
        # importing image files
        logger.info(f'importing {len(image_filenames_original)} images ...')
        assert len(image_filenames_original) == len(image_filenames_kapture)
        # source paths come from the ORIGINAL filenames (under silda-images),
        # destination paths from the (possibly renamed) kapture filenames.
        image_file_paths_original = [
            path.join(silda_images_root_path, image_filename_original)
            for image_filename_original in image_filenames_original
        ]
        image_file_paths_kapture = [
            get_image_fullpath(destination_kapture_dir_path,
                               image_filename_kapture)
            for image_filename_kapture in image_filenames_kapture
        ]
        transfer_files_from_dir(image_file_paths_original,
                                image_file_paths_kapture,
                                images_import_strategy)
    logger.info('done.')
예제 #20
0
This file contains IO operations on Record related data.
"""

import os
import os.path as path
from typing import Dict, Optional, Union, Iterable, Tuple
import numpy as np
import kapture
from kapture.utils.paths import path_secure
from kapture.utils.logging import getLogger
from kapture.io.binary import TransferAction, transfer_files_from_dir, array_from_file, array_to_file

logger = getLogger()

# Records files related functions ######################################################################################
RECORD_DATA_DIRNAME = path_secure(path.join('sensors', 'records_data'))


########################################################################################################################
def get_record_fullpath(
        kapture_dirpath: str = '',
        record_filename: Optional[str] = None) -> str:
    """
    Returns the full path to the directory containing the binary files of the record type,
    optionally completed with a given record file name.

    :param kapture_dirpath: input path to kapture directory.
    :param record_filename: optional input record filename (eg. image filename).
    :return: the record full path
    """
    # an absent filename resolves to the records directory itself
    tail = record_filename if record_filename else ''
    full_path = path.join(kapture_dirpath, RECORD_DATA_DIRNAME, tail)
    return path_secure(full_path)
예제 #21
0
    def test_maupertuis_import_db_only(self):
        """Imports the colmap database only (no text reconstruction) and checks every imported field."""
        kapture_data = import_colmap_database(self._database_filepath, self._temp_dirpath,
                                              no_geometric_filtering=True)

        # check the numbers
        self.assertIsNone(kapture_data.trajectories)
        self.assertIsNone(kapture_data.points3d)
        self.assertIsNone(kapture_data.records_lidar)
        self.assertIsNone(kapture_data.records_wifi)
        self.assertIsNone(kapture_data.records_gnss)
        self.assertEqual(1, len(kapture_data.sensors))
        self.assertEqual(4, len(kapture_data.records_camera))
        self.assertEqual(4, len(kapture_data.keypoints))
        self.assertEqual(4, len(kapture_data.descriptors))
        self.assertEqual(6, len(kapture_data.matches))

        # check camera
        camera = kapture_data.sensors['cam_00001']
        self.assertEqual(kapture.SENSOR_TYPE_CAMERA, camera.sensor_type)
        self.assertEqual(kapture.CameraType.SIMPLE_PINHOLE, camera.camera_type)
        # NOTE: assertAlmostEqual does not compare sequences element-wise
        # (it only passed before via the exact-equality short-circuit):
        # compare the parameters one by one instead.
        expected_camera_params = [1919.0, 1079.0, 2302.7999999999997, 959.5, 539.5]
        self.assertEqual(len(expected_camera_params), len(camera.camera_params))
        for expected, actual in zip(expected_camera_params, camera.camera_params):
            self.assertAlmostEqual(expected, actual)

        # check snapshots
        snapshots = kapture_data.records_camera
        self.assertTrue(all('cam_00001' in ts for ts in snapshots.values()))
        self.assertEqual(['00.jpg', '01.jpg', '02.jpg', '03.jpg'],
                         [filename for _, _, filename in kapture.flatten(snapshots, True)])

        # check keypoints
        keypoints = kapture_data.keypoints
        self.assertEqual(np.float32, keypoints.dtype)
        self.assertEqual(6, keypoints.dsize)
        self.assertEqual({'00.jpg', '01.jpg', '02.jpg', '03.jpg'}, keypoints)
        keypoints_filepaths_actual = kapture.io.features.keypoints_to_filepaths(keypoints, self._temp_dirpath)
        keypoints_filepaths_expected = {
            f'{i:02d}.jpg': path_secure(f'{self._temp_dirpath}/reconstruction/keypoints/{i:02d}.jpg.kpt')
            for i in [0, 1, 2, 3]}
        self.assertDictEqual(keypoints_filepaths_actual, keypoints_filepaths_expected)
        # check a keypoints file: first and last keypoints of the first image
        image_keypoints_filepaths = sorted(
            kapture.io.features.keypoints_to_filepaths(keypoints, self._temp_dirpath).values())
        image_keypoints = image_keypoints_from_file(image_keypoints_filepaths[0], keypoints.dtype, keypoints.dsize)
        self.assertEqual((6424, 6), image_keypoints.shape)
        expected_first_keypoint = [1290.908447265625, 4.156360626220703, -1.3475048542022705,
                                   1.4732409715652466, -1.4732409715652466, -1.3475048542022705]
        for expected, actual in zip(expected_first_keypoint, image_keypoints[0].tolist()):
            self.assertAlmostEqual(expected, actual)

        expected_last_keypoint = [1381.316650390625, 668.8056640625, 59.981021881103516,
                                  46.423213958740234, -46.423213958740234, 59.981021881103516]
        for expected, actual in zip(expected_last_keypoint, image_keypoints[-1].tolist()):
            self.assertAlmostEqual(expected, actual)

        # check descriptors
        descriptors = kapture_data.descriptors
        self.assertEqual(np.uint8, descriptors.dtype)
        self.assertEqual(128, descriptors.dsize)
        self.assertEqual({'00.jpg', '01.jpg', '02.jpg', '03.jpg'}, descriptors)
        descriptors_filepaths_actual = kapture.io.features.descriptors_to_filepaths(descriptors, self._temp_dirpath)
        descriptors_filepaths_expected = {
            f'{i:02d}.jpg': path_secure(f'{self._temp_dirpath}/reconstruction/descriptors/{i:02d}.jpg.desc')
            for i in [0, 1, 2, 3]}
        self.assertDictEqual(descriptors_filepaths_actual, descriptors_filepaths_expected)
        # check a descriptors file: one descriptor per keypoint
        image_descriptors_filepaths = sorted(kapture.io.features.descriptors_to_filepaths(
            descriptors, self._temp_dirpath).values())
        image_descriptors = image_descriptors_from_file(
            image_descriptors_filepaths[0], descriptors.dtype, descriptors.dsize)
        self.assertEqual(image_keypoints.shape[0], image_descriptors.shape[0])

        # check matches
        matches = kapture_data.matches
        self.assertEqual({('01.jpg', '03.jpg'), ('00.jpg', '02.jpg'),
                          ('00.jpg', '03.jpg'), ('02.jpg', '03.jpg'),
                          ('00.jpg', '01.jpg'), ('01.jpg', '02.jpg')},
                         set(matches))
예제 #22
0
    def import_multi_camera(
            self,
            odometry_topic: Optional[str],  # noqa: C901
            image_topics: Union[str, List[str]],
            camera_identifiers: Union[str, List[str]],
            save_all_positions: bool = True,
            find_image_position: bool = True,
            percent: int = 100):
        """
        Import the rosbag data. Save the images on disk.
        The image topics list and camera identifiers list must be matching lists.

        :param odometry_topic: the odometry topic to use to compute the trajectory.
        :param image_topics: image topic(s) to import
        :param camera_identifiers: camera identifier(s) corresponding to the image topic(s)
        :param save_all_positions: save all positions from the odometry topic in the trajectory
        :param find_image_position: find the closest position for the image and add it in the trajectory
        :param percent: percentage of images to keep.
        """

        # The image messages have the following structure:
        #
        # std_msgs/Header header
        # uint32 height
        # uint32 width
        # string encoding
        # uint8 is_bigendian
        # uint32 step
        # uint8[] data
        #

        # Check that we have image topics
        # (accept a single topic as a plain string for convenience)
        if isinstance(image_topics, str):
            image_topics = [image_topics]
        if image_topics is None or len(image_topics) == 0:
            self.logger.fatal('Please provide image topics')
            raise ValueError('Missing image topic')
        # Check that we have as many camera identifiers as image topics
        if isinstance(camera_identifiers, str):
            camera_identifiers = [camera_identifiers]
        nb_image_topics = len(image_topics)
        if nb_image_topics != len(camera_identifiers):
            self.logger.fatal(
                f'Please provide an equal number of image topics and camera identifiers:'
                f' {len(camera_identifiers)} cameras for {nb_image_topics} image topics.'
            )
            raise ValueError(
                'Unequal number of image topics and camera identifiers')
        topics_to_import = image_topics.copy()
        # Check that we have an odometry topic
        # (an empty string means: no odometry)
        if odometry_topic and len(odometry_topic) == 0:
            odometry_topic = None
        if odometry_topic is None:
            self.logger.info('No odometry topic')
            if find_image_position:
                # without odometry there is nothing to match image positions against
                self.logger.info(
                    'Will not find position for the images without odometry')
                find_image_position = False
        else:
            topics_to_import.append(odometry_topic)

        # Make sure the camera identifiers are defined in the list of sensors
        for camera_id in camera_identifiers:
            if self._sensors.get(camera_id) is None:
                raise ValueError(
                    f'Camera identifier {camera_id} is not defined in {list(self._sensors.keys())}'
                )
        # Create images directories
        self._create_images_directories(camera_identifiers, image_topics)

        with rosbag.Bag(self._rosbag_path) as bag:
            self._check_bag_topics(bag, odometry_topic, image_topics)

            # Read the messages
            num_msgs = 1
            image_number = 1
            self._saved_number = 0
            images_stamp: Dict[Any, rospy.rostime.Time] = {
            }  # images time stamp of type rospy.rostime.Time per topic
            images_buffer: Dict[str, np.ndarray] = {
            }  # the images read as byte array per topic : Dict[str, np.ndarray]
            dir_last_image_time = {
            }  # image directory -> time of the last image written : Dict[str, float]
            # keep 1 image out of count_to_skip to honor the percent parameter
            count_to_skip = int(100 / percent) if percent < 100 else 1
            self.logger.info(
                f'Reading {self._image_count_per_camera} images per camera')
            if percent < 100:
                self.logger.info(f' Keeping {percent}% of the images')
            # progress bar only when not already printing per-message debug logs
            if not self.logger.isEnabledFor(logging.DEBUG):
                progress_bar = tqdm(total=self._image_count_per_camera)
            else:
                progress_bar = None
            for topic, msg, t in bag.read_messages(topics=topics_to_import):
                # Consider the data timestamp, not the recording timestamp t
                stamp = msg.header.stamp
                if topic == odometry_topic:
                    self.logger.debug(
                        f"{num_msgs:5d} STAMP='{stamp.secs}.{stamp.nsecs}'"
                        f" ODOM msg.poseX='{msg.pose.pose.position.x}'")
                    # remember the pose so images can be matched to it later
                    self._last_poses[stamp] = msg.pose.pose
                    if save_all_positions:
                        self.poses_info.append(
                            PositionInfo(stamp, msg.pose.pose))
                elif topic in image_topics:
                    self.logger.debug(
                        f"{num_msgs:5d} STAMP='{stamp.secs}.{stamp.nsecs}'"
                        f" {topic} HxW={msg.height}x{msg.width}")
                    # buffer the image until one image per topic has been collected
                    images_stamp[topic] = stamp
                    images_buffer[topic] = _extract_img_buf(msg)
                else:
                    self.logger.debug(
                        f"{num_msgs:5d} Ignoring message of topic '{topic}'")
                # If we have all images, save them on disk
                if len(images_stamp) == nb_image_topics:
                    _check_timestamp_delta(images_stamp)
                    pose6d = self._find_pose(
                        stamp) if find_image_position else None
                    if pose6d or not find_image_position:
                        # Save only some images
                        if ((image_number - 1) % count_to_skip) == 0:
                            for image_topic in image_topics:
                                img = images_buffer[image_topic]
                                image_directory = self._image_directory_path[
                                    image_topic]
                                stamp = images_stamp[image_topic]
                                try:
                                    img_name = path_secure(
                                        self._save_image(
                                            img, image_directory, image_number,
                                            stamp))
                                except SystemError:
                                    # best effort: log and keep importing the remaining images
                                    self.logger.exception(
                                        f"Could not save image number {image_number}"
                                        f" from topic {image_topic}")
                                else:
                                    dir_last_image_time[
                                        image_directory] = stamp.to_sec()
                                    self.images_info.append(
                                        ImageInfo(
                                            img_name, stamp,
                                            self._image_topic_to_cam_id[
                                                image_topic]))
                            if pose6d:
                                self.poses_info.append(
                                    PositionInfo(stamp, pose6d))
                        else:
                            self.logger.debug(
                                f'    Skipping image number {image_number}')
                        image_number += 1
                        # reset the per-topic buffers for the next image set
                        images_stamp.clear()
                        images_buffer.clear()
                        progress_bar and progress_bar.update(1)
                    # Else wait until we have the first pose
                num_msgs += 1
            progress_bar and progress_bar.close()
            # stamp each image directory with the time of its last image
            for image_dir, dir_time in dir_last_image_time.items():
                os.utime(image_dir, (dir_time, dir_time))
            self.logger.info(f'Saved {self._saved_number} images')
예제 #23
0
def import_idl_dataset_cvpr17(idl_dataset_path: str,
                              gt_path: Union[str, None],
                              kapture_path: str,
                              force_overwrite_existing: bool = False,
                              images_import_method: TransferAction = TransferAction.skip) -> None:
    """
    Reads the IDL dataset and copy it to a kapture.

    :param idl_dataset_path: path to the IDL dataset
    :param gt_path: ground truth data path
    :param kapture_path: path to the kapture top directory to create
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    :raises ValueError: if a ground truth file disagrees with the actual image size.
    """

    os.makedirs(kapture_path, exist_ok=True)
    delete_existing_kapture_files(kapture_path, force_erase=force_overwrite_existing)

    cameras = kapture.Sensors()
    images = kapture.RecordsCamera()
    trajectories = kapture.Trajectories()

    # collect every file below the dataset root; non-image files are filtered out in the loop
    file_list = sorted(path.relpath(path.join(dirpath, filename), idl_dataset_path)
                       for dirpath, dirs, filenames in os.walk(idl_dataset_path)
                       for filename in filenames)

    logger.info('starting conversion...')
    for n, filename in enumerate(file_list):
        # test if file is a valid image: PIL only reads the header here (lazy load),
        # so probing every file is cheap
        try:
            with Image.open(path.join(idl_dataset_path, filename)) as im:
                width, height = im.size
                model_params = [width, height]
        except Exception:
            # not an image (or unreadable): deliberately skip it silently
            continue

        camera_id = f'sensor{n}'
        images[(n, camera_id)] = path_secure(filename)  # don't forget windows
        model = kapture.CameraType.UNKNOWN_CAMERA
        if gt_path is not None:
            # replace image extension with .camera
            file_gt_path = path.splitext(path.join(gt_path, filename))[0] + ".camera"

            if path.isfile(file_gt_path):
                with open(file_gt_path) as fin:
                    lines = [line.rstrip().split() for line in fin.readlines()]  # split fields
                # ground-truth layout (by line index): 0-1 = intrinsics rows,
                # 4-6 = rotation matrix, 7 = center of projection, 8 = image size
                fx = float(lines[0][0])
                cx = float(lines[0][2])
                fy = float(lines[1][1])
                cy = float(lines[1][2])
                width_file = float(lines[8][0])
                height_file = float(lines[8][1])
                # explicit raise instead of assert: asserts are stripped under `python -O`
                if width_file != width or height_file != height:
                    raise ValueError(f'ground truth size mismatch for {filename}: '
                                     f'gt=({width_file}, {height_file}) image=({width}, {height})')
                model = kapture.CameraType.PINHOLE
                model_params = [width, height, fx, fy, cx, cy]

                rotation_matrix = [[float(v) for v in line] for line in lines[4:7]]
                rotation = quaternion.from_rotation_matrix(rotation_matrix)
                center_of_projection = [float(v) for v in lines[7]]
                # gt pose is camera-to-world; kapture trajectories store the inverse
                pose = kapture.PoseTransform(rotation, center_of_projection).inverse()
                trajectories[(n, camera_id)] = pose
        cameras[camera_id] = kapture.Camera(model, model_params)

    # if no trajectory were added, no need to create the file
    if not trajectories:
        trajectories = None

    # import (copy) image files.
    logger.info('import image files ...')
    filename_list = [f for _, _, f in kapture.flatten(images)]
    import_record_data_from_dir_auto(idl_dataset_path, kapture_path, filename_list, images_import_method)

    # pack into kapture format
    imported_kapture = kapture.Kapture(sensors=cameras, records_camera=images, trajectories=trajectories)
    logger.info('writing imported data...')
    kapture_to_dir(kapture_path, imported_kapture)
예제 #24
0
    def import_multi_camera(self, odometry_topic: Optional[str],
                            image_topics: Union[str, List[str]],
                            camera_identifiers: Union[str, List[str]],
                            save_all_positions: bool = True,
                            find_image_position: bool = True,
                            percent: int = 100):
        """
        Import the rosbag data. Save the images on disk.
        The image topics list and camera identifiers list must be matching lists.

        :param odometry_topic: the odometry topic to use to compute the trajectory.
        :param image_topics: image topic(s) to import
        :param camera_identifiers: camera identifier(s) corresponding to the image topic(s)
        :param save_all_positions: save all positions from the odometry topic in the trajectory
        :param find_image_position: find the closest position for the image and add it in the trajectory
        :param percent: percentage of images to keep.
        :raises ValueError: on missing/mismatched topics, unknown camera identifiers,
                            or unsupported image encodings.
        """

        # The image messages have the following structure:
        #
        # std_msgs/Header header
        # uint32 height
        # uint32 width
        # string encoding
        # uint8 is_bigendian
        # uint32 step
        # uint8[] data
        #

        # Check that we have an odometry topic
        if odometry_topic and len(odometry_topic) == 0:
            odometry_topic = None
        if odometry_topic is None:
            self.logger.info('No odometry topic')
            if find_image_position:
                self.logger.info('Will not find position for the images without odometry')
                find_image_position = False
        # Check that we have image topics
        if isinstance(image_topics, str):
            image_topics = [image_topics]
        if image_topics is None or len(image_topics) == 0:
            self.logger.fatal('Please provide image topics')
            raise ValueError('Missing image topic')
        # Check that we have as many camera identifiers as image topics
        if isinstance(camera_identifiers, str):
            camera_identifiers = [camera_identifiers]
        nb_image_topics = len(image_topics)
        if nb_image_topics != len(camera_identifiers):
            self.logger.fatal(f'Please provide an equal number of image topics and camera identifiers:'
                              f' {len(camera_identifiers)} cameras for {nb_image_topics} image topics.')
            raise ValueError('Unequal number of image topics and camera identifiers')

        # Make sure the camera identifiers are defined in the list of sensors
        for camera_id in camera_identifiers:
            if self._sensors.get(camera_id) is None:
                raise ValueError(f'Camera identifier {camera_id} is not defined in {list(self._sensors.keys())}')
        # Create images directories
        image_directory_path = OrderedDict()  # Should be OrderedDict[str, str]
        image_topic_to_cam_id = dict()
        for (image_topic, camera_id) in zip(image_topics, camera_identifiers):
            image_topic_to_cam_id[image_topic] = camera_id
            self.logger.info(f'images from {image_topic} mapped to camera {camera_id}')
            dir_path = path.join(self._images_full_path, camera_id)
            image_directory_path[image_topic] = dir_path
            os.makedirs(dir_path, exist_ok=True)
        self.logger.info(f'Saving images into {" ".join(dir_path for dir_path in image_directory_path.values())}')
        with rosbag.Bag(self._rosbag_path) as bag:
            bag_info = bag.get_type_and_topic_info()
            all_topics = list(bag_info[1].keys())
            self.logger.debug(f'Topics found {all_topics}')
            # Check topics
            if odometry_topic and odometry_topic not in all_topics:
                raise ValueError(f'Missing topic {odometry_topic} in Rosbag')
            for image_topic in image_topics:
                if image_topic not in all_topics:
                    raise ValueError(f'Missing image topic {image_topic} in Rosbag')
            message_count = bag.get_message_count()
            self.logger.info(f"{message_count} messages in total")
            image_count_per_camera = -1
            for topic in all_topics:
                stats = bag_info[1][topic]
                count = stats[1]
                self.logger.info(f'In topic {topic:30s} {count:#10d} messages of type {stats[0]}')
                # Make sure we have the same number of images for all camera
                if topic in image_topics:
                    if image_count_per_camera >= 0:
                        assert count == image_count_per_camera, "All cameras have the same number of images"
                    image_count_per_camera = count

            # Read the messages
            num_msgs = 1
            image_number = 1
            saved_number = 0
            images_stamp = {}  # images time stamp of type rospy.rostime.Time per topic
            images_buffer = {}  # the images read as byte array per topic : Dict[str, np.ndarray]
            dir_last_image_time = {}  # image directory -> time of the last image written : Dict[str, float]
            count_to_skip = int(100 / percent) if percent < 100 else 1
            self.logger.info(f'Reading {image_count_per_camera} images per camera')
            if percent < 100:
                self.logger.info(f' Keeping {percent}% of the images')
            # progress bar would interleave badly with per-message debug logging
            if not self.logger.isEnabledFor(logging.DEBUG):
                progress_bar = tqdm(total=image_count_per_camera)
            else:
                progress_bar = None
            topics_to_import = image_topics.copy()
            if odometry_topic:
                topics_to_import.append(odometry_topic)
            for topic, msg, t in bag.read_messages(topics=topics_to_import):
                # Consider the data timestamp, not the recording timestamp t
                stamp = msg.header.stamp
                if topic == odometry_topic:
                    self.logger.debug(f"{num_msgs:5d} STAMP='{stamp.secs}.{stamp.nsecs}'"
                                      f" ODOM msg.poseX='{msg.pose.pose.position.x}'")
                    self._last_poses[stamp] = msg.pose.pose
                    if save_all_positions:
                        self.poses_info.append(PositionInfo(stamp, msg.pose.pose))
                elif topic in image_topics:
                    self.logger.debug(f"{num_msgs:5d} STAMP='{stamp.secs}.{stamp.nsecs}'"
                                      f" {topic} HxW={msg.height}x{msg.width}")
                    images_stamp[topic] = stamp
                    # TODO: Better image decoding:
                    #  cv_bridge (depend on OpenCV)
                    #  ros_numpy: no dependency on OpenCV but not natively installed on melodic
                    # The below code is adapted from its Image.py file and will work for types like:
                    # rgb8, rgba8, rgb16, rgba16, bgr8, bgra8, bgr16, bgra16, mono8, mono16
                    # and the bayer formats but not the OpenCV CvMat types.
                    dtype = np.uint8
                    if len(msg.encoding) > 0:
                        if msg.encoding.endswith("8"):
                            dtype = np.uint8
                        elif msg.encoding.endswith("16"):
                            dtype = np.uint16
                        else:
                            raise ValueError(f'Unsupported image encoding {msg.encoding}')
                    nb_channels = 1
                    if msg.step > 0:
                        # step is the full row length in bytes; width the number of pixels per row
                        nb_channels = int(msg.step / msg.width)
                    if nb_channels > 1:
                        img_buf = np.frombuffer(msg.data, dtype=dtype).reshape(msg.height, msg.width, nb_channels)
                    else:
                        img_buf = np.frombuffer(msg.data, dtype=dtype).reshape(msg.height, msg.width)
                    images_buffer[topic] = img_buf
                else:
                    self.logger.debug(f"{num_msgs:5d} Ignoring message of topic '{topic}'")
                # If we have all images, save them on disk
                if len(images_stamp) == nb_image_topics:
                    # The images are taken every 0.033 second
                    # Check all images have the same time stamp modulo epsilon
                    for topic1, stamp1 in images_stamp.items():
                        for topic2, stamp2 in images_stamp.items():
                            delta = (stamp1-stamp2).to_sec()
                            # bug fix: message was missing the f prefix, so topic names were never interpolated
                            assert (abs(delta) < 0.001), f"{topic1} and {topic2} images must have the same timestamp"
                    pose6d = self._find_pose(stamp) if find_image_position else None
                    if pose6d or not find_image_position:
                        # Save only some images
                        if ((image_number-1) % count_to_skip) == 0:
                            for image_topic in image_topics:
                                img = images_buffer[image_topic]
                                image_directory = image_directory_path[image_topic]
                                stamp = images_stamp[image_topic]
                                try:
                                    img_name = path_secure(self._save_image(img, image_directory, image_number, stamp))
                                except SystemError:
                                    self.logger.exception(f"Could not save image number {image_number}"
                                                          f" from topic {image_topic}")
                                else:
                                    dir_last_image_time[image_directory] = stamp.to_sec()
                                    saved_number += 1
                                    self.images_info.append(
                                        ImageInfo(img_name, stamp, image_topic_to_cam_id[image_topic])
                                    )
                            if pose6d:
                                self.poses_info.append(PositionInfo(stamp, pose6d))
                        else:
                            self.logger.debug(f'    Skipping image number {image_number}')
                        image_number += 1
                        images_stamp.clear()
                        images_buffer.clear()
                        progress_bar and progress_bar.update(1)
                    # Else wait until we have the first pose
                num_msgs += 1
            progress_bar and progress_bar.close()
            # set each image directory mtime to the time of its last written image
            for image_directory, dir_time in dir_last_image_time.items():
                os.utime(image_directory, (dir_time, dir_time))
            self.logger.info(f'Saved {saved_number} images')
예제 #25
0
def import_12scenes(d12scenes_path: str,
                    kapture_dir_path: str,
                    force_overwrite_existing: bool = False,
                    images_import_method: TransferAction = TransferAction.skip,
                    partition: Optional[str] = None
                    ) -> None:
    """
    Imports RGB-D Dataset 12-Scenes dataset and save them as kapture.

    :param d12scenes_path: path to the 12scenes sequence root path
    :param kapture_dir_path: path to kapture top directory
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    :param partition: if specified = 'mapping' or 'query'.
    :raises FileNotFoundError: if the sequence, split or info files are missing.
    :raises ValueError: on an invalid partition name or unparsable split file.
    """
    os.makedirs(kapture_dir_path, exist_ok=True)
    delete_existing_kapture_files(kapture_dir_path, force_erase=force_overwrite_existing)

    logger.info('loading all content ...')

    # files are named like frame-000000.color.jpg / frame-000000.depth.png / frame-000000.pose.txt
    d7s_filename_re = re.compile(r'frame-(?P<frame_id>\d{6})\.(?P<suffix>\w*)\.(?P<ext>\w*)')

    # populate all relevant files
    d12images_path = os.path.join(d12scenes_path, 'data')
    d7s_filenames = (path_secure(path.relpath(path.join(dp, fn), d12images_path))
                     for dp, _, fs in os.walk(d12images_path) for fn in fs)

    logger.info('populating 12-scenes files ...')
    # run the regex once per filename (the original searched twice) and keep only matches
    d7s_filename_matches = ((filename, d7s_filename_re.search(filename))
                            for filename in sorted(d7s_filenames))
    d7s_filenames = {filename: match.groupdict()
                     for filename, match in d7s_filename_matches
                     if match}

    # reorg as shot[seq, id] = {color: , depth: , pose: , ...}
    shots = {}
    for filename, file_attribs in d7s_filenames.items():
        shot_id = int(file_attribs['frame_id'])
        shots.setdefault(shot_id, {})[file_attribs['suffix']] = filename

    # fake timestamps
    for timestamp, shot_id in enumerate(shots):
        shots[shot_id]['timestamp'] = timestamp

    # if given, filter partition
    if partition is not None:
        # read the authors split file
        partition_filepath = path.join(d12scenes_path, 'split.txt')
        if not path.isfile(partition_filepath):
            raise FileNotFoundError(f'partition file is missing: {partition_filepath}.')

        with open(partition_filepath, 'rt') as file:
            # note from dsac++; the first sequence is used for testing, everything else for training
            d7s_split_exp = r'^sequence(?P<sequence>\d+) \[frames=(?P<count>\d+)\]  \[start=(?P<start_frame>\d+) ;' \
                            r' end=(?P<end_frame>\d+)\]$'
            d7s_split_re = re.compile(d7s_split_exp)
            split_sequences = [re.match(d7s_split_re, line) for line in file.readlines()]
            if len(split_sequences) < 1 or not split_sequences[0]:
                raise ValueError('failed to parse split.txt file')
            test_split = (int(split_sequences[0].group('start_frame')), int(split_sequences[0].group('end_frame')))

            # filter out
            if partition == "query":
                shots = {frame: shot
                         for frame, shot in shots.items()
                         if test_split[0] <= frame <= test_split[1]
                         }
            elif partition == "mapping":
                shots = {frame: shot
                         for frame, shot in shots.items()
                         if frame < test_split[0] or frame > test_split[1]
                         }
            else:
                raise ValueError('invalid partition name')

    if len(shots) == 0:
        raise FileNotFoundError('no file found: make sure the path to 12scenes sequence is valid.')

    # eg. shots['000000'] =
    #       {
    #           'color': 'seq-01/frame-000000.color.jpg',
    #           'depth': 'seq-01/frame-000000.depth.png',
    #           'pose': 'seq-01/frame-000000.pose.txt',
    #           'timestamp': 0}

    # images + depth maps
    logger.info('populating image and depth maps files ...')
    snapshots = kapture.RecordsCamera()
    depth_maps = kapture.RecordsDepth()
    for shot in shots.values():
        snapshots[shot['timestamp'], RGB_SENSOR_ID] = shot['color']
        kapture_depth_map_filename = shot['depth'][:-len('.png')]  # kapture depth files are not png
        depth_maps[shot['timestamp'], DEPTH_SENSOR_ID] = kapture_depth_map_filename
        kapture_registered_depth_map_filename = shot['depth'][:-len('.png')] + '.reg'  # kapture depth files are not png
        depth_maps[shot['timestamp'], REG_DEPTH_SENSOR_ID] = kapture_registered_depth_map_filename

    # poses
    logger.info('import poses files ...')
    trajectories = kapture.Trajectories()
    for shot in shots.values():
        pose_filepath = path.join(d12images_path, shot['pose'])
        pose_mat = np.loadtxt(pose_filepath)  # camera-to-world, 4×4 matrix in homogeneous coordinates
        # invalid poses are marked with INF entries in the file; check the loaded matrix
        # directly instead of re-reading the file (also catches NaN and lowercase inf)
        if not np.isfinite(pose_mat).all():
            timestamp = shot['timestamp']
            image_name = shot['color']
            logger.debug(f'ts={timestamp}, name={image_name}: ignored inf pose')
            continue
        rotation_mat = pose_mat[0:3, 0:3]
        position_vec = pose_mat[0:3, 3]
        rotation_quat = quaternion.from_rotation_matrix(rotation_mat)
        pose_world_from_cam = kapture.PoseTransform(r=rotation_quat, t=position_vec)
        # kapture trajectories store camera-from-world poses
        pose_cam_from_world = pose_world_from_cam.inverse()
        trajectories[shot['timestamp'], RGBD_SENSOR_ID] = pose_cam_from_world

    # sensors
    """
    Read info.txt
    """
    info_filepath = path.join(d12scenes_path, 'info.txt')
    if not path.isfile(info_filepath):
        raise FileNotFoundError(f'info file is missing: {info_filepath}.')

    with open(info_filepath, 'rt') as file:
        info_dict = {}
        for line in file.readlines():
            line_splits = line.rstrip().split(' = ')
            info_dict[line_splits[0]] = line_splits[1]

    sensors = kapture.Sensors()
    camera_type = kapture.CameraType.PINHOLE
    assert 'm_calibrationColorIntrinsic' in info_dict
    assert 'm_colorWidth' in info_dict
    assert 'm_colorHeight' in info_dict
    # the intrinsics are stored as a flattened 4x4 matrix: fx at 0, fy at 5, cx at 2, cy at 6
    rgb_intrinsics = [float(v) for v in info_dict['m_calibrationColorIntrinsic'].split(' ')]
    # w, h, fx, fy, cx, cy
    rgb_camera_params = [int(info_dict['m_colorWidth']), int(info_dict['m_colorHeight']),
                         rgb_intrinsics[0], rgb_intrinsics[5], rgb_intrinsics[2], rgb_intrinsics[6]]
    sensors[RGB_SENSOR_ID] = kapture.Camera(
        name=RGB_SENSOR_ID,
        camera_type=camera_type,
        camera_params=rgb_camera_params
    )

    assert 'm_calibrationDepthIntrinsic' in info_dict
    assert 'm_depthWidth' in info_dict
    assert 'm_depthHeight' in info_dict
    depth_intrinsics = [float(v) for v in info_dict['m_calibrationDepthIntrinsic'].split(' ')]
    # w, h, fx, fy, cx, cy
    depth_camera_params = [int(info_dict['m_depthWidth']), int(info_dict['m_depthHeight']),
                           depth_intrinsics[0], depth_intrinsics[5], depth_intrinsics[2], depth_intrinsics[6]]
    sensors[DEPTH_SENSOR_ID] = kapture.Camera(
        name=DEPTH_SENSOR_ID,
        camera_type=camera_type,
        camera_params=depth_camera_params,
        sensor_type='depth'
    )

    # the registered depth sensor shares the RGB intrinsics by construction
    sensors[REG_DEPTH_SENSOR_ID] = kapture.Camera(
        name=REG_DEPTH_SENSOR_ID,
        camera_type=camera_type,
        camera_params=rgb_camera_params,
        sensor_type='depth'
    )

    # bind camera and depth sensor into a rig
    logger.info('building rig with camera and depth sensor ...')
    rigs = kapture.Rigs()
    rigs[RGBD_SENSOR_ID, RGB_SENSOR_ID] = kapture.PoseTransform()
    rigs[RGBD_SENSOR_ID, DEPTH_SENSOR_ID] = kapture.PoseTransform()
    rigs[RGBD_SENSOR_ID, REG_DEPTH_SENSOR_ID] = kapture.PoseTransform()

    # import (copy) image files.
    logger.info('copying image files ...')
    image_filenames = [f for _, _, f in kapture.flatten(snapshots)]
    import_record_data_from_dir_auto(d12images_path, kapture_dir_path, image_filenames, images_import_method)

    # import (copy) depth map files.
    logger.info('converting depth files ...')
    depth_map_filenames = kapture.io.records.records_to_filepaths(depth_maps, kapture_dir_path)
    hide_progress = logger.getEffectiveLevel() > logging.INFO
    for depth_map_filename, depth_map_filepath_kapture in tqdm(depth_map_filenames.items(), disable=hide_progress):
        if '.reg' in depth_map_filename:
            continue
        depth_map_filepath_12scenes = path.join(d12images_path, depth_map_filename + '.png')
        depth_map = np.array(Image.open(depth_map_filepath_12scenes))
        # depth maps is in mm in 12scenes, convert it to meters
        depth_map = depth_map.astype(np.float32) * 1.0e-3
        kapture.io.records.records_depth_to_file(depth_map_filepath_kapture, depth_map)
        # register depth to rgb
        reg_depth_map = register_depth(get_K(camera_type, depth_camera_params), get_K(camera_type, rgb_camera_params),
                                       np.eye(4), depth_map, rgb_camera_params[0], rgb_camera_params[1])
        kapture.io.records.records_depth_to_file(depth_map_filepath_kapture + '.reg', reg_depth_map)

    # pack into kapture format
    imported_kapture = kapture.Kapture(
        records_camera=snapshots,
        records_depth=depth_maps,
        rigs=rigs,
        trajectories=trajectories,
        sensors=sensors)

    logger.info('writing imported data ...')
    kapture_to_dir(kapture_dir_path, imported_kapture)
예제 #26
0
def import_silda(
    silda_dirpath: str,
    destination_kapture_dirpath: str,
    fallback_cam_model: str = 'FOV',
    do_split_cams: bool = False,
    corpus: Optional[str] = None,
    replace_pose_rig: bool = False,
    force_overwrite_existing: bool = False,
    images_import_strategy: TransferAction = TransferAction.link_absolute
) -> None:
    """
    Imports data from silda dataset.

    :param silda_dirpath: path to the silda top directory
    :param destination_kapture_dirpath: input path to kapture directory.
    :param fallback_cam_model: camera model to fallback when necessary
    :param do_split_cams: If true, re-organises and renames the image files to split apart cameras.
    :param corpus: the list of corpus to be imported, among 'mapping', 'query'.
    :param replace_pose_rig: if True, replaces poses of individual cameras with poses of the rig.
    :param force_overwrite_existing: if true, Silently overwrite kapture files if already exists.
    :param images_import_strategy: how to copy image files.
    """

    # sanity check
    silda_dirpath = path_secure(path.abspath(silda_dirpath))
    destination_kapture_dirpath = path_secure(
        path.abspath(destination_kapture_dirpath))
    if TransferAction.root_link == images_import_strategy and do_split_cams:
        raise ValueError(
            'impossible to only link images directory and applying split cam.')
    hide_progress_bars = logger.getEffectiveLevel() >= logging.INFO

    # prepare output directory
    kapture.io.structure.delete_existing_kapture_files(
        destination_kapture_dirpath, force_overwrite_existing)
    os.makedirs(destination_kapture_dirpath, exist_ok=True)

    # images ###########################################################################################################
    logger.info('Processing images ...')
    # silda-images
    #   ...
    #   ├── 1445_0.png
    #   ├── 1445_1.png
    #   ...
    silda_images_root_path = path.join(silda_dirpath, 'silda-images')
    # list all png files (its PNG in silda) using a generator.
    if corpus is not None:
        assert corpus in SILDA_CORPUS_SPLIT_FILENAMES
        # if corpus specified, filter by those which directory name match corpus.
        logger.debug(f'only importing {corpus} part.')
        coprus_filepath = path.join(silda_dirpath,
                                    SILDA_CORPUS_SPLIT_FILENAMES[corpus])
        with open(coprus_filepath, 'rt') as corpus_file:
            corpus_filenames = corpus_file.readlines()
            image_filenames_original = sorted(filename.strip()
                                              for filename in corpus_filenames)
    else:
        image_filenames_original = sorted(
            filename for dirpath, sd, fs in os.walk(silda_images_root_path)
            for filename in fs if filename.endswith('.png'))

    image_filenames_kapture = []
    snapshots = kapture.RecordsCamera()
    image_name_to_ids = {}  # '1445_0.png' -> (1445, 0)
    for image_filename_original in tqdm(image_filenames_original,
                                        disable=hide_progress_bars):
        # retrieve info from image filename
        shot_info = SILDA_IMAGE_NAME_PATTERN.match(image_filename_original)
        assert shot_info is not None
        shot_info = shot_info.groupdict()
        shot_info['timestamp'] = int(
            shot_info['timestamp']
        )  # To avoid warnings about type of the value
        # eg. file_info = {'filename': '1445_0.png', 'timestamp': 1445, 'cam_id': '0'}
        # create a path of the image into NLE dir
        if do_split_cams:
            # re-organise images with subfolders per corpus/camera/timestamp.png
            kapture_image_filename = path.join(
                shot_info['cam_id'],
                '{:04d}.png'.format(shot_info['timestamp']))
        else:
            # keep the original file hierarchy
            kapture_image_filename = image_filename_original

        image_filenames_kapture.append(kapture_image_filename)
        snapshots[shot_info['timestamp'],
                  shot_info['cam_id']] = kapture_image_filename
        image_name_to_ids[shot_info['filename']] = (shot_info['timestamp'],
                                                    shot_info['cam_id'])

    assert len(image_filenames_kapture) == len(image_filenames_original)
    # intrinsics #######################################################################################################
    logger.info('Processing sensors ...')
    cameras = kapture.Sensors()
    # use hard coded intrinsics
    # evaluated using colmap
    # 1 OPENCV_FISHEYE 1024 1024 393.299 394.815 512 512 -0.223483 0.117325 -0.0326138 0.00361082
    #                  fx, fy, cx, cy, omega
    # 1 FOV 1024 1024 300 300 512 512 0.899632
    cam_id_list = sorted(
        set(cam_id for _, cam_id, _ in kapture.flatten(snapshots)))
    for cam_id in cam_id_list:
        # pick a image for that cam id
        random_image_intrinsic = next(
            f'{timestamp}_{cam_id}.intrinsics'  # keep only filename (thats what silda expect)
            for timestamp, cid, filename in kapture.flatten(snapshots)
            if cid == cam_id)
        logger.debug(
            f'camera {cam_id} intrinsics : picking at random: ("{random_image_intrinsic}")'
        )
        intrinsic_filepath = path.join(silda_dirpath, 'camera-intrinsics',
                                       random_image_intrinsic)
        logger.debug(f'loading file: "{intrinsic_filepath}"')
        silda_proj_params = np.loadtxt(intrinsic_filepath)
        # only retrieve principal point from intrinsics,
        # because the rest correspond to a fisheye model not available in colmap.
        principal_point = (silda_proj_params[0:2] *
                           SILDA_IMAGE_SIZE).flatten().tolist()
        projection = fallback_cam_model
        if 'OPENCV_FISHEYE' == projection:
            focal_length = [393.299, 394.815]
            fisheye_coefficients = [
                -0.223483, 0.117325, -0.0326138, 0.00361082
            ]
            #          //    fx, fy, cx, cy, k1, k2, k3, k4
            proj_params = focal_length + principal_point + fisheye_coefficients
        elif 'FOV' == projection:
            # use hard coded intrinsics from Torsten reconstruction, ie. :
            #       217.294036, 217.214703, 512.000000, 507.897400, -0.769113
            focal_length = [217.294036, 217.214703]
            # principal_point = [512.000000, 507.897400]
            omega = [-0.769113]
            #                  fx, fy, cx, cy, omega
            proj_params = focal_length + principal_point + omega
        else:
            raise ValueError(
                'Only accepts OPENCV_FISHEYE, or FOV as projection model.')

        camera = kapture.Camera(projection,
                                SILDA_IMAGE_SIZE.tolist() + proj_params)
        cameras[cam_id] = camera

    # extrinsics #######################################################################################################
    logger.info('Processing trajectories ...')
    trajectories = kapture.Trajectories()
    with open(path.join(silda_dirpath, 'silda-train-poses.txt')) as file:
        lines = file.readlines()
        lines = (line.rstrip().split() for line in lines)
        extrinsics = {
            line[0]: np.array(line[1:8], dtype=np.float)
            for line in lines
        }

    for silda_image_name, pose_params in tqdm(extrinsics.items(),
                                              disable=hide_progress_bars):
        # Silda poses are 7-dim vectors with the rotation quaternion,
        # and the translation vector. The order needs to be:
        # qw,qx,qy,qz,tx,ty,tz
        # The parameters should be described in terms of camera to world transformations
        if silda_image_name not in image_name_to_ids:
            # if this is not referenced: means its part of the corpus to be ignored.
            continue
        pose = kapture.PoseTransform(pose_params[0:4],
                                     pose_params[4:7]).inverse()
        timestamp, cam_id = image_name_to_ids[silda_image_name]
        trajectories[timestamp, cam_id] = pose

    # rigs
    logger.info('Making up a rig ...')
    rigs = kapture.Rigs()
    pose_babord = kapture.PoseTransform(t=[0, 0, 0],
                                        r=quaternion.from_rotation_vector(
                                            [0, -np.pi / 2, 0]))
    pose_tribord = kapture.PoseTransform(t=[0, 0, 0],
                                         r=quaternion.from_rotation_vector(
                                             [0, np.pi / 2, 0]))
    rigs['silda_rig', '0'] = pose_babord
    rigs['silda_rig', '1'] = pose_tribord
    if replace_pose_rig:
        logger.info('replacing camera poses with rig poses.')
        kapture.rigs_recover_inplace(trajectories, rigs)

    # pack it all together
    kapture_data = kapture.Kapture(sensors=cameras,
                                   records_camera=snapshots,
                                   trajectories=trajectories,
                                   rigs=rigs)

    logger.info('saving to Kapture  ...')
    kapture.io.csv.kapture_to_dir(destination_kapture_dirpath, kapture_data)

    # finally import images
    if images_import_strategy != TransferAction.skip:
        # importing image files
        logger.info(f'importing {len(image_filenames_original)} images ...')
        assert len(image_filenames_original) == len(image_filenames_kapture)
        image_filepaths_original = [
            path.join(silda_images_root_path, image_filename_kapture)
            for image_filename_kapture in image_filenames_original
        ]
        image_filepaths_kapture = [
            get_image_fullpath(destination_kapture_dirpath,
                               image_filename_kapture)
            for image_filename_kapture in image_filenames_kapture
        ]
        transfer_files_from_dir(image_filepaths_original,
                                image_filepaths_kapture,
                                images_import_strategy)
    logger.info('done.')