def compute_observations_pairs(mapping_path: str,
                               query_path: Optional[str],
                               output_path: str,
                               topk: int,
                               keypoints_type: Optional[str],
                               iou: bool,
                               max_number_of_threads: Optional[int] = None):
    """
    compute image pairs from observations, and write the result in a text file
    """
    skip_heavy_features = [
        kapture.Descriptors, kapture.GlobalFeatures, kapture.Matches
    ]
    skip_heavy = [kapture.RecordsLidar, kapture.RecordsWifi
                  ] + skip_heavy_features

    logger.info(f'compute_observations_pairs. loading mapping: {mapping_path}')
    # the content of the keypoints is not important, we do not need to keep a reference to the tar
    with get_all_tar_handlers(mapping_path,
                              skip_list=skip_heavy_features) as tar_handlers:
        kdata = kapture_from_dir(mapping_path,
                                 skip_list=skip_heavy,
                                 tar_handlers=tar_handlers)
    assert kdata.sensors is not None
    assert kdata.records_camera is not None
    if keypoints_type is None:
        keypoints_type = try_get_only_key_from_collection(kdata.keypoints)
    assert keypoints_type is not None
    assert kdata.observations is not None
    assert kdata.keypoints is not None
    assert keypoints_type in kdata.keypoints
    assert kdata.points3d is not None

    if query_path is None or mapping_path == query_path:
        logger.info('computing mapping pairs from observations...')
        kdata_query = None
    else:
        logger.info('computing query pairs from observations...')
        with get_all_tar_handlers(
                query_path,
                skip_list=skip_heavy_features) as query_tar_handlers:
            kdata_query = kapture_from_dir(query_path,
                                           skip_list=skip_heavy,
                                           tar_handlers=query_tar_handlers)
        assert kdata_query.sensors is not None
        assert kdata_query.records_camera is not None

    os.umask(0o002)
    p = pathlib.Path(output_path)
    os.makedirs(str(p.parent.resolve()), exist_ok=True)

    with open(output_path, 'w') as fid:
        image_pairs = get_pairs_observations(kdata, kdata_query,
                                             keypoints_type,
                                             max_number_of_threads, iou, topk)
        table_to_file(fid,
                      image_pairs,
                      header='# query_image, map_image, score')
    logger.info('all done')
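
# A minimal usage sketch (hypothetical paths and values, relying on the same imports as
# the example above): compute the top-20 mapping-to-mapping pairs from shared
# observations and write them to a pairsfile; query_path=None triggers the
# mapping-only branch handled in the function.
compute_observations_pairs(mapping_path='mapping_kapture/',
                           query_path=None,
                           output_path='pairs_observations.txt',
                           topk=20,
                           keypoints_type=None,
                           iou=False)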

# Example #2

def colmap_localize_from_loaded_data(kapture_data: kapture.Kapture,
                                     kapture_path: str,
                                     tar_handlers: Optional[TarCollection],
                                     colmap_path: str,
                                     input_database_path: str,
                                     input_reconstruction_path: str,
                                     colmap_binary: str,
                                     keypoints_type: Optional[str],
                                     use_colmap_matches_importer: bool,
                                     image_registrator_options: List[str],
                                     skip_list: List[str],
                                     force: bool) -> None:
    """
    Localize images on a colmap model with the kapture data.

    :param kapture_data: kapture data to use
    :param kapture_path: path to the kapture to use
    :param tar_handlers: collection of preloaded tar archives
    :param colmap_path: path to the colmap build
    :param input_database_path: path to the map colmap.db
    :param input_reconstruction_path: path to the map reconstruction folder
    :param colmap_binary: path to the colmap binary executable
    :param keypoints_type: type of keypoints, name of the keypoints subfolder
    :param use_colmap_matches_importer: if True, run the colmap matches_importer for geometric verification
        instead of exporting the two-view geometry directly from the kapture matches
    :param image_registrator_options: options for the image registrator
    :param skip_list: list of steps to skip
    :param force: Silently overwrite kapture files if they already exist.
    """
    os.makedirs(colmap_path, exist_ok=True)

    if not (kapture_data.records_camera and kapture_data.sensors and kapture_data.keypoints and kapture_data.matches):
        raise ValueError('records_camera, sensors, keypoints, matches are mandatory')

    if kapture_data.trajectories:
        logger.warning("Input data contains trajectories: they will be ignored")
        kapture_data.trajectories.clear()
    else:
        kapture_data.trajectories = kapture.Trajectories()

    # COLMAP does not fully support rigs.
    if kapture_data.rigs is not None and kapture_data.trajectories is not None:
        # make sure, rigs are not used in trajectories.
        logger.info('remove rigs notation.')
        rigs_remove_inplace(kapture_data.trajectories, kapture_data.rigs)
        kapture_data.rigs.clear()

    # Prepare output
    # Set fixed name for COLMAP database
    colmap_db_path = path.join(colmap_path, 'colmap.db')
    image_list_path = path.join(colmap_path, 'images.list')
    reconstruction_path = path.join(colmap_path, "reconstruction")
    if 'delete_existing' not in skip_list:
        safe_remove_file(colmap_db_path, force)
        safe_remove_file(image_list_path, force)
        safe_remove_any_path(reconstruction_path, force)
    os.makedirs(reconstruction_path, exist_ok=True)

    # Copy colmap db to output
    if not os.path.exists(colmap_db_path):
        shutil.copy(input_database_path, colmap_db_path)

    # find correspondences between the colmap db and the kapture data
    images_all = {image_path: (ts, cam_id)
                  for ts, shot in kapture_data.records_camera.items()
                  for cam_id, image_path in shot.items()}

    colmap_db = COLMAPDatabase.connect(colmap_db_path)
    colmap_image_ids = database_extra.get_colmap_image_ids_from_db(colmap_db)
    colmap_images = database_extra.get_images_from_database(colmap_db)
    colmap_db.close()

    # dict ( kapture_camera -> colmap_camera_id )
    colmap_camera_ids = {images_all[image_path][1]: colmap_cam_id
                         for image_path, colmap_cam_id in colmap_images if image_path in images_all}

    images_to_add = {image_path: value
                     for image_path, value in images_all.items()
                     if image_path not in colmap_image_ids}

    flatten_images_to_add = [(ts, kapture_cam_id, image_path)
                             for image_path, (ts, kapture_cam_id) in images_to_add.items()]

    if 'import_to_db' not in skip_list:
        logger.info("Step 1: Add precomputed keypoints and matches to colmap db")

        if keypoints_type is None:
            keypoints_type = try_get_only_key_from_collection(kapture_data.keypoints)
        assert keypoints_type is not None
        assert keypoints_type in kapture_data.keypoints
        assert keypoints_type in kapture_data.matches

        cameras_to_add = kapture.Sensors()
        for _, (_, kapture_cam_id) in images_to_add.items():
            if kapture_cam_id not in colmap_camera_ids:
                kapture_cam = kapture_data.sensors[kapture_cam_id]
                cameras_to_add[kapture_cam_id] = kapture_cam
        colmap_db = COLMAPDatabase.connect(colmap_db_path)
        colmap_added_camera_ids = database_extra.add_cameras_to_database(cameras_to_add, colmap_db)
        colmap_camera_ids.update(colmap_added_camera_ids)

        colmap_added_image_ids = database_extra.add_images_to_database_from_flatten(
            colmap_db, flatten_images_to_add, kapture_data.trajectories, colmap_camera_ids)
        colmap_image_ids.update(colmap_added_image_ids)

        colmap_image_ids_reversed = {v: k for k, v in colmap_image_ids.items()}  # colmap_id : name

        # add new features
        colmap_keypoints = database_extra.get_keypoints_set_from_database(colmap_db, colmap_image_ids_reversed)

        keypoints_all = kapture_data.keypoints[keypoints_type]
        keypoints_to_add = {name for name in keypoints_all if name not in colmap_keypoints}
        keypoints_to_add = kapture.Keypoints(keypoints_all.type_name, keypoints_all.dtype, keypoints_all.dsize,
                                             keypoints_to_add)
        database_extra.add_keypoints_to_database(colmap_db, keypoints_to_add,
                                                 keypoints_type, kapture_path,
                                                 tar_handlers,
                                                 colmap_image_ids)

        # add new matches
        colmap_matches = kapture.Matches(database_extra.get_matches_set_from_database(colmap_db,
                                                                                      colmap_image_ids_reversed))
        colmap_matches.normalize()

        matches_all = kapture_data.matches[keypoints_type]
        matches_to_add = kapture.Matches({pair for pair in matches_all if pair not in colmap_matches})
        # print(list(matches_to_add))
        database_extra.add_matches_to_database(colmap_db, matches_to_add,
                                               keypoints_type, kapture_path,
                                               tar_handlers,
                                               colmap_image_ids,
                                               export_two_view_geometry=not use_colmap_matches_importer)
        colmap_db.close()

    if use_colmap_matches_importer:
        logger.info('Step 2: Run geometric verification')
        logger.debug('running colmap matches_importer...')

        if keypoints_type is None:
            keypoints_type = try_get_only_key_from_collection(kapture_data.matches)
        assert keypoints_type is not None
        assert keypoints_type in kapture_data.matches

        # compute two view geometry
        colmap_lib.run_matches_importer_from_kapture_matches(
            colmap_binary,
            colmap_use_cpu=True,
            colmap_gpu_index=None,
            colmap_db_path=colmap_db_path,
            kapture_matches=kapture_data.matches[keypoints_type],
            force=force)
    else:
        logger.info('Step 2: Run geometric verification - skipped')
    if 'image_registrator' not in skip_list:
        logger.info("Step 3: Run image_registrator")
        # run image_registrator
        colmap_lib.run_image_registrator(
            colmap_binary,
            colmap_db_path,
            input_reconstruction_path,
            reconstruction_path,
            image_registrator_options
        )

    # run model_converter
    if 'model_converter' not in skip_list:
        logger.info("Step 4: Export reconstruction results to txt")
        colmap_lib.run_model_converter(
            colmap_binary,
            reconstruction_path,
            reconstruction_path
        )
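
# A minimal usage sketch (hypothetical paths, default options, relying on the same imports
# as the example above): load the query kapture with its tar archives kept open, then
# register its images into an existing COLMAP map.
with get_all_tar_handlers('query_kapture/') as tar_handlers:
    kdata = kapture_from_dir('query_kapture/', tar_handlers=tar_handlers)
    colmap_localize_from_loaded_data(kdata, 'query_kapture/', tar_handlers,
                                     colmap_path='colmap_localized/',
                                     input_database_path='map_colmap/colmap.db',
                                     input_reconstruction_path='map_colmap/reconstruction',
                                     colmap_binary='colmap',
                                     keypoints_type=None,
                                     use_colmap_matches_importer=False,
                                     image_registrator_options=[],
                                     skip_list=[],
                                     force=False)
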
def run_colmap_gv_from_loaded_data(kapture_none_matches: kapture.Kapture,
                                   kapture_colmap_matches: kapture.Kapture,
                                   kapture_none_matches_dirpath: str,
                                   kapture_colmap_matches_dirpath: str,
                                   tar_handlers_none_matches: Optional[TarCollection],
                                   tar_handlers_colmap_matches: Optional[TarCollection],
                                   colmap_binary: str,
                                   keypoints_type: Optional[str],
                                   skip_list: List[str],
                                   force: bool):
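    """
    Run geometric verification (colmap matches_importer) on the matches that are present in
    kapture_none_matches but missing from kapture_colmap_matches, then import the verified
    matches back into kapture_colmap_matches.

    :param kapture_none_matches: kapture data with the raw, not yet verified matches
    :param kapture_colmap_matches: kapture data holding the already verified matches
    :param kapture_none_matches_dirpath: path to the kapture with the raw matches
    :param kapture_colmap_matches_dirpath: path to the kapture with the verified matches
    :param tar_handlers_none_matches: collection of pre-opened tar archives for the raw matches kapture
    :param tar_handlers_colmap_matches: collection of pre-opened tar archives for the verified matches kapture
    :param colmap_binary: path to the colmap binary executable
    :param keypoints_type: type of keypoints, name of the keypoints subfolder
    :param skip_list: list of steps to skip
    :param force: Silently overwrite files if they already exist.
    """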
    logger.info('run_colmap_gv...')
    if not (kapture_none_matches.records_camera and kapture_none_matches.sensors and
            kapture_none_matches.keypoints and kapture_none_matches.matches):
        raise ValueError('records_camera, sensors, keypoints, matches are mandatory')

    # COLMAP does not fully support rigs.
    if kapture_none_matches.rigs is not None and kapture_none_matches.trajectories is not None:
        # make sure, rigs are not used in trajectories.
        logger.info('remove rigs notation.')
        rigs_remove_inplace(kapture_none_matches.trajectories, kapture_none_matches.rigs)

    # Set fixed name for COLMAP database
    colmap_db_path = os.path.join(kapture_colmap_matches_dirpath, 'colmap.db')
    if 'delete_existing' not in skip_list:
        safe_remove_file(colmap_db_path, force)

    if keypoints_type is None:
        keypoints_type = try_get_only_key_from_collection(kapture_none_matches.matches)
    assert keypoints_type is not None
    assert keypoints_type in kapture_none_matches.keypoints
    assert keypoints_type in kapture_none_matches.matches

    if 'matches_importer' not in skip_list:
        logger.debug('compute matches difference.')
        if kapture_colmap_matches.matches is not None and keypoints_type in kapture_colmap_matches.matches:
            colmap_matches = kapture_colmap_matches.matches[keypoints_type]
        else:
            colmap_matches = kapture.Matches()
        matches_to_verify = kapture.Matches(kapture_none_matches.matches[keypoints_type].difference(colmap_matches))
        kapture_data_to_export = kapture.Kapture(sensors=kapture_none_matches.sensors,
                                                 trajectories=kapture_none_matches.trajectories,
                                                 records_camera=kapture_none_matches.records_camera,
                                                 keypoints={
                                                     keypoints_type: kapture_none_matches.keypoints[keypoints_type]
                                                 },
                                                 matches={
                                                     keypoints_type: matches_to_verify
                                                 })
        # creates a new database with matches
        logger.debug('export matches difference to db.')
        colmap_db = COLMAPDatabase.connect(colmap_db_path)
        database_extra.kapture_to_colmap(kapture_data_to_export, kapture_none_matches_dirpath,
                                         tar_handlers_none_matches,
                                         colmap_db,
                                         keypoints_type,
                                         None,
                                         export_two_view_geometry=False)
        # close db before running colmap processes in order to avoid locks
        colmap_db.close()

        logger.debug('run matches_importer command.')
        colmap_lib.run_matches_importer_from_kapture_matches(
            colmap_binary,
            colmap_use_cpu=True,
            colmap_gpu_index=None,
            colmap_db_path=colmap_db_path,
            kapture_matches=matches_to_verify,
            force=force
        )

    if 'import' not in skip_list:
        logger.debug('import verified matches.')
        os.umask(0o002)
        colmap_db = COLMAPDatabase.connect(colmap_db_path)
        kapture_data = kapture.Kapture()
        kapture_data.records_camera, _ = get_images_and_trajectories_from_database(colmap_db)
        kapture_data.matches = {
            keypoints_type: get_matches_from_database(colmap_db, kapture_data.records_camera,
                                                      kapture_colmap_matches_dirpath,
                                                      tar_handlers_colmap_matches,
                                                      keypoints_type,
                                                      no_geometric_filtering=False)
        }
        colmap_db.close()

        if kapture_colmap_matches.matches is None:
            kapture_colmap_matches.matches = {}
        if keypoints_type not in kapture_colmap_matches.matches:
            kapture_colmap_matches.matches[keypoints_type] = kapture.Matches()
        kapture_colmap_matches.matches[keypoints_type].update(kapture_data.matches[keypoints_type])

    if 'delete_db' not in skip_list:
        logger.debug('delete intermediate colmap db.')
        os.remove(colmap_db_path)

# Example #4

def pyransaclib_localize_from_loaded_data(
        kapture_data: kapture.Kapture, kapture_path: str,
        tar_handlers: TarCollection, kapture_query_data: kapture.Kapture,
        output_path: str, pairsfile_path: str, inlier_threshold: float,
        number_lo_steps: int, min_num_iterations: int, max_num_iterations: int,
        refine_poses: bool, keypoints_type: Optional[str],
        duplicate_strategy: DuplicateCorrespondencesStrategy,
        rerank_strategy: RerankCorrespondencesStrategy,
        write_detailed_report: bool, force: bool) -> None:
    """
    Localize images using pyransaclib.

    :param kapture_data: loaded kapture data (incl. points3d)
    :param kapture_path: path to the kapture to use
    :param tar_handlers: collection of pre-opened tar archives
    :param kapture_query_data: loaded kapture query data
    :param output_path: path to write the localization results
    :param pairsfile_path: pairs to use
    :param inlier_threshold: RANSAC inlier threshold in pixel
    :param number_lo_steps: number of local optimization iterations in LO-MSAC. Use 0 to use MSAC
    :param min_num_iterations: minimum number of ransac loops
    :param max_num_iterations: maximum number of ransac loops
    :param refine_poses: refine poses with pycolmap
    :param keypoints_type: types of keypoints (and observations) to use
    :param duplicate_strategy: strategy to handle duplicate correspondences (either kpt_id and/or pt3d_id)
    :param rerank_strategy: strategy to reorder pairs before handling duplicate correspondences
    :param write_detailed_report: if True, write a json file with inliers, reprojection error for each query
    :param force: Silently overwrite kapture files if they already exist.
    """
    assert has_pyransaclib
    if refine_poses:
        assert has_pycolmap
    if not (kapture_data.records_camera and kapture_data.sensors
            and kapture_data.keypoints and kapture_data.matches
            and kapture_data.points3d and kapture_data.observations):
        raise ValueError('records_camera, sensors, keypoints, matches, '
                         'points3d, observations are mandatory for map+query')

    if not (kapture_query_data.records_camera and kapture_query_data.sensors):
        raise ValueError('records_camera, sensors are mandatory for query')

    if keypoints_type is None:
        keypoints_type = try_get_only_key_from_collection(
            kapture_data.keypoints)
    assert keypoints_type is not None
    assert keypoints_type in kapture_data.keypoints
    assert keypoints_type in kapture_data.matches

    if kapture_data.rigs is not None and kapture_data.trajectories is not None:
        # make sure, rigs are not used in trajectories.
        logger.info('remove rigs notation.')
        rigs_remove_inplace(kapture_data.trajectories, kapture_data.rigs)
        kapture_data.rigs.clear()

    if kapture_query_data.trajectories is not None:
        logger.warning(
            "Input query data contains trajectories: they will be ignored")
        kapture_query_data.trajectories.clear()

    os.umask(0o002)
    os.makedirs(output_path, exist_ok=True)
    delete_existing_kapture_files(output_path, force_erase=force)

    # load pairsfile
    pairs = {}
    with open(pairsfile_path, 'r') as fid:
        table = kapture.io.csv.table_from_file(fid)
        for img_query, img_map, _ in table:
            if img_query not in pairs:
                pairs[img_query] = []
            pairs[img_query].append(img_map)

    kapture_data.matches[keypoints_type].normalize()
    keypoints_filepaths = keypoints_to_filepaths(
        kapture_data.keypoints[keypoints_type], keypoints_type, kapture_path,
        tar_handlers)
    obs_for_keypoints_type = {
        point_id: per_keypoints_type_subdict[keypoints_type]
        for point_id, per_keypoints_type_subdict in
        kapture_data.observations.items()
        if keypoints_type in per_keypoints_type_subdict
    }
    point_id_from_obs = {
        (img_name, kp_id): point_id
        for point_id in obs_for_keypoints_type.keys()
        for img_name, kp_id in obs_for_keypoints_type[point_id]
    }
    query_images = [(timestamp, sensor_id, image_name)
                    for timestamp, sensor_id, image_name in kapture.flatten(
                        kapture_query_data.records_camera)]

    # kapture for localized images + pose
    trajectories = kapture.Trajectories()
    progress_bar = tqdm(total=len(query_images),
                        disable=logging.getLogger().level >= logging.CRITICAL)
    for timestamp, sensor_id, image_name in query_images:
        if image_name not in pairs:
            continue
        keypoints_filepath = keypoints_filepaths[image_name]
        kapture_keypoints_query = image_keypoints_from_file(
            filepath=keypoints_filepath,
            dsize=kapture_data.keypoints[keypoints_type].dsize,
            dtype=kapture_data.keypoints[keypoints_type].dtype)
        query_cam = kapture_query_data.sensors[sensor_id]
        assert isinstance(query_cam, kapture.Camera)
        num_keypoints = kapture_keypoints_query.shape[0]
        kapture_keypoints_query, K, distortion = get_camera_matrix_from_kapture(
            kapture_keypoints_query, query_cam)
        kapture_keypoints_query = kapture_keypoints_query.reshape(
            (num_keypoints, 2))

        cv2_keypoints_query = np.copy(kapture_keypoints_query)
        if np.count_nonzero(distortion) > 0:
            epsilon = np.finfo(np.float64).eps
            stop_criteria = (cv2.TERM_CRITERIA_MAX_ITER +
                             cv2.TERM_CRITERIA_EPS, 500, epsilon)
            cv2_keypoints_query = cv2.undistortPointsIter(
                cv2_keypoints_query,
                K,
                distortion,
                R=None,
                P=K,
                criteria=stop_criteria)
        cv2_keypoints_query = cv2_keypoints_query.reshape((num_keypoints, 2))
        # center keypoints
        for i in range(cv2_keypoints_query.shape[0]):
            cv2_keypoints_query[i, 0] = cv2_keypoints_query[i, 0] - K[0, 2]
            cv2_keypoints_query[i, 1] = cv2_keypoints_query[i, 1] - K[1, 2]

        kpts_query = kapture_keypoints_query if (
            refine_poses or write_detailed_report) else None
        points2D, points2D_undistorted, points3D, stats = get_correspondences(
            kapture_data, keypoints_type, kapture_path, tar_handlers,
            image_name, pairs[image_name], point_id_from_obs, kpts_query,
            cv2_keypoints_query, duplicate_strategy, rerank_strategy)
        # compute absolute pose
        # inlier_threshold - RANSAC inlier threshold in pixels
        # answer - dictionary containing the RANSAC output
        ret = pyransaclib.ransaclib_localization(image_name, K[0, 0], K[1, 1],
                                                 points2D_undistorted,
                                                 points3D, inlier_threshold,
                                                 number_lo_steps,
                                                 min_num_iterations,
                                                 max_num_iterations)

        # add pose to output kapture
        if ret['success'] and ret['num_inliers'] > 0:
            pose = kapture.PoseTransform(ret['qvec'], ret['tvec'])

            if refine_poses:
                inlier_mask = np.zeros((len(points2D), ), dtype=bool)
                inlier_mask[ret['inliers']] = True
                inlier_mask = inlier_mask.tolist()
                col_cam_id, width, height, params, _ = get_colmap_camera(
                    query_cam)
                cfg = {
                    'model': CAMERA_MODEL_NAME_ID[col_cam_id][0],
                    'width': int(width),
                    'height': int(height),
                    'params': params
                }
                ret_refine = pycolmap.pose_refinement(pose.t_raw, pose.r_raw,
                                                      points2D, points3D,
                                                      inlier_mask, cfg)
                if ret_refine['success']:
                    pose = kapture.PoseTransform(ret_refine['qvec'],
                                                 ret_refine['tvec'])
                    logger.debug(
                        f'{image_name} refinement success, new pose: {pose}')

            if write_detailed_report:
                reprojection_error = compute_reprojection_error(
                    pose, ret['num_inliers'], ret['inliers'], points2D,
                    points3D, K, distortion)
                cache = {
                    "num_correspondences": len(points3D),
                    "num_inliers": ret['num_inliers'],
                    "inliers": ret['inliers'],
                    "reprojection_error": reprojection_error,
                    "stats": stats
                }
                cache_path = os.path.join(
                    output_path, f'pyransaclib_cache/{image_name}.json')
                save_to_json(cache, cache_path)
            trajectories[timestamp, sensor_id] = pose

        progress_bar.update(1)
    progress_bar.close()

    kapture_data_localized = kapture.Kapture(
        sensors=kapture_query_data.sensors,
        trajectories=trajectories,
        records_camera=kapture_query_data.records_camera,
        rigs=kapture_query_data.rigs)
    kapture.io.csv.kapture_to_dir(output_path, kapture_data_localized)
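
# For reference, a minimal sketch (hypothetical image names) of the pairsfile format the
# loading loop in the function above expects: one "query_image, map_image, score" line per
# retrieved pair, exactly as written by compute_image_pairs / compute_observations_pairs.
example_pairsfile_content = ('# query_image, map_image, score\n'
                             'query/0001.jpg, mapping/0042.jpg, 0.83\n'
                             'query/0001.jpg, mapping/0057.jpg, 0.79\n')
# Writing this string to a text file and passing its path as pairsfile_path would drive
# the localization of query/0001.jpg against the two retrieved mapping images.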

# Example #5

def pycolmap_localize_from_loaded_data(
        kapture_data: kapture.Kapture, kapture_path: str,
        tar_handlers: TarCollection, kapture_query_data: kapture.Kapture,
        output_path: str, pairsfile_path: str, max_error: float,
        min_inlier_ratio: float, min_num_iterations: int,
        max_num_iterations: int, confidence: float,
        keypoints_type: Optional[str],
        duplicate_strategy: DuplicateCorrespondencesStrategy,
        rerank_strategy: RerankCorrespondencesStrategy,
        write_detailed_report: bool, force: bool) -> None:
    """
    Localize images using pycolmap.

    :param kapture_data: loaded kapture data (incl. points3d)
    :param kapture_path: path to the kapture to use
    :param tar_handlers: collection of pre-opened tar archives
    :param kapture_query_data: loaded kapture query data
    :param output_path: path to write the localization results
    :param pairsfile_path: pairs to use
    :param max_error: RANSAC inlier threshold in pixel
    :param min_inlier_ratio: abs_pose_options.ransac_options.min_inlier_ratio
    :param min_num_iterations: abs_pose_options.ransac_options.min_num_trials
    :param max_num_iterations: abs_pose_options.ransac_options.max_num_trials
    :param confidence: abs_pose_options.ransac_options.confidence
    :param keypoints_type: types of keypoints (and observations) to use
    :param duplicate_strategy: strategy to handle duplicate correspondences (either kpt_id and/or pt3d_id)
    :param rerank_strategy: strategy to reorder pairs before handling duplicate correspondences
    :param write_detailed_report: if True, write a json file with inliers, reprojection error for each query
    :param force: Silently overwrite kapture files if they already exist
    """
    assert has_pycolmap
    if not (kapture_data.records_camera and kapture_data.sensors
            and kapture_data.keypoints and kapture_data.matches
            and kapture_data.points3d and kapture_data.observations):
        raise ValueError('records_camera, sensors, keypoints, matches, '
                         'points3d, observations are mandatory for map+query')

    if not (kapture_query_data.records_camera and kapture_query_data.sensors):
        raise ValueError('records_camera, sensors are mandatory for query')

    if keypoints_type is None:
        keypoints_type = try_get_only_key_from_collection(
            kapture_data.keypoints)
    assert keypoints_type is not None
    assert keypoints_type in kapture_data.keypoints
    assert keypoints_type in kapture_data.matches

    if kapture_data.rigs is not None and kapture_data.trajectories is not None:
        # make sure, rigs are not used in trajectories.
        logger.info('remove rigs notation.')
        rigs_remove_inplace(kapture_data.trajectories, kapture_data.rigs)
        kapture_data.rigs.clear()

    if kapture_query_data.trajectories is not None:
        logger.warning(
            "Input query data contains trajectories: they will be ignored")
        kapture_query_data.trajectories.clear()

    os.umask(0o002)
    os.makedirs(output_path, exist_ok=True)
    delete_existing_kapture_files(output_path, force_erase=force)

    # load pairsfile
    pairs = {}
    with open(pairsfile_path, 'r') as fid:
        table = kapture.io.csv.table_from_file(fid)
        for img_query, img_map, _ in table:
            if img_query not in pairs:
                pairs[img_query] = []
            pairs[img_query].append(img_map)

    kapture_data.matches[keypoints_type].normalize()
    keypoints_filepaths = keypoints_to_filepaths(
        kapture_data.keypoints[keypoints_type], keypoints_type, kapture_path,
        tar_handlers)
    obs_for_keypoints_type = {
        point_id: per_keypoints_type_subdict[keypoints_type]
        for point_id, per_keypoints_type_subdict in
        kapture_data.observations.items()
        if keypoints_type in per_keypoints_type_subdict
    }
    point_id_from_obs = {
        (img_name, kp_id): point_id
        for point_id in obs_for_keypoints_type.keys()
        for img_name, kp_id in obs_for_keypoints_type[point_id]
    }
    query_images = [(timestamp, sensor_id, image_name)
                    for timestamp, sensor_id, image_name in kapture.flatten(
                        kapture_query_data.records_camera)]

    # kapture for localized images + pose
    trajectories = kapture.Trajectories()
    for timestamp, sensor_id, image_name in tqdm(
            query_images,
            disable=logging.getLogger().level >= logging.CRITICAL):
        if image_name not in pairs:
            continue
        # N number of correspondences
        # points2D - Nx2 array with pixel coordinates
        # points3D - Nx3 array with world coordinates
        points2D = []
        points3D = []
        keypoints_filepath = keypoints_filepaths[image_name]
        kapture_keypoints_query = image_keypoints_from_file(
            filepath=keypoints_filepath,
            dsize=kapture_data.keypoints[keypoints_type].dsize,
            dtype=kapture_data.keypoints[keypoints_type].dtype)
        query_cam = kapture_query_data.sensors[sensor_id]
        assert isinstance(query_cam, kapture.Camera)

        col_cam_id, width, height, params, _ = get_colmap_camera(query_cam)
        cfg = {
            'model': CAMERA_MODEL_NAME_ID[col_cam_id][0],
            'width': int(width),
            'height': int(height),
            'params': params
        }

        points2D, _, points3D, stats = get_correspondences(
            kapture_data, keypoints_type, kapture_path, tar_handlers,
            image_name, pairs[image_name], point_id_from_obs,
            kapture_keypoints_query, None, duplicate_strategy, rerank_strategy)

        # compute absolute pose
        # inlier_threshold - RANSAC inlier threshold in pixels
        # answer - dictionary containing the RANSAC output
        ret = pycolmap.absolute_pose_estimation(points2D, points3D, cfg,
                                                max_error, min_inlier_ratio,
                                                min_num_iterations,
                                                max_num_iterations, confidence)
        # add pose to output kapture
        if ret['success'] and ret['num_inliers'] > 0:
            pose = kapture.PoseTransform(ret['qvec'], ret['tvec'])
            if write_detailed_report:
                num_2dpoints = len(points2D)
                points2D_final, K, distortion = get_camera_matrix_from_kapture(
                    np.array(points2D, dtype=np.float64), query_cam)
                points2D_final = list(points2D_final.reshape(
                    (num_2dpoints, 2)))
                inliers = np.where(ret['inliers'])[0].tolist()
                reprojection_error = compute_reprojection_error(
                    pose, ret['num_inliers'], inliers, points2D_final,
                    points3D, K, distortion)
                cache = {
                    "num_correspondences": len(points3D),
                    "num_inliers": inliers,
                    "inliers": ret['inliers'],
                    "reprojection_error": reprojection_error,
                    "stats": stats
                }
                cache_path = os.path.join(output_path,
                                          f'pycolmap_cache/{image_name}.json')
                save_to_json(cache, cache_path)
            trajectories[timestamp, sensor_id] = pose

    kapture_data_localized = kapture.Kapture(
        sensors=kapture_query_data.sensors,
        trajectories=trajectories,
        records_camera=kapture_query_data.records_camera,
        rigs=kapture_query_data.rigs)
    kapture.io.csv.kapture_to_dir(output_path, kapture_data_localized)
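
# A minimal usage sketch (hypothetical paths, relying on the same imports as the example
# above; the DuplicateCorrespondencesStrategy / RerankCorrespondencesStrategy members below
# are assumed names): load the mapping and query kaptures, then localize every query image
# listed in the pairsfile.
with get_all_tar_handlers('mapping_kapture/') as map_tars, \
        get_all_tar_handlers('query_kapture/') as query_tars:
    kdata_map = kapture_from_dir('mapping_kapture/', tar_handlers=map_tars)
    kdata_query = kapture_from_dir('query_kapture/', tar_handlers=query_tars)
    pycolmap_localize_from_loaded_data(kdata_map, 'mapping_kapture/', map_tars, kdata_query,
                                       output_path='localized_kapture/',
                                       pairsfile_path='pairs_topk20.txt',
                                       max_error=8.0,
                                       min_inlier_ratio=0.01,
                                       min_num_iterations=1000,
                                       max_num_iterations=10000,
                                       confidence=0.9999,
                                       keypoints_type=None,
                                       duplicate_strategy=DuplicateCorrespondencesStrategy.ignore,  # assumed member
                                       rerank_strategy=RerankCorrespondencesStrategy.matches_count,  # assumed member
                                       write_detailed_report=False,
                                       force=False)
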
def create_3D_model_from_depth_from_loaded_data(
        kdata: kapture.Kapture, input_path: str, tar_handlers: TarCollection,
        output_path: str, keypoints_type: Optional[str], depth_sensor_id: str,
        topk: int, method: Method, cellsizes: List[str], force: bool):
    """
    Create 3D model from a kapture dataset that has registered depth data
    Assumes the kapture data is already loaded
    """
    logger.info('create 3D model using depth data')

    if os.path.exists(output_path) and not force:
        print(f'{output_path} already exists, use --force to overwrite')
        return -1

    if kdata.rigs is not None:
        assert kdata.trajectories is not None
        kapture.rigs_remove_inplace(kdata.trajectories, kdata.rigs)

    if keypoints_type is None:
        keypoints_type = try_get_only_key_from_collection(kdata.keypoints)
    assert keypoints_type is not None
    assert kdata.keypoints is not None
    assert keypoints_type in kdata.keypoints

    if method == Method.voxelgrid:
        vg = VoxelGrid(cellsizes)

    # add all 3D points to map that correspond to a keypoint
    logger.info('adding points from scan to kapture')
    points3d = []
    observations = kapture.Observations()

    progress_bar = tqdm(total=len(
        list(kapture.flatten(kdata.records_camera, is_sorted=True))),
                        disable=logger.level >= logging.CRITICAL)
    for timestamp, sensor_id, sensing_filepath in kapture.flatten(
            kdata.records_camera, is_sorted=True):
        logger.info(
            f'total 3d points: {len(points3d)}, processing {sensing_filepath}')
        # check if images have a pose
        if timestamp not in kdata.trajectories:
            logger.info('{} does not have a pose. skipping ...'.format(
                sensing_filepath))
            continue

        # check if a depth map exists for this image
        if depth_sensor_id is None:
            depth_id = sensor_id + '_depth'
        else:
            depth_id = depth_sensor_id
        depth_map_record = ''
        if timestamp in kdata.records_depth and depth_id in kdata.records_depth[timestamp]:
            depth_map_record = kdata.records_depth[timestamp][depth_id]
        depth_map_size = tuple(int(x) for x in kdata.sensors[depth_id].camera_params[0:2])
        depth_path = get_depth_map_fullpath(input_path, depth_map_record)
        if not os.path.exists(depth_path):
            logger.info('no 3D data found for {}. skipping ...'.format(
                sensing_filepath))
            continue
        depth_map = depth_map_from_file(depth_path, depth_map_size)
        img = Image.open(get_image_fullpath(input_path,
                                            sensing_filepath)).convert('RGB')

        assert img.size[0] == depth_map_size[0]
        assert img.size[1] == depth_map_size[1]

        kps_raw = load_keypoints(keypoints_type, input_path, sensing_filepath,
                                 kdata.keypoints[keypoints_type].dtype,
                                 kdata.keypoints[keypoints_type].dsize,
                                 tar_handlers)

        _, camera_sensor_C, camera_dist = get_camera_matrix_from_kapture(
            np.zeros((1, 0, 2), dtype=np.float64), kdata.sensors[sensor_id])
        cv2_keypoints, depth_sensor_C, depth_dist = get_camera_matrix_from_kapture(
            kps_raw, kdata.sensors[depth_id])
        assert np.isclose(depth_sensor_C, camera_sensor_C).all()
        assert np.isclose(depth_dist, camera_dist).all()

        if np.count_nonzero(camera_dist) > 0:
            epsilon = np.finfo(np.float64).eps
            stop_criteria = (cv2.TERM_CRITERIA_MAX_ITER +
                             cv2.TERM_CRITERIA_EPS, 500, epsilon)
            undistorted_cv2_keypoints = cv2.undistortPointsIter(
                cv2_keypoints,
                camera_sensor_C,
                camera_dist,
                R=None,
                P=camera_sensor_C,
                criteria=stop_criteria)
        else:
            undistorted_cv2_keypoints = cv2_keypoints

        cv2_keypoints = cv2_keypoints.reshape((kps_raw.shape[0], 2))
        undistorted_cv2_keypoints = undistorted_cv2_keypoints.reshape(
            (kps_raw.shape[0], 2))

        points3d_img = []
        rgb_img = []
        kp_idxs = []
        for idx_kp, kp in enumerate(cv2_keypoints[0:topk]):
            u = round(kp[0])
            v = round(kp[1])

            undist_kp = undistorted_cv2_keypoints[idx_kp]
            undist_u = round(undist_kp[0])
            undist_v = round(undist_kp[1])

            if 0 <= u < depth_map_size[0] and 0 <= v < depth_map_size[1]:
                if depth_map[v, u] == 0:
                    continue
                pt3d = project_kp_to_3D(undist_u, undist_v, depth_map[v, u],
                                        depth_sensor_C[0, 2], depth_sensor_C[1, 2],
                                        depth_sensor_C[0, 0], depth_sensor_C[1, 1])
                points3d_img.append(pt3d)
                rgb_img.append(img.getpixel((u, v)))
                kp_idxs.append(idx_kp)
        # transform to world coordinates (pt3d from a depth map is in camera coordinates)
        # we use sensor_id here because we assume that the image and the corresponding depthmap have the same pose
        # and sometimes, the pose might only be provided for the images
        cam_to_world = kdata.trajectories[timestamp][sensor_id].inverse()
        if len(points3d_img) == 0:
            continue
        points3d_img = cam_to_world.transform_points(np.array(points3d_img))
        for idx_kp, pt3d, rgb in zip(kp_idxs, points3d_img, rgb_img):
            if not np.isnan(pt3d).any():
                # apply transform (alignment)
                if method == Method.voxelgrid:
                    assert vg is not None
                    if not vg.exists(pt3d):
                        # add 3D point
                        points3d.append(list(pt3d) + list(rgb))
                        # add observation
                        observations.add(
                            len(points3d) - 1, keypoints_type,
                            sensing_filepath, idx_kp)
                        vg.add(pt3d, len(points3d) - 1, sensing_filepath)
                    else:
                        ret = vg.append(pt3d, sensing_filepath)
                        if ret is not None:
                            observations.add(ret[0], keypoints_type,
                                             sensing_filepath, idx_kp)
                elif method == Method.all:
                    # add 3D point
                    points3d.append(list(pt3d) + list(rgb))
                    # add observation
                    observations.add(
                        len(points3d) - 1, keypoints_type, sensing_filepath,
                        idx_kp)
        # save_3Dpts_to_ply(points3d, os.path.join(output_path, 'map.ply'))
        progress_bar.update(1)
    progress_bar.close()

    kdata.points3d = kapture.Points3d(np.array(points3d))
    kdata.observations = observations

    logger.info('saving ...')
    kapture_to_dir(output_path, kdata)
    # save_3Dpts_to_ply(points3d, os.path.join(output_path, 'map.ply'))

    logger.info('all done')
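
# A minimal usage sketch (hypothetical paths and cell sizes, relying on the same imports as
# the example above): build a 3D point cloud from the registered depth maps, de-duplicating
# points with the voxel grid before saving the augmented kapture.
with get_all_tar_handlers('mapping_kapture/') as tar_handlers:
    kdata = kapture_from_dir('mapping_kapture/', tar_handlers=tar_handlers)
    create_3D_model_from_depth_from_loaded_data(kdata, 'mapping_kapture/', tar_handlers,
                                                output_path='mapping_with_points3d/',
                                                keypoints_type=None,
                                                depth_sensor_id=None,  # fall back to '<camera_id>_depth'
                                                topk=2000,
                                                method=Method.voxelgrid,
                                                cellsizes=['10', '1', '0.1'],
                                                force=False)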

# Example #7

def pose_approximation(mapping_path: str,
                       query_path: str,
                       output_path: str,
                       global_features_type: Optional[str],
                       topk: int,
                       force_overwrite_existing: bool,
                       method: PoseApproximationMethods,
                       additional_parameters: dict):
    """
    compute approximated pose from image retrieval results

    :param mapping_path: input path to kapture input root directory
    :type mapping_path: str
    :param query_path: input path to a kapture root directory
    :type query_path: str
    :param output_path: output path to the kapture root directory where the approximated poses are written
    :type output_path: str
    :param global_features_type: type of global_features, name of the global_features subfolder
    :param topk: the max number of top retained images
    :type topk: int
    :param force_overwrite_existing: silently overwrite output files if they already exist
    :type force_overwrite_existing: bool
    :param method: pose approximation method to use
    :type method: PoseApproximationMethods
    :param additional_parameters: store method specific args
    :type additional_parameters: dict
    """
    assert mapping_path != query_path

    os.makedirs(output_path, exist_ok=True)
    delete_existing_kapture_files(output_path, force_erase=force_overwrite_existing)

    logger.info(f'pose_approximation. loading mapping: {mapping_path}')
    with get_all_tar_handlers(mapping_path) as mapping_tar_handlers:
        kdata_map = kapture_from_dir(mapping_path, None, skip_list=[kapture.Keypoints,
                                                                    kapture.Descriptors,
                                                                    kapture.Matches,
                                                                    kapture.Observations,
                                                                    kapture.Points3d],
                                     tar_handlers=mapping_tar_handlers)
        assert kdata_map.sensors is not None
        assert kdata_map.records_camera is not None
        assert kdata_map.global_features is not None
        if global_features_type is None:
            global_features_type = try_get_only_key_from_collection(kdata_map.global_features)
        assert global_features_type is not None
        assert global_features_type in kdata_map.global_features

        global_features_config = GlobalFeaturesConfig(kdata_map.global_features[global_features_type].type_name,
                                                      kdata_map.global_features[global_features_type].dtype,
                                                      kdata_map.global_features[global_features_type].dsize,
                                                      kdata_map.global_features[global_features_type].metric_type)

        logger.info(f'computing pairs with {global_features_type}...')

        map_global_features_to_filepaths = global_features_to_filepaths(
            kdata_map.global_features[global_features_type],
            global_features_type,
            mapping_path,
            mapping_tar_handlers
        )
        mapping_list = list(sorted(map_global_features_to_filepaths.items()))
        map_stacked_features = stack_global_features(global_features_config, mapping_list)

    logger.info(f'pose_approximation. loading query: {query_path}')
    with get_all_tar_handlers(query_path) as query_tar_handlers:
        kdata_query = kapture_from_dir(query_path, None, skip_list=[kapture.Keypoints,
                                                                    kapture.Descriptors,
                                                                    kapture.Matches,
                                                                    kapture.Observations,
                                                                    kapture.Points3d],
                                       tar_handlers=query_tar_handlers)
        assert kdata_query.sensors is not None
        assert kdata_query.records_camera is not None
        assert kdata_query.global_features is not None
        assert global_features_type in kdata_query.global_features

        kdata_mapping_gfeat = kdata_map.global_features[global_features_type]
        kdata_query_gfeat = kdata_query.global_features[global_features_type]
        assert kdata_mapping_gfeat.type_name == kdata_query_gfeat.type_name
        assert kdata_mapping_gfeat.dtype == kdata_query_gfeat.dtype
        assert kdata_mapping_gfeat.dsize == kdata_query_gfeat.dsize

        query_global_features_to_filepaths = global_features_to_filepaths(
            kdata_query_gfeat,
            global_features_type,
            query_path,
            query_tar_handlers
        )
        query_list = list(sorted(query_global_features_to_filepaths.items()))
        query_stacked_features = stack_global_features(global_features_config, query_list)

    logger.info('computing pose approximation with'
                f' {kdata_map.global_features[global_features_type].type_name}...')

    # main code
    weights = get_interpolation_weights(method,
                                        query_stacked_features,
                                        map_stacked_features,
                                        topk,
                                        additional_parameters)
    out_trajectories = get_interpolated_pose(kdata_map, kdata_query, weights)
    out_kapture = kapture.Kapture(sensors=kdata_query.sensors,
                                  records_camera=kdata_query.records_camera,
                                  trajectories=out_trajectories)
    kapture_to_dir(output_path, out_kapture)
    logger.info('all done')
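
# A minimal usage sketch (hypothetical paths, relying on the same imports as the example
# above; the PoseApproximationMethods member below is an assumed name): approximate each
# query pose by interpolating the poses of the top-5 retrieved mapping images.
pose_approximation('mapping_kapture/', 'query_kapture/', 'approximated_poses/',
                   global_features_type=None,
                   topk=5,
                   force_overwrite_existing=False,
                   method=PoseApproximationMethods.equal_weighted_barycenter,  # assumed member
                   additional_parameters={})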

# Example #8

def compute_image_pairs(mapping_path: str, query_path: str, output_path: str,
                        global_features_type: Optional[str], topk: int):
    """
    compute image pairs between query -> mapping from global features, and write the result in a text file

    :param mapping_path: input path to kapture input root directory
    :type mapping_path: str
    :param query_path: input path to a kapture root directory
    :type query_path: str
    :param output_path: output path to pairsfile
    :type output_path: str
    :param global_features_type: type of global_features, name of the global_features subfolder
    :param topk: the max number of top retained images
    :type topk: int
    """
    logger.info(f'compute_image_pairs. loading mapping: {mapping_path}')
    with get_all_tar_handlers(mapping_path) as mapping_tar_handlers:
        kdata_mapping = kapture_from_dir(mapping_path,
                                         None,
                                         skip_list=[
                                             kapture.Keypoints,
                                             kapture.Descriptors,
                                             kapture.Matches,
                                             kapture.Observations,
                                             kapture.Points3d
                                         ],
                                         tar_handlers=mapping_tar_handlers)
        assert kdata_mapping.sensors is not None
        assert kdata_mapping.records_camera is not None
        assert kdata_mapping.global_features is not None
        if global_features_type is None:
            global_features_type = try_get_only_key_from_collection(
                kdata_mapping.global_features)
        assert global_features_type is not None
        assert global_features_type in kdata_mapping.global_features

        global_features_config = GlobalFeaturesConfig(
            kdata_mapping.global_features[global_features_type].type_name,
            kdata_mapping.global_features[global_features_type].dtype,
            kdata_mapping.global_features[global_features_type].dsize,
            kdata_mapping.global_features[global_features_type].metric_type)

        logger.info(f'computing pairs with {global_features_type}...')

        mapping_global_features_to_filepaths = global_features_to_filepaths(
            kdata_mapping.global_features[global_features_type],
            global_features_type, mapping_path, mapping_tar_handlers)
        mapping_list = list(
            sorted(mapping_global_features_to_filepaths.items()))
        mapping_stacked_features = stack_global_features(
            global_features_config, mapping_list)

    if mapping_path == query_path:
        kdata_query = kdata_mapping
        query_stacked_features = mapping_stacked_features
    else:
        logger.info(f'compute_image_pairs. loading query: {query_path}')
        with get_all_tar_handlers(query_path) as query_tar_handlers:
            kdata_query = kapture_from_dir(query_path,
                                           None,
                                           skip_list=[
                                               kapture.Keypoints,
                                               kapture.Descriptors,
                                               kapture.Matches,
                                               kapture.Observations,
                                               kapture.Points3d
                                           ],
                                           tar_handlers=query_tar_handlers)
            assert kdata_query.sensors is not None
            assert kdata_query.records_camera is not None
            assert kdata_query.global_features is not None
            assert global_features_type in kdata_query.global_features

            kdata_mapping_gfeat = kdata_mapping.global_features[
                global_features_type]
            kdata_query_gfeat = kdata_query.global_features[
                global_features_type]
            assert kdata_mapping_gfeat.type_name == kdata_query_gfeat.type_name
            assert kdata_mapping_gfeat.dtype == kdata_query_gfeat.dtype
            assert kdata_mapping_gfeat.dsize == kdata_query_gfeat.dsize

            query_global_features_to_filepaths = global_features_to_filepaths(
                kdata_query_gfeat, global_features_type, query_path,
                query_tar_handlers)
            query_list = list(
                sorted(query_global_features_to_filepaths.items()))
            query_stacked_features = stack_global_features(
                global_features_config, query_list)

    similarity = get_similarity(query_stacked_features,
                                mapping_stacked_features)

    # get list of image pairs
    image_pairs = get_image_pairs(similarity, topk)

    logger.info('saving to file...')
    p = pathlib.Path(output_path)
    os.makedirs(str(p.parent.resolve()), exist_ok=True)
    with open(output_path, 'w') as fid:
        table_to_file(fid,
                      image_pairs,
                      header='# query_image, map_image, score')
    logger.info('all done')
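
# A minimal usage sketch (hypothetical paths): retrieve, for every query image, the
# top-20 most similar mapping images from global features and write the pairsfile.
compute_image_pairs('mapping_kapture/', 'query_kapture/', 'pairs_topk20.txt',
                    global_features_type=None, topk=20)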

# Example #9

def compute_matches_from_loaded_data(input_path: str,
                                     tar_handlers: Optional[TarCollection],
                                     kdata: kapture.Kapture,
                                     descriptors_type: Optional[str],
                                     image_pairs: list,
                                     overwrite_existing: bool = False):
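    """
    Compute matches for the given image pairs from their descriptors and write the match
    files into the kapture at input_path.

    :param input_path: path to the kapture root directory
    :param tar_handlers: collection of pre-opened tar archives
    :param kdata: loaded kapture data (with descriptors)
    :param descriptors_type: type of descriptors, name of the descriptors subfolder
    :param image_pairs: list of (image_path1, image_path2) pairs to match
    :param overwrite_existing: if True, re-compute matches even when the match file already exists
    """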
    assert kdata.sensors is not None
    assert kdata.records_camera is not None
    assert kdata.descriptors is not None
    os.umask(0o002)

    if descriptors_type is None:
        descriptors_type = try_get_only_key_from_collection(kdata.descriptors)
    assert descriptors_type is not None
    assert descriptors_type in kdata.descriptors
    keypoints_type = kdata.descriptors[descriptors_type].keypoints_type
    # assert kdata.descriptors[descriptors_type].metric_type == "L2"

    matcher = MatchPairNnTorch(use_cuda=torch.cuda.is_available())
    new_matches = kapture.Matches()

    logger.info('compute_matches. entering main loop...')
    hide_progress_bar = logger.getEffectiveLevel() > logging.INFO
    skip_count = 0
    for image_path1, image_path2 in tqdm(image_pairs,
                                         disable=hide_progress_bar):
        if image_path1 == image_path2:
            continue
        if image_path1 > image_path2:
            image_path1, image_path2 = image_path2, image_path1

        # skip existing matches
        if (not overwrite_existing) \
                and (kdata.matches is not None) \
                and keypoints_type in kdata.matches \
                and ((image_path1, image_path2) in kdata.matches[keypoints_type]):
            new_matches.add(image_path1, image_path2)
            skip_count += 1
            continue

        if image_path1 not in kdata.descriptors[descriptors_type] \
                or image_path2 not in kdata.descriptors[descriptors_type]:
            logger.warning('unable to find descriptors for image pair : '
                           '\n\t{} \n\t{}'.format(image_path1, image_path2))
            continue

        descriptor1 = load_descriptors(
            descriptors_type, input_path, tar_handlers, image_path1,
            kdata.descriptors[descriptors_type].dtype,
            kdata.descriptors[descriptors_type].dsize)
        descriptor2 = load_descriptors(
            descriptors_type, input_path, tar_handlers, image_path2,
            kdata.descriptors[descriptors_type].dtype,
            kdata.descriptors[descriptors_type].dsize)
        matches = matcher.match_descriptors(descriptor1, descriptor2)
        matches_path = get_matches_fullpath((image_path1, image_path2),
                                            keypoints_type, input_path,
                                            tar_handlers)
        image_matches_to_file(matches_path, matches)
        new_matches.add(image_path1, image_path2)

    if not overwrite_existing:
        logger.debug(
            f'{skip_count} pairs were skipped because the match file already existed'
        )
    if not matches_check_dir(new_matches, keypoints_type, input_path,
                             tar_handlers):
        logger.critical(
            'matching ended successfully but not all files were saved')

    # update kapture matches
    if kdata.matches is None:
        kdata.matches = {}
    if keypoints_type not in kdata.matches:
        kdata.matches[keypoints_type] = kapture.Matches()
    kdata.matches[keypoints_type].update(new_matches)

    logger.info('all done')
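
# A minimal usage sketch (hypothetical paths and pairs, relying on the same imports as the
# example above; features are assumed to be stored as plain files rather than tar archives):
# match the descriptors of two image pairs and store the match files inside the kapture.
with get_all_tar_handlers('mapping_kapture/') as tar_handlers:
    kdata = kapture_from_dir('mapping_kapture/', tar_handlers=tar_handlers)
    image_pairs = [('cam0/0001.jpg', 'cam0/0042.jpg'),
                   ('cam0/0001.jpg', 'cam0/0057.jpg')]
    compute_matches_from_loaded_data('mapping_kapture/', tar_handlers, kdata,
                                     descriptors_type=None,
                                     image_pairs=image_pairs)
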
def pycolmap_rig_localize_from_loaded_data(
        kapture_data: kapture.Kapture, kapture_path: str,
        tar_handlers: TarCollection, kapture_query_data: kapture.Kapture,
        output_path: str, pairsfile_path: str, rig_ids: List[str],
        apply_rigs_remove: bool, max_error: float, min_inlier_ratio: float,
        min_num_iterations: int, max_num_iterations: int, confidence: float,
        keypoints_type: Optional[str],
        duplicate_strategy: DuplicateCorrespondencesStrategy,
        rerank_strategy: RerankCorrespondencesStrategy,
        write_detailed_report: bool, force: bool) -> None:
    """
    Localize images from a multi camera rig using pycolmap

    :param kapture_data: loaded kapture data (incl. points3d)
    :param kapture_path: path to the kapture to use
    :param tar_handlers: collection of pre-opened tar archives
    :param kapture_query_data: loaded kapture query data
    :param output_path: path to write the localization results
    :param pairsfile_path: pairs to use
    :param rig_ids: list of rig ids that should be localized
    :param apply_rigs_remove: apply rigs remove before saving poses to disk
    :param max_error: RANSAC inlier threshold in pixel, shared between all cameras
    :param min_inlier_ratio: abs_pose_options.ransac_options.min_inlier_ratio
    :param min_num_iterations: abs_pose_options.ransac_options.min_num_trials
    :param max_num_iterations: abs_pose_options.ransac_options.max_num_trials
    :param confidence: abs_pose_options.ransac_options.confidence
    :param keypoints_type: types of keypoints (and observations) to use
    :param duplicate_strategy: strategy to handle duplicate correspondences (either kpt_id and/or pt3d_id)
    :param rerank_strategy: strategy to reorder pairs before handling duplicate correspondences
    :param write_detailed_report: if True, write a json file with inliers, reprojection error for each query
    :param force: Silently overwrite kapture files if they already exist.
    """
    assert has_pycolmap
    if not (kapture_data.records_camera and kapture_data.sensors
            and kapture_data.keypoints and kapture_data.matches
            and kapture_data.points3d and kapture_data.observations):
        raise ValueError('records_camera, sensors, keypoints, matches, '
                         'points3d, observations are mandatory for map+query')

    if not (kapture_query_data.records_camera and kapture_query_data.sensors):
        raise ValueError('records_camera, sensors are mandatory for query')

    if keypoints_type is None:
        keypoints_type = try_get_only_key_from_collection(
            kapture_data.keypoints)
    assert keypoints_type is not None
    assert keypoints_type in kapture_data.keypoints
    assert keypoints_type in kapture_data.matches

    assert kapture_query_data.rigs is not None
    assert len(kapture_query_data.rigs) >= 1
    if len(rig_ids) == 0:
        rig_ids = get_top_level_rig_ids(kapture_query_data.rigs)

    final_camera_list = get_all_cameras_from_rig_ids(
        rig_ids, kapture_query_data.sensors, kapture_query_data.rigs)
    assert len(final_camera_list) > 0

    if kapture_query_data.trajectories:
        logger.warning(
            "Input query data contains trajectories: they will be ignored")
        kapture_query_data.trajectories.clear()

    os.umask(0o002)
    os.makedirs(output_path, exist_ok=True)
    delete_existing_kapture_files(output_path, force_erase=force)

    # load pairsfile
    pairs = {}
    with open(pairsfile_path, 'r') as fid:
        table = kapture.io.csv.table_from_file(fid)
        for img_query, img_map, _ in table:
            if img_query not in pairs:
                pairs[img_query] = []
            pairs[img_query].append(img_map)
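    # pairs now maps each query image to its retrieved map images, e.g.
    # (hypothetical names) {'query/0001.jpg': ['map/0042.jpg', 'map/0137.jpg']}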

    kapture_data.matches[keypoints_type].normalize()
    keypoints_filepaths = keypoints_to_filepaths(
        kapture_data.keypoints[keypoints_type], keypoints_type, kapture_path,
        tar_handlers)
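    # keep only the observations made with the selected keypoints type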
    obs_for_keypoints_type = {
        point_id: per_keypoints_type_subdict[keypoints_type]
        for point_id, per_keypoints_type_subdict in
        kapture_data.observations.items()
        if keypoints_type in per_keypoints_type_subdict
    }
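    # invert the observations: (image_name, keypoint_id) -> 3D point id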
    point_id_from_obs = {
        (img_name, kp_id): point_id
        for point_id in obs_for_keypoints_type.keys()
        for img_name, kp_id in obs_for_keypoints_type[point_id]
    }
    timestamps = list(kapture_query_data.records_camera.keys())

    # kapture for localized images + pose
    trajectories = kapture.Trajectories()
    progress_bar = tqdm(total=len(timestamps),
                        disable=logging.getLogger().level >= logging.CRITICAL)
    for timestamp in timestamps:
        for rig_id in final_camera_list.keys():
            # with S number of sensors
            # N number of correspondences
            # points2D - SxNx2 array with pixel coordinates
            # points3D - SxNx3 array with world coordinates
            # tvec - Sx3 array with rig relative translations
            # qvec - Sx4 array with rig relative quaternions
            # cameras_dict - array of dict of length S
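            # e.g. (hypothetical sizes) a rig of S=2 cameras with 150 and 80
            # correspondences gives points2D = [150x2 pixels, 80x2 pixels],
            # points3D = [150x3 coords, 80x3 coords], while qvec/tvec hold the
            # two rig-to-sensor rotations/translations in the same order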
            points2D = []
            points3D = []
            tvec = []
            qvec = []
            cameras_dict = []
            cameras = []  # Sx2 array for reproj error
            stats = []
            for sensor_id, relative_pose in final_camera_list[rig_id].items():
                if (timestamp,
                        sensor_id) not in kapture_query_data.records_camera:
                    continue
                img_query = kapture_query_data.records_camera[(timestamp,
                                                               sensor_id)]
                if img_query not in pairs:
                    continue
                keypoints_filepath = keypoints_filepaths[img_query]
                kapture_keypoints_query = image_keypoints_from_file(
                    filepath=keypoints_filepath,
                    dsize=kapture_data.keypoints[keypoints_type].dsize,
                    dtype=kapture_data.keypoints[keypoints_type].dtype)

                tvec.append(relative_pose.t_raw)
                qvec.append(relative_pose.r_raw)

                col_cam_id, width, height, params, _ = get_colmap_camera(
                    kapture_query_data.sensors[sensor_id])
                cameras_dict.append({
                    'model': CAMERA_MODEL_NAMES[col_cam_id],
                    'width': int(width),
                    'height': int(height),
                    'params': params
                })
                points2D_it, _, points3D_it, stats_it = get_correspondences(
                    kapture_data, keypoints_type, kapture_path, tar_handlers,
                    img_query, pairs[img_query], point_id_from_obs,
                    kapture_keypoints_query, None, duplicate_strategy,
                    rerank_strategy)

                if write_detailed_report:
                    cameras.append(kapture_query_data.sensors[sensor_id])
                    stats.append(stats_it)
                points2D.append(points2D_it)
                points3D.append(points3D_it)

            if len(cameras_dict) == 0:
                progress_bar and progress_bar.update(1)
                continue

            # compute absolute pose
            # max_error - RANSAC inlier threshold in pixels
            # ret - dictionary containing the RANSAC output
            ret = pycolmap.rig_absolute_pose_estimation(
                points2D, points3D, cameras_dict, qvec, tvec, max_error,
                min_inlier_ratio, min_num_iterations, max_num_iterations,
                confidence)
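            # as used below, ret is expected to provide 'success', 'qvec',
            # 'tvec', 'num_inliers' and a flat 'inliers' mask over the
            # concatenated correspondences of all cameras of the rig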

            # add pose to output kapture
            if ret['success'] and ret['num_inliers'] > 0:
                pose = kapture.PoseTransform(ret['qvec'], ret['tvec'])
                trajectories[timestamp, rig_id] = pose

                if write_detailed_report:
                    points2D_final = []
                    camera_params = []
                    for points2D_it, query_cam in zip(points2D, cameras):
                        num_2dpoints = len(points2D_it)
                        points2D_final_it, K, distortion = get_camera_matrix_from_kapture(
                            np.array(points2D_it, dtype=np.float64), query_cam)
                        points2D_final_it = list(
                            points2D_final_it.reshape((num_2dpoints, 2)))
                        points2D_final.append(points2D_final_it)
                        camera_params.append((K, distortion))
                    num_correspondences = [
                        len(points2D_it) for points2D_it in points2D
                    ]
                    # convert the flat inlier mask ret['inliers'] into
                    # per-camera lists of local keypoint indices
                    indexes_flat = [
                        i for i, points2D_it in enumerate(points2D)
                        for _ in points2D_it
                    ]

                    inliers = [[] for _ in range(len(points2D))]
                    for i, (is_inlier, cam_index) in enumerate(
                            zip(ret['inliers'], indexes_flat)):
                        if is_inlier:
                            inliers[cam_index].append(i)
                    cumulative_len_correspondences = []
                    s = 0
                    for num_correspondences_it in num_correspondences:
                        cumulative_len_correspondences.append(s)
                        s += num_correspondences_it
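                    # e.g. (hypothetical sizes) with num_correspondences = [3, 2]
                    # the offsets are [0, 3], so a flat inlier index 4, which
                    # belongs to the second camera, becomes local index 4 - 3 = 1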
                    inliers = [[
                        v - cumulative_len_correspondences[i]
                        for v in inliers[i]
                    ] for i in range(len(inliers))]
                    num_inliers = [len(inliers_it) for inliers_it in inliers]

                    per_image_reprojection_error = []
                    for tvec_it, qvec_it, points2D_it, points3D_it, inliers_it, camera_params_it in zip(
                            tvec, qvec, points2D_final, points3D, inliers,
                            camera_params):
                        if len(inliers_it) == 0:
                            per_image_reprojection_error.append(np.nan)
                        else:
                            pose_relative_it = kapture.PoseTransform(
                                r=qvec_it, t=tvec_it)  # rig to sensor
                            # pose = world to rig
                            pose_it = kapture.PoseTransform.compose(
                                [pose_relative_it, pose])  # world to sensor
                            reprojection_error = compute_reprojection_error(
                                pose_it, len(inliers_it), inliers_it,
                                points2D_it, points3D_it, camera_params_it[0],
                                camera_params_it[1])
                            per_image_reprojection_error.append(
                                reprojection_error)

                    cache = {
                        "num_correspondences": num_correspondences,
                        "num_inliers": num_inliers,
                        "inliers": inliers,
                        "reprojection_error": per_image_reprojection_error,
                        "stats": stats
                    }
                    cache_path = os.path.join(
                        output_path, f'pycolmap_rig_cache/{timestamp}.json')
                    save_to_json(cache, cache_path)
        progress_bar and progress_bar.update(1)
    progress_bar and progress_bar.close()

    # save output kapture
    if apply_rigs_remove:
        rigs_remove_inplace(trajectories, kapture_query_data.rigs)
    kapture_query_data.trajectories = trajectories
    kapture.io.csv.kapture_to_dir(output_path, kapture_query_data)
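

# Hedged usage sketch (not from the repository): the paths and parameter
# values below are assumptions chosen for illustration only.
#
#   pycolmap_rig_localize_from_loaded_data(
#       kapture_data=kdata_map,            # mapping data with points3d/observations
#       kapture_path='./mapping',
#       tar_handlers=tar_handlers,
#       kapture_query_data=kdata_query,    # query data with rigs defined
#       output_path='./localized_rig',
#       pairsfile_path='./pairs.txt',
#       rig_ids=[],                        # empty -> use all top-level rigs
#       apply_rigs_remove=False,
#       max_error=8.0, min_inlier_ratio=0.01,
#       min_num_iterations=1000, max_num_iterations=100000, confidence=0.9999,
#       keypoints_type=None,               # None -> use the only available type
#       duplicate_strategy=duplicate_strategy,
#       rerank_strategy=rerank_strategy,
#       write_detailed_report=False,
#       force=False)
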
def colmap_build_map_from_loaded_data(kapture_data: kapture.Kapture,
                                      kapture_path: str,
                                      tar_handlers: Optional[TarCollection],
                                      colmap_path: str, colmap_binary: str,
                                      keypoints_type: Optional[str],
                                      use_colmap_matches_importer: bool,
                                      point_triangulator_options: List[str],
                                      skip_list: List[str],
                                      force: bool) -> None:
    """
    Build a colmap model using custom features with the kapture data.

    :param kapture_data: kapture data to use
    :param kapture_path: path to the kapture to use
    :param tar_handlers: collection of preloaded tar archives
    :param colmap_path: path to the colmap build
    :param colmap_binary: path to the colmap executable
    :param keypoints_type: type of keypoints, name of the keypoints subfolder
    :param use_colmap_matches_importer: if True, run the colmap matches_importer for
        geometric verification instead of exporting kapture matches as verified two-view geometry
    :param point_triangulator_options: options for the point triangulator
    :param skip_list: list of steps to skip
    :param force: Silently overwrite kapture files if they already exist.
    """
    os.makedirs(colmap_path, exist_ok=True)

    if not (kapture_data.records_camera and kapture_data.sensors
            and kapture_data.keypoints and kapture_data.matches):
        raise ValueError(
            'records_camera, sensors, keypoints, matches are mandatory')
    if not kapture_data.trajectories:
        logger.info(
            'there are no trajectories, running mapper instead of point_triangulator'
        )

    # COLMAP does not fully support rigs.
    if kapture_data.rigs is not None and kapture_data.trajectories is not None:
        # make sure, rigs are not used in trajectories.
        logger.info('remove rigs notation.')
        rigs_remove_inplace(kapture_data.trajectories, kapture_data.rigs)
        kapture_data.rigs.clear()

    # Set fixed name for COLMAP database
    colmap_db_path = path.join(colmap_path, 'colmap.db')
    reconstruction_path = path.join(colmap_path, "reconstruction")
    priors_txt_path = path.join(colmap_path, "priors_for_reconstruction")
    if 'delete_existing' not in skip_list:
        safe_remove_file(colmap_db_path, force)
        safe_remove_any_path(reconstruction_path, force)
        safe_remove_any_path(priors_txt_path, force)
    os.makedirs(reconstruction_path, exist_ok=True)

    if 'colmap_db' not in skip_list:
        logger.info('Using precomputed keypoints and matches')
        logger.info('Step 1: Export kapture format to colmap')

        colmap_db = COLMAPDatabase.connect(colmap_db_path)
        if kapture_data.descriptors is not None:
            kapture_data.descriptors.clear()

        if keypoints_type is None:
            keypoints_type = try_get_only_key_from_collection(
                kapture_data.keypoints)
        assert keypoints_type is not None
        assert keypoints_type in kapture_data.keypoints
        assert keypoints_type in kapture_data.matches

        database_extra.kapture_to_colmap(
            kapture_data,
            kapture_path,
            tar_handlers,
            colmap_db,
            keypoints_type,
            None,
            export_two_view_geometry=not use_colmap_matches_importer)
        # close db before running colmap processes in order to avoid locks
        colmap_db.close()

        if use_colmap_matches_importer:
            logger.info('Step 2: Run geometric verification')
            logger.debug('running colmap matches_importer...')
            colmap_lib.run_matches_importer_from_kapture_matches(
                colmap_binary,
                colmap_use_cpu=True,
                colmap_gpu_index=None,
                colmap_db_path=colmap_db_path,
                kapture_matches=kapture_data.matches[keypoints_type],
                force=force)
        else:
            logger.info('Step 2: Run geometric verification - skipped')

    if kapture_data.trajectories is not None:
        # Generate priors for reconstruction
        os.makedirs(priors_txt_path, exist_ok=True)
        if 'priors_for_reconstruction' not in skip_list:
            logger.info('Step 3: Exporting priors for reconstruction.')
            colmap_db = COLMAPDatabase.connect(colmap_db_path)
            database_extra.generate_priors_for_reconstruction(
                kapture_data, colmap_db, priors_txt_path)
            colmap_db.close()

        # Point triangulator
        reconstruction_path = path.join(colmap_path, "reconstruction")
        os.makedirs(reconstruction_path, exist_ok=True)
        if 'triangulation' not in skip_list:
            logger.info("Step 4: Triangulation")
            colmap_lib.run_point_triangulator(colmap_binary, colmap_db_path,
                                              get_image_fullpath(kapture_path),
                                              priors_txt_path,
                                              reconstruction_path,
                                              point_triangulator_options)
    else:
        # mapper
        reconstruction_path = path.join(colmap_path, "reconstruction")
        os.makedirs(reconstruction_path, exist_ok=True)
        if 'triangulation' not in skip_list:
            logger.info("Step 4: Triangulation")
            colmap_lib.run_mapper(colmap_binary, colmap_db_path,
                                  get_image_fullpath(kapture_path), None,
                                  reconstruction_path,
                                  point_triangulator_options)
            # use reconstruction 0 as main
            first_reconstruction = os.path.join(reconstruction_path, '0')
            files = os.listdir(first_reconstruction)
            for f in files:
                shutil.move(os.path.join(first_reconstruction, f),
                            os.path.join(reconstruction_path, f))
            shutil.rmtree(first_reconstruction)

    # run model_converter
    if 'model_converter' not in skip_list:
        logger.info("Step 5: Export reconstruction results to txt")
        colmap_lib.run_model_converter(colmap_binary, reconstruction_path,
                                       reconstruction_path)
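

# Hedged usage sketch (not from the repository): the paths and options below
# are assumptions chosen for illustration only.
#
#   colmap_build_map_from_loaded_data(
#       kapture_data=kdata_map,
#       kapture_path='./mapping',
#       tar_handlers=tar_handlers,
#       colmap_path='./colmap_map',
#       colmap_binary='colmap',
#       keypoints_type=None,                 # None -> use the only available type
#       use_colmap_matches_importer=False,   # export kapture matches as verified
#       point_triangulator_options=[],
#       skip_list=[],
#       force=False)
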
def local_sfm_from_loaded_data(kdata_map: kapture.Kapture,
                               kdata_map_gv: kapture.Kapture,
                               kdata_query: kapture.Kapture,
                               map_plus_query_path: str,
                               map_plus_query_gv_path: str,
                               tar_handlers_map: Optional[TarCollection],
                               tar_handlers_map_gv: Optional[TarCollection],
                               descriptors_type: Optional[str],
                               pairsfile_path: str,
                               output_path_root: str,
                               colmap_binary: str,
                               force: bool):
    """
    Localize query images in a COLMAP model built from topk retrieved images.

    :param kdata_map: loaded kapture data consisting of mapping and query data (sensors and reconstruction)
    :param kdata_map_gv: the same loaded kapture data after geometric verification
    :param kdata_query: loaded query kapture data (sensors)
    :param map_plus_query_path: path to the kapture data consisting of mapping and query data (sensors and reconstruction)
    :param map_plus_query_gv_path: path to the same kapture data after geometric verification (sensors and reconstruction)
    :param tar_handlers_map: collection of pre-opened tar archives for the mapping data
    :param tar_handlers_map_gv: collection of pre-opened tar archives for the geometrically verified mapping data
    :param descriptors_type: type of descriptors, name of the descriptors subfolder
    :param pairsfile_path: path to the pairsfile that contains the topk retrieved mapping images for each query image
    :param output_path_root: root path where outputs should be stored
    :param colmap_binary: path to the COLMAP binary
    :param force: silently overwrite already existing results
    """

    # the query kapture is already loaded (we reuse its sensor_ids etc.); drop any input trajectories
    if kdata_query.trajectories:
        logger.warning("Query data contains trajectories: they will be ignored")
        kdata_query.trajectories.clear()
    else:
        kdata_query.trajectories = kapture.Trajectories()

    # clear query trajectories in map_plus_query
    kdata_map_cleared_trajectories = kapture.Trajectories()
    query_image_list = set(kdata_query.records_camera.data_list())
    for timestamp, subdict in kdata_map.records_camera.items():
        for sensor_id, image_name in subdict.items():
            if image_name in query_image_list:
                continue
            if (timestamp, sensor_id) in kdata_map.trajectories:
                pose = kdata_map.trajectories.get(timestamp)[sensor_id]
                kdata_map_cleared_trajectories.setdefault(timestamp, {})[sensor_id] = pose
    kdata_map.trajectories = kdata_map_cleared_trajectories

    # load output kapture
    output_path = os.path.join(output_path_root, 'localized')
    if os.path.exists(os.path.join(output_path, 'sensors/trajectories.txt')):
        kdata_output = kapture_from_dir(output_path)
        if kdata_query.records_camera == kdata_output.records_camera and len(
                kdata_output.trajectories) != 0 and not force:
            kdata_query.trajectories = kdata_output.trajectories

    if kdata_map.rigs is not None:
        rigs_remove_inplace(kdata_map.trajectories, kdata_map.rigs)
    if kdata_map_gv.rigs is not None:
        rigs_remove_inplace(kdata_map_gv.trajectories, kdata_map_gv.rigs)

    # load pairsfile
    pairs = {}
    with open(pairsfile_path, 'r') as fid:
        table = table_from_file(fid)
        for img_query, img_map, _ in table:
            if img_query not in pairs:
                pairs[img_query] = []
            pairs[img_query].append(img_map)

    kdata_sub_colmap_path = os.path.join(output_path_root, 'colmap')
    kdata_reg_query_path = os.path.join(output_path_root, 'query_registered')
    sub_kapture_pairsfile_path = os.path.join(output_path_root, 'tmp_pairs.txt')

    if descriptors_type is None:
        descriptors_type = try_get_only_key_from_collection(kdata_map.descriptors)
    assert descriptors_type is not None
    assert descriptors_type in kdata_map.descriptors
    keypoints_type = kdata_map.descriptors[descriptors_type].keypoints_type

    # init matches for kdata_map and kdata_map_gv
    if kdata_map.matches is None:
        kdata_map.matches = {}
    if keypoints_type not in kdata_map.matches:
        kdata_map.matches[keypoints_type] = kapture.Matches()
    if kdata_map_gv.matches is None:
        kdata_map_gv.matches = {}
    if keypoints_type not in kdata_map_gv.matches:
        kdata_map_gv.matches[keypoints_type] = kapture.Matches()

    # run all matching
    # loop over query images
    img_skip_list = set()
    for img_query, img_list_map in pairs.items():
        if pose_found(kdata_query, img_query):
            logger.info(f'{img_query} already processed, skipping...')
            img_skip_list.add(img_query)
            continue
        else:
            map_pairs = get_pairfile_from_img_list(img_list_map)
            query_pairs = get_pairfile_img_vs_img_list(img_query, img_list_map)
            with open(sub_kapture_pairsfile_path, 'w') as fid:
                logger.info(f'matching for {img_query}')
                table_to_file(fid, map_pairs)
                table_to_file(fid, query_pairs)

            pairs_all = map_pairs + query_pairs
            pairs_all = [(i, j) for i, j, _ in pairs_all]
            # match missing pairs
            # kdata_map.matches is being updated by compute_matches_from_loaded_data
            compute_matches_from_loaded_data(map_plus_query_path,
                                             tar_handlers_map,
                                             kdata_map,
                                             descriptors_type,
                                             pairs_all)

    # if kdata_map has matches in a tar, they need to be switched to read mode
    matches_handler = retrieve_tar_handler_from_collection(kapture.Matches, keypoints_type, tar_handlers_map)
    if matches_handler is not None:
        matches_handler.close()
        tarfile_path = get_feature_tar_fullpath(kapture.Matches, keypoints_type, map_plus_query_path)
        tar_handlers_map.matches[keypoints_type] = TarHandler(tarfile_path, 'r')

    # run all gv
    # loop over query images
    for img_query, img_list_map in pairs.items():
        if img_query in img_skip_list:
            continue
        else:
            # recompute the pairs
            map_pairs = get_pairfile_from_img_list(img_list_map)
            query_pairs = get_pairfile_img_vs_img_list(img_query, img_list_map)
            with open(sub_kapture_pairsfile_path, 'w') as fid:
                logger.info(f'geometric verification of {img_query}')
                table_to_file(fid, map_pairs)
                table_to_file(fid, query_pairs)

            pairs_all = map_pairs + query_pairs
            pairs_all = [(i, j) for i, j, _ in pairs_all]

            if all(pair in kdata_map_gv.matches[keypoints_type] for pair in pairs_all):
                continue

            # create a sub kapture in order to minimize the amount of data exported to colmap
            # kdata_sub needs to be re-created to add the new matches
            kdata_sub = sub_kapture_from_img_list(kdata_map, img_list_map + [img_query], pairs_all,
                                                  keypoints_type, descriptors_type)

            kdata_sub_gv = sub_kapture_from_img_list(kdata_map_gv, img_list_map + [img_query], pairs_all,
                                                     keypoints_type, descriptors_type)
            # run colmap gv on missing pairs
            run_colmap_gv_from_loaded_data(kdata_sub,
                                           kdata_sub_gv,
                                           map_plus_query_path,
                                           map_plus_query_gv_path,
                                           tar_handlers_map,
                                           tar_handlers_map_gv,
                                           colmap_binary,
                                           keypoints_type,
                                           [],
                                           True)
            # update kdata_map_gv.matches
            kdata_map_gv.matches[keypoints_type].update(kdata_sub_gv.matches[keypoints_type])

    # if kdata_map_gv has matches in a tar, they need to be switched to read mode
    matches_gv_handler = retrieve_tar_handler_from_collection(kapture.Matches, keypoints_type, tar_handlers_map_gv)
    if matches_gv_handler is not None:
        logger.debug(matches_gv_handler)
        matches_gv_handler.close()
        tarfile_path = get_feature_tar_fullpath(kapture.Matches, keypoints_type, map_plus_query_gv_path)
        tar_handlers_map_gv.matches[keypoints_type] = TarHandler(tarfile_path, 'r')

    # loop over query images
    for img_query, img_list_map in pairs.items():
        if img_query in img_skip_list:
            continue
        else:
            map_pairs = get_pairfile_from_img_list(img_list_map)
            with open(sub_kapture_pairsfile_path, 'w') as fid:
                logger.info(f'mapping and localization for {img_query}')
                table_to_file(fid, map_pairs)
            map_pairs = [(i, j) for i, j, _ in map_pairs]
            kdata_sub_gv = sub_kapture_from_img_list(kdata_map_gv, img_list_map, map_pairs,
                                                     keypoints_type, descriptors_type)
            # sanity check
            if len(map_pairs) != len(kdata_sub_gv.matches[keypoints_type]):
                logger.info(f'not all mapping matches available for {img_query}')

            # build COLMAP map
            try:
                colmap_build_map_from_loaded_data(
                    kdata_sub_gv,
                    map_plus_query_gv_path,
                    tar_handlers_map_gv,
                    kdata_sub_colmap_path,
                    colmap_binary,
                    keypoints_type,
                    False,
                    [],
                    ['model_converter'],
                    True)
            except ValueError:
                logger.info(f'{img_query} was not localized')
                continue

        if not os.path.exists(os.path.join(kdata_sub_colmap_path, 'reconstruction/images.bin')):
            logger.info(f'colmap mapping for {img_query} did not work, image was not localized')
            continue

        query_pairs = get_pairfile_img_vs_img_list(img_query, img_list_map)
        with open(sub_kapture_pairsfile_path, 'w') as fid:
            table_to_file(fid, query_pairs)
        query_pairs = [(i, j) for i, j, _ in query_pairs]
        query_img_kapture_gv = add_image_to_kapture(kdata_map_gv,
                                                    kdata_sub_gv, img_query, query_pairs,
                                                    keypoints_type, descriptors_type)
        # sanity check
        if len(query_pairs) != len(query_img_kapture_gv.matches[keypoints_type]):
            logger.info(f'not all query matches available for {img_query}')

        # localize in COLMAP map
        try:
            colmap_localize_from_loaded_data(
                query_img_kapture_gv,
                map_plus_query_gv_path,
                tar_handlers_map_gv,
                os.path.join(kdata_sub_colmap_path, 'registered'),
                os.path.join(kdata_sub_colmap_path, 'colmap.db'),
                os.path.join(kdata_sub_colmap_path, 'reconstruction'),
                colmap_binary,
                keypoints_type,
                False,
                ['--Mapper.ba_refine_focal_length', '0',
                 '--Mapper.ba_refine_principal_point', '0',
                 '--Mapper.ba_refine_extra_params', '0',
                 '--Mapper.min_num_matches', '4',
                 '--Mapper.init_min_num_inliers', '4',
                 '--Mapper.abs_pose_min_num_inliers', '4',
                 '--Mapper.abs_pose_min_inlier_ratio', '0.05',
                 '--Mapper.ba_local_max_num_iterations', '50',
                 '--Mapper.abs_pose_max_error', '20',
                 '--Mapper.filter_max_reproj_error', '12'],
                [],
                True)
        except ValueError:
            logger.info(f'{img_query} was not localized')
            continue

        if not os.path.exists(os.path.join(os.path.join(kdata_sub_colmap_path, 'registered'),
                                           'reconstruction/images.txt')):
            logger.info(f'colmap localization of {img_query} did not work, image was not localized')
            continue

        # add to results kapture
        kdata_reg_query = import_colmap(
            kdata_reg_query_path,
            os.path.join(os.path.join(kdata_sub_colmap_path, 'registered'), 'colmap.db'),
            os.path.join(os.path.join(kdata_sub_colmap_path, 'registered'),
                         'reconstruction'),
            None,
            None,
            True,
            True,
            True,
            TransferAction.skip)

        if add_pose_to_query_kapture(kdata_reg_query, kdata_query, img_query):
            logger.info('successfully localized')

        # write results (after each image to see the progress)
        kapture_to_dir(output_path, kdata_query)

    # clean up (e.g. remove temporary files and folders)
    safe_remove_any_path(kdata_sub_colmap_path, True)
    safe_remove_any_path(kdata_reg_query_path, True)
    safe_remove_file(sub_kapture_pairsfile_path, True)

    logger.info('all done')
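

# Hedged usage sketch (not from the repository): the paths below are
# assumptions chosen for illustration only.
#
#   local_sfm_from_loaded_data(
#       kdata_map, kdata_map_gv, kdata_query,
#       map_plus_query_path='./map_plus_query',
#       map_plus_query_gv_path='./map_plus_query_gv',
#       tar_handlers_map=tar_handlers_map,
#       tar_handlers_map_gv=tar_handlers_map_gv,
#       descriptors_type=None,        # None -> use the only available type
#       pairsfile_path='./pairs_topk.txt',
#       output_path_root='./local_sfm_output',
#       colmap_binary='colmap',
#       force=False)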