def localize(self, points2D_all, points2D_idxs, points3D_id, query_camera):
    # Gather the matched 2D keypoints and their corresponding 3D points.
    points2D = points2D_all[points2D_idxs]
    points3D = [self.reconstruction.points3D[j].xyz for j in points3D_id]
    # Estimate the absolute camera pose with RANSAC followed by refinement.
    ret = pycolmap.absolute_pose_estimation(
        points2D, points3D, query_camera,
        estimation_options=self.config.get('estimation', {}),
        refinement_options=self.config.get('refinement', {}),
    )
    return ret
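For context, a minimal sketch of the kind of config dict this method reads; the nested 'estimation'/'refinement' keys map onto pycolmap's RANSAC and refinement options, and the option names and values below are illustrative and version-dependent.

# Illustrative config for localize() above; 'max_error' is the RANSAC
# inlier threshold in pixels (exact option names depend on the pycolmap version).
config = {
    'estimation': {'ransac': {'max_error': 12.0}},
    'refinement': {'refine_focal_length': False},
}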
Example #2
def pose_from_cluster(dataset_dir,
                      q,
                      retrieved,
                      feature_file,
                      match_file,
                      skip=None):
    height, width = cv2.imread(str(dataset_dir / q)).shape[:2]
    cx = .5 * width
    cy = .5 * height
    # Approximate focal length in pixels from a 28 mm (35 mm-equivalent)
    # lens: image_width_px * focal_mm / sensor_width_mm.
    focal_length = 4032. * 28. / 36.

    all_mkpq = []
    all_mkpr = []
    all_mkp3d = []
    all_indices = []
    kpq = feature_file[q]['keypoints'].__array__()
    num_matches = 0

    for i, r in enumerate(retrieved):
        kpr = feature_file[r]['keypoints'].__array__()
        pair = names_to_pair(q, r)
        m = match_file[pair]['matches0'].__array__()
        v = (m > -1)

        if skip and (np.count_nonzero(v) < skip):
            continue

        mkpq, mkpr = kpq[v], kpr[m[v]]
        num_matches += len(mkpq)

        # Lift the matched reference keypoints to 3D via the scan, then
        # transform them from scan to world coordinates.
        scan_r = loadmat(Path(dataset_dir, r + '.mat'))["XYZcut"]
        mkp3d, valid = interpolate_scan(scan_r, mkpr)
        Tr = get_scan_pose(dataset_dir, r)
        mkp3d = (Tr[:3, :3] @ mkp3d.T + Tr[:3, -1:]).T

        all_mkpq.append(mkpq[valid])
        all_mkpr.append(mkpr[valid])
        all_mkp3d.append(mkp3d[valid])
        all_indices.append(np.full(np.count_nonzero(valid), i))

    all_mkpq = np.concatenate(all_mkpq, 0)
    all_mkpr = np.concatenate(all_mkpr, 0)
    all_mkp3d = np.concatenate(all_mkp3d, 0)
    all_indices = np.concatenate(all_indices, 0)

    cfg = {
        'model': 'SIMPLE_PINHOLE',
        'width': width,
        'height': height,
        'params': [focal_length, cx, cy]
    }
    # 48 px RANSAC inlier threshold.
    ret = pycolmap.absolute_pose_estimation(all_mkpq, all_mkp3d, cfg, 48.00)
    ret['cfg'] = cfg
    return ret, all_mkpq, all_mkpr, all_mkp3d, all_indices, num_matches
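Example #3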
def estimate_absolute_pose(kpts_2d, kpts_3d, K, thresh):
    if len(kpts_2d) < 4:
        return None
    cfg = {
        'model': 'SIMPLE_PINHOLE',
        'width': 640,
        'height': 480,
        'params': [K[0, 0], K[0, 2], K[1, 2]]
    }
    ret = pycolmap.absolute_pose_estimation(kpts_2d, kpts_3d, cfg, thresh)
    if not ret['success']:
        return None
    # pycolmap returns a world-to-camera pose as qvec (w, x, y, z) + tvec.
    qw, qx, qy, qz = ret['qvec']
    R = sRot.from_quat([qx, qy, qz, qw]).as_matrix()  # scipy expects (x, y, z, w)
    t = ret['tvec']
    # Invert to obtain the camera-to-world transform.
    T = np.eye(4)
    T[:3, :3] = R
    T[:3, 3] = t
    T = np.linalg.inv(T)
    R, t = T[:3, :3], T[:3, 3]
    return R, t, np.array(ret['inliers'])


def estimate_absolute_pose_cv2(kpts_2d, kpts_3d, K, thresh):
    # Variant of the estimator above using OpenCV's solvePnPRansac; in the
    # original snippet this body was unreachable code after the return
    # (the function split and its name are ours).
    if len(kpts_2d) < 4:
        return None
    kpts_2d = kpts_2d.astype(np.float32).reshape((-1, 1, 2))
    kpts_3d = kpts_3d.astype(np.float32).reshape((-1, 1, 3))
    success, R_vec, t, inlier_idx = cv2.solvePnPRansac(
        kpts_3d,
        kpts_2d,
        K,
        np.array([0., 0, 0, 0]),
        iterationsCount=5000,
        reprojectionError=thresh,
        flags=cv2.SOLVEPNP_P3P)
    if success:
        inliers = np.zeros(len(kpts_2d), bool)
        inliers[inlier_idx[:, 0]] = True

        # Refine on all inliers, seeded with the RANSAC pose.
        ret, R_vec, t = cv2.solvePnP(kpts_3d[inliers],
                                     kpts_2d[inliers],
                                     K,
                                     np.array([0., 0, 0, 0]),
                                     rvec=R_vec,
                                     tvec=t,
                                     useExtrinsicGuess=True,
                                     flags=cv2.SOLVEPNP_ITERATIVE)
        assert ret

        query_T_w = np.eye(4)
        query_T_w[:3, :3] = cv2.Rodrigues(R_vec)[0]
        query_T_w[:3, 3] = t[:, 0]
        w_T_query = np.linalg.inv(query_T_w)
        ret = (w_T_query[:3, :3], w_T_query[:3, 3], inliers)
    else:
        ret = None

    return ret
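A quick usage sketch for either estimator above; the intrinsics and threshold are illustrative, and note that only K[0, 0], K[0, 2] and K[1, 2] are consumed by the SIMPLE_PINHOLE config.

import numpy as np

# Illustrative pinhole intrinsics for a 640x480 image; kpts_2d / kpts_3d stand
# for matched Nx2 / Nx3 arrays produced by the matching stage.
K = np.array([[500.0,   0.0, 320.0],
              [  0.0, 500.0, 240.0],
              [  0.0,   0.0,   1.0]])
result = estimate_absolute_pose(kpts_2d, kpts_3d, K, thresh=12.0)
if result is not None:
    R, t, inliers = result  # camera-to-world rotation, translation, inlier mask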
Example #4
def pose_from_cluster(qname, qinfo, db_ids, db_images, points3D, feature_file,
                      match_file, thresh):
    kpq = feature_file[qname]['keypoints'].__array__()
    kp_idx_to_3D = defaultdict(list)
    kp_idx_to_3D_to_db = defaultdict(lambda: defaultdict(list))
    num_matches = 0

    for i, db_id in enumerate(db_ids):
        db_name = db_images[db_id].name
        points3D_ids = db_images[db_id].point3D_ids

        pair = names_to_pair(qname, db_name)
        matches = match_file.get(pair)
        if matches is not None:
            matches = matches['matches0'].__array__()
            valid = np.where(matches > -1)[0]
            valid = valid[points3D_ids[matches[valid]] != -1]
            num_matches += len(valid)

            for idx in valid:
                id_3D = points3D_ids[matches[idx]]
                kp_idx_to_3D_to_db[idx][id_3D].append(i)
                # avoid duplicate observations
                if id_3D not in kp_idx_to_3D[idx]:
                    kp_idx_to_3D[idx].append(id_3D)

    idxs = list(kp_idx_to_3D.keys())
    mkp_idxs = [i for i in idxs for _ in kp_idx_to_3D[i]]
    mkpq = kpq[mkp_idxs]
    mkpq += 0.5  # shift to COLMAP's pixel-center coordinate convention

    mp3d_ids = [j for i in idxs for j in kp_idx_to_3D[i]]
    mp3d = [points3D[j].xyz for j in mp3d_ids]
    mp3d = np.array(mp3d).reshape(-1, 3)

    # mostly for logging and post-processing
    mkp_to_3D_to_db = [(j, kp_idx_to_3D_to_db[i][j]) for i in idxs
                       for j in kp_idx_to_3D[i]]

    camera_model, width, height, params = qinfo
    cfg = {
        'model': camera_model,
        'width': width,
        'height': height,
        'params': params,
    }
    ret = pycolmap.absolute_pose_estimation(mkpq, mp3d, cfg, thresh)
    ret['cfg'] = cfg
    return ret, mkpq, mp3d, mp3d_ids, num_matches, (mkp_idxs, mkp_to_3D_to_db)
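To make the bookkeeping above concrete, a toy trace with hypothetical values showing how the keypoint-to-3D mapping flattens into one 2D-3D correspondence per (keypoint, 3D point) pair:

from collections import defaultdict

# Toy values: query keypoint 7 matched 3D point 42 via db images 0 and 3,
# and 3D point 13 via db image 1.
kp_idx_to_3D = defaultdict(list, {7: [42, 13]})
idxs = list(kp_idx_to_3D.keys())
mkp_idxs = [i for i in idxs for _ in kp_idx_to_3D[i]]   # [7, 7]
mp3d_ids = [j for i in idxs for j in kp_idx_to_3D[i]]   # [42, 13]
assert mkp_idxs == [7, 7] and mp3d_ids == [42, 13]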
Example #5
def pycolmap_localize_from_loaded_data(
        kapture_data: kapture.Kapture, kapture_path: str,
        tar_handlers: TarCollection, kapture_query_data: kapture.Kapture,
        output_path: str, pairsfile_path: str, max_error: float,
        min_inlier_ratio: float, min_num_iterations: int,
        max_num_iterations: int, confidence: float,
        keypoints_type: Optional[str],
        duplicate_strategy: DuplicateCorrespondencesStrategy,
        rerank_strategy: RerankCorrespondencesStrategy,
        write_detailed_report: bool, force: bool) -> None:
    """
    Localize images using pycolmap.

    :param kapture_data: loaded kapture data (incl. points3d)
    :param kapture_path: path to the kapture to use
    :param tar_handlers: collection of pre-opened tar archives
    :param kapture_query_data: loaded kapture data (query images)
    :param output_path: path to write the localization results
    :param pairsfile_path: pairs to use
    :param max_error: RANSAC inlier threshold in pixels
    :param min_inlier_ratio: abs_pose_options.ransac_options.min_inlier_ratio
    :param min_num_iterations: abs_pose_options.ransac_options.min_num_trials
    :param max_num_iterations: abs_pose_options.ransac_options.max_num_trials
    :param confidence: abs_pose_options.ransac_options.confidence
    :param keypoints_type: types of keypoints (and observations) to use
    :param duplicate_strategy: strategy to handle duplicate correspondences (either kpt_id and/or pt3d_id)
    :param rerank_strategy: strategy to reorder pairs before handling duplicate correspondences
    :param write_detailed_report: if True, write a json file with inliers and reprojection error for each query
    :param force: silently overwrite kapture files if they already exist
    """
    assert has_pycolmap
    if not (kapture_data.records_camera and kapture_data.sensors
            and kapture_data.keypoints and kapture_data.matches
            and kapture_data.points3d and kapture_data.observations):
        raise ValueError('records_camera, sensors, keypoints, matches, '
                         'points3d, observations are mandatory for map+query')

    if not (kapture_query_data.records_camera and kapture_query_data.sensors):
        raise ValueError('records_camera, sensors are mandatory for query')

    if keypoints_type is None:
        keypoints_type = try_get_only_key_from_collection(
            kapture_data.keypoints)
    assert keypoints_type is not None
    assert keypoints_type in kapture_data.keypoints
    assert keypoints_type in kapture_data.matches

    if kapture_data.rigs is not None and kapture_data.trajectories is not None:
        # make sure rigs are not used in trajectories.
        logger.info('remove rigs notation.')
        rigs_remove_inplace(kapture_data.trajectories, kapture_data.rigs)
        kapture_data.rigs.clear()

    if kapture_query_data.trajectories is not None:
        logger.warning(
            "Input query data contains trajectories: they will be ignored")
        kapture_query_data.trajectories.clear()

    os.umask(0o002)
    os.makedirs(output_path, exist_ok=True)
    delete_existing_kapture_files(output_path, force_erase=force)

    # load pairsfile (one CSV row per pair: query image, map image, score)
    pairs = {}
    with open(pairsfile_path, 'r') as fid:
        table = kapture.io.csv.table_from_file(fid)
        for img_query, img_map, _ in table:
            if img_query not in pairs:
                pairs[img_query] = []
            pairs[img_query].append(img_map)

    kapture_data.matches[keypoints_type].normalize()
    keypoints_filepaths = keypoints_to_filepaths(
        kapture_data.keypoints[keypoints_type], keypoints_type, kapture_path,
        tar_handlers)
    obs_for_keypoints_type = {
        point_id: per_keypoints_type_subdict[keypoints_type]
        for point_id, per_keypoints_type_subdict in
        kapture_data.observations.items()
        if keypoints_type in per_keypoints_type_subdict
    }
    point_id_from_obs = {
        (img_name, kp_id): point_id
        for point_id in obs_for_keypoints_type.keys()
        for img_name, kp_id in obs_for_keypoints_type[point_id]
    }
    query_images = [(timestamp, sensor_id, image_name)
                    for timestamp, sensor_id, image_name in kapture.flatten(
                        kapture_query_data.records_camera)]

    # kapture for localized images + pose
    trajectories = kapture.Trajectories()
    for timestamp, sensor_id, image_name in tqdm(
            query_images,
            disable=logging.getLogger().level >= logging.CRITICAL):
        if image_name not in pairs:
            continue
        # N number of correspondences
        # points2D - Nx2 array with pixel coordinates
        # points3D - Nx3 array with world coordinates
        points2D = []
        points3D = []
        keypoints_filepath = keypoints_filepaths[image_name]
        kapture_keypoints_query = image_keypoints_from_file(
            filepath=keypoints_filepath,
            dsize=kapture_data.keypoints[keypoints_type].dsize,
            dtype=kapture_data.keypoints[keypoints_type].dtype)
        query_cam = kapture_query_data.sensors[sensor_id]
        assert isinstance(query_cam, kapture.Camera)

        col_cam_id, width, height, params, _ = get_colmap_camera(query_cam)
        cfg = {
            'model': CAMERA_MODEL_NAME_ID[col_cam_id][0],
            'width': int(width),
            'height': int(height),
            'params': params
        }

        points2D, _, points3D, stats = get_correspondences(
            kapture_data, keypoints_type, kapture_path, tar_handlers,
            image_name, pairs[image_name], point_id_from_obs,
            kapture_keypoints_query, None, duplicate_strategy, rerank_strategy)

        # compute absolute pose
        # max_error - RANSAC inlier threshold in pixels
        # ret - dictionary containing the RANSAC output
        ret = pycolmap.absolute_pose_estimation(points2D, points3D, cfg,
                                                max_error, min_inlier_ratio,
                                                min_num_iterations,
                                                max_num_iterations, confidence)
        # add pose to output kapture
        if ret['success'] and ret['num_inliers'] > 0:
            pose = kapture.PoseTransform(ret['qvec'], ret['tvec'])
            if write_detailed_report:
                num_2dpoints = len(points2D)
                points2D_final, K, distortion = get_camera_matrix_from_kapture(
                    np.array(points2D, dtype=float), query_cam)
                points2D_final = list(points2D_final.reshape(
                    (num_2dpoints, 2)))
                inliers = np.where(ret['inliers'])[0].tolist()
                reprojection_error = compute_reprojection_error(
                    pose, ret['num_inliers'], inliers, points2D_final,
                    points3D, K, distortion)
                cache = {
                    "num_correspondences": len(points3D),
                    "num_inliers": ret['num_inliers'],
                    "inliers": inliers,
                    "reprojection_error": reprojection_error,
                    "stats": stats
                }
                cache_path = os.path.join(output_path,
                                          f'pycolmap_cache/{image_name}.json')
                save_to_json(cache, cache_path)
            trajectories[timestamp, sensor_id] = pose

    kapture_data_localized = kapture.Kapture(
        sensors=kapture_query_data.sensors,
        trajectories=trajectories,
        records_camera=kapture_query_data.records_camera,
        rigs=kapture_query_data.rigs)
    kapture.io.csv.kapture_to_dir(output_path, kapture_data_localized)
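Finally, a self-contained sanity check for the pycolmap.absolute_pose_estimation call these examples share, assuming the legacy dict-returning API with 'success'/'qvec'/'tvec' fields (newer pycolmap releases changed this interface); points projected from an identity-pose camera should yield qvec near [1, 0, 0, 0] and tvec near zero.

import numpy as np
import pycolmap

rng = np.random.default_rng(0)
f, cx, cy, w, h = 500.0, 320.0, 240.0, 640, 480

# Random 3D points in front of an identity-pose camera, projected with the
# SIMPLE_PINHOLE model (params = [f, cx, cy]).
points3D = rng.uniform([-2.0, -2.0, 4.0], [2.0, 2.0, 8.0], (100, 3))
points2D = points3D[:, :2] / points3D[:, 2:] * f + np.array([cx, cy])

cfg = {'model': 'SIMPLE_PINHOLE', 'width': w, 'height': h, 'params': [f, cx, cy]}
ret = pycolmap.absolute_pose_estimation(points2D, points3D, cfg, 12.0)
if ret['success']:
    print(ret['qvec'], ret['tvec'], ret['num_inliers'])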