Example #1
    def match_descriptors(self, descriptors_1, descriptors_2):
        if descriptors_1.shape[0] == 0 or descriptors_2.shape[0] == 0:
            return np.zeros((0, 3))

        # send data to GPU
        descriptors1_torch = torch.from_numpy(descriptors_1).to(self._device)
        descriptors2_torch = torch.from_numpy(descriptors_2).to(self._device)
        # make sure it is float32 (the matrix multiplication below requires a floating-point type)
        descriptors1_torch = descriptors1_torch.float()
        descriptors2_torch = descriptors2_torch.float()
        # sanity check
        if descriptors1_torch.device != self._device:
            getLogger().debug('descriptor on device {} (requested {})'.format(
                descriptors1_torch.device, self._device))
        if descriptors2_torch.device != self._device:
            getLogger().debug('descriptor on device {} (requested {})'.format(
                descriptors2_torch.device, self._device))

        similarity_matrix = descriptors1_torch @ descriptors2_torch.t()
        # nearest neighbor of each descriptor of set 1 within set 2, and vice versa
        scores, nearest_neighbor_idx_1vs2 = torch.max(similarity_matrix, dim=1)
        nearest_neighbor_idx_2vs1 = torch.max(similarity_matrix, dim=0)[1]
        ids1 = torch.arange(0,
                            similarity_matrix.shape[0],
                            device=descriptors1_torch.device)
        # cross check
        mask = ids1 == nearest_neighbor_idx_2vs1[nearest_neighbor_idx_1vs2]
        matches_torch = torch.stack([
            ids1[mask].type(torch.float),
            nearest_neighbor_idx_1vs2[mask].type(torch.float), scores[mask]
        ]).t()
        # retrieve data back from GPU
        matches = matches_torch.cpu().numpy()
        matches = matches.astype(np.float64)
        return matches
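A minimal usage sketch; `matcher` is assumed to be an instance of the class exposing this method, and the sizes are illustrative. Descriptors are matched by mutual nearest neighbor on dot-product similarity, so rows should be L2-normalized for a cosine-similarity interpretation.

import numpy as np

descriptors_1 = np.random.rand(100, 256).astype(np.float32)
descriptors_2 = np.random.rand(80, 256).astype(np.float32)
descriptors_1 /= np.linalg.norm(descriptors_1, axis=1, keepdims=True)
descriptors_2 /= np.linalg.norm(descriptors_2, axis=1, keepdims=True)

matches = matcher.match_descriptors(descriptors_1, descriptors_2)
# each row of matches: [index in descriptors_1, index in descriptors_2, similarity score]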
Example #2
def pycolmap_rig_localize_command_line():
    """
    Parse the command line arguments to localize images
    from a multi camera rig with pycolmap using the given kapture data.
    """
    parser = get_pycolmap_rig_localize_argparser()
    args = parser.parse_args()

    logger.setLevel(args.verbose)
    getLogger().setLevel(args.verbose)
    if args.verbose <= logging.INFO:
        # also let kapture express its logs
        kapture.utils.logging.getLogger().setLevel(args.verbose)

    args_dict = vars(args)
    logger.debug('kapture_pycolmap_rig_localize.py \\\n' +
                 '  \\\n'.join('--{:20} {:100}'.format(k, str(v))
                               for k, v in args_dict.items()))

    pycolmap_rig_localize(
        args.input, args.query, args.output, args.pairsfile_path, args.rig_ids,
        args.apply_rigs_remove, args.max_error, args.min_inlier_ratio,
        args.min_num_iterations, args.max_num_iterations, args.confidence,
        args.keypoints_type, args.duplicate_strategy, args.rerank_strategy,
        args.write_detailed_report, args.force)
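This wrapper is intended to run as a script entry point; a minimal sketch of the usual pattern:

if __name__ == '__main__':
    pycolmap_rig_localize_command_line()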
Example #3
def pyransaclib_localize_command_line():
    """
    Parse the command line arguments to localize images with pyransaclib using the given kapture data.
    """
    parser = get_pyransaclib_localize_argparser()
    args = parser.parse_args()

    logger.setLevel(args.verbose)
    getLogger().setLevel(args.verbose)
    if args.verbose <= logging.INFO:
        # also let kapture express its logs
        kapture.utils.logging.getLogger().setLevel(args.verbose)

    args_dict = vars(args)
    logger.debug('kapture_pyransaclib_localize.py \\\n' +
                 '  \\\n'.join('--{:20} {:100}'.format(k, str(v))
                               for k, v in args_dict.items()))

    pyransaclib_localize(args.input, args.query, args.output,
                         args.pairsfile_path, args.inlier_threshold,
                         args.number_lo_steps, args.min_num_iterations,
                         args.max_num_iterations, args.refine_poses,
                         args.keypoints_type, args.duplicate_strategy,
                         args.rerank_strategy, args.write_detailed_report,
                         args.force)
Example #4
def stack_global_features(
    global_features_config: ImageFeatureConfig,
    global_features_paths: List[Tuple[str, str]]
) -> StackedGlobalFeatures:
    """
    loads global features and store them inside a numpy array of shape (number_of_images, dsize)

    :param global_features_config: content of global_features.txt, required to load the global features
    :type global_features_config: ImageFeatureConfig
    :param global_features_paths: list of every image and the full path to the corresponding global feature
    :type global_features_paths: List[Tuple[str, str]]
    """
    getLogger().debug('loading global features')
    number_of_images = len(global_features_paths)

    stacked_features_index = np.empty((number_of_images, ), dtype=object)
    stacked_features = np.empty(
        (number_of_images, global_features_config.dsize),
        dtype=global_features_config.dtype)

    hide_progress_bar = getLogger().getEffectiveLevel() > logging.INFO
    for i, (image_path, global_feature_path) in tqdm(enumerate(global_features_paths),
                                                     total=number_of_images,
                                                     disable=hide_progress_bar):
        stacked_features_index[i] = image_path
        global_feature = image_global_features_from_file(
            global_feature_path, global_features_config.dtype,
            global_features_config.dsize)
        global_feature = global_feature / np.linalg.norm(global_feature)
        stacked_features[i] = global_feature

    return StackedGlobalFeatures(stacked_features_index, stacked_features)
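A usage sketch, assuming the StackedGlobalFeatures fields are named index and stacked_features as used elsewhere in this page; the config and path lists are illustrative. Since every row is L2-normalized above, a plain dot product yields the cosine-similarity matrix used for image retrieval.

query_features = stack_global_features(global_features_config, query_paths)
map_features = stack_global_features(global_features_config, map_paths)
similarity = query_features.stacked_features @ map_features.stacked_features.T
# similarity[i, j]: cosine similarity between query image i and map image j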
Example #5
def get_observation_image_pairs(keypoints_type: str,
                                kdata: kapture.Kapture,
                                kdata_query: Optional[kapture.Kapture],
                                max_number_of_threads: Optional[int] = None):
    """
    get observations pairs as dictionary
    """
    assert kdata.records_camera is not None
    imgs_map = kdata.records_camera.data_list()
    if kdata_query is not None:
        assert kdata_query.records_camera is not None
        imgs_query = kdata_query.records_camera.data_list()
    else:
        imgs_query = None
    all_pairs = {}

    number_of_threads = multiprocessing.cpu_count() \
        if max_number_of_threads is None else max_number_of_threads

    def update_all_pairs_and_progress_bar(result):
        for img1 in result:
            if img1 not in all_pairs:
                all_pairs[img1] = {}
            for img2 in result[img1]:
                if img2 not in all_pairs[img1]:
                    all_pairs[img1][img2] = 0
                all_pairs[img1][img2] += result[img1][img2]
        progress_bar.update(1)

    def error_callback(e):
        getLogger().critical(e)

    getLogger().debug(
        f'computing all possible pairs from observations, max-threads={number_of_threads}'
    )
    assert kdata.observations is not None
    progress_bar = tqdm(total=len(kdata.observations),
                        disable=getLogger().level >= logging.CRITICAL)

    imgs_map_set = set(imgs_map)
    imgs_query_set = set(imgs_query) if imgs_query is not None else None
    with multiprocessing.Pool(number_of_threads) as pool:
        for point3d_id in kdata.observations.keys():
            if keypoints_type not in kdata.observations[point3d_id]:
                progress_bar.update(1)
                continue
            pool.apply_async(_child_process_get_pairs,
                             args=(kdata.observations[point3d_id,
                                                      keypoints_type],
                                   imgs_map_set, imgs_query_set),
                             callback=update_all_pairs_and_progress_bar,
                             error_callback=error_callback)
        pool.close()
        pool.join()
    progress_bar.close()
    return all_pairs
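A short usage sketch; the keypoints type name is illustrative:

all_pairs = get_observation_image_pairs('SIFT', kdata, None)
# all_pairs[img1][img2] = number of 3D points observed by both img1 and img2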
Example #6
def get_interpolation_weights(method: PoseApproximationMethods,
                              query_gfeat: StackedGlobalFeatures,
                              map_gfeat: StackedGlobalFeatures, topk: int,
                              additional_parameters: dict):
    """
    compute the pose weights for the given method as a dict { query image name -> list(map image name, weight) }

    :param method: pose approximation method to use
    :type method: PoseApproximationMethods
    :param query_gfeat: global features for the query images
    :type query_gfeat: StackedGlobalFeatures
    :param map_gfeat: global features for the map images
    :type map_gfeat: StackedGlobalFeatures
    :param topk: the max number of top retained images
    :type topk: int
    :param additional_parameters: method specific parameters
    :type additional_parameters: dict
    """
    similarity_matrix = get_similarity_matrix(query_gfeat, map_gfeat)
    local_topk = min(topk, similarity_matrix.shape[1])
    if local_topk != topk:
        getLogger().warning(
            f'topk was set to {local_topk} instead of {topk} because there was not enough map data'
        )
    similarity_sorted = np.empty((similarity_matrix.shape[0], local_topk),
                                 dtype=int)
    for i, scores in enumerate(similarity_matrix):
        indexes = np.argsort(-scores)
        similarity_sorted[i, :] = indexes[:local_topk]

    if method == PoseApproximationMethods.equal_weighted_barycenter:
        weights = _get_EWB_weights(similarity_matrix.shape[0], local_topk)
    elif method == PoseApproximationMethods.barycentric_descriptor_interpolation:
        weights = _get_BDI_weights(similarity_sorted, query_gfeat, map_gfeat)
    elif method == PoseApproximationMethods.cosine_similarity:
        assert 'alpha' in additional_parameters
        weights = _get_CSI_weights(similarity_matrix, similarity_sorted,
                                   additional_parameters['alpha'])
    else:
        raise NotImplementedError(
            f'{method} - unknown PoseApproximationMethods')

    weights_dict = {}
    for i, indexes in enumerate(similarity_sorted):
        query_name = query_gfeat.index[i]
        weights_dict[query_name] = list(
            zip(map_gfeat.index[indexes], weights[i, :]))
    return weights_dict
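A usage sketch; the alpha value is illustrative and is only required by the cosine_similarity method:

weights_dict = get_interpolation_weights(
    PoseApproximationMethods.cosine_similarity,
    query_gfeat, map_gfeat, topk=3,
    additional_parameters={'alpha': 8})
# weights_dict maps each query image name to [(map image name, weight), ...]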
Example #7
def get_topk_observation_pairs(all_pairs: Dict[str, Dict[str, int]],
                               records_camera: kapture.RecordsCamera,
                               topk: int):
    """
    convert pairs dict to list
    """
    image_pairs = []
    for img1 in sorted(records_camera.data_list()):
        if img1 not in all_pairs:
            getLogger().debug(f'{img1} has no images sharing observations')
            continue
        sorted_pairs = list(
            sorted(all_pairs[img1].items(),
                   key=lambda item: item[1],
                   reverse=True))
        for img2, score in sorted_pairs[0:topk]:
            image_pairs.append((img1, img2, score))
    return image_pairs
Example #8
def get_ordered_pairs_from_file(
        pairsfile_path: str,
        query_records: kapture.RecordsCamera = None,
        map_records: kapture.RecordsCamera = None,
        topk_override=None) -> Dict[str, List[Tuple[str, float]]]:
    """
    read pairfile and return a list of pairs (keep duplicates, order is query, map)
    """
    getLogger().info('reading pairs from pairsfile')
    if query_records is not None:
        query_images = set(query_records.data_list())
    else:
        query_images = None
    if map_records is not None:
        map_images = set(map_records.data_list())
    else:
        map_images = None

    image_pairs = {}
    with open(pairsfile_path, 'r') as fid:
        table = table_from_file(fid)
        for query_name, map_name, score in table:
            if query_images is not None and query_name not in query_images:
                continue
            if map_images is not None and map_name not in map_images:
                continue
            if query_name not in image_pairs:
                image_pairs[query_name] = []
            image_pairs[query_name].append((map_name, float(score)))
    for k in image_pairs.keys():
        sorted_by_score = list(
            sorted(image_pairs[k], key=lambda x: x[1], reverse=True))
        if topk_override is not None and topk_override > len(sorted_by_score):
            getLogger().debug(
                f'image {k} has {len(sorted_by_score)} pairs, less than topk={topk_override}'
            )
        elif topk_override is not None:
            sorted_by_score = sorted_by_score[:topk_override]
        image_pairs[k] = sorted_by_score
    return image_pairs
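A sketch of the expected pairsfile content; the file name is illustrative, and the "name1, name2, score" line format is the one assumed by table_from_file:

with open('pairs.txt', 'w') as f:
    f.write('query1.jpg, map1.jpg, 0.9\n')
    f.write('query1.jpg, map2.jpg, 0.8\n')
pairs = get_ordered_pairs_from_file('pairs.txt', topk_override=1)
# -> {'query1.jpg': [('map1.jpg', 0.9)]}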
Example #9
def run_python_command(local_path: str,
                       args: List[str],
                       python_binary: Optional[str] = None):
    """
    run a python subprocess

    :param local_path: path where you expect the file to be
    :type local_path: str
    :param args: the arguments of the python process
    :type args: List[str]
    :param python_binary: path to the python binary, optional, when None, the .py file is called directly
    :type python_binary: Optional[str]
    :raises ValueError: subprocess crashed
    """
    if python_binary is None:
        if path.isfile(local_path):
            compute_image_pairs_bin = path.normpath(local_path)
        else:
            # maybe the script was installed through pip
            compute_image_pairs_bin = path.basename(local_path)
        args.insert(0, compute_image_pairs_bin)
    else:
        if path.isfile(local_path):
            compute_image_pairs_bin = path.normpath(local_path)
        else:
            # maybe the script was installed through pip
            # with the direct binary, we need to get the full path
            compute_image_pairs_bin = find_in_PATH(path.basename(local_path))
        args.insert(0, compute_image_pairs_bin)
        args.insert(0, python_binary)

    getLogger().debug(f'run_python_command : {args}')

    use_shell = sys.platform.startswith("win")
    python_process = subprocess.Popen(args, shell=use_shell)
    python_process.wait()
    if python_process.returncode != 0:
        raise ValueError('\nSubprocess Error (Return code:'
                         f' {python_process.returncode})')
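A usage sketch with hypothetical paths; note that the function prepends the executable to the args list in place:

import sys

run_python_command('./kapture_compute_image_pairs.py',
                   ['--verbose', 'debug'],
                   python_binary=sys.executable)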
Example #10
def get_all_cameras_from_rig_ids(rig_ids: List[str],
                                 sensors: kapture.Sensors,
                                 rigs: kapture.Rigs) -> Dict[str, Dict[str, kapture.PoseTransform]]:
    """
    get a dict {rig_id: {camera_id: relative pose}} -> flattened version of rigs
    """
    # recursively get all camera ids for the rig ids
    camera_list = {}
    max_depth = 10
    for rig_id in rig_ids:
        camera_list[rig_id] = {}
        subrig_ids = [(rig_id, kapture.PoseTransform())]
        for _ in range(max_depth + 1):
            if len(subrig_ids) == 0:
                break
            subrig_ids_next = []
            for rig_id_l1, relative_transform_l1 in subrig_ids:
                if rig_id_l1 in sensors and \
                        isinstance(sensors[rig_id_l1], kapture.Camera):
                    camera_list[rig_id][rig_id_l1] = relative_transform_l1
                if rig_id_l1 in rigs:
                    for rig_id_l2, relative_transform_l2 in rigs[rig_id_l1].items():
                        # relative_transform_l2 -> id_l1 to id_l2
                        # relative_transform_l1 -> rig to id_l1
                        # relative_transform_l1_l2 -> rig to id_l2
                        relative_transform_l1_l2 = kapture.PoseTransform.compose(
                            [relative_transform_l2, relative_transform_l1])
                        subrig_ids_next.append((rig_id_l2, relative_transform_l1_l2))
            subrig_ids = subrig_ids_next

    # cleanup empty rigs
    final_camera_list = {}
    for rig_id in camera_list.keys():
        if len(camera_list[rig_id]) > 0:
            final_camera_list[rig_id] = camera_list[rig_id]
        else:
            getLogger().debug(f'{rig_id} removed from list because there was not any camera')
    return final_camera_list
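A minimal sketch of flattening a one-level rig; identifiers and intrinsics are illustrative:

rigs = kapture.Rigs()
rigs['rig0', 'cam0'] = kapture.PoseTransform()
rigs['rig0', 'cam1'] = kapture.PoseTransform()
sensors = kapture.Sensors()
for cam_id in ('cam0', 'cam1'):
    # SIMPLE_PINHOLE parameters: w, h, f, cx, cy
    sensors[cam_id] = kapture.Camera(kapture.CameraType.SIMPLE_PINHOLE,
                                     [640, 480, 500.0, 320.0, 240.0])
cameras = get_all_cameras_from_rig_ids(['rig0'], sensors, rigs)
# -> {'rig0': {'cam0': <PoseTransform>, 'cam1': <PoseTransform>}}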
Example #11
def get_pairs_from_file(
    pairsfile_path: str,
    query_records: kapture.RecordsCamera = None,
    map_records: kapture.RecordsCamera = None,
) -> List[Tuple[str, str]]:
    """
    read a pairs file (csv with 3 fields, name1, name2, score) and return the list of matches

    :param pairsfile_path: path to pairsfile
    :type pairsfile_path: str
    """
    getLogger().info('reading pairs from pairsfile')
    if query_records is not None:
        query_images = set(query_records.data_list())
    else:
        query_images = None
    if map_records is not None:
        map_images = set(map_records.data_list())
    else:
        map_images = None

    image_pairs = []
    with open(pairsfile_path, 'r') as fid:
        table = table_from_file(fid)
        for query_name, map_name, _ in table:  # last field score is not used
            if query_images is not None and query_name not in query_images:
                continue
            if map_images is not None and map_name not in map_images:
                continue
            if query_name != map_name:
                pair = (query_name, map_name) if query_name < map_name \
                    else (map_name, query_name)
                image_pairs.append(pair)
    # remove duplicates without breaking order
    image_pairs = list(OrderedDict.fromkeys(image_pairs))
    return image_pairs
Example #12
def get_pairs_observations(kdata: kapture.Kapture,
                           kdata_query: Optional[kapture.Kapture],
                           keypoints_type: str,
                           max_number_of_threads: Optional[int], iou: bool,
                           topk: int):
    """
    get observations pairs as list
    """
    if iou:
        individual_observations = get_observation_images(
            keypoints_type, kdata, kdata_query, max_number_of_threads)
        gc.collect()
    else:
        individual_observations = None
    all_pairs = get_observation_image_pairs(keypoints_type, kdata, kdata_query,
                                            max_number_of_threads)
    if iou:
        assert individual_observations is not None
        final_pairs = {}
        for img1 in all_pairs.keys():
            for img2 in all_pairs[img1].keys():
                if img1 not in final_pairs:
                    final_pairs[img1] = {}
                # union via inclusion-exclusion: |obs(img1)| + |obs(img2)| - |shared|
                union = individual_observations[img1] \
                    + individual_observations[img2] - all_pairs[img1][img2]
                if union == 0:
                    final_pairs[img1][img2] = 0
                else:
                    final_pairs[img1][img2] = all_pairs[img1][img2] / union
        all_pairs = final_pairs

    getLogger().info('ranking co-observation pairs...')
    assert kdata.records_camera is not None
    image_pairs = get_topk_observation_pairs(all_pairs, kdata.records_camera,
                                             topk)
    return image_pairs
Example #13
def get_correspondences(kapture_data: kapture.Kapture, keypoints_type: str,
                        kapture_path: str, tar_handlers: TarCollection,
                        img_query: str, pairs: List[str],
                        point_id_from_obs: Dict[Tuple[str, int], int],
                        kpts_query: Optional[np.ndarray],
                        kpts_query_undistorted: Optional[np.ndarray],
                        duplicate_strategy: DuplicateCorrespondencesStrategy,
                        rerank_strategy: RerankCorrespondencesStrategy):
    """
    get 2D-3D correspondences for a given query image, a list of paired map images, and a kapture map
    """
    # first list all correspondences
    correspondences = {}
    for img_map in pairs:
        # get matches
        if img_query < img_map:
            if (img_query, img_map) not in kapture_data.matches[keypoints_type]:
                getLogger().warning(
                    f'pair ({img_query}, {img_map}) does not have a match file, skipped'
                )
                continue
            matches_path = get_matches_fullpath((img_query, img_map),
                                                keypoints_type, kapture_path,
                                                tar_handlers)
        else:
            if (img_map, img_query) not in kapture_data.matches[keypoints_type]:
                getLogger().warning(
                    f'pair ({img_map}, {img_query}) does not have a match file, skipped'
                )
                continue
            matches_path = get_matches_fullpath((img_map, img_query),
                                                keypoints_type, kapture_path,
                                                tar_handlers)
        matches = image_matches_from_file(matches_path)

        num_matches = matches.shape[0]
        corrs = []
        for m in matches:
            if img_query < img_map:
                kpid_query = m[0]
                kpid_map = m[1]
            else:
                kpid_query = m[1]
                kpid_map = m[0]
            # match_score = m[2]

            if (img_map, kpid_map) not in point_id_from_obs:
                continue
            # get 3D point
            p3did = point_id_from_obs[(img_map, kpid_map)]
            corrs.append((kpid_query, p3did))
        correspondences[img_map] = (num_matches, corrs)

    if rerank_strategy == RerankCorrespondencesStrategy.none:
        reranked_pairs = pairs
    elif rerank_strategy == RerankCorrespondencesStrategy.matches_count:
        reranked_pairs = [
            img_map for img_map, _ in sorted(
                correspondences.items(), key=lambda x: x[1][0], reverse=True)
        ]
    elif rerank_strategy == RerankCorrespondencesStrategy.correspondences_count:
        reranked_pairs = [
            img_map for img_map, _ in sorted(correspondences.items(),
                                             key=lambda x: len(x[1][1]),
                                             reverse=True)
        ]
    else:
        raise NotImplementedError(f'{rerank_strategy} not implemented')

    # N number of correspondences
    # points2D - Nx2 array with pixel coordinates
    # points3D - Nx3 array with world coordinates
    points2D = []
    points2D_undistorted = []
    points3D = []

    assigned_keypoints_ids = {}
    assigned_3d_points_ids = {}
    true_duplicates_count = 0
    same_2d_multiple_3d_count = 0
    same_2d_multiple_3d_max = 0
    same_3d_multiple_2d_count = 0
    same_3d_multiple_2d_max = 0
    rejected_correspondences = 0
    for img_map in reranked_pairs:
        if img_map not in correspondences:
            continue
        for kpid_query, p3did in correspondences[img_map][1]:
            if kpid_query in assigned_keypoints_ids \
                    and p3did in assigned_keypoints_ids[kpid_query]:
                true_duplicates_count += 1
                if duplicate_strategy in (DuplicateCorrespondencesStrategy.ignore,
                                          DuplicateCorrespondencesStrategy.ignore_strict,
                                          DuplicateCorrespondencesStrategy.ignore_same_kpid,
                                          DuplicateCorrespondencesStrategy.ignore_same_p3did):
                    rejected_correspondences += 1
                    continue
            elif duplicate_strategy == DuplicateCorrespondencesStrategy.ignore and \
                    (kpid_query in assigned_keypoints_ids or p3did in assigned_3d_points_ids):
                rejected_correspondences += 1
                continue
            else:
                if duplicate_strategy == DuplicateCorrespondencesStrategy.ignore_same_kpid and \
                        kpid_query in assigned_keypoints_ids:
                    rejected_correspondences += 1
                    continue
                elif kpid_query not in assigned_keypoints_ids:
                    assigned_keypoints_ids[kpid_query] = {p3did}
                else:
                    # p3did not in assigned_keypoints_ids[kpid_query]
                    same_2d_multiple_3d_count += 1
                    assigned_keypoints_ids[kpid_query].add(p3did)
                    same_2d_multiple_3d_max = max(
                        same_2d_multiple_3d_max,
                        len(assigned_keypoints_ids[kpid_query]))

                if duplicate_strategy == DuplicateCorrespondencesStrategy.ignore_same_p3did and \
                        p3did in assigned_3d_points_ids:
                    rejected_correspondences += 1
                    continue
                elif p3did not in assigned_3d_points_ids:
                    assigned_3d_points_ids[p3did] = {kpid_query}
                else:
                    # kpid_query not in assigned_3d_points_ids[p3did]
                    same_3d_multiple_2d_count += 1
                    assigned_3d_points_ids[p3did].add(kpid_query)
                    same_3d_multiple_2d_max = max(
                        same_3d_multiple_2d_max,
                        len(assigned_3d_points_ids[p3did]))

            if kpts_query is not None:
                kp_query = kpts_query[int(kpid_query)]
                points2D.append(kp_query[0:2])
            if kpts_query_undistorted is not None:
                kp_query_undistorted = kpts_query_undistorted[int(kpid_query)]
                points2D_undistorted.append(kp_query_undistorted[0:2])
            p3d_map = kapture_data.points3d[p3did]
            points3D.append(p3d_map[0:3])

    stats = {
        "true_duplicates_count": true_duplicates_count,
        "same_2d_multiple_3d_count": same_2d_multiple_3d_count,
        "same_2d_multiple_3d_max": same_2d_multiple_3d_max,
        "same_3d_multiple_2d_count": same_3d_multiple_2d_count,
        "same_3d_multiple_2d_max": same_3d_multiple_2d_max,
        "rejected_correspondences": rejected_correspondences
    }
    return points2D, points2D_undistorted, points3D, stats
Example #14
def get_camera_matrix_from_kapture(pts_2D, sensor: kapture.Camera):
    """
    returned keypoints of shape [1, number of keypoints, dsize]
    :param pts_2D: [description]
    :type pts_2D: keypoints in shape [number of keypoints,dsize] or [1, number of keypoints, dsize]
    """
    if len(pts_2D.shape) == 2:
        pts_2D = np.array([pts_2D], dtype=np.float64)
    assert len(pts_2D.shape) == 3
    pts_2D = pts_2D[:, :, 0:2]

    model = sensor.camera_type
    model_params = sensor.camera_params

    if model == kapture.CameraType.SIMPLE_PINHOLE:
        # w, h, f, cx, cy
        return (pts_2D,
                get_camera_matrix(model_params[2], model_params[2],
                                  model_params[3],
                                  model_params[4]), _get_zero_distortion())
    elif model == kapture.CameraType.PINHOLE:
        # w, h, fx, fy, cx, cy
        return (pts_2D,
                get_camera_matrix(model_params[2], model_params[3],
                                  model_params[4],
                                  model_params[5]), _get_zero_distortion())
    elif model == kapture.CameraType.SIMPLE_RADIAL:
        # w, h, f, cx, cy, k
        return (pts_2D,
                get_camera_matrix(model_params[2], model_params[2],
                                  model_params[3], model_params[4]),
                _get_distortion_4(model_params[5], 0, 0, 0))
    elif model == kapture.CameraType.RADIAL:
        # w, h, f, cx, cy, k1, k2
        return (pts_2D,
                get_camera_matrix(model_params[2], model_params[2],
                                  model_params[3], model_params[4]),
                _get_distortion_4(model_params[5], model_params[6], 0, 0))
    elif model == kapture.CameraType.OPENCV:
        # w, h, fx, fy, cx, cy, k1, k2, p1, p2
        return (pts_2D,
                get_camera_matrix(model_params[2], model_params[3],
                                  model_params[4], model_params[5]),
                _get_distortion_4(model_params[6], model_params[7],
                                  model_params[8], model_params[9]))
    elif model == kapture.CameraType.FULL_OPENCV:
        # w, h, fx, fy, cx, cy, k1, k2, p1, p2, k3, k4, k5, k6
        return (pts_2D,
                get_camera_matrix(model_params[2], model_params[3],
                                  model_params[4], model_params[5]),
                _get_distortion_8(model_params[6], model_params[7],
                                  model_params[8], model_params[9],
                                  model_params[10], model_params[11],
                                  model_params[12], model_params[13]))
    elif model == kapture.CameraType.OPENCV_FISHEYE:
        # w, h, fx, fy, cx, cy, k1, k2, k3, k4
        fisheye_camera_matrix = get_camera_matrix(model_params[2],
                                                  model_params[3],
                                                  model_params[4],
                                                  model_params[5])
        fisheye_distortion = _get_distortion_4(model_params[6],
                                               model_params[7],
                                               model_params[8],
                                               model_params[9])
        P = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(
            fisheye_camera_matrix, fisheye_distortion,
            (int(model_params[0]), int(model_params[1])), None)
        undist_pts_2D = cv2.fisheye.undistortPoints(pts_2D,
                                                    fisheye_camera_matrix,
                                                    fisheye_distortion,
                                                    P=P)
        return undist_pts_2D, P, _get_zero_distortion()
    elif model == kapture.CameraType.RADIAL_FISHEYE or model == kapture.CameraType.SIMPLE_RADIAL_FISHEYE:
        getLogger().warning(
            'OpenCV radial fisheye model not fully supported, distortion coefficients will be ignored'
        )
        # w, h, f, cx, cy, k or w, h, f, cx, cy, k1, k2
        fisheye_camera_matrix = get_camera_matrix(model_params[2],
                                                  model_params[2],
                                                  model_params[3],
                                                  model_params[4])
        fisheye_distortion = _get_distortion_4(0, 0, 0, 0)
        P = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(
            fisheye_camera_matrix, fisheye_distortion,
            (int(model_params[0]), int(model_params[1])), None)
        undist_pts_2D = cv2.fisheye.undistortPoints(pts_2D,
                                                    fisheye_camera_matrix,
                                                    fisheye_distortion,
                                                    P=P)
        return undist_pts_2D, P, _get_zero_distortion()
    else:
        raise ValueError(f'Camera model {model.value} not supported')
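A usage sketch for a plain PINHOLE camera; parameter values are illustrative:

import numpy as np

# PINHOLE parameters: w, h, fx, fy, cx, cy
camera = kapture.Camera(kapture.CameraType.PINHOLE,
                        [640, 480, 500.0, 500.0, 320.0, 240.0])
pts = np.array([[100.0, 120.0], [300.0, 200.0]])
pts_out, K, distortion = get_camera_matrix_from_kapture(pts, camera)
# K is the 3x3 intrinsic matrix, distortion the OpenCV distortion coefficients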
Example #15
def get_pairs_sequence(kdata: kapture.Kapture, window_size: int, loop: bool,
                       expand_window: bool, max_interval: int):
    """
    compute image pairs from per-camera image sequences: consecutive images
    (within max_interval) are grouped, then paired inside a sliding window and
    scored by temporal proximity
    """
    getLogger().info('computing pairs from sequences...')
    assert kdata.records_camera is not None
    start_total = datetime.datetime.now()
    out_of_bounds_images = set()  # images that end up in no sequence of at least 2 images

    localized_seq = {}
    getLogger().debug("Compute sequences")
    cur_seq = {}
    prev_ts = {}

    for timestamp, records in sorted(kdata.records_camera.items()):
        for sensor_id, image_name in records.items():
            if sensor_id not in localized_seq:
                localized_seq[sensor_id] = []
            if sensor_id not in cur_seq:
                cur_seq[sensor_id] = []
            if sensor_id not in prev_ts:
                prev_ts[sensor_id] = 0

            if prev_ts[sensor_id] == 0 or \
                    timestamp - prev_ts[sensor_id] > max_interval:
                # New sequence
                if len(cur_seq[sensor_id]) >= 2:
                    # store the finished sequence (a list of image names)
                    localized_seq[sensor_id].append(cur_seq[sensor_id])
                    getLogger().debug(
                        f'sequence {len(localized_seq[sensor_id])} '
                        f'for camera {sensor_id}, {len(cur_seq[sensor_id])} images'
                    )
                else:
                    for out_of_bound_img in cur_seq[sensor_id]:
                        out_of_bounds_images.add(out_of_bound_img)
                cur_seq[sensor_id] = []

            cur_seq[sensor_id].append(image_name)
            prev_ts[sensor_id] = timestamp

    for sensor_id, seq in cur_seq.items():
        if len(seq) >= 2:
            # flush the last sequence of this camera (a list of image names)
            localized_seq[sensor_id].append(seq)
            getLogger().debug(f'sequence {len(localized_seq[sensor_id])} '
                              f'for camera {sensor_id}, {len(seq)} images')
        else:
            for out_of_bound_img in seq:
                out_of_bounds_images.add(out_of_bound_img)

    image_pairs = {}
    for sensor_id, sequences in localized_seq.items():
        for sequence in sequences:
            for i in range(len(sequence)):
                if sequence[i] not in image_pairs:
                    image_pairs[sequence[i]] = []

                if loop:
                    cache = set()
                    for j in range(window_size):
                        right_index = (i + j + 1) % len(sequence)
                        if sequence[right_index] != sequence[i] \
                                and sequence[right_index] not in cache:
                            image_pairs[sequence[i]].append(
                                (sequence[right_index],
                                 1.0 - (j + 1) / window_size))
                            cache.add(sequence[right_index])
                        left_index = (i - j - 1) % len(sequence)
                        if sequence[left_index] != sequence[i] \
                                and sequence[left_index] not in cache:
                            image_pairs[sequence[i]].append(
                                (sequence[left_index],
                                 1.0 - (j + 1) / window_size))
                            cache.add(sequence[left_index])
                else:
                    range_on_right = min(len(sequence) - 1 - i, window_size)
                    range_on_left = min(i, window_size)

                    if expand_window:
                        final_range_on_right = range_on_right + (window_size -
                                                                 range_on_left)
                        final_range_on_left = range_on_left + (window_size -
                                                               range_on_right)
                    else:
                        final_range_on_right = range_on_right
                        final_range_on_left = range_on_left
                    for j in range(final_range_on_right):
                        if i + j + 1 < len(sequence):
                            image_pairs[sequence[i]].append(
                                (sequence[i + j + 1],
                                 1.0 - (j + 1) / final_range_on_right))
                    for j in range(final_range_on_left):
                        if i - j - 1 >= 0:
                            image_pairs[sequence[i]].append(
                                (sequence[i - j - 1],
                                 1.0 - (j + 1) / final_range_on_left))

    images_pairs_list = []
    for query_image, pairs in sorted(image_pairs.items()):
        for map_image, score in sorted(pairs, key=lambda x: x[1],
                                       reverse=True):
            images_pairs_list.append([query_image, map_image, score])

    elapsed_total = datetime.datetime.now() - start_total
    getLogger().info(
        f'sequences processed in {elapsed_total.total_seconds()} seconds')
    getLogger().info(
        f'{len(out_of_bounds_images)} images were not in any sequence')
    getLogger().debug(f'list: {list(sorted(out_of_bounds_images))}')
    return images_pairs_list
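A usage sketch; parameter values are illustrative. With window_size=2 each image is paired with up to two neighbors on each side within its sequence, with scores decreasing with temporal distance:

pairs = get_pairs_sequence(kdata, window_size=2, loop=False,
                           expand_window=False, max_interval=1)
# each entry: [query image name, map image name, score in (0, 1]]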
Example #16
def error_callback(e):
    """ log any error raised by a multiprocessing worker """
    getLogger().critical(e)