Code example #1
def match_features(self, kapture_data):
    # list every image recorded in the kapture data
    image_list = [
        filename
        for _, _, filename in kapture.flatten(kapture_data.records_camera)
    ]
    # load all descriptor files up front
    descriptors = []
    descriptor_type = kapture_data.descriptors.dtype
    descriptor_size = kapture_data.descriptors.dsize
    for image_path in image_list:
        descriptors_full_path = get_descriptors_fullpath(
            kapture_data.kapture_path, image_path)
        descriptors.append(
            image_descriptors_from_file(descriptors_full_path,
                                        descriptor_type, descriptor_size))
    kapture_data.matches = kapture.Matches()
    if self._sequential_length is None:
        self._sequential_length = len(image_list)
    # match each image against the following images inside the sequential window
    for i in tqdm(range(len(image_list))):
        for j in range(i + 1,
                       min(len(image_list), i + self._sequential_length)):
            matches = self._matcher.match_descriptors(
                descriptors[i], descriptors[j])
            # optionally drop matches whose score is below the threshold
            if self._minimal_score is not None:
                mask = matches[:, 2] > self._minimal_score
                matches = matches[mask]
            kapture_data.matches.add(image_list[i], image_list[j])
            matches_full_path = get_matches_fullpath(
                (image_list[i], image_list[j]), kapture_data.kapture_path)
            image_matches_to_file(matches_full_path, matches)
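The score filter in the inner loop assumes the matcher returns an N x 3 array with one row per match: (keypoint id in image i, keypoint id in image j, score). A minimal, self-contained sketch of that thresholding step, using NumPy only and made-up values:

import numpy as np

# hypothetical matcher output: one row per match -> [keypoint_id_i, keypoint_id_j, score]
matches = np.array([[0., 12., 0.91],
                    [1.,  3., 0.42],
                    [2.,  7., 0.88]])

minimal_score = 0.5
mask = matches[:, 2] > minimal_score   # boolean mask of confident matches
matches = matches[mask]                # rows with scores 0.91 and 0.88 survive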
Code example #2
def add_image_to_kapture(kdata_src,
                         kdata_src_path,
                         kdata_trg,
                         img_name,
                         pairs,
                         add_pose=False):
    timestamp_sensor_id_from_image_name = {
        img_name: (timestamp, sensor_id)
        for timestamp, sensor_id, img_name in kapture.flatten(
            kdata_src.records_camera)
    }
    timestamp, sensor_id = timestamp_sensor_id_from_image_name[img_name]
    kdata_trg.sensors[sensor_id] = kdata_src.sensors[sensor_id]
    kdata_trg.records_camera[timestamp, sensor_id] = img_name
    kdata_trg.keypoints.add(img_name)
    if kdata_trg.descriptors is not None:
        kdata_trg.descriptors.add(img_name)

    if add_pose:
        kdata_trg.trajectories[timestamp,
                               sensor_id] = kdata_src.trajectories[timestamp,
                                                                   sensor_id]

    if os.path.exists(kdata_src_path) and len(pairs) != 0:
        kdata_trg.matches = kapture.Matches()
        for i in pairs:
            image_matches_filepath = get_matches_fullpath((i[0], i[1]),
                                                          kdata_src_path)
            if os.path.exists(image_matches_filepath):
                kdata_trg.matches.add(i[0], i[1])
        kdata_trg.matches.normalize()

    return kdata_trg
Code example #3
def compute_matches(input_path: str, pairsfile_path: str):
    """
    Compute matches from descriptors. Images to match are selected from a pairsfile (CSV with fields name1, name2, score).

    :param input_path: input path to kapture input root directory
    :type input_path: str
    :param pairsfile_path: path to pairs file (csv with 3 fields, name1, name2, score)
    :type pairsfile_path: str
    """
    logger.info(f'compute_matches. loading input: {input_path}')
    kdata = kapture_from_dir(input_path)
    assert kdata.sensors is not None
    assert kdata.records_camera is not None
    assert kdata.descriptors is not None

    image_pairs = get_pairs_from_file(pairsfile_path)
    matcher = MatchPairNnTorch(use_cuda=torch.cuda.is_available())
    new_matches = kapture.Matches()

    logger.info('compute_matches. entering main loop...')
    hide_progress_bar = logger.getEffectiveLevel() > logging.INFO
    for image_path1, image_path2 in tqdm(image_pairs,
                                         disable=hide_progress_bar):
        if image_path1 == image_path2:
            continue
        if image_path1 > image_path2:
            image_path1, image_path2 = image_path2, image_path1

        if image_path1 not in kdata.descriptors or image_path2 not in kdata.descriptors:
            logger.warning('unable to find descriptors for image pair : '
                           '\n\t{} \n\t{}'.format(image_path1, image_path2))
            continue

        descriptor1 = load_descriptors(input_path, image_path1,
                                       kdata.descriptors.dtype,
                                       kdata.descriptors.dsize)
        descriptor2 = load_descriptors(input_path, image_path2,
                                       kdata.descriptors.dtype,
                                       kdata.descriptors.dsize)
        matches = matcher.match_descriptors(descriptor1, descriptor2)
        matches_path = get_matches_fullpath((image_path1, image_path2),
                                            input_path)
        image_matches_to_file(matches_path, matches)
        new_matches.add(image_path1, image_path2)

    if not matches_check_dir(new_matches, input_path):
        logger.critical(
            'matching ended successfully but not all files were saved')
    logger.info('all done')
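compute_matches reads its image pairs from a CSV pairsfile with three fields per row (name1, name2, score). A minimal sketch of producing such a file with the standard csv module; the image names and paths below are placeholders, not values from the code above:

import csv

# hypothetical pairs: (query image, map image, retrieval score)
pairs = [('query/img_0001.jpg', 'mapping/img_0042.jpg', 0.83),
         ('query/img_0001.jpg', 'mapping/img_0057.jpg', 0.79)]

with open('pairs.txt', 'w', newline='') as f:
    writer = csv.writer(f)
    for name1, name2, score in pairs:
        writer.writerow([name1, name2, score])

# compute_matches('/path/to/kapture', 'pairs.txt')  # placeholder paths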
Code example #4
def compute_matches_from_loaded_data(input_path: str,
                                     kdata: kapture.Kapture,
                                     image_pairs: list,
                                     overwrite_existing: bool = False):
    assert kdata.sensors is not None
    assert kdata.records_camera is not None
    assert kdata.descriptors is not None

    matcher = MatchPairNnTorch(use_cuda=torch.cuda.is_available())
    new_matches = kapture.Matches()

    logger.info('compute_matches. entering main loop...')
    hide_progress_bar = logger.getEffectiveLevel() > logging.INFO
    skip_count = 0
    for image_path1, image_path2 in tqdm(image_pairs, disable=hide_progress_bar):
        if image_path1 == image_path2:
            continue
        if image_path1 > image_path2:
            image_path1, image_path2 = image_path2, image_path1

        # skip existing matches
        if (not overwrite_existing) and (kdata.matches is not None) and ((image_path1, image_path2) in kdata.matches):
            new_matches.add(image_path1, image_path2)
            skip_count += 1
            continue

        if image_path1 not in kdata.descriptors or image_path2 not in kdata.descriptors:
            logger.warning('unable to find descriptors for image pair : '
                           '\n\t{} \n\t{}'.format(image_path1, image_path2))
            continue

        descriptor1 = load_descriptors(input_path, image_path1, kdata.descriptors.dtype, kdata.descriptors.dsize)
        descriptor2 = load_descriptors(input_path, image_path2, kdata.descriptors.dtype, kdata.descriptors.dsize)
        matches = matcher.match_descriptors(descriptor1, descriptor2)
        matches_path = get_matches_fullpath((image_path1, image_path2), input_path)
        image_matches_to_file(matches_path, matches)
        new_matches.add(image_path1, image_path2)

    if not overwrite_existing:
        logger.debug(f'{skip_count} pairs were skipped because the match file already existed')
    if not matches_check_dir(new_matches, input_path):
        logger.critical('matching ended successfully but not all files were saved')
    logger.info('all done')
Code example #5
def sub_kapture_from_img_list(kdata, kdata_path, img_list, pairs):
    trajectories = kapture.Trajectories()
    sensors = kapture.Sensors()
    records = kapture.RecordsCamera()
    keypoints = kapture.Keypoints(kdata.keypoints._tname,
                                  kdata.keypoints._dtype,
                                  kdata.keypoints._dsize)
    if kdata.descriptors is not None:
        descriptors = kapture.Descriptors(kdata.descriptors._tname,
                                          kdata.descriptors._dtype,
                                          kdata.descriptors._dsize)
    else:
        descriptors = None
    matches = kapture.Matches()

    timestamp_sensor_id_from_image_name = {
        img_name: (timestamp, sensor_id)
        for timestamp, sensor_id, img_name in kapture.flatten(
            kdata.records_camera)
    }
    for img in img_list:
        timestamp, sensor_id = timestamp_sensor_id_from_image_name[img]
        pose = kdata.trajectories[timestamp][sensor_id]
        sensors[sensor_id] = kdata.sensors[sensor_id]
        records[timestamp, sensor_id] = img
        trajectories[timestamp, sensor_id] = pose
        keypoints.add(img)
        if kdata.descriptors is not None:
            descriptors.add(img)

    for i in pairs:
        image_matches_filepath = get_matches_fullpath((i[0], i[1]), kdata_path)
        if os.path.exists(image_matches_filepath):
            matches.add(i[0], i[1])
    matches.normalize()

    return kapture.Kapture(sensors=sensors,
                           trajectories=trajectories,
                           records_camera=records,
                           descriptors=descriptors,
                           keypoints=keypoints,
                           matches=matches)
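A hypothetical driver for sub_kapture_from_img_list, reusing the kapture_from_dir loader shown in code example #3; the path and image names are placeholders, and it assumes every image in img_list has a pose in kdata.trajectories (the function looks one up for each image):

# keep only two images, their poses, and the match file between them (if it exists on disk)
kdata_path = '/path/to/kapture'                      # placeholder
kdata = kapture_from_dir(kdata_path)
img_list = ['images/0001.jpg', 'images/0002.jpg']    # placeholder image names
pairs = [(img_list[0], img_list[1])]
sub_kdata = sub_kapture_from_img_list(kdata, kdata_path, img_list, pairs)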
Code example #6
def get_correspondences(kapture_data: kapture.Kapture, keypoints_type: str,
                        kapture_path: str, tar_handlers: TarCollection,
                        img_query: str, pairs: List[str],
                        point_id_from_obs: Dict[Tuple[str, int], int],
                        kpts_query: Optional[np.ndarray],
                        kpts_query_undistorted: Optional[np.ndarray],
                        duplicate_strategy: DuplicateCorrespondencesStrategy,
                        rerank_strategy: RerankCorrespondencesStrategy):
    """
    get 2D-3D correspondences for a given query image, a list of paired map images, and a kapture map
    """
    # first list all correspondences
    correspondences = {}
    for img_map in pairs:
        # get matches
        if img_query < img_map:
            if (img_query,
                    img_map) not in kapture_data.matches[keypoints_type]:
                getLogger().warning(
                    f'pair {img_query}, {img_map} does not have a match file, skipped'
                )
                continue
            matches_path = get_matches_fullpath((img_query, img_map),
                                                keypoints_type, kapture_path,
                                                tar_handlers)
        else:
            if (img_map,
                    img_query) not in kapture_data.matches[keypoints_type]:
                getLogger().warning(
                    f'pair {img_query}, {img_map} does not have a match file, skipped'
                )
                continue
            matches_path = get_matches_fullpath((img_map, img_query),
                                                keypoints_type, kapture_path,
                                                tar_handlers)
        matches = image_matches_from_file(matches_path)

        num_matches = matches.shape[0]
        corrs = []
        for m in matches:
            if img_query < img_map:
                kpid_query = m[0]
                kpid_map = m[1]
            else:
                kpid_query = m[1]
                kpid_map = m[0]
            # match_score = m[2]

            if (img_map, kpid_map) not in point_id_from_obs:
                continue
            # get 3D point
            p3did = point_id_from_obs[(img_map, kpid_map)]
            corrs.append((kpid_query, p3did))
        correspondences[img_map] = (num_matches, corrs)

    if rerank_strategy == RerankCorrespondencesStrategy.none:
        reranked_pairs = pairs
    elif rerank_strategy == RerankCorrespondencesStrategy.matches_count:
        reranked_pairs = [
            img_map for img_map, _ in sorted(
                correspondences.items(), key=lambda x: x[1][0], reverse=True)
        ]
    elif rerank_strategy == RerankCorrespondencesStrategy.correspondences_count:
        reranked_pairs = [
            img_map for img_map, _ in sorted(correspondences.items(),
                                             key=lambda x: len(x[1][1]),
                                             reverse=True)
        ]
    else:
        raise NotImplementedError(f'{rerank_strategy} not implemented')

    # N number of correspondences
    # points2D - Nx2 array with pixel coordinates
    # points3D - Nx3 array with world coordinates
    points2D = []
    points2D_undistorted = []
    points3D = []

    assigned_keypoints_ids = {}
    assigned_3d_points_ids = {}
    true_duplicates_count = 0
    same_2d_multiple_3d_count = 0
    same_2d_multiple_3d_max = 0
    same_3d_multiple_2d_count = 0
    same_3d_multiple_2d_max = 0
    rejected_correspondences = 0
    for img_map in reranked_pairs:
        if img_map not in correspondences:
            continue
        for kpid_query, p3did in correspondences[img_map][1]:
            if kpid_query in assigned_keypoints_ids and p3did in assigned_keypoints_ids[
                    kpid_query]:
                true_duplicates_count += 1
                if duplicate_strategy == DuplicateCorrespondencesStrategy.ignore or \
                        duplicate_strategy == DuplicateCorrespondencesStrategy.ignore_strict or \
                        duplicate_strategy == DuplicateCorrespondencesStrategy.ignore_same_kpid or \
                        duplicate_strategy == DuplicateCorrespondencesStrategy.ignore_same_p3did:
                    rejected_correspondences += 1
                    continue
            elif duplicate_strategy == DuplicateCorrespondencesStrategy.ignore and \
                    (kpid_query in assigned_keypoints_ids or p3did in assigned_3d_points_ids):
                rejected_correspondences += 1
                continue
            else:
                if duplicate_strategy == DuplicateCorrespondencesStrategy.ignore_same_kpid and \
                        kpid_query in assigned_keypoints_ids:
                    rejected_correspondences += 1
                    continue
                elif kpid_query not in assigned_keypoints_ids:
                    assigned_keypoints_ids[kpid_query] = {p3did}
                else:
                    # p3did not in assigned_keypoints_ids[kpid_query]
                    same_2d_multiple_3d_count += 1
                    assigned_keypoints_ids[kpid_query].add(p3did)
                    same_2d_multiple_3d_max = max(
                        same_2d_multiple_3d_max,
                        len(assigned_keypoints_ids[kpid_query]))

                if duplicate_strategy == DuplicateCorrespondencesStrategy.ignore_same_p3did and \
                        p3did in assigned_3d_points_ids:
                    rejected_correspondences += 1
                    continue
                elif p3did not in assigned_3d_points_ids:
                    assigned_3d_points_ids[p3did] = {kpid_query}
                else:
                    # kpid_query not in assigned_3d_points_ids[p3did]
                    same_3d_multiple_2d_count += 1
                    assigned_3d_points_ids[p3did].add(kpid_query)
                    same_3d_multiple_2d_max = max(
                        same_3d_multiple_2d_max,
                        len(assigned_3d_points_ids[p3did]))

            if kpts_query is not None:
                kp_query = kpts_query[int(kpid_query)]
                points2D.append(kp_query[0:2])
            if kpts_query_undistorted is not None:
                kp_query_undistorted = kpts_query_undistorted[int(kpid_query)]
                points2D_undistorted.append(kp_query_undistorted[0:2])
            p3d_map = kapture_data.points3d[p3did]
            points3D.append(p3d_map[0:3])

    stats = {
        "true_duplicates_count": true_duplicates_count,
        "same_2d_multiple_3d_count": same_2d_multiple_3d_count,
        "same_2d_multiple_3d_max": same_2d_multiple_3d_max,
        "same_3d_multiple_2d_count": same_3d_multiple_2d_count,
        "same_3d_multiple_2d_max": same_3d_multiple_2d_max,
        "rejected_correspondences": rejected_correspondences
    }
    return points2D, points2D_undistorted, points3D, stats
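The points2D / points3D lists returned above are the usual input of a PnP solver. A minimal sketch of feeding them to OpenCV's solvePnPRansac, wrapped in a helper; the intrinsic matrix K and the zero distortion are assumptions, not something computed by the code above:

import cv2
import numpy as np

def estimate_pose(points2D, points3D, K, dist_coeffs=None):
    # points2D: Nx2 pixel coordinates, points3D: Nx3 world coordinates,
    # e.g. the first and third values returned by get_correspondences()
    pts2d = np.asarray(points2D, dtype=np.float64).reshape(-1, 1, 2)
    pts3d = np.asarray(points3D, dtype=np.float64).reshape(-1, 1, 3)
    if dist_coeffs is None:
        dist_coeffs = np.zeros(4)  # assume an undistorted (or pre-undistorted) camera
    success, rvec, tvec, inliers = cv2.solvePnPRansac(pts3d, pts2d, K, dist_coeffs)
    return success, rvec, tvec, inliers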
Code example #7
def export_opensfm(
        kapture_rootdir: str,
        opensfm_rootdir: str,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.copy) -> None:
    """

    :param kapture_rootdir:
    :param opensfm_rootdir:
    :param force_overwrite_existing:
    :param images_import_method:
    :return:
    """

    # don't display tqdm progress bars for non-verbose levels
    disable_tqdm = logger.getEffectiveLevel() > logging.INFO
    # load reconstruction
    kapture_data = kapture.io.csv.kapture_from_dir(
        kapture_dirpath=kapture_rootdir)

    # export cameras
    opensfm_cameras = {}
    kapture_cameras = {
        cam_id: cam
        for cam_id, cam in kapture_data.sensors.items()
        if cam.sensor_type == 'camera'
    }
    for cam_id, kapture_camera in kapture_cameras.items():
        opensfm_cameras[cam_id] = export_opensfm_camera(kapture_camera)

    # export shots
    opensfm_shots = {}
    for timestamp, camera_id, image_filename in tqdm(kapture.flatten(
            kapture_data.records_camera),
                                                     disable=disable_tqdm):
        # retrieve pose (if there is one).
        # opensfm_shots = {image_filename: shot}
        # shot = {camera , rotation, translation, capture_time, gps_position, ...}
        opensfm_shot = {
            'capture_time': 0,  # in ms != timestamp
            'camera': camera_id,
        }
        if (timestamp, camera_id) in kapture_data.trajectories:
            pose = kapture_data.trajectories[timestamp, camera_id]
            rotation_vector = quaternion.as_rotation_vector(pose.r)
            translation_vector = pose.t.flatten()
            opensfm_shot.update({
                'rotation': rotation_vector.tolist(),
                'translation': translation_vector.tolist()
            })
        opensfm_shots[image_filename] = opensfm_shot

    # pack it
    opensfm_reconstruction = {
        'cameras': opensfm_cameras,
        'shots': opensfm_shots,
    }

    # images
    logger.info(
        f'writing image files "{path.join(opensfm_rootdir, "images")}".')
    image_filenames = [
        f for _, _, f in kapture.flatten(kapture_data.records_camera)
    ]
    kapture_image_filepaths = [
        get_record_fullpath(kapture_rootdir, image_filename)
        for image_filename in image_filenames
    ]
    opensfm_image_filepaths = [
        path.join(opensfm_rootdir, 'images', image_filename)
        for image_filename in image_filenames
    ]
    transfer_files_from_dir(
        source_filepath_list=kapture_image_filepaths,
        destination_filepath_list=opensfm_image_filepaths,
        force_overwrite=force_overwrite_existing,
        copy_strategy=images_import_method,
    )

    # export features files (keypoints + descriptors)
    opensfm_features_suffix = '.features.npz'
    opensfm_features_dirpath = path.join(opensfm_rootdir, 'features')
    logger.info(
        f'exporting keypoint and descriptors to {opensfm_features_dirpath}')
    os.makedirs(opensfm_features_dirpath, exist_ok=True)
    for image_filename in tqdm(image_filenames, disable=disable_tqdm):
        opensfm_features = {}
        # look and load for keypoints in kapture
        if kapture_data.keypoints is not None and image_filename in kapture_data.keypoints:
            kapture_keypoints_filepath = get_keypoints_fullpath(
                kapture_dirpath=kapture_rootdir, image_filename=image_filename)
            logger.debug(f'loading {kapture_keypoints_filepath}')
            kapture_keypoint = image_keypoints_from_file(
                kapture_keypoints_filepath,
                dtype=kapture_data.keypoints.dtype,
                dsize=kapture_data.keypoints.dsize)
            opensfm_features['points'] = kapture_keypoint

        # look and load for descriptors in kapture
        if kapture_data.descriptors is not None and image_filename in kapture_data.descriptors:
            kapture_descriptor_filepath = get_descriptors_fullpath(
                kapture_dirpath=kapture_rootdir, image_filename=image_filename)
            logger.debug(f'loading {kapture_descriptor_filepath}')
            kapture_descriptor = image_descriptors_from_file(
                kapture_descriptor_filepath,
                dtype=kapture_data.descriptors.dtype,
                dsize=kapture_data.descriptors.dsize)
            opensfm_features['descriptors'] = kapture_descriptor

        # writing opensfm feature file
        if len(opensfm_features) > 0:
            opensfm_features_filepath = path.join(
                opensfm_features_dirpath,
                image_filename + opensfm_features_suffix)
            logger.debug(f'writing {opensfm_features_filepath}')
            os.makedirs(path.dirname(opensfm_features_filepath), exist_ok=True)
            np.save(opensfm_features_filepath, opensfm_features)

    # export matches files
    if kapture_data.matches is not None:
        opensfm_matches_suffix = '_matches.pkl.gz'
        opensfm_matches_dirpath = path.join(opensfm_rootdir, 'matches')
        os.makedirs(opensfm_matches_dirpath, exist_ok=True)
        logger.info(f'exporting matches to {opensfm_matches_dirpath}')
        opensfm_pairs = {}
        for image_filename1, image_filename2 in kapture_data.matches:
            opensfm_pairs.setdefault(image_filename1,
                                     []).append(image_filename2)

        for image_filename1 in tqdm(image_filenames, disable=disable_tqdm):
            opensfm_matches = {}
            opensfm_matches_filepath = path.join(
                opensfm_matches_dirpath,
                image_filename1 + opensfm_matches_suffix)
            logger.debug(f'loading matches for {image_filename1}')
            for image_filename2 in opensfm_pairs.get(image_filename1, []):
                # print(image_filename1, image_filename2)
                kapture_matches_filepath = get_matches_fullpath(
                    (image_filename1, image_filename2),
                    kapture_dirpath=kapture_rootdir)
                kapture_matches = image_matches_from_file(
                    kapture_matches_filepath)
                # np.int was removed in NumPy 1.24; use the builtin int instead
                opensfm_matches[image_filename2] = \
                    kapture_matches[:, 0:2].astype(int)

            os.makedirs(path.dirname(opensfm_matches_filepath), exist_ok=True)
            with gzip.open(opensfm_matches_filepath, 'wb') as f:
                pickle.dump(opensfm_matches, f)

    # export 3D-points files
    if kapture_data.points3d is not None:
        logger.info('exporting 3-D points')
        opensfm_reconstruction['points'] = {}
        for i, (x, y, z, r, g, b) in tqdm(enumerate(kapture_data.points3d),
                                          disable=disable_tqdm):
            opensfm_reconstruction['points'][i] = {
                'coordinates': [x, y, z],
                'color': [r, g, b]
            }

    # write json files #################################################################################################
    os.makedirs(opensfm_rootdir, exist_ok=True)
    # write reconstruction.json
    opensfm_reconstruction_filepath = path.join(opensfm_rootdir,
                                                'reconstruction.json')
    logger.info(
        f'writing reconstruction file "{opensfm_reconstruction_filepath}".')
    with open(opensfm_reconstruction_filepath, 'wt') as f:
        json.dump([opensfm_reconstruction], f, indent=4)

    # write camera_models.json
    opensfm_cameras_filepath = path.join(opensfm_rootdir, 'camera_models.json')
    logger.info(f'writing camera models file "{opensfm_cameras_filepath}".')
    with open(opensfm_cameras_filepath, 'wt') as f:
        json.dump(opensfm_cameras, f, indent=4)
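A small sanity check (the path is a placeholder) that reads back the two JSON files written at the end of export_opensfm; reconstruction.json is dumped as a one-element list, so the reconstruction dict is json.load(...)[0]:

import json
from os import path

opensfm_rootdir = '/path/to/opensfm'   # placeholder
with open(path.join(opensfm_rootdir, 'reconstruction.json')) as f:
    reconstruction = json.load(f)[0]
with open(path.join(opensfm_rootdir, 'camera_models.json')) as f:
    camera_models = json.load(f)
print(f"{len(camera_models)} cameras, {len(reconstruction['shots'])} shots, "
      f"{len(reconstruction.get('points', {}))} 3-D points")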
Code example #8
def _export_opensfm_features_and_matches(image_filenames, kapture_data,
                                         kapture_root_dir, opensfm_root_dir,
                                         disable_tqdm):
    """
    export features files (keypoints + descriptors) and matches
    """
    opensfm_features_suffix = '.features.npz'
    opensfm_features_dir_path = path.join(opensfm_root_dir, 'features')
    logger.info(
        f'exporting keypoint and descriptors to {opensfm_features_dir_path}')
    os.makedirs(opensfm_features_dir_path, exist_ok=True)
    for image_filename in tqdm(image_filenames, disable=disable_tqdm):
        opensfm_features = {}
        # look and load for keypoints in kapture
        if kapture_data.keypoints is not None and image_filename in kapture_data.keypoints:
            kapture_keypoints_filepath = get_keypoints_fullpath(
                kapture_dirpath=kapture_root_dir,
                image_filename=image_filename)
            logger.debug(f'loading {kapture_keypoints_filepath}')
            kapture_keypoint = image_keypoints_from_file(
                kapture_keypoints_filepath,
                dtype=kapture_data.keypoints.dtype,
                dsize=kapture_data.keypoints.dsize)
            opensfm_features['points'] = kapture_keypoint

        # look and load for descriptors in kapture
        if kapture_data.descriptors is not None and image_filename in kapture_data.descriptors:
            kapture_descriptor_filepath = get_descriptors_fullpath(
                kapture_dirpath=kapture_root_dir,
                image_filename=image_filename)
            logger.debug(f'loading {kapture_descriptor_filepath}')
            kapture_descriptor = image_descriptors_from_file(
                kapture_descriptor_filepath,
                dtype=kapture_data.descriptors.dtype,
                dsize=kapture_data.descriptors.dsize)
            opensfm_features['descriptors'] = kapture_descriptor

        # writing opensfm feature file
        if len(opensfm_features) > 0:
            opensfm_features_filepath = path.join(
                opensfm_features_dir_path,
                image_filename + opensfm_features_suffix)
            logger.debug(f'writing {opensfm_features_filepath}')
            os.makedirs(path.dirname(opensfm_features_filepath), exist_ok=True)
            np.save(opensfm_features_filepath, opensfm_features)

    # export matches files
    if kapture_data.matches is not None:
        opensfm_matches_suffix = '_matches.pkl.gz'
        opensfm_matches_dir_path = path.join(opensfm_root_dir, 'matches')
        os.makedirs(opensfm_matches_dir_path, exist_ok=True)
        logger.info(f'exporting matches to {opensfm_matches_dir_path}')
        opensfm_pairs = {}
        for image_filename1, image_filename2 in kapture_data.matches:
            opensfm_pairs.setdefault(image_filename1,
                                     []).append(image_filename2)

        for image_filename1 in tqdm(image_filenames, disable=disable_tqdm):
            opensfm_matches = {}
            opensfm_matches_filepath = path.join(
                opensfm_matches_dir_path,
                image_filename1 + opensfm_matches_suffix)
            logger.debug(f'loading matches for {image_filename1}')
            for image_filename2 in opensfm_pairs.get(image_filename1, []):
                # print(image_filename1, image_filename2)
                kapture_matches_filepath = get_matches_fullpath(
                    (image_filename1, image_filename2),
                    kapture_dirpath=kapture_root_dir)
                kapture_matches = image_matches_from_file(
                    kapture_matches_filepath)
                # np.int was removed in NumPy 1.24; use the builtin int instead
                opensfm_matches[image_filename2] = \
                    kapture_matches[:, 0:2].astype(int)

            os.makedirs(path.dirname(opensfm_matches_filepath), exist_ok=True)
            with gzip.open(opensfm_matches_filepath, 'wb') as f:
                pickle.dump(opensfm_matches, f)
Code example #9
def compute_matches_from_loaded_data(input_path: str,
                                     tar_handlers: Optional[TarCollection],
                                     kdata: kapture.Kapture,
                                     descriptors_type: Optional[str],
                                     image_pairs: list,
                                     overwrite_existing: bool = False):
    assert kdata.sensors is not None
    assert kdata.records_camera is not None
    assert kdata.descriptors is not None
    os.umask(0o002)

    if descriptors_type is None:
        descriptors_type = try_get_only_key_from_collection(kdata.descriptors)
    assert descriptors_type is not None
    assert descriptors_type in kdata.descriptors
    keypoints_type = kdata.descriptors[descriptors_type].keypoints_type
    # assert kdata.descriptors[descriptors_type].metric_type == "L2"

    matcher = MatchPairNnTorch(use_cuda=torch.cuda.is_available())
    new_matches = kapture.Matches()

    logger.info('compute_matches. entering main loop...')
    hide_progress_bar = logger.getEffectiveLevel() > logging.INFO
    skip_count = 0
    for image_path1, image_path2 in tqdm(image_pairs,
                                         disable=hide_progress_bar):
        if image_path1 == image_path2:
            continue
        if image_path1 > image_path2:
            image_path1, image_path2 = image_path2, image_path1

        # skip existing matches
        if (not overwrite_existing) \
                and (kdata.matches is not None) \
                and keypoints_type in kdata.matches \
                and ((image_path1, image_path2) in kdata.matches[keypoints_type]):
            new_matches.add(image_path1, image_path2)
            skip_count += 1
            continue

        if image_path1 not in kdata.descriptors[descriptors_type] \
                or image_path2 not in kdata.descriptors[descriptors_type]:
            logger.warning('unable to find descriptors for image pair : '
                           '\n\t{} \n\t{}'.format(image_path1, image_path2))
            continue

        descriptor1 = load_descriptors(
            descriptors_type, input_path, tar_handlers, image_path1,
            kdata.descriptors[descriptors_type].dtype,
            kdata.descriptors[descriptors_type].dsize)
        descriptor2 = load_descriptors(
            descriptors_type, input_path, tar_handlers, image_path2,
            kdata.descriptors[descriptors_type].dtype,
            kdata.descriptors[descriptors_type].dsize)
        matches = matcher.match_descriptors(descriptor1, descriptor2)
        matches_path = get_matches_fullpath((image_path1, image_path2),
                                            keypoints_type, input_path,
                                            tar_handlers)
        image_matches_to_file(matches_path, matches)
        new_matches.add(image_path1, image_path2)

    if not overwrite_existing:
        logger.debug(
            f'{skip_count} pairs were skipped because the match file already existed'
        )
    if not matches_check_dir(new_matches, keypoints_type, input_path,
                             tar_handlers):
        logger.critical(
            'matching ended successfully but not all files were saved')

    # update kapture matches
    if kdata.matches is None:
        kdata.matches = {}
    if keypoints_type not in kdata.matches:
        kdata.matches[keypoints_type] = kapture.Matches()
    kdata.matches[keypoints_type].update(new_matches)

    logger.info('all done')