def get_descriptors_from_database(
        database: COLMAPDatabase,
        images: kapture.RecordsCamera,
        kapture_dirpath: str,
        descriptor_name: str = 'SIFT') -> Optional[kapture.Descriptors]:
    """
    Writes descriptor files and returns the descriptors in kapture format from the colmap database.

    :param database: colmap database.
    :param images: list of images (as RecordsCamera).
    :param kapture_dirpath: input root path to kapture.
    :param descriptor_name: name of the keypoint descriptor (by default in colmap it's SIFT, but other descriptors can be imported)
    :return: kapture descriptors
    """
    image_filenames = set()
    dtype = np.uint8  # values in the range 0…255
    # see https://colmap.github.io/tutorial.html#feature-detection-and-extraction
    dsize = None  # descriptor size (usually 128 for SIFT); retrieved from the first descriptor in the DB
    colmap_descriptors = (
        (image_id,
         blob_to_array(data, dtype, (rows, cols)) if (rows > 0 and cols > 0)
         else np.zeros((0, dsize)))
        for image_id, rows, cols, data in database.execute(
            "SELECT image_id, rows, cols, data FROM descriptors"))
    hide_progressbar = logger.getEffectiveLevel() > logging.INFO
    for image_id, image_descriptors in tqdm(colmap_descriptors,
                                            disable=hide_progressbar):
        # check descriptor size/type consistency (dsize is set from the first descriptor)
        if dsize is None:
            dsize = int(image_descriptors.shape[1])
        elif dsize != image_descriptors.shape[1] or dtype != image_descriptors.dtype:
            raise ValueError('inconsistent descriptors size or type.')

        # retrieve the image path from image_id (which is used as the kapture timestamp)
        image_filename = next((v for v in images[image_id].values()), None)
        assert image_filename
        descriptors_filepath = kapture.io.features.get_descriptors_fullpath(
            kapture_dirpath, image_filename)
        if image_descriptors.shape[0] == 0:
            logger.warning(
                f'image={image_id}:{image_filename} has 0 descriptors.')
        # save the actual descriptors file
        kapture.io.features.image_descriptors_to_file(
            descriptors_filepath, image_descriptors)
        # register it into kapture
        image_filenames.add(image_filename)

    if image_filenames:
        return kapture.Descriptors(descriptor_name, dtype, dsize,
                                   image_filenames)
    else:
        return None
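
# A minimal, hedged usage sketch of get_descriptors_from_database above.
# The module paths for COLMAPDatabase and kapture_from_dir, as well as the
# file paths, are assumptions; adapt them to your checkout and dataset.
import kapture.io.csv
from kapture.converter.colmap.database import COLMAPDatabase

kapture_dirpath = '/path/to/kapture'            # placeholder
colmap_db_path = '/path/to/colmap/database.db'  # placeholder
kapture_data = kapture.io.csv.kapture_from_dir(kapture_dirpath)
database = COLMAPDatabase.connect(colmap_db_path)
try:
    descriptors = get_descriptors_from_database(
        database, kapture_data.records_camera, kapture_dirpath)
    if descriptors is not None:
        print(f'imported descriptors for {len(descriptors)} images')
finally:
    database.close()
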
def sub_kapture_from_img_list(kdata, kdata_path, img_list, pairs):
    trajectories = kapture.Trajectories()
    sensors = kapture.Sensors()
    records = kapture.RecordsCamera()
    keypoints = kapture.Keypoints(kdata.keypoints._tname,
                                  kdata.keypoints._dtype,
                                  kdata.keypoints._dsize)
    if kdata.descriptors is not None:
        descriptors = kapture.Descriptors(kdata.descriptors._tname,
                                          kdata.descriptors._dtype,
                                          kdata.descriptors._dsize)
    else:
        descriptors = None
    matches = kapture.Matches()

    timestamp_sensor_id_from_image_name = {
        img_name: (timestamp, sensor_id)
        for timestamp, sensor_id, img_name in kapture.flatten(
            kdata.records_camera)
    }
    for img in img_list:
        timestamp, sensor_id = timestamp_sensor_id_from_image_name[img]
        pose = kdata.trajectories[timestamp][sensor_id]
        sensors[sensor_id] = kdata.sensors[sensor_id]
        records[timestamp, sensor_id] = img
        trajectories[timestamp, sensor_id] = pose
        keypoints.add(img)
        if kdata.descriptors is not None:
            descriptors.add(img)

    for i in pairs:
        image_matches_filepath = get_matches_fullpath((i[0], i[1]), kdata_path)
        if os.path.exists(image_matches_filepath):
            matches.add(i[0], i[1])
    matches.normalize()

    return kapture.Kapture(sensors=sensors,
                           trajectories=trajectories,
                           records_camera=records,
                           descriptors=descriptors,
                           keypoints=keypoints,
                           matches=matches)
def sub_kapture_from_img_list(kdata, img_list, pairs, keypoints_type, descriptors_type):
    trajectories = kapture.Trajectories()
    sensors = kapture.Sensors()
    records = kapture.RecordsCamera()
    keypoints = kapture.Keypoints(kdata.keypoints[keypoints_type].type_name,
                                  kdata.keypoints[keypoints_type].dtype,
                                  kdata.keypoints[keypoints_type].dsize)
    if kdata.descriptors is not None and descriptors_type in kdata.descriptors:
        descriptors = kapture.Descriptors(kdata.descriptors[descriptors_type].type_name,
                                          kdata.descriptors[descriptors_type].dtype,
                                          kdata.descriptors[descriptors_type].dsize,
                                          kdata.descriptors[descriptors_type].keypoints_type,
                                          kdata.descriptors[descriptors_type].metric_type)
    else:
        descriptors = None
    matches = kapture.Matches()

    timestamp_sensor_id_from_image_name = {img_name: (timestamp, sensor_id) for timestamp, sensor_id, img_name in
                                           kapture.flatten(kdata.records_camera)}
    for img in img_list:
        timestamp, sensor_id = timestamp_sensor_id_from_image_name[img]
        sensors[sensor_id] = kdata.sensors[sensor_id]
        records[timestamp, sensor_id] = img
        if (timestamp, sensor_id) in kdata.trajectories:
            pose = kdata.trajectories[timestamp][sensor_id]
            trajectories[timestamp, sensor_id] = pose
        keypoints.add(img)
        if descriptors is not None:
            descriptors.add(img)

    for i in pairs:
        if i in kdata.matches[keypoints_type]:
            matches.add(i[0], i[1])
    matches.normalize()

    return kapture.Kapture(sensors=sensors, trajectories=trajectories, records_camera=records,
                           descriptors={descriptors_type: descriptors} if descriptors is not None else None,
                           keypoints={keypoints_type: keypoints},
                           matches={keypoints_type: matches})
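
# Hedged call sketch for the typed variant above. Paths, image names and the
# 'r2d2' feature type names are placeholders; it assumes the loaded kapture
# carries keypoints, descriptors and matches for those feature types.
import kapture.io.csv

kdata = kapture.io.csv.kapture_from_dir('/path/to/kapture')   # placeholder path
img_list = ['query/0001.jpg', 'mapping/0042.jpg']             # placeholder image names
pairs = [('mapping/0042.jpg', 'query/0001.jpg')]              # placeholder image pairs
sub_kdata = sub_kapture_from_img_list(kdata, img_list, pairs,
                                      keypoints_type='r2d2',
                                      descriptors_type='r2d2')
print(f'kept {len(list(kapture.flatten(sub_kdata.records_camera)))} images')
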
Example 4
def import_openmvg_regions(openmvg_regions_directory_path, kapture_data,
                           kapture_path):
    # look for the "image_describer.json"
    image_describer_path = path.join(openmvg_regions_directory_path,
                                     'image_describer.json')
    if not path.isfile(image_describer_path):
        logger.debug(f'file not found: {image_describer_path}')
        return

    with open(image_describer_path) as f:
        image_describer = json.load(f)

    # retrieve what type of keypoints it is.
    keypoints_type = image_describer.get('regions_type',
                                         {}).get('polymorphic_name',
                                                 'UNDEFINED')
    keypoints_name = {
        'SIFT_Regions': 'SIFT',
        'AKAZE_Float_Regions': 'AKAZE'
    }.get(keypoints_type, keypoints_type)
    kapture_keypoints = kapture.Keypoints(type_name=keypoints_name,
                                          dtype=float,
                                          dsize=4)

    # retrieve what type of descriptors it is.
    descriptors_type = image_describer.get('image_describer',
                                           {}).get('polymorphic_name',
                                                   'UNDEFINED')
    descriptors_props = {
        'SIFT_Image_describer':
        dict(type_name='SIFT', dtype=np.int32, dsize=128),
        'AKAZE_Image_describer_SURF':
        dict(type_name='AKAZE', dtype=np.int32, dsize=128),
    }.get(descriptors_type)
    if not descriptors_props:
        raise ValueError(
            f'conversion of {descriptors_type} descriptors not implemented.')
    kapture_descriptors = kapture.Descriptors(**descriptors_props)

    # populate regions files in openMVG directory
    # https://github.com/openMVG/openMVG/blob/master/src/openMVG/features/scalar_regions.hpp#L23
    for _, _, image_name in kapture.flatten(kapture_data.records_camera):
        openmvg_image_name = path.splitext(path.basename(image_name))[0]
        # keypoints
        openmvg_keypoints_filepath = path.join(openmvg_regions_directory_path,
                                               openmvg_image_name + '.feat')
        if path.isfile(openmvg_keypoints_filepath):
            # there is a keypoints file in openMVG, let's add it to kapture
            keypoints_data = np.loadtxt(openmvg_keypoints_filepath)
            assert keypoints_data.shape[1] == 4
            kapture_keypoints.add(image_name)
            # and convert file
            kapture_keypoints_filepath = kapture.io.features.get_keypoints_fullpath(
                kapture_path, image_name)
            array_to_file(kapture_keypoints_filepath, keypoints_data)

        # descriptors
        openmvg_descriptors_filepath = path.join(
            openmvg_regions_directory_path, openmvg_image_name + '.desc')
        if path.isfile(openmvg_descriptors_filepath):
            assert path.isfile(openmvg_keypoints_filepath)
            # there is a descriptors file in openMVG, let's add it to kapture;
            # the descriptor count is inferred from keypoints_data's shape
            descriptors_data_bytes = np.fromfile(openmvg_descriptors_filepath,
                                                 dtype=np.uint8)
            nb_features = keypoints_data.shape[0]
            descriptors_shape = descriptors_data_bytes[0:8].view(
                descriptors_props['dtype'])
            assert descriptors_shape[0] == nb_features
            descriptors_data = descriptors_data_bytes[8:].view(
                np.uint8).reshape((nb_features, 128))
            # descriptors_data.reshape((keypoints_data.shape[0], -1))
            kapture_descriptors.add(image_name)
            # and convert file
            kapture_descriptors_filepath = kapture.io.features.get_descriptors_fullpath(
                kapture_path, image_name)
            array_to_file(kapture_descriptors_filepath, descriptors_data)

    kapture_data.keypoints = kapture_keypoints
    kapture_data.descriptors = kapture_descriptors
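
# The header handling above assumes the openMVG regions layout: an 8-byte
# element count followed by nb_features * dsize raw uint8 values. A standalone,
# hedged sketch of that decoding (the function name and the little-endian
# uint64 count interpretation are assumptions drawn from the code above):
import numpy as np

def read_openmvg_desc(filepath: str, dsize: int = 128) -> np.ndarray:
    raw = np.fromfile(filepath, dtype=np.uint8)
    nb_features = int(raw[0:8].view(np.uint64)[0])  # 8-byte count header
    return raw[8:].reshape((nb_features, dsize))    # one uint8 row per keypoint
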
Example 5
    def test_init_descriptors_unknown(self):
        descriptors = kapture.Descriptors(
            'R2D2', float, 64, ['a/a.jpg', 'b/b.jpg', 'c/c.jpg', 'c/c.jpg'])
        self.assertEqual('R2D2', descriptors.type_name)
        self.assertEqual(3, len(descriptors))
        self.assertIn('a/a.jpg', descriptors)
Example 6
def extract_kapture_keypoints(kapture_root,
                              config,
                              output_dir='',
                              overwrite=False):
    """
    Extract r2d2 keypoints and descritors to the kapture format directly
    """
    print('extract_kapture_keypoints...')
    kdata = kapture_from_dir(kapture_root, matches_pairsfile_path=None,
                             skip_list=[kapture.GlobalFeatures,
                                        kapture.Matches,
                                        kapture.Points3d,
                                        kapture.Observations])
    export_dir = output_dir if output_dir else kapture_root  # root of output directory for features
    os.makedirs(export_dir, exist_ok=True)

    assert kdata.records_camera is not None
    image_list = [filename for _, _, filename in kapture.flatten(kdata.records_camera)]
    # resume extraction if some features exist
    try:
        # load existing features, if any
        kdata.keypoints = keypoints_from_dir(export_dir, None)
        kdata.descriptors = descriptors_from_dir(export_dir, None)
        if kdata.keypoints is not None and kdata.descriptors is not None and not overwrite:
            image_list = [name for name in image_list if name not in kdata.keypoints or name not in kdata.descriptors]
    except FileNotFoundError:
        pass
    except Exception:
        logging.exception("Error with importing existing local features.")

    # clear features first if overwriting
    if overwrite: delete_existing_kapture_files(export_dir, True, only=[kapture.Descriptors, kapture.Keypoints])

    if len(image_list) == 0:
        print('All features were already extracted')
        return
    else:
        print(f'Extracting r2d2 features for {len(image_list)} images')

    iscuda = common.torch_set_gpu([torch.cuda.is_available()])

    # load the network...
    net = load_network(config['checkpoint'])
    if iscuda: net = net.cuda()

    # create the non-maxima detector
    detector = NonMaxSuppression(
        rel_thr = config['reliability_thr'],
        rep_thr = config['repeatability_thr'])

    keypoints_dtype = None if kdata.keypoints is None else kdata.keypoints.dtype
    descriptors_dtype = None if kdata.descriptors is None else kdata.descriptors.dtype

    keypoints_dsize = None if kdata.keypoints is None else kdata.keypoints.dsize
    descriptors_dsize = None if kdata.descriptors is None else kdata.descriptors.dsize

    for image_name in image_list:
        img_path = get_image_fullpath(kapture_root, image_name)

        if img_path.endswith('.txt'):
            # image lists (.txt) are not expected here: kapture records reference image files directly
            continue

        print(f"\nExtracting features for {img_path}")
        img = Image.open(img_path).convert('RGB')
        W, H = img.size
        img = norm_RGB(img)[None]
        if iscuda: img = img.cuda()

        # extract keypoints/descriptors for a single image
        xys, desc, scores = extract_multiscale(net, img, detector,
            scale_f   = config['scale_f'],
            min_scale = config['min_scale'],
            max_scale = config['max_scale'],
            min_size  = config['min_size'],
            max_size  = config['max_size'],
            verbose = True)

        xys = xys.cpu().numpy()
        desc = desc.cpu().numpy()
        scores = scores.cpu().numpy()
        idxs = scores.argsort()[-config['top_k'] or None:]

        xys = xys[idxs]
        desc = desc[idxs]
        if keypoints_dtype is None or descriptors_dtype is None:
            keypoints_dtype = xys.dtype
            descriptors_dtype = desc.dtype

            keypoints_dsize = xys.shape[1]
            descriptors_dsize = desc.shape[1]

            kdata.keypoints = kapture.Keypoints('r2d2', keypoints_dtype, keypoints_dsize)
            kdata.descriptors = kapture.Descriptors('r2d2', descriptors_dtype, descriptors_dsize)

            keypoints_config_absolute_path = get_csv_fullpath(kapture.Keypoints, export_dir)
            descriptors_config_absolute_path = get_csv_fullpath(kapture.Descriptors, export_dir)

            keypoints_to_file(keypoints_config_absolute_path, kdata.keypoints)
            descriptors_to_file(descriptors_config_absolute_path, kdata.descriptors)
        else:
            assert kdata.keypoints.type_name == 'r2d2'
            assert kdata.descriptors.type_name == 'r2d2'
            assert kdata.keypoints.dtype == xys.dtype
            assert kdata.descriptors.dtype == desc.dtype
            assert kdata.keypoints.dsize == xys.shape[1]
            assert kdata.descriptors.dsize == desc.shape[1]

        keypoints_fullpath = get_keypoints_fullpath(export_dir, image_name)
        print(f"Saving {xys.shape[0]} keypoints to {keypoints_fullpath}")
        image_keypoints_to_file(keypoints_fullpath, xys)
        kdata.keypoints.add(image_name)


        descriptors_fullpath = get_descriptors_fullpath(export_dir, image_name)
        print(f"Saving {desc.shape[0]} descriptors to {descriptors_fullpath}")
        image_descriptors_to_file(descriptors_fullpath, desc)
        kdata.descriptors.add(image_name)

    if not keypoints_check_dir(kdata.keypoints, export_dir) or \
            not descriptors_check_dir(kdata.descriptors, export_dir):
        print('local feature extraction ended successfully but not all files were saved')
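
# Hedged sketch: a config dict covering the keys read by extract_kapture_keypoints
# above. The checkpoint path is a placeholder and the numeric values only mirror
# common r2d2 defaults; adjust them to your setup.
r2d2_config = {
    'checkpoint': 'models/r2d2_WASF_N16.pt',  # placeholder checkpoint path
    'reliability_thr': 0.7,
    'repeatability_thr': 0.7,
    'scale_f': 2 ** 0.25,
    'min_scale': 0.0,
    'max_scale': 1.0,
    'min_size': 256,
    'max_size': 1024,
    'top_k': 5000,
}
extract_kapture_keypoints('/path/to/kapture', r2d2_config,
                          output_dir='/path/to/output', overwrite=False)
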
Example 7
    if args.max_keypoints != float("+inf"):
        # keep the last (the highest) indexes
        idx_keep = scores.argsort()[-min(len(keypoints), args.max_keypoints):]
        keypoints = keypoints[idx_keep]
        descriptors = descriptors[idx_keep]

    
    if keypoints_dtype is None or descriptors_dtype is None:
        keypoints_dtype = keypoints.dtype
        descriptors_dtype = descriptors.dtype

        keypoints_dsize = keypoints.shape[1]
        descriptors_dsize = descriptors.shape[1]

        kdata.keypoints = kapture.Keypoints('d2net', keypoints_dtype, keypoints_dsize)
        kdata.descriptors = kapture.Descriptors('d2net', descriptors_dtype, descriptors_dsize)

        keypoints_config_absolute_path = get_csv_fullpath(kapture.Keypoints, args.kapture_root)
        descriptors_config_absolute_path = get_csv_fullpath(kapture.Descriptors, args.kapture_root)

        keypoints_to_file(keypoints_config_absolute_path, kdata.keypoints)
        descriptors_to_file(descriptors_config_absolute_path, kdata.descriptors)
    else:
        assert kdata.keypoints.type_name == 'd2net'
        assert kdata.descriptors.type_name == 'd2net'
        assert kdata.keypoints.dtype == keypoints.dtype
        assert kdata.descriptors.dtype == descriptors.dtype
        assert kdata.keypoints.dsize == keypoints.shape[1]
        assert kdata.descriptors.dsize == descriptors.shape[1]

    keypoints_fullpath = get_keypoints_fullpath(args.kapture_root, image_name)
Example 8
def extract_kapture_keypoints(args):
    """
    Extract r2d2 keypoints and descritors to the kapture format directly 
    """
    print('extract_kapture_keypoints...')
    kdata = kapture_from_dir(args.kapture_root,
                             matches_pairsfile_path=None,
                             skip_list=[
                                 kapture.GlobalFeatures, kapture.Matches,
                                 kapture.Points3d, kapture.Observations
                             ])

    assert kdata.records_camera is not None
    image_list = [
        filename for _, _, filename in kapture.flatten(kdata.records_camera)
    ]
    if kdata.keypoints is not None and kdata.descriptors is not None:
        image_list = [
            name for name in image_list
            if name not in kdata.keypoints or name not in kdata.descriptors
        ]

    if len(image_list) == 0:
        print('All features were already extracted')
        return
    else:
        print(f'Extracting r2d2 features for {len(image_list)} images')

    iscuda = common.torch_set_gpu(args.gpu)

    # load the network...
    net = load_network(args.model)
    if iscuda: net = net.cuda()

    # create the non-maxima detector
    detector = NonMaxSuppression(rel_thr=args.reliability_thr,
                                 rep_thr=args.repeatability_thr)

    keypoints_dtype = None if kdata.keypoints is None else kdata.keypoints.dtype
    descriptors_dtype = None if kdata.descriptors is None else kdata.descriptors.dtype

    keypoints_dsize = None if kdata.keypoints is None else kdata.keypoints.dsize
    descriptors_dsize = None if kdata.descriptors is None else kdata.descriptors.dsize

    for image_name in image_list:
        img_path = get_image_fullpath(args.kapture_root, image_name)

        print(f"\nExtracting features for {img_path}")
        img = Image.open(img_path).convert('RGB')
        W, H = img.size
        img = norm_RGB(img)[None]
        if iscuda: img = img.cuda()

        # extract keypoints/descriptors for a single image
        xys, desc, scores = extract_multiscale(net,
                                               img,
                                               detector,
                                               scale_f=args.scale_f,
                                               min_scale=args.min_scale,
                                               max_scale=args.max_scale,
                                               min_size=args.min_size,
                                               max_size=args.max_size,
                                               verbose=True)

        xys = xys.cpu().numpy()
        desc = desc.cpu().numpy()
        scores = scores.cpu().numpy()
        idxs = scores.argsort()[-args.top_k or None:]

        xys = xys[idxs]
        desc = desc[idxs]
        if keypoints_dtype is None or descriptors_dtype is None:
            keypoints_dtype = xys.dtype
            descriptors_dtype = desc.dtype

            keypoints_dsize = xys.shape[1]
            descriptors_dsize = desc.shape[1]

            kdata.keypoints = kapture.Keypoints('r2d2', keypoints_dtype,
                                                keypoints_dsize)
            kdata.descriptors = kapture.Descriptors('r2d2', descriptors_dtype,
                                                    descriptors_dsize)

            keypoints_config_absolute_path = get_csv_fullpath(
                kapture.Keypoints, args.kapture_root)
            descriptors_config_absolute_path = get_csv_fullpath(
                kapture.Descriptors, args.kapture_root)

            keypoints_to_file(keypoints_config_absolute_path, kdata.keypoints)
            descriptors_to_file(descriptors_config_absolute_path,
                                kdata.descriptors)
        else:
            assert kdata.keypoints.type_name == 'r2d2'
            assert kdata.descriptors.type_name == 'r2d2'
            assert kdata.keypoints.dtype == xys.dtype
            assert kdata.descriptors.dtype == desc.dtype
            assert kdata.keypoints.dsize == xys.shape[1]
            assert kdata.descriptors.dsize == desc.shape[1]

        keypoints_fullpath = get_keypoints_fullpath(args.kapture_root,
                                                    image_name)
        print(f"Saving {xys.shape[0]} keypoints to {keypoints_fullpath}")
        image_keypoints_to_file(keypoints_fullpath, xys)
        kdata.keypoints.add(image_name)

        descriptors_fullpath = get_descriptors_fullpath(
            args.kapture_root, image_name)
        print(f"Saving {desc.shape[0]} descriptors to {descriptors_fullpath}")
        image_descriptors_to_file(descriptors_fullpath, desc)
        kdata.descriptors.add(image_name)

    if not keypoints_check_dir(kdata.keypoints, args.kapture_root) or \
            not descriptors_check_dir(kdata.descriptors, args.kapture_root):
        print(
            'local feature extraction ended successfully but not all files were saved'
        )
Example 9
def _import_features_and_matches(opensfm_root_dir, kapture_root_dir, disable_tqdm)\
        -> Tuple[kapture.Descriptors, kapture.Keypoints, kapture.Matches]:
    # import features (keypoints + descriptors)
    kapture_keypoints = None  # kapture.Keypoints(type_name='opensfm', dsize=4, dtype=np.float64)
    kapture_descriptors = None  # kapture.Descriptors(type_name='opensfm', dsize=128, dtype=np.uint8)
    opensfm_features_dir_path = path.join(opensfm_root_dir, 'features')
    opensfm_features_suffix = '.features.npz'
    if path.isdir(opensfm_features_dir_path):
        logger.info('importing keypoints and descriptors ...')
        opensfm_features_file_list = (path.join(
            dp, fn) for dp, _, fs in os.walk(opensfm_features_dir_path)
                                      for fn in fs)
        opensfm_features_file_list = (
            filepath for filepath in opensfm_features_file_list
            if filepath.endswith(opensfm_features_suffix))
        for opensfm_feature_filename in tqdm(opensfm_features_file_list,
                                             disable=disable_tqdm):
            image_filename = path.relpath(
                opensfm_feature_filename,
                opensfm_features_dir_path)[:-len(opensfm_features_suffix)]
            opensfm_image_features = np.load(opensfm_feature_filename)
            opensfm_image_keypoints = opensfm_image_features['points']
            opensfm_image_descriptors = opensfm_image_features['descriptors']
            logger.debug(
                f'parsing keypoints and descriptors in {opensfm_feature_filename}'
            )
            if kapture_keypoints is None:
                # print(type(opensfm_image_keypoints.dtype))
                # HAHOG = Hessian Affine feature point detector + HOG descriptor
                kapture_keypoints = kapture.Keypoints(
                    type_name='HessianAffine',
                    dsize=opensfm_image_keypoints.shape[1],
                    dtype=opensfm_image_keypoints.dtype)
            if kapture_descriptors is None:
                kapture_descriptors = kapture.Descriptors(
                    type_name='HOG',
                    dsize=opensfm_image_descriptors.shape[1],
                    dtype=opensfm_image_descriptors.dtype)

            # convert keypoints file
            keypoint_file_path = kapture.io.features.get_features_fullpath(
                data_type=kapture.Keypoints,
                kapture_dirpath=kapture_root_dir,
                image_filename=image_filename)
            kapture.io.features.image_keypoints_to_file(
                filepath=keypoint_file_path,
                image_keypoints=opensfm_image_keypoints)
            # register the file
            kapture_keypoints.add(image_filename)

            # convert descriptors file
            descriptor_file_path = kapture.io.features.get_features_fullpath(
                data_type=kapture.Descriptors,
                kapture_dirpath=kapture_root_dir,
                image_filename=image_filename)
            kapture.io.features.image_descriptors_to_file(
                filepath=descriptor_file_path,
                image_descriptors=opensfm_image_descriptors)
            # register the file
            kapture_descriptors.add(image_filename)
    # import matches
    kapture_matches = kapture.Matches()
    opensfm_matches_suffix = '_matches.pkl.gz'
    opensfm_matches_dir_path = path.join(opensfm_root_dir, 'matches')
    if path.isdir(opensfm_matches_dir_path):
        logger.info('importing matches ...')
        opensfm_matches_file_list = (path.join(
            dp, fn) for dp, _, fs in os.walk(opensfm_matches_dir_path)
                                     for fn in fs)
        opensfm_matches_file_list = (
            filepath for filepath in opensfm_matches_file_list
            if filepath.endswith(opensfm_matches_suffix))

        for opensfm_matches_filename in tqdm(opensfm_matches_file_list,
                                             disable=disable_tqdm):
            image_filename_1 = path.relpath(
                opensfm_matches_filename,
                opensfm_matches_dir_path)[:-len(opensfm_matches_suffix)]
            logger.debug(f'parsing matches in {image_filename_1}')
            with gzip.open(opensfm_matches_filename, 'rb') as f:
                opensfm_matches = pickle.load(f)
                for image_filename_2, opensfm_image_matches in opensfm_matches.items(
                ):
                    image_pair = (image_filename_1, image_filename_2)
                    # register the pair to kapture
                    kapture_matches.add(*image_pair)
                    # convert the bin file to kapture
                    kapture_matches_filepath = kapture.io.features.get_matches_fullpath(
                        image_filename_pair=image_pair,
                        kapture_dirpath=kapture_root_dir)
                    kapture_image_matches = np.hstack([
                        opensfm_image_matches.astype(np.float64),
                        # no matches scoring = assume all to one
                        np.ones(shape=(opensfm_image_matches.shape[0], 1),
                                dtype=np.float64)
                    ])
                    kapture.io.features.image_matches_to_file(
                        kapture_matches_filepath, kapture_image_matches)
    return kapture_descriptors, kapture_keypoints, kapture_matches
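
# Hedged usage note for the helper above: mind the return order
# (descriptors, keypoints, matches); paths below are placeholders.
descriptors, keypoints, matches = _import_features_and_matches(
    '/path/to/opensfm_project', '/path/to/kapture', disable_tqdm=False)
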
Example 10
            idx_keep = scores.argsort(
            )[-min(len(keypoints), args.max_keypoints):]
            keypoints = keypoints[idx_keep]
            descriptors = descriptors[idx_keep]

        if keypoints_dtype is None or descriptors_dtype is None:
            keypoints_dtype = keypoints.dtype
            descriptors_dtype = descriptors.dtype

            keypoints_dsize = keypoints.shape[1]
            descriptors_dsize = descriptors.shape[1]

            kdata.keypoints[args.keypoints_type] = kapture.Keypoints(
                'd2net', keypoints_dtype, keypoints_dsize)
            kdata.descriptors[args.descriptors_type] = kapture.Descriptors(
                'd2net', descriptors_dtype, descriptors_dsize,
                args.keypoints_type, 'L2')

            keypoints_config_absolute_path = get_feature_csv_fullpath(
                kapture.Keypoints, args.keypoints_type, args.kapture_root)
            descriptors_config_absolute_path = get_feature_csv_fullpath(
                kapture.Descriptors, args.descriptors_type, args.kapture_root)

            keypoints_to_file(keypoints_config_absolute_path,
                              kdata.keypoints[args.keypoints_type])
            descriptors_to_file(descriptors_config_absolute_path,
                                kdata.descriptors[args.descriptors_type])
        else:
            assert kdata.keypoints[
                args.keypoints_type].dtype == keypoints.dtype
            assert kdata.descriptors[
                args.descriptors_type].dtype == descriptors.dtype
Example 11
def import_opensfm(
        opensfm_rootdir: str,
        kapture_rootdir: str,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.copy) -> None:
    disable_tqdm = logger.getEffectiveLevel() != logging.INFO
    # load reconstruction
    opensfm_reconstruction_filepath = path.join(opensfm_rootdir,
                                                'reconstruction.json')
    with open(opensfm_reconstruction_filepath, 'rt') as f:
        opensfm_reconstruction = json.load(f)
    # remove the single list @ root
    opensfm_reconstruction = opensfm_reconstruction[0]

    # prepare space for output
    os.makedirs(kapture_rootdir, exist_ok=True)
    delete_existing_kapture_files(kapture_rootdir,
                                  force_erase=force_overwrite_existing)

    # import cameras
    kapture_sensors = kapture.Sensors()
    assert 'cameras' in opensfm_reconstruction
    # import cameras
    for osfm_camera_id, osfm_camera in opensfm_reconstruction['cameras'].items(
    ):
        camera = import_camera(osfm_camera, name=osfm_camera_id)
        kapture_sensors[osfm_camera_id] = camera

    # import shots
    logger.info('importing images and trajectories ...')
    kapture_images = kapture.RecordsCamera()
    kapture_trajectories = kapture.Trajectories()
    opensfm_image_dirpath = path.join(opensfm_rootdir, 'images')
    assert 'shots' in opensfm_reconstruction
    # used later to retrieve the timestamp of an image
    image_timestamps, image_sensors = {}, {}
    for timestamp, (image_filename, shot) in enumerate(
            opensfm_reconstruction['shots'].items()):
        sensor_id = shot['camera']
        image_timestamps[image_filename] = timestamp
        image_sensors[image_filename] = sensor_id
        # in OpenSfm, (sensor, timestamp) is not unique.
        rotation_vector = shot['rotation']
        q = quaternion.from_rotation_vector(rotation_vector)
        translation = shot['translation']
        # capture_time = shot['capture_time'] # may be invalid
        # gps_position = shot['gps_position']
        kapture_images[timestamp, sensor_id] = image_filename
        kapture_trajectories[timestamp,
                             sensor_id] = kapture.PoseTransform(r=q,
                                                                t=translation)

    # copy image files
    filename_list = [f for _, _, f in kapture.flatten(kapture_images)]
    import_record_data_from_dir_auto(
        source_record_dirpath=opensfm_image_dirpath,
        destination_kapture_dirpath=kapture_rootdir,
        filename_list=filename_list,
        copy_strategy=images_import_method)

    # gps from pre-extracted exif, in exif/image_name.jpg.exif
    kapture_gnss = None
    opensfm_exif_dirpath = path.join(opensfm_rootdir, 'exif')
    opensfm_exif_suffix = '.exif'
    if path.isdir(opensfm_exif_dirpath):
        logger.info('importing GNSS from exif ...')
        camera_ids = set(image_sensors.values())
        # add a gps sensor for each camera
        map_cam_to_gnss_sensor = {
            cam_id: 'GPS_' + cam_id
            for cam_id in camera_ids
        }
        for gnss_id in map_cam_to_gnss_sensor.values():
            kapture_sensors[gnss_id] = kapture.Sensor(
                sensor_type='gnss', sensor_params=['EPSG:4326'])
        # build epsg_code for all cameras
        kapture_gnss = kapture.RecordsGnss()
        opensfm_exif_filepath_list = (
            path.join(dirpath, filename)
            for dirpath, _, filename_list in os.walk(opensfm_exif_dirpath)
            for filename in filename_list
            if filename.endswith(opensfm_exif_suffix))
        for opensfm_exif_filepath in tqdm(opensfm_exif_filepath_list,
                                          disable=disable_tqdm):
            image_filename = path.relpath(
                opensfm_exif_filepath,
                opensfm_exif_dirpath)[:-len(opensfm_exif_suffix)]
            image_timestamp = image_timestamps[image_filename]
            image_sensor_id = image_sensors[image_filename]
            gnss_timestamp = image_timestamp
            gnss_sensor_id = map_cam_to_gnss_sensor[image_sensor_id]
            with open(opensfm_exif_filepath, 'rt') as f:
                js_root = json.load(f)
                if 'gps' not in js_root:
                    logger.warning(f'NO GPS data in "{opensfm_exif_filepath}"')
                    continue

                gps_coords = {
                    'x': js_root['gps']['longitude'],
                    'y': js_root['gps']['latitude'],
                    'z': js_root['gps'].get('altitude', 0.0),
                    'dop': js_root['gps'].get('dop', 0),
                    'utc': 0,
                }
                logger.debug(
                    f'found GPS data for ({gnss_timestamp}, {gnss_sensor_id}) in "{opensfm_exif_filepath}"'
                )
                kapture_gnss[gnss_timestamp,
                             gnss_sensor_id] = kapture.RecordGnss(**gps_coords)

    # import features (keypoints + descriptors)
    kapture_keypoints = None  # kapture.Keypoints(type_name='opensfm', dsize=4, dtype=np.float64)
    kapture_descriptors = None  # kapture.Descriptors(type_name='opensfm', dsize=128, dtype=np.uint8)
    opensfm_features_dirpath = path.join(opensfm_rootdir, 'features')
    opensfm_features_suffix = '.features.npz'
    if path.isdir(opensfm_features_dirpath):
        logger.info('importing keypoints and descriptors ...')
        opensfm_features_file_list = (path.join(
            dp, fn) for dp, _, fs in os.walk(opensfm_features_dirpath)
                                      for fn in fs)
        opensfm_features_file_list = (
            filepath for filepath in opensfm_features_file_list
            if filepath.endswith(opensfm_features_suffix))
        for opensfm_feature_filename in tqdm(opensfm_features_file_list,
                                             disable=disable_tqdm):
            image_filename = path.relpath(
                opensfm_feature_filename,
                opensfm_features_dirpath)[:-len(opensfm_features_suffix)]
            opensfm_image_features = np.load(opensfm_feature_filename)
            opensfm_image_keypoints = opensfm_image_features['points']
            opensfm_image_descriptors = opensfm_image_features['descriptors']
            logger.debug(
                f'parsing keypoints and descriptors in {opensfm_feature_filename}'
            )
            if kapture_keypoints is None:
                # print(type(opensfm_image_keypoints.dtype))
                # HAHOG = Hessian Affine feature point detector + HOG descriptor
                kapture_keypoints = kapture.Keypoints(
                    type_name='HessianAffine',
                    dsize=opensfm_image_keypoints.shape[1],
                    dtype=opensfm_image_keypoints.dtype)
            if kapture_descriptors is None:
                kapture_descriptors = kapture.Descriptors(
                    type_name='HOG',
                    dsize=opensfm_image_descriptors.shape[1],
                    dtype=opensfm_image_descriptors.dtype)

            # convert keypoints file
            keypoint_filpath = kapture.io.features.get_features_fullpath(
                data_type=kapture.Keypoints,
                kapture_dirpath=kapture_rootdir,
                image_filename=image_filename)
            kapture.io.features.image_keypoints_to_file(
                filepath=keypoint_filpath,
                image_keypoints=opensfm_image_keypoints)
            # register the file
            kapture_keypoints.add(image_filename)

            # convert descriptors file
            descriptor_filpath = kapture.io.features.get_features_fullpath(
                data_type=kapture.Descriptors,
                kapture_dirpath=kapture_rootdir,
                image_filename=image_filename)
            kapture.io.features.image_descriptors_to_file(
                filepath=descriptor_filpath,
                image_descriptors=opensfm_image_descriptors)
            # register the file
            kapture_descriptors.add(image_filename)

    # import matches
    kapture_matches = kapture.Matches()
    opensfm_matches_suffix = '_matches.pkl.gz'
    opensfm_matches_dirpath = path.join(opensfm_rootdir, 'matches')
    if path.isdir(opensfm_matches_dirpath):
        logger.info('importing matches ...')
        opensfm_matches_file_list = (path.join(
            dp, fn) for dp, _, fs in os.walk(opensfm_matches_dirpath)
                                     for fn in fs)
        opensfm_matches_file_list = (
            filepath for filepath in opensfm_matches_file_list
            if filepath.endswith(opensfm_matches_suffix))

        for opensfm_matches_filename in tqdm(opensfm_matches_file_list,
                                             disable=disable_tqdm):
            image_filename_1 = path.relpath(
                opensfm_matches_filename,
                opensfm_matches_dirpath)[:-len(opensfm_matches_suffix)]
            logger.debug(f'parsing matches in {image_filename_1}')
            with gzip.open(opensfm_matches_filename, 'rb') as f:
                opensfm_matches = pickle.load(f)
                for image_filename_2, opensfm_image_matches in opensfm_matches.items(
                ):
                    image_pair = (image_filename_1, image_filename_2)
                    # register the pair to kapture
                    kapture_matches.add(*image_pair)
                    # convert the bin file to kapture
                    kapture_matches_filepath = kapture.io.features.get_matches_fullpath(
                        image_filename_pair=image_pair,
                        kapture_dirpath=kapture_rootdir)
                    kapture_image_matches = np.hstack([
                        opensfm_image_matches.astype(np.float64),
                        # no matches scoring available: assume all equal to one
                        np.ones(shape=(opensfm_image_matches.shape[0], 1),
                                dtype=np.float64)
                    ])
                    kapture.io.features.image_matches_to_file(
                        kapture_matches_filepath, kapture_image_matches)

    # import 3-D points
    if 'points' in opensfm_reconstruction:
        logger.info('importing 3-D points')
        opensfm_points = opensfm_reconstruction['points']
        points_data = []
        for point_id in sorted(opensfm_points):
            point_data = opensfm_points[point_id]
            point_data = point_data['coordinates'] + point_data['color']
            points_data.append(point_data)
        kapture_points = kapture.Points3d(points_data)
    else:
        kapture_points = None

    # saving kapture csv files
    logger.info('saving kapture files')
    kapture_data = kapture.Kapture(sensors=kapture_sensors,
                                   records_camera=kapture_images,
                                   records_gnss=kapture_gnss,
                                   trajectories=kapture_trajectories,
                                   keypoints=kapture_keypoints,
                                   descriptors=kapture_descriptors,
                                   matches=kapture_matches,
                                   points3d=kapture_points)
    kapture.io.csv.kapture_to_dir(dirpath=kapture_rootdir,
                                  kapture_data=kapture_data)
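
# Hedged sketch: invoking the OpenSfM importer above. Paths are placeholders and
# TransferAction.copy is just one of the available image transfer strategies.
import_opensfm(opensfm_rootdir='/path/to/opensfm_project',
               kapture_rootdir='/path/to/kapture',
               force_overwrite_existing=True,
               images_import_method=TransferAction.copy)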