예제 #1
0
    def __init__(self,
                 rosbag_path: str,
                 rigs: Optional[Rigs],
                 sensors: Sensors,
                 kapture_path: str,
                 force_overwrite_existing: bool = False) -> None:
        """
        Set up the rosbag-to-kapture exporter and prepare the output directory.

        :param rosbag_path: full path to the rosbag file
        :param rigs: rigs of the sensors
        :param sensors: sensors definition used for the capture
        :param kapture_path: full path to the top kapture directory to save
        :param force_overwrite_existing: silently overwrite kapture files if already exists
        """
        # refuse to proceed without an actual rosbag file on disk
        if not path.isfile(rosbag_path):
            raise ValueError(f'Rosbag file {rosbag_path} does not exist')
        self._rosbag_path = rosbag_path
        self._rigs = rigs
        self._sensors = sensors
        self._kapture_path = kapture_path
        self.logger = logging.getLogger('rosbag')
        self.logger.info(f'Reading rosbag file {rosbag_path} and exporting as Kapture format into {kapture_path}')
        # make sure the destination exists and is clean before exporting
        os.makedirs(kapture_path, exist_ok=True)
        kapture.io.structure.delete_existing_kapture_files(kapture_path, force_overwrite_existing)
        self._images_full_path = get_image_fullpath(kapture_path)
        # bounded history of odometer poses, keyed by timestamp
        self._last_poses = LimitedDictionary(20)
        # list of ImageInfo (generic annotations unsupported in 3.6)
        self.images_info = []
        # list of PositionInfo
        self.poses_info = []
예제 #2
0
    def test_export(self):
        """Round-trip GNSS records through the image EXIF tags and compare."""
        # work on a disposable copy of the dataset
        working_dir = path.join(self._tempdir.name, 'kapture')
        shutil.copytree(self._kapture_dirpath, working_dir)
        kapture_data = kapture.io.csv.kapture_from_dir(working_dir)
        # strip any pre-existing EXIF data from every image first
        filepaths_by_image = images_to_filepaths(kapture_data.records_camera, working_dir)
        for filepath in filepaths_by_image.values():
            clear_exif(filepath)

        # write the GNSS records into the image EXIF tags
        export_gps_to_exif(kapture_data=kapture_data, kapture_dirpath=working_dir)

        # read the EXIF back and rebuild GNSS records from it
        records_from_exif = kapture.RecordsGnss()
        for timestamp, cam_id, image_name in kapture.flatten(kapture_data.records_camera):
            filepath = get_image_fullpath(working_dir, image_name)
            exif_data = read_exif(filepath)
            records_from_exif[timestamp, 'GPS_' + cam_id] = convert_gps_to_kapture_record(exif_data)

        # what came back from EXIF must equal what was originally stored
        self.assertTrue(equal_records_gnss(kapture_data.records_gnss, records_from_exif))
예제 #3
0
 def test_import_openmvg(self) -> None:
     """
     Import a small openMVG JSON file (linking the images) and check the result.
     """
     self.assertTrue(path.isdir(self._openmvg_sample_path))
     self.assertTrue(path.exists(self._kapture_path), "Kapture directory exists")
     sfm_file = path.join(self._openmvg_sample_path, 'sfm_data_small.json')
     # on windows, without admin rights, fails with OSError: symbolic link privilege not held
     # see https://docs.python.org/3.6/library/os.html#os.symlink
     logger.info(f'Running on "{sys.platform}" which is {"" if self.isWindows else "not"} a Windows platform')
     transfer = TransferAction.skip if self.isWindows else TransferAction.link_relative
     import_openmvg(sfm_file, self._kapture_path, transfer, True)

     # helper: full path of the csv file for a given kapture type
     def csv_path(kapture_type):
         return path.join(self._kapture_path, kcsv.CSV_FILENAMES[kapture_type])

     # check which kapture files were (or were not) produced
     self.assertTrue(path.isfile(csv_path(kapture.Sensors)), "Camera file written")
     self.assertFalse(path.isfile(csv_path(kapture.Rigs)), "Rigs file should be missing")
     self.assertTrue(path.isfile(csv_path(kapture.RecordsCamera)), "Camera Records file written")
     self.assertFalse(path.isfile(csv_path(kapture.RecordsLidar)), "Lidar Records file should be missing")
     self.assertTrue(path.isfile(csv_path(kapture.Trajectories)), "Trajectories file written")
     # reload the data and verify its content
     kapture_data = kcsv.kapture_from_dir(self._kapture_path)
     self._verify_data(kapture_data)
     if not self.isWindows:
         # every image record must have been linked into the kapture tree
         for _, _, name in kapture.flatten(kapture_data.records_camera):
             img_path = get_image_fullpath(self._kapture_path, name)
             self.assertTrue(path.islink(img_path), f"image link {img_path}")
예제 #4
0
def export_gps_to_exif(kapture_data: kapture.Kapture,
                       kapture_dirpath: str,
                       gps_id_to_cam_id: Optional[Dict[str, str]] = None):
    """
    Export GPS from GNSS data in kapture, to image exifs.
    If the mapping from gnss_id to camera_id is not given,
    gnss_id is assumed to be GPS_<cam_id>.

    :param kapture_data: kapture data with sensors, camera records and GNSS records
    :param kapture_dirpath: top kapture directory (where the image files live)
    :param gps_id_to_cam_id: optional mapping from GNSS sensor id to camera sensor id
    """

    # sanity check
    if None in [
            kapture_data.sensors, kapture_data.records_camera,
            kapture_data.records_gnss
    ]:
        logger.warning('cannot export exif: missing data.')
        return

    # auto build GPS/camera map, based on the prefix rule.
    if gps_id_to_cam_id is None:
        camera_ids = {
            cam_id
            for cam_id, sensor in kapture_data.sensors.items()
            if sensor.sensor_type == 'camera'
        }

        gps_ids = {
            gps_id
            for gps_id, sensor in kapture_data.sensors.items()
            if sensor.sensor_type == 'gnss' and gps_id.startswith('GPS_')
        }

        gps_id_to_cam_id = {
            'GPS_' + cam_id: cam_id
            for cam_id in camera_ids if 'GPS_' + cam_id in gps_ids
        }

        if len(gps_id_to_cam_id) != len(gps_ids):
            logger.warning('unable to map some GPS to a camera.')

    # lazily pair each GNSS record with its camera id; records with no mapped camera are dropped
    gps_records = ((timestamp, gps_id, gps_id_to_cam_id[gps_id], gnss_record)
                   for timestamp, gps_id, gnss_record in kapture.flatten(
                       kapture_data.records_gnss)
                   if gps_id in gps_id_to_cam_id)

    for timestamp, gps_id, cam_id, gps_record in gps_records:
        if (timestamp, cam_id) not in kapture_data.records_camera:
            logger.warning(
                f'no image found corresponding to GPS record ({timestamp}, {cam_id})'
            )
        else:
            image_name = kapture_data.records_camera[timestamp, cam_id]
            image_filepath = get_image_fullpath(
                kapture_dir_path=kapture_dirpath, image_filename=image_name)
            exif_data = gps_record_to_exif_dict(gps_record)
            update_exif(image_filepath, exif_data)
예제 #5
0
    def test_maupertuis_import(self):
        """Import the maupertuis colmap model and compare it against the golden kapture."""
        kapture_data = import_colmap(self._temp_dirpath,
                                     self._database_filepath,
                                     self._reconstruction_path,
                                     self._images_filepath,
                                     force_overwrite_existing=True,
                                     images_import_strategy=TransferAction.copy,
                                     no_geometric_filtering=True)

        # sanity-check the sizes of every imported collection
        self.assertEqual(1, len(kapture_data.sensors))
        self.assertEqual(4, len(kapture_data.trajectories))
        self.assertEqual(4, len(kapture_data.records_camera))
        self.assertIsNone(kapture_data.records_lidar)
        self.assertIsNone(kapture_data.records_wifi)
        self.assertIsNone(kapture_data.records_gnss)
        self.assertEqual(4, len(kapture_data.keypoints))
        self.assertEqual(4, len(kapture_data.descriptors))
        self.assertEqual(6, len(kapture_data.matches))
        self.assertEqual(1039, len(kapture_data.points3d))
        self.assertEqual(1039, len(kapture_data.observations))

        # compare against golden kapture, after dropping GPS, Wifi, Lidar data
        golden = kapture_from_dir(self._kapture_dirpath)
        kapture_data.records_lidar = None
        kapture_data.records_wifi = None
        golden.records_gnss = None
        golden.sensors = kapture.Sensors({sensor_id: sensor
                                          for sensor_id, sensor in golden.sensors.items()
                                          if sensor.sensor_type == 'camera'})

        # both kaptures must now be equivalent
        self.assertTrue(kapture.algo.compare.equal_kapture(kapture_data, golden))
        # every image record must have been physically copied
        for _, _, name in kapture.flatten(kapture_data.records_camera):
            image_path = get_image_fullpath(self._temp_dirpath, name)
            self.assertTrue(path.isfile(image_path),
                            f"image link {image_path}")
예제 #6
0
def _import_images(input_json, image_action, kapture_images_path,
                   openmvg_images_dir, root_path, device_identifiers,
                   timestamp_for_pose):
    """
    Build kapture camera records from the openmvg views,
    transferring the image files as requested by image_action.
    Also fills device_identifiers and timestamp_for_pose (keyed by pose id).
    """
    records_camera = kapture.RecordsCamera()
    views = input_json.get(VIEWS)
    if not views:
        return records_camera
    if image_action == TransferAction.root_link:
        # Do a unique images directory link
        # kapture/<records_dir>/openmvg_top_images_directory -> openmvg_root_path
        kapture_records_path = get_image_fullpath(kapture_images_path)
        os.makedirs(kapture_records_path, exist_ok=True)
        os.symlink(root_path,
                   path.join(kapture_records_path, openmvg_images_dir))
    logger.info(f'Importing {len(views)} images')
    # progress bar only when files are actually transferred and log level is debug/info
    show_progress = (image_action != TransferAction.skip
                     and image_action != TransferAction.root_link
                     and logger.getEffectiveLevel() <= logging.INFO)
    progress_bar = tqdm(total=len(views)) if show_progress else None
    for view in views:
        view_data = view[VALUE][PTR_WRAPPER][DATA]
        pose_id = view_data[ID_POSE]
        # the two ids should match; fall back on the view key when the view id is falsy
        timestamp = view_data[ID_VIEW] if view_data[ID_VIEW] else view[KEY]
        device_id = str(view_data[ID_INTRINSIC])  # device_id must be a string for kapture
        device_identifiers[pose_id] = device_id
        timestamp_for_pose[pose_id] = timestamp

        kapture_filename = _import_image_file(view_data, openmvg_images_dir,
                                              root_path, kapture_images_path,
                                              image_action)

        if progress_bar is not None:
            progress_bar.update(1)

        # key is a tuple of (int, str)
        records_camera[(timestamp, device_id)] = path_secure(kapture_filename)
    if progress_bar is not None:
        progress_bar.close()
    return records_camera
예제 #7
0
def import_openmvg_image_file(input_data, openmvg_images_dir, root_path,
                              kapture_images_path, image_action) -> str:
    """
    Transfer (copy/move/link) a single openmvg image into the kapture image tree.

    :param input_data: openmvg view data (holds the image filename and optional local path)
    :param openmvg_images_dir: name of the common openmvg images directory
    :param root_path: openmvg root path the image filenames are relative to
    :param kapture_images_path: path to the top kapture directory
    :param image_action: transfer action (skip, copy, move, link_relative, ...)
    :return: image filename relative to the kapture images directory
    """
    # Add the common openmvg images directory in front of the filename
    filename: str
    if input_data.get(JSON_KEY.LOCAL_PATH):
        filename = path.join(input_data[JSON_KEY.LOCAL_PATH],
                             input_data[JSON_KEY.FILENAME])
    else:
        filename = input_data[JSON_KEY.FILENAME]
    kapture_filename = path.join(openmvg_images_dir, filename)
    if image_action != TransferAction.skip and image_action != TransferAction.root_link:
        src_path: str
        if root_path:
            src_path = path.join(root_path, filename)
        else:
            src_path = filename
        dst_path = get_image_fullpath(kapture_images_path, kapture_filename)
        # Create destination directory if necessary (exist_ok makes the isdir pre-check redundant)
        dst_dir = path.dirname(dst_path)
        os.makedirs(dst_dir, exist_ok=True)
        # Remove any pre-existing destination. Use lexists: exists() follows symlinks and
        # returns False for a dangling link, which would leave it in place and make the
        # os.symlink below fail with FileExistsError.
        if path.lexists(dst_path):
            os.unlink(dst_path)
        # Create file or link
        if image_action == TransferAction.copy:
            shutil.copy2(src_path, dst_path)
        elif image_action == TransferAction.move:
            shutil.move(src_path, dst_path)
        else:
            # Individual link
            if image_action == TransferAction.link_relative:
                # Compute relative path
                src_path = path.relpath(src_path, dst_dir)
            os.symlink(src_path, dst_path)
            # Symlink might crash on Windows if the user executing this code has no admin privilege
    return kapture_filename
예제 #8
0
# Creating CNN model
# D2Net feature extractor configured from the CLI arguments
# (args presumably comes from argparse — defined earlier in the file)
model = D2Net(
    model_file=args.model_file,
    use_relu=args.use_relu,
    use_cuda=use_cuda
)

# Reuse the dtype/dsize of already-existing features so newly extracted ones
# stay consistent; None when no keypoints/descriptors exist yet.
keypoints_dtype = None if kdata.keypoints is None else kdata.keypoints.dtype
descriptors_dtype = None if kdata.descriptors is None else kdata.descriptors.dtype

keypoints_dsize = None if kdata.keypoints is None else kdata.keypoints.dsize
descriptors_dsize = None if kdata.descriptors is None else kdata.descriptors.dsize

# Process the files
for image_name in tqdm(image_list, total=len(image_list)):
    img_path = get_image_fullpath(args.kapture_root, image_name)
    image = Image.open(img_path).convert('RGB')

    width, height = image.size

    # start from the original image; downscale below if it exceeds the limits
    resized_image = image
    resized_width = width
    resized_height = height

    max_edge = args.max_edge
    max_sum_edges = args.max_sum_edges
    # cap the longest edge at max_edge, preserving the aspect ratio
    # (widths/heights are floored, so the result never exceeds the cap)
    if max(resized_width, resized_height) > max_edge:
        scale_multiplier = max_edge / max(resized_width, resized_height)
        resized_width = math.floor(resized_width * scale_multiplier)
        resized_height = math.floor(resized_height * scale_multiplier)
        resized_image = image.resize((resized_width, resized_height))
예제 #9
0
def extract_kapture_keypoints(kapture_root,
                              config,
                              output_dir='',
                              overwrite=False):
    """
    Extract r2d2 keypoints and descriptors to the kapture format directly.

    :param kapture_root: path to the kapture root directory (images are read from there)
    :param config: r2d2 configuration dict (checkpoint, thresholds, scales, top_k, ...)
    :param output_dir: root of the output directory for features; defaults to kapture_root
    :param overwrite: when True, delete existing keypoints/descriptors and re-extract all
    """
    print('extract_kapture_keypoints...')
    kdata = kapture_from_dir(kapture_root, matches_pairsfile_path=None,
                             skip_list=[kapture.GlobalFeatures,
                                        kapture.Matches,
                                        kapture.Points3d,
                                        kapture.Observations])
    export_dir = output_dir if output_dir else kapture_root  # root of output directory for features
    os.makedirs(export_dir, exist_ok=True)

    assert kdata.records_camera is not None
    image_list = [filename for _, _, filename in kapture.flatten(kdata.records_camera)]
    # resume extraction if some features exist
    try:
        # load existing features, if any
        kdata.keypoints = keypoints_from_dir(export_dir, None)
        kdata.descriptors = descriptors_from_dir(export_dir, None)
        if kdata.keypoints is not None and kdata.descriptors is not None and not overwrite:
            image_list = [name for name in image_list
                          if name not in kdata.keypoints or name not in kdata.descriptors]
    except FileNotFoundError:
        pass
    except Exception:
        # was a bare 'except:', which also swallowed KeyboardInterrupt/SystemExit
        logging.exception("Error with importing existing local features.")

    # clear features first if overwriting
    if overwrite:
        delete_existing_kapture_files(export_dir, True, only=[kapture.Descriptors, kapture.Keypoints])

    if len(image_list) == 0:
        print('All features were already extracted')
        return
    else:
        print(f'Extracting r2d2 features for {len(image_list)} images')

    iscuda = common.torch_set_gpu([torch.cuda.is_available()])

    # load the network...
    net = load_network(config['checkpoint'])
    if iscuda:
        net = net.cuda()

    # create the non-maxima detector
    detector = NonMaxSuppression(
        rel_thr=config['reliability_thr'],
        rep_thr=config['repeatability_thr'])

    # reuse the dtype/dsize of pre-existing features so new ones stay consistent
    keypoints_dtype = None if kdata.keypoints is None else kdata.keypoints.dtype
    descriptors_dtype = None if kdata.descriptors is None else kdata.descriptors.dtype

    keypoints_dsize = None if kdata.keypoints is None else kdata.keypoints.dsize
    descriptors_dsize = None if kdata.descriptors is None else kdata.descriptors.dsize

    for image_name in image_list:
        img_path = get_image_fullpath(kapture_root, image_name)

        if img_path.endswith('.txt'):
            # a .txt entry lists additional image paths: queue them for extraction
            # and skip the file itself. (The previous code referenced an undefined
            # 'images' variable here — a NameError on any .txt entry — and leaked
            # the file handle.)
            with open(img_path) as image_list_file:
                image_list.extend(image_list_file.read().splitlines())
            continue

        print(f"\nExtracting features for {img_path}")
        img = Image.open(img_path).convert('RGB')
        W, H = img.size
        img = norm_RGB(img)[None]
        if iscuda:
            img = img.cuda()

        # extract keypoints/descriptors for a single image
        xys, desc, scores = extract_multiscale(net, img, detector,
                                               scale_f=config['scale_f'],
                                               min_scale=config['min_scale'],
                                               max_scale=config['max_scale'],
                                               min_size=config['min_size'],
                                               max_size=config['max_size'],
                                               verbose=True)

        xys = xys.cpu().numpy()
        desc = desc.cpu().numpy()
        scores = scores.cpu().numpy()
        # keep the top_k highest-scoring keypoints;
        # '-config["top_k"] or None' yields the full range when top_k is falsy (0/None)
        idxs = scores.argsort()[-config['top_k'] or None:]

        xys = xys[idxs]
        desc = desc[idxs]
        if keypoints_dtype is None or descriptors_dtype is None:
            # the first extracted image defines dtype/dsize: write the feature-type configs
            keypoints_dtype = xys.dtype
            descriptors_dtype = desc.dtype

            keypoints_dsize = xys.shape[1]
            descriptors_dsize = desc.shape[1]

            kdata.keypoints = kapture.Keypoints('r2d2', keypoints_dtype, keypoints_dsize)
            kdata.descriptors = kapture.Descriptors('r2d2', descriptors_dtype, descriptors_dsize)

            keypoints_config_absolute_path = get_csv_fullpath(kapture.Keypoints, export_dir)
            descriptors_config_absolute_path = get_csv_fullpath(kapture.Descriptors, export_dir)

            keypoints_to_file(keypoints_config_absolute_path, kdata.keypoints)
            descriptors_to_file(descriptors_config_absolute_path, kdata.descriptors)
        else:
            # later images must match the already-declared feature type
            assert kdata.keypoints.type_name == 'r2d2'
            assert kdata.descriptors.type_name == 'r2d2'
            assert kdata.keypoints.dtype == xys.dtype
            assert kdata.descriptors.dtype == desc.dtype
            assert kdata.keypoints.dsize == xys.shape[1]
            assert kdata.descriptors.dsize == desc.shape[1]

        keypoints_fullpath = get_keypoints_fullpath(export_dir, image_name)
        print(f"Saving {xys.shape[0]} keypoints to {keypoints_fullpath}")
        image_keypoints_to_file(keypoints_fullpath, xys)
        kdata.keypoints.add(image_name)

        descriptors_fullpath = get_descriptors_fullpath(export_dir, image_name)
        print(f"Saving {desc.shape[0]} descriptors to {descriptors_fullpath}")
        image_descriptors_to_file(descriptors_fullpath, desc)
        kdata.descriptors.add(image_name)

    if not keypoints_check_dir(kdata.keypoints, export_dir) or \
            not descriptors_check_dir(kdata.descriptors, export_dir):
        print('local feature extraction ended successfully but not all files were saved')
예제 #10
0
def merge_remap(kapture_list: List[kapture.Kapture], skip_list: List[Type],
                data_paths: List[str], kapture_path: str,
                images_import_method: TransferAction) -> kapture.Kapture:
    """
    Merge multiple kapture while keeping ids (sensor_id) identical in merged and inputs.

    :param kapture_list: list of kapture to merge.
    :param skip_list: input optional types to not merge. sensors and rigs are unskippable
    :param data_paths: list of path to root path directory in same order as mentioned in kapture_list.
    :param kapture_path: directory root path to the merged kapture.
    :param images_import_method: transfer action used when importing the image records data.
    :return: the merged kapture.
    """
    merged_kapture = kapture.Kapture()

    # find new sensor ids / rig ids
    sensor_offset = 0
    rigs_offset = 0
    sensors_mapping = []
    rigs_mapping = []
    for every_kapture in kapture_list:
        if every_kapture.sensors is not None:
            sensors_mapping.append(
                get_sensors_mapping(every_kapture.sensors, sensor_offset))
            sensor_offset += len(every_kapture.sensors)
        else:
            sensors_mapping.append({})

        if every_kapture.rigs is not None:
            rigs_mapping.append(
                get_rigs_mapping(every_kapture.rigs, rigs_offset))
            rigs_offset += len(every_kapture.rigs)
        else:
            rigs_mapping.append({})

    # concatenate all sensors with the remapped ids
    new_sensors = merge_sensors(
        [a_kapture.sensors for a_kapture in kapture_list], sensors_mapping)
    if new_sensors:  # if merge_sensors returned an empty object, keep merged_kapture.sensors to None
        merged_kapture.sensors = new_sensors

    # concatenate all rigs with the remapped ids
    new_rigs = merge_rigs([a_kapture.rigs for a_kapture in kapture_list],
                          rigs_mapping, sensors_mapping)
    if new_rigs:  # if merge_rigs returned an empty object, keep merged_kapture.rigs to None
        merged_kapture.rigs = new_rigs

    # all fields below can be skipped with skip_list
    # we do not assign the properties when the merge evaluate to false, we keep it as None
    if kapture.Trajectories not in skip_list:
        new_trajectories = merge_trajectories(
            [a_kapture.trajectories for a_kapture in kapture_list],
            rigs_mapping, sensors_mapping)
        if new_trajectories:
            merged_kapture.trajectories = new_trajectories

    if kapture.RecordsCamera not in skip_list:
        new_records_camera = merge_records_camera(
            [a_kapture.records_camera for a_kapture in kapture_list],
            sensors_mapping)
        if new_records_camera:
            merged_kapture.records_camera = new_records_camera

        # transfer the image files themselves, per input kapture, using images_import_method
        merge_records_data(
            [[
                image_name for _, _, image_name in kapture.flatten(
                    every_kapture.records_camera)
            ] if every_kapture.records_camera is not None else []
             for every_kapture in kapture_list], [
                 get_image_fullpath(data_path, image_filename=None)
                 for data_path in data_paths
             ], kapture_path, images_import_method)
    if kapture.RecordsLidar not in skip_list:
        new_records_lidar = merge_records_lidar(
            [a_kapture.records_lidar for a_kapture in kapture_list],
            sensors_mapping)
        if new_records_lidar:
            merged_kapture.records_lidar = new_records_lidar
    if kapture.RecordsWifi not in skip_list:
        new_records_wifi = merge_records_wifi(
            [a_kapture.records_wifi for a_kapture in kapture_list],
            sensors_mapping)
        if new_records_wifi:
            merged_kapture.records_wifi = new_records_wifi
    if kapture.RecordsGnss not in skip_list:
        new_records_gnss = merge_records_gnss(
            [a_kapture.records_gnss for a_kapture in kapture_list],
            sensors_mapping)
        if new_records_gnss:
            merged_kapture.records_gnss = new_records_gnss

    # for the reconstruction, except points and observations, the files are copied with shutil.copy
    # if kapture_path evaluates to False, all copies will be skipped (but classes will be filled normally)
    if kapture.Keypoints not in skip_list:
        keypoints = [a_kapture.keypoints for a_kapture in kapture_list]
        keypoints_not_none = [k for k in keypoints if k is not None]
        if len(keypoints_not_none) > 0:
            new_keypoints = merge_keypoints(keypoints, data_paths,
                                            kapture_path)
            if new_keypoints:
                merged_kapture.keypoints = new_keypoints
    if kapture.Descriptors not in skip_list:
        descriptors = [a_kapture.descriptors for a_kapture in kapture_list]
        descriptors_not_none = [k for k in descriptors if k is not None]
        if len(descriptors_not_none) > 0:
            new_descriptors = merge_descriptors(descriptors, data_paths,
                                                kapture_path)
            if new_descriptors:
                merged_kapture.descriptors = new_descriptors
    if kapture.GlobalFeatures not in skip_list:
        global_features = [
            a_kapture.global_features for a_kapture in kapture_list
        ]
        global_features_not_none = [
            k for k in global_features if k is not None
        ]
        if len(global_features_not_none) > 0:
            new_global_features = merge_global_features(
                global_features, data_paths, kapture_path)
            if new_global_features:
                merged_kapture.global_features = new_global_features
    if kapture.Matches not in skip_list:
        matches = [a_kapture.matches for a_kapture in kapture_list]
        new_matches = merge_matches(matches, data_paths, kapture_path)
        if new_matches:
            merged_kapture.matches = new_matches

    # points and observations are merged together when neither is skipped,
    # so observation indices stay consistent with the merged points
    if kapture.Points3d not in skip_list and kapture.Observations not in skip_list:
        points_and_obs = [(a_kapture.points3d, a_kapture.observations)
                          for a_kapture in kapture_list]
        new_points, new_observations = merge_points3d_and_observations(
            points_and_obs)
        if new_points:
            merged_kapture.points3d = new_points
        if new_observations:
            merged_kapture.observations = new_observations
    elif kapture.Points3d not in skip_list:
        points = [a_kapture.points3d for a_kapture in kapture_list]
        new_points = merge_points3d(points)
        if new_points:
            merged_kapture.points3d = new_points
    return merged_kapture
예제 #11
0
def colmap_localize_sift(kapture_path: str,
                         colmap_path: str,
                         input_database_path: str,
                         input_reconstruction_path: str,
                         colmap_binary: str,
                         colmap_use_cpu: bool,
                         colmap_gpu_index: str,
                         vocab_tree_path: str,
                         image_registrator_options: List[str],
                         skip_list: List[str],
                         force: bool) -> None:
    """
    Localize images on a colmap model using default SIFT features with the kapture data.

    :param kapture_path: path to the kapture to use
    :param colmap_path: path to the colmap build
    :param input_database_path: path to the map colmap.db
    :param input_reconstruction_path: path to the map reconstruction folder
    :param colmap_binary: path to the colmap executable
    :param colmap_use_cpu: to use cpu only (and ignore gpu) or to use also gpu
    :param colmap_gpu_index: gpu index for sift extractor and mapper
    :param vocab_tree_path: path to the colmap vocabulary tree file
    :param image_registrator_options: options for the image registrator
    :param skip_list: list of steps to skip
    :param force: Silently overwrite kapture files if already exists.
    """
    os.makedirs(colmap_path, exist_ok=True)
    # Set fixed name for COLMAP database

    # Load input files first to make sure it is OK
    logger.info('loading kapture files...')
    kapture_data = kapture.io.csv.kapture_from_dir(kapture_path)

    if not (kapture_data.records_camera and kapture_data.sensors):
        raise ValueError('records_camera, sensors are mandatory')

    # localization computes new poses: any input trajectory is discarded
    if kapture_data.trajectories:
        logger.warning("Input data contains trajectories: they will be ignored")
        kapture_data.trajectories.clear()
    else:
        kapture_data.trajectories = kapture.Trajectories()

    if not os.path.isfile(vocab_tree_path):
        raise ValueError(f'Vocabulary Tree file does not exist: {vocab_tree_path}')

    # COLMAP does not fully support rigs.
    if kapture_data.rigs is not None and kapture_data.trajectories is not None:
        # make sure, rigs are not used in trajectories.
        logger.info('remove rigs notation.')
        rigs_remove_inplace(kapture_data.trajectories, kapture_data.rigs)
        kapture_data.rigs.clear()

    # Prepare output
    # Set fixed name for COLMAP database
    colmap_db_path = path.join(colmap_path, 'colmap.db')
    image_list_path = path.join(colmap_path, 'images.list')
    reconstruction_path = path.join(colmap_path, "reconstruction")
    if 'delete_existing' not in skip_list:
        safe_remove_file(colmap_db_path, force)
        safe_remove_file(image_list_path, force)
        safe_remove_any_path(reconstruction_path, force)
    os.makedirs(reconstruction_path, exist_ok=True)

    # Copy colmap db to output
    if not os.path.exists(colmap_db_path):
        shutil.copy(input_database_path, colmap_db_path)

    # find correspondences between the colmap db and the kapture data
    images_all = {image_path: (ts, cam_id)
                  for ts, shot in kapture_data.records_camera.items()
                  for cam_id, image_path in shot.items()}

    colmap_db = COLMAPDatabase.connect(colmap_db_path)
    colmap_image_ids = database_extra.get_colmap_image_ids_from_db(colmap_db)
    colmap_cameras = database_extra.get_camera_ids_from_database(colmap_db)
    colmap_images = database_extra.get_images_from_database(colmap_db)
    colmap_db.close()

    # dict ( kapture_camera -> colmap_camera_id )
    colmap_camera_ids = {images_all[image_path][1]: colmap_cam_id
                         for image_path, colmap_cam_id in colmap_images if image_path in images_all}

    # images present in kapture but not yet registered in the colmap db
    images_to_add = {image_path: value
                     for image_path, value in images_all.items()
                     if image_path not in colmap_image_ids}

    flatten_images_to_add = [(ts, kapture_cam_id, image_path)
                             for image_path, (ts, kapture_cam_id) in images_to_add.items()]

    if 'feature_extract' not in skip_list:
        logger.info("Step 1: Feature extraction using colmap")
        with open(image_list_path, 'w') as fid:
            for image in images_to_add.keys():
                fid.write(image + "\n")

        colmap_lib.run_feature_extractor(
            colmap_binary,
            colmap_use_cpu,
            colmap_gpu_index,
            colmap_db_path,
            get_image_fullpath(kapture_path),
            image_list_path
        )

    if 'matches' not in skip_list:
        logger.info("Step 2: Compute matches with colmap")
        colmap_lib.run_vocab_tree_matcher(
            colmap_binary,
            colmap_use_cpu,
            colmap_gpu_index,
            colmap_db_path,
            vocab_tree_path,
            image_list_path
        )

    if 'fix_db_cameras' not in skip_list:
        logger.info("Step 3: Replace colmap generated cameras with kapture cameras")
        colmap_db = COLMAPDatabase.connect(colmap_db_path)
        database_extra.foreign_keys_off(colmap_db)

        # remove colmap generated cameras
        after_feature_extraction_colmap_cameras = database_extra.get_camera_ids_from_database(colmap_db)
        colmap_cameras_to_remove = [cam_id
                                    for cam_id in after_feature_extraction_colmap_cameras
                                    if cam_id not in colmap_cameras]
        for cam_id in colmap_cameras_to_remove:
            database_extra.remove_camera(colmap_db, cam_id)

        # put the correct cameras and image extrinsic back into the database
        cameras_to_add = kapture.Sensors()
        for image_path, (ts, kapture_cam_id) in images_to_add.items():
            if kapture_cam_id not in colmap_camera_ids:
                kapture_cam = kapture_data.sensors[kapture_cam_id]
                cameras_to_add[kapture_cam_id] = kapture_cam
        colmap_added_camera_ids = database_extra.add_cameras_to_database(cameras_to_add, colmap_db)
        colmap_camera_ids.update(colmap_added_camera_ids)

        database_extra.update_images_in_database_from_flatten(
            colmap_db,
            flatten_images_to_add,
            kapture_data.trajectories,
            colmap_camera_ids
        )

        database_extra.foreign_keys_on(colmap_db)
        colmap_db.commit()
        colmap_db.close()

    if 'image_registrator' not in skip_list:
        logger.info("Step 4: Run image_registrator")
        # run image_registrator
        colmap_lib.run_image_registrator(
            colmap_binary,
            colmap_db_path,
            input_reconstruction_path,
            reconstruction_path,
            image_registrator_options
        )

    # run model_converter
    if 'model_converter' not in skip_list:
        logger.info("Step 5: Export reconstruction results to txt")
        colmap_lib.run_model_converter(
            colmap_binary,
            reconstruction_path,
            reconstruction_path
        )
예제 #12
0
def colmap_build_sift_map(kapture_path: str,
                          colmap_path: str,
                          colmap_binary: str,
                          colmap_use_cpu: bool,
                          colmap_gpu_index: str,
                          vocab_tree_path: str,
                          point_triangulator_options: List[str],
                          skip_list: List[str],
                          force: bool) -> None:
    """
    Build a colmap model using default SIFT features with the kapture data.

    Pipeline: SIFT feature extraction -> populate the COLMAP DB with kapture
    cameras/poses -> vocabulary-tree matching -> point triangulation (or full
    mapping when no trajectories are available) -> export the model to text.
    Each named step can be disabled through skip_list.

    :param kapture_path: path to the kapture to use
    :param colmap_path: path to the colmap build
    :param colmap_binary: path to the colmap executable
    :param colmap_use_cpu: to use cpu only (and ignore gpu) or to use also gpu
    :param colmap_gpu_index: gpu index for sift extractor and mapper
    :param vocab_tree_path: path to the colmap vocabulary tree file
    :param point_triangulator_options: options for the point triangulator
    :param skip_list: list of steps to skip
    :param force: Silently overwrite kapture files if already exists.
    :raises ValueError: if records_camera/sensors are missing from the kapture
                        data, or if the vocabulary tree file does not exist.
    """
    os.makedirs(colmap_path, exist_ok=True)

    # Load input files first to make sure it is OK
    logger.info('loading kapture files...')
    kapture_data = kapture.io.csv.kapture_from_dir(kapture_path)

    if not (kapture_data.records_camera and kapture_data.sensors):
        raise ValueError('records_camera, sensors are mandatory')
    if not kapture_data.trajectories:
        logger.info('there are no trajectories, running mapper instead of point_triangulator')

    if not os.path.isfile(vocab_tree_path):
        raise ValueError(f'Vocabulary Tree file does not exist: {vocab_tree_path}')

    # COLMAP does not fully support rigs.
    if kapture_data.rigs is not None and kapture_data.trajectories is not None:
        # make sure, rigs are not used in trajectories.
        logger.info('remove rigs notation.')
        rigs_remove_inplace(kapture_data.trajectories, kapture_data.rigs)
        kapture_data.rigs.clear()

    # Set fixed name for COLMAP database
    colmap_db_path = path.join(colmap_path, 'colmap.db')
    image_list_path = path.join(colmap_path, 'images.list')
    reconstruction_path = path.join(colmap_path, "reconstruction")
    if 'delete_existing' not in skip_list:
        # clean up leftovers of a previous run (force silences confirmation)
        safe_remove_file(colmap_db_path, force)
        safe_remove_file(image_list_path, force)
        safe_remove_any_path(reconstruction_path, force)
    os.makedirs(reconstruction_path, exist_ok=True)

    if 'feature_extract' not in skip_list:
        logger.info("Step 1: Feature extraction using colmap")
        # write the image list for colmap, sorted by (timestamp, sensor_id)
        with open(image_list_path, 'w') as fid:
            for timestamp, sensor_id in sorted(kapture_data.records_camera.key_pairs()):
                fid.write(kapture_data.records_camera[timestamp][sensor_id] + "\n")

        colmap_lib.run_feature_extractor(
            colmap_binary,
            colmap_use_cpu,
            colmap_gpu_index,
            colmap_db_path,
            get_image_fullpath(kapture_path),
            image_list_path
        )

    # Update cameras in COLMAP:
    # - use only one camera for all images taken with the same camera (update all camera IDs)
    # - import camera intrinsics
    # - import camera pose
    if 'update_db_cameras' not in skip_list:
        logger.info("Step 2: Populate COLMAP DB with cameras and poses")
        colmap_db = COLMAPDatabase.connect(colmap_db_path)
        database_extra.update_DB_cameras_and_poses(colmap_db, kapture_data)
        # close db before running colmap processes in order to avoid locks
        colmap_db.close()

    # Extract matches with COLMAP
    if 'matches' not in skip_list:
        logger.info("Step 3: Compute matches with colmap")

        colmap_lib.run_vocab_tree_matcher(
            colmap_binary,
            colmap_use_cpu,
            colmap_gpu_index,
            colmap_db_path,
            vocab_tree_path)

    if kapture_data.trajectories is not None:
        # Generate priors for reconstruction
        txt_path = path.join(colmap_path, "priors_for_reconstruction")
        os.makedirs(txt_path, exist_ok=True)
        if 'priors_for_reconstruction' not in skip_list:
            logger.info('Step 4: Exporting priors for reconstruction.')
            colmap_db = COLMAPDatabase.connect(colmap_db_path)
            database_extra.generate_priors_for_reconstruction(kapture_data, colmap_db, txt_path)
            colmap_db.close()

        # Point triangulator
        reconstruction_path = path.join(colmap_path, "reconstruction")
        os.makedirs(reconstruction_path, exist_ok=True)
        if 'triangulation' not in skip_list:
            logger.info("Step 5: Triangulation")
            # known poses: triangulate points against the exported priors
            colmap_lib.run_point_triangulator(
                colmap_binary,
                colmap_db_path,
                get_image_fullpath(kapture_path),
                txt_path,
                reconstruction_path,
                point_triangulator_options
            )
    else:
        # mapper
        reconstruction_path = path.join(colmap_path, "reconstruction")
        os.makedirs(reconstruction_path, exist_ok=True)
        if 'triangulation' not in skip_list:
            logger.info("Step 5: Triangulation")
            # no poses available: run full SfM mapping instead
            colmap_lib.run_mapper(
                colmap_binary,
                colmap_db_path,
                get_image_fullpath(kapture_path),
                None,
                reconstruction_path,
                point_triangulator_options
            )
            # use reconstruction 0 as main
            # NOTE(review): assumes the mapper produced at least model '0' — confirm
            first_reconstruction = os.path.join(reconstruction_path, '0')
            files = os.listdir(first_reconstruction)
            for f in files:
                shutil.move(os.path.join(first_reconstruction, f), os.path.join(reconstruction_path, f))
            shutil.rmtree(first_reconstruction)

    # run model_converter
    if 'model_converter' not in skip_list:
        logger.info("Step 6: Export reconstruction results to txt")
        colmap_lib.run_model_converter(
            colmap_binary,
            reconstruction_path,
            reconstruction_path
        )
# --- Example #13 ---
def kapture_to_openmvg(kapture_data: kapture.Kapture, kapture_path: str,
                       image_action: TransferAction, openmvg_path: str) -> Dict:
    """
    Convert the kapture data into an openMVG dataset stored as a dictionary.
    The format is defined here:
    https://openmvg.readthedocs.io/en/latest/software/SfM/SfM_OutputFormat/

    :param kapture_data: the kapture data
    :param kapture_path: top directory of the kapture data and the images
    :param image_action: action to apply on images: link, copy, move or do nothing.
    :param openmvg_path: top directory of the openmvg data and images
    :return: an SfM_data, the openmvg structure, stored as a dictionary ready to be serialized
    """
    assert kapture_data.cameras is not None
    assert kapture_data.records_camera is not None
    cameras = kapture_data.cameras
    # Warn about any sensor that is not a camera: those will not be exported.
    extra_sensor_number = len(kapture_data.sensors)-len(cameras)
    if extra_sensor_number > 0:
        logger.warning(f'We will ignore {extra_sensor_number} sensors that are not camera')
    all_records_camera = list(kapture.flatten(kapture_data.records_camera))
    trajectories = kapture_data.trajectories

    # openmvg does not support rigs
    if kapture_data.rigs:
        logger.info('remove rigs notation.')
        rigs_remove_inplace(kapture_data.trajectories, kapture_data.rigs)
        kapture_data.rigs.clear()

    # Collect the cameras actually used in records and the image directories
    # (both kept as id->id dicts to match the helpers' expected input).
    used_cameras = {cam_id: cam_id for _, cam_id, _ in all_records_camera}
    image_dirs = {path.dirname(name): path.dirname(name)
                  for _, _, name in all_records_camera}

    # Deduce the common sub-directory shared by all images, if any.
    if len(image_dirs) > 1:
        sub_root_path = path.commonpath(list(image_dirs))
    elif len(image_dirs) == 1:
        sub_root_path = next(iter(image_dirs))
    else:
        sub_root_path: str = ''

    # With skip, images stay where they are; otherwise a new tree is built under openmvg_path.
    base_path = kapture_path if image_action == TransferAction.skip else openmvg_path
    root_path = os.path.abspath(path.join(base_path, sub_root_path))
    if image_action == TransferAction.root_link:
        if not sub_root_path:
            # We can not link directly to the top destination openmvg directory
            # We need an additional level
            root_path = path.join(root_path, 'images')
        kapture_records_path = get_image_fullpath(kapture_path, sub_root_path)
        # Do a unique images directory link
        # openmvg_root_path -> kapture/<records_dir>/openmvg_top_images_directory
        # beware that the paths are reverted in the symlink call
        os.symlink(kapture_records_path, root_path)

    # Export intrinsics first (this seeds the polymorphic/ptr-wrapper id counters),
    # then the views and poses.
    polymorphic_id_types = {}
    intrinsics, next_polymorphic_id, next_ptr_wrapper_id = _export_cameras(
        cameras, used_cameras, polymorphic_id_types, 1, 1)

    views, extrinsics = _export_images_and_poses(
        all_records_camera, cameras, trajectories,
        image_action, kapture_path,
        root_path, sub_root_path,
        polymorphic_id_types, next_polymorphic_id, next_ptr_wrapper_id)

    # Assemble the final SfM_data dictionary.
    return {SFM_DATA_VERSION: SFM_DATA_VERSION_NUMBER,
            ROOT_PATH: root_path,
            VIEWS: views,
            INTRINSICS: intrinsics,
            EXTRINSICS: extrinsics,
            STRUCTURE: [],
            CONTROL_POINTS: []}
# --- Example #14 ---
def import_silda(
    silda_dir_path: str,
    destination_kapture_dir_path: str,
    fallback_cam_model: str = 'FOV',
    do_split_cams: bool = False,
    corpus: Optional[str] = None,
    replace_pose_rig: bool = False,
    force_overwrite_existing: bool = False,
    images_import_strategy: TransferAction = TransferAction.link_absolute
) -> None:
    """
    Imports data from silda dataset.

    Reads images, intrinsics, trajectories and (optionally) rigs from the silda
    directory layout and writes them as a kapture dataset, then transfers the
    image files according to images_import_strategy.

    :param silda_dir_path: path to the silda top directory
    :param destination_kapture_dir_path: input path to kapture directory.
    :param fallback_cam_model: camera model to fallback when necessary
    :param do_split_cams: If true, re-organises and renames the image files to split apart cameras.
    :param corpus: the list of corpus to be imported, among 'mapping', 'query'.
    :param replace_pose_rig: if True, replaces poses of individual cameras with poses of the rig.
    :param force_overwrite_existing: if true, Silently overwrite kapture files if already exists.
    :param images_import_strategy: how to copy image files.
    :raises ValueError: if root_link transfer is combined with do_split_cams
                        (a single directory link cannot re-organise files).
    """

    # sanity check
    silda_dir_path = path_secure(path.abspath(silda_dir_path))
    destination_kapture_dir_path = path_secure(
        path.abspath(destination_kapture_dir_path))
    if TransferAction.root_link == images_import_strategy and do_split_cams:
        raise ValueError(
            'impossible to only link images directory and applying split cam.')
    # progress bars only shown below INFO level (i.e. DEBUG)
    hide_progress_bars = logger.getEffectiveLevel() >= logging.INFO

    # prepare output directory
    kapture.io.structure.delete_existing_kapture_files(
        destination_kapture_dir_path, force_overwrite_existing)
    os.makedirs(destination_kapture_dir_path, exist_ok=True)

    # images ###########################################################################################################
    logger.info('Processing images ...')
    # silda-images
    #   ...
    #   ├── 1445_0.png
    #   ├── 1445_1.png
    #   ...
    silda_images_root_path = path.join(silda_dir_path, 'silda-images')
    # list all png files (its PNG in silda) using a generator.
    if corpus is not None:
        assert corpus in SILDA_CORPUS_SPLIT_FILENAMES
        # if corpus specified, filter by those which directory name match corpus.
        logger.debug(f'only importing {corpus} part.')
        corpus_file_path = path.join(silda_dir_path,
                                     SILDA_CORPUS_SPLIT_FILENAMES[corpus])
        with open(corpus_file_path, 'rt') as corpus_file:
            corpus_filenames = corpus_file.readlines()
            image_filenames_original = sorted(filename.strip()
                                              for filename in corpus_filenames)
    else:
        # no corpus filter: take every png found under the images root
        image_filenames_original = sorted(
            filename for dir_path, sd, fs in os.walk(silda_images_root_path)
            for filename in fs if filename.endswith('.png'))

    image_filenames_kapture = []
    snapshots = kapture.RecordsCamera()
    image_name_to_ids = {}  # '1445_0.png' -> (1445, 0)
    for image_filename_original in tqdm(image_filenames_original,
                                        disable=hide_progress_bars):
        # retrieve info from image filename
        name_parts_match = SILDA_IMAGE_NAME_PATTERN.match(
            image_filename_original)
        assert name_parts_match is not None
        shot_info: Dict[str, Any]
        shot_info = name_parts_match.groupdict()
        shot_info['timestamp'] = int(
            shot_info['timestamp']
        )  # To avoid warnings about type of the value
        # eg. file_info = {'filename': '1445_0.png', 'timestamp': 1445, 'cam_id': '0'}
        # create a path of the image into NLE dir
        if do_split_cams:
            # re-organise images with subfolders per corpus/camera/timestamp.png
            kapture_image_filename = path.join(
                shot_info['cam_id'],
                '{:04d}.png'.format(shot_info['timestamp']))
        else:
            # keep the original file hierarchy
            kapture_image_filename = image_filename_original

        image_filenames_kapture.append(kapture_image_filename)
        snapshots[shot_info['timestamp'],
                  shot_info['cam_id']] = kapture_image_filename
        image_name_to_ids[shot_info['filename']] = (shot_info['timestamp'],
                                                    shot_info['cam_id'])

    assert len(image_filenames_kapture) == len(image_filenames_original)
    # intrinsics #######################################################################################################
    cameras = _import_cameras(silda_dir_path, snapshots, fallback_cam_model)

    # extrinsics #######################################################################################################
    trajectories = _import_trajectories(silda_dir_path, image_name_to_ids,
                                        hide_progress_bars)

    # rigs
    rigs = _make_rigs(replace_pose_rig, trajectories)

    # pack it all together
    kapture_data = kapture.Kapture(sensors=cameras,
                                   records_camera=snapshots,
                                   trajectories=trajectories,
                                   rigs=rigs)

    logger.info('saving to Kapture  ...')
    kapture.io.csv.kapture_to_dir(destination_kapture_dir_path, kapture_data)

    # finally import images
    if images_import_strategy != TransferAction.skip:
        # importing image files
        logger.info(f'importing {len(image_filenames_original)} images ...')
        assert len(image_filenames_original) == len(image_filenames_kapture)
        # NOTE(review): the loop variable is named *_kapture but iterates the
        # ORIGINAL filenames; also assumes the silda images dir is flat (names
        # from os.walk carry no sub-directory) — confirm against the dataset.
        image_file_paths_original = [
            path.join(silda_images_root_path, image_filename_kapture)
            for image_filename_kapture in image_filenames_original
        ]
        image_file_paths_kapture = [
            get_image_fullpath(destination_kapture_dir_path,
                               image_filename_kapture)
            for image_filename_kapture in image_filenames_kapture
        ]
        transfer_files_from_dir(image_file_paths_original,
                                image_file_paths_kapture,
                                images_import_strategy)
    logger.info('done.')
# --- Example #15 ---
def openmvg_to_kapture(input_json: Dict[str, Union[str, Dict]],
                       kapture_images_path: str,
                       image_action=TransferAction.skip) -> kapture.Kapture:
    """
    Convert an openMVG structure to a kapture object. Also copy, move or link the images files if necessary.

    :param input_json: the openmvg JSON parsed as a dictionary
    :param kapture_images_path: top directory to create the kapture images tree
    :param image_action: action to apply on images: link, copy, move or do nothing.
    :return: the constructed kapture object
    :raises ValueError: if root_path is missing while an image action is requested,
                        if an intrinsic lacks a polymorphic id, or if the camera
                        model is unknown/unsupported.
    """
    # openmvg serializes polymorphic types by id; the first occurrence carries the
    # type name, later occurrences reference it by id only.
    polymorphic_id_to_value = {}
    root_path: str = ''

    if input_json[ROOT_PATH]:
        root_path = input_json[ROOT_PATH]
    elif image_action == TransferAction.skip:
        logger.warning("No root_path in input file")
    else:  # It is needed to execute an action with the image file
        raise ValueError(
            f"Missing root_path to do image action '{image_action.name}'")
    openmvg_images_dir = path.basename(root_path)

    kapture_cameras = kapture.Sensors()
    if input_json.get(INTRINSICS):
        logger.info('Importing intrinsics')
        for sensor in input_json[INTRINSICS]:
            value = sensor[VALUE]
            if POLYMORPHIC_NAME in value:
                # new type name: store it for next instances
                polymorphic_id = value[POLYMORPHIC_ID] & GET_ID_MASK
                polymorphic_id_to_value[polymorphic_id] = value[
                    POLYMORPHIC_NAME]
                logger.debug("New camera_type: " +
                             polymorphic_id_to_value[polymorphic_id])
            else:
                if POLYMORPHIC_ID not in value:
                    raise ValueError(
                        f'{POLYMORPHIC_ID} is missing (intrinsics)')
                polymorphic_id = value[POLYMORPHIC_ID]

            if polymorphic_id not in polymorphic_id_to_value:
                raise ValueError(f'Unknown polymorphic_id {polymorphic_id}')

            camera_model = CameraModel(polymorphic_id_to_value[polymorphic_id])
            camera_data = value[PTR_WRAPPER][DATA]

            # Map each openMVG camera model to the closest kapture/colmap model.
            if camera_model == CameraModel.pinhole:
                # w, h, f, cx, cy
                camera = kapture.Camera(kapture.CameraType.SIMPLE_PINHOLE, [
                    int(camera_data[WIDTH]),
                    int(camera_data[HEIGHT]),
                    camera_data[FOCAL_LENGTH],
                    camera_data[PRINCIPAL_POINT][0],
                    camera_data[PRINCIPAL_POINT][1],
                ])
            elif camera_model == CameraModel.pinhole_radial_k1:
                # w, h, f, cx, cy, k
                camera = kapture.Camera(kapture.CameraType.SIMPLE_RADIAL, [
                    int(camera_data[WIDTH]),
                    int(camera_data[HEIGHT]), camera_data[FOCAL_LENGTH],
                    camera_data[PRINCIPAL_POINT][0],
                    camera_data[PRINCIPAL_POINT][1], camera_data[DISTO_K1][0]
                ])
            elif camera_model == CameraModel.pinhole_radial_k3:
                # w, h, f, cx, cy, k1, k2, k3
                camera = kapture.Camera(kapture.CameraType.RADIAL, [
                    int(camera_data[WIDTH]),
                    int(camera_data[HEIGHT]), camera_data[FOCAL_LENGTH],
                    camera_data[PRINCIPAL_POINT][0],
                    camera_data[PRINCIPAL_POINT][1], camera_data[DISTO_K3][0],
                    camera_data[DISTO_K3][1]
                ])
                # camera_data["disto_k3"][2] ignored: radial model has two distortion param, while openMVG's has three
            elif camera_model == CameraModel.pinhole_brown_t2:
                # w, h, f, cx, cy, k1, k2, k3, t1, t2
                if float(camera_data[DISTO_T2][2]) != 0:
                    # if k3 not null, use FULL_OPENCV, otherwise OPENCV
                    # w, h, fx, fy, cx, cy, k1, k2, p1, p2, k3, k4, k5, k6
                    value0 = camera_data[VALUE0]
                    disto_t2 = camera_data[DISTO_T2]
                    camera = kapture.Camera(kapture.CameraType.FULL_OPENCV, [
                        int(value0[WIDTH]),
                        int(value0[HEIGHT]), value0[FOCAL_LENGTH],
                        value0[FOCAL_LENGTH], value0[PRINCIPAL_POINT][0],
                        value0[PRINCIPAL_POINT][1], disto_t2[0], disto_t2[1],
                        disto_t2[3], disto_t2[4], disto_t2[2], 0, 0, 0
                    ])
                else:
                    # w, h, fx, fy, cx, cy, k1, k2, p1, p2
                    value0 = camera_data[VALUE0]
                    disto_t2 = camera_data[DISTO_T2]
                    camera = kapture.Camera(kapture.CameraType.OPENCV, [
                        int(value0[WIDTH]),
                        int(value0[HEIGHT]), value0[FOCAL_LENGTH],
                        value0[FOCAL_LENGTH], value0[PRINCIPAL_POINT][0],
                        value0[PRINCIPAL_POINT][1], disto_t2[0], disto_t2[1],
                        disto_t2[3], disto_t2[4]
                    ])
            elif camera_model == CameraModel.fisheye:
                logger.warning(
                    "OpenMVG fisheye models are not compatible with OpenCV."
                    " Using SIMPLE_RADIAL_FISHEYE and forcing distortion to 0")
                # w, h, f, cx, cy, k
                value0 = camera_data[VALUE0]
                camera = kapture.Camera(
                    kapture.CameraType.SIMPLE_RADIAL_FISHEYE, [
                        int(value0[WIDTH]),
                        int(value0[HEIGHT]), value0[FOCAL_LENGTH],
                        value0[PRINCIPAL_POINT][0], value0[PRINCIPAL_POINT][1],
                        0
                    ])
            else:
                raise ValueError(f'Camera model {camera_model} not supported')

            kapture_cameras[str(sensor[KEY])] = camera

    # Bugfix: these were initialized as `{int: str}` / `{int: int}`, which builds
    # real dicts containing the *type objects* as a junk entry. Use empty,
    # properly annotated dicts instead.
    device_identifiers: Dict[int, str] = {}  # Pose id -> device id
    timestamp_for_pose: Dict[int, int] = {}  # Pose id -> timestamp
    records_camera = kapture.RecordsCamera()
    if input_json.get(VIEWS):
        views = input_json[VIEWS]
        if image_action == TransferAction.root_link:
            # Do a unique images directory link
            # kapture/<records_dir>/openmvg_top_images_directory -> openmvg_root_path
            kapture_records_path = get_image_fullpath(kapture_images_path)
            os.makedirs(kapture_records_path, exist_ok=True)
            os.symlink(root_path,
                       path.join(kapture_records_path, openmvg_images_dir))
        logger.info(f'Importing {len(views)} images')
        # Progress bar only in debug or info level
        if image_action != TransferAction.skip and image_action != TransferAction.root_link\
                and logger.getEffectiveLevel() <= logging.INFO:
            progress_bar = tqdm(total=len(views))
        else:
            progress_bar = None
        for view in views:
            input_data = view[VALUE][PTR_WRAPPER][DATA]
            pose_id = input_data[ID_POSE]
            # All two values should be the same (?)
            # NOTE(review): a falsy id_view (0) falls back to the view key — confirm intended
            if input_data[ID_VIEW]:
                timestamp = input_data[ID_VIEW]
            else:
                timestamp = view[KEY]
            device_id = str(input_data[ID_INTRINSIC]
                            )  # device_id must be a string for kapture
            device_identifiers[pose_id] = device_id
            timestamp_for_pose[pose_id] = timestamp
            filename: str
            if input_data.get(LOCAL_PATH):
                filename = path.join(input_data[LOCAL_PATH],
                                     input_data[FILENAME])
            else:
                filename = input_data[FILENAME]
            if root_path:
                src_path = path.join(root_path, filename)
            else:
                src_path = filename

            # Add the common openmvg images directory in front of the filename
            kapture_filename = path.join(openmvg_images_dir, filename)
            if image_action != TransferAction.skip and image_action != TransferAction.root_link:
                dst_path = get_image_fullpath(kapture_images_path,
                                              kapture_filename)
                # Create destination directory if necessary
                dst_dir = path.dirname(dst_path)
                if not path.isdir(dst_dir):
                    os.makedirs(dst_dir, exist_ok=True)
                # Check if already exist
                if path.exists(dst_path):
                    os.unlink(dst_path)
                # Create file or link
                if image_action == TransferAction.copy:
                    shutil.copy2(src_path, dst_path)
                elif image_action == TransferAction.move:
                    shutil.move(src_path, dst_path)
                else:
                    # Individual link
                    if image_action == TransferAction.link_relative:
                        # Compute relative path
                        src_path = path.relpath(src_path, dst_dir)
                    os.symlink(src_path, dst_path)
                    # This might crash on Windows if the user executing this code has no admin privilege
                progress_bar and progress_bar.update(1)

            key = (timestamp, device_id)  # tuple of int,str
            records_camera[key] = path_secure(kapture_filename)
        progress_bar and progress_bar.close()

    trajectories = kapture.Trajectories()
    if input_json.get(EXTRINSICS):
        extrinsics = input_json[EXTRINSICS]
        logger.info(f'Importing {len(extrinsics)} extrinsics -> trajectories')
        for pose in extrinsics:
            pose_id = pose[KEY]
            center = pose[VALUE][CENTER]
            rotation = pose[VALUE][ROTATION]
            # openMVG stores camera center; kapture stores translation t = -R*c
            kap_translation = -1 * np.matmul(rotation, center)
            kap_pose = kapture.PoseTransform(
                quaternion.from_rotation_matrix(rotation), kap_translation)
            timestamp = timestamp_for_pose.get(pose_id)
            if timestamp is None:
                logger.warning(f'Missing timestamp for extrinsic {pose_id}')
                continue
            device_id = device_identifiers.get(pose_id)
            if device_id is None:
                logger.warning(f'Missing device for extrinsic {pose_id}')
                continue
            trajectories[(timestamp,
                          device_id)] = kap_pose  # tuple of int,str -> 6D pose
    kapture_data = kapture.Kapture(sensors=kapture_cameras,
                                   records_camera=records_camera,
                                   trajectories=trajectories)
    return kapture_data
# --- Example #16 ---
def _export_images_and_poses(all_records_camera, cameras, trajectories,
                             image_action, kapture_path,
                             root_path, sub_root_path,
                             polymorphic_id_types, polymorphic_id_current, ptr_wrapper_id_current) -> Tuple[List, List]:
    """
    Build the openMVG 'views' and 'extrinsics' lists from kapture camera records.

    Also transfers (copy/move/link) each image file into the openMVG tree,
    unless image_action is skip or root_link. A sequential counter
    (global_timestamp) is used as both view id and pose id.

    :param all_records_camera: flattened (timestamp, cam_id, image_name) records
    :param cameras: kapture sensors, used to read image width/height
    :param trajectories: kapture trajectories (poses), may lack some timestamps
    :param image_action: TransferAction to apply on each image file
    :param kapture_path: top directory of the kapture data and images
    :param root_path: destination root for transferred images
    :param sub_root_path: common sub-directory of all kapture image names
    :param polymorphic_id_types: openMVG polymorphic-type registry, updated in place
    :param polymorphic_id_current: next free polymorphic id
    :param ptr_wrapper_id_current: next free cereal ptr_wrapper id
    :return: (views, extrinsics) lists ready for the SfM_data dictionary
    """
    views = []
    extrinsics = []
    global_timestamp = 0
    # process all images
    for timestamp, cam_id, kapture_name in all_records_camera:
        local_path = path.dirname(path.relpath(kapture_name, sub_root_path))
        filename = path.basename(kapture_name)
        if image_action != TransferAction.skip and image_action != TransferAction.root_link:
            # Process the image action
            src_path = get_image_fullpath(kapture_path, kapture_name)
            dst_path = path.join(root_path, local_path, filename)
            dst_dir = path.dirname(dst_path)
            if not path.isdir(dst_dir):
                os.makedirs(dst_dir, exist_ok=True)
            # Check if already exist
            if path.exists(dst_path):
                os.unlink(dst_path)
            # Create file or link
            if image_action == TransferAction.copy:
                shutil.copy2(src_path, dst_path)
            elif image_action == TransferAction.move:
                shutil.move(src_path, dst_path)
            else:
                # Link
                if image_action == TransferAction.link_relative:
                    # Compute relative path
                    src_path = path.relpath(src_path, dst_dir)
                os.symlink(src_path, dst_path)

        # first two camera params are width and height in kapture models
        camera_params = cameras[cam_id].camera_params
        view_data = {LOCAL_PATH: local_path,
                     FILENAME: filename,
                     WIDTH: int(camera_params[0]),
                     HEIGHT: int(camera_params[1]),
                     ID_VIEW: global_timestamp,
                     ID_INTRINSIC: cam_id,
                     ID_POSE: global_timestamp}

        view = {}
        # retrieve image pose from trajectories
        if timestamp not in trajectories:
            # no pose known: plain View type
            view[POLYMORPHIC_ID] = VIEW_SPECIAL_POLYMORPHIC_ID
        else:
            # there is a pose for that timestamp
            # The poses are stored both as priors (in the 'views' table) and as known poses (in the 'extrinsics' table)
            assert cam_id in trajectories[timestamp]
            if VIEW_PRIORS not in polymorphic_id_types:
                # if this is the first time view_priors is encountered
                # set the first bit of polymorphic_id_current to 1
                view[POLYMORPHIC_ID] = polymorphic_id_current | NEW_ID_MASK
                view[POLYMORPHIC_NAME] = VIEW_PRIORS
                polymorphic_id_types[VIEW_PRIORS] = polymorphic_id_current
                polymorphic_id_current += 1
            else:
                view[POLYMORPHIC_ID] = polymorphic_id_types[VIEW_PRIORS]

            # openMVG stores the camera center (inverse pose translation) and rotation matrix
            pose_tr = trajectories[timestamp].get(cam_id)
            prior_q = pose_tr.r
            prior_t = pose_tr.inverse().t_raw
            pose_data = {CENTER: prior_t,
                         ROTATION: quaternion.as_rotation_matrix(prior_q).tolist()}

            view_data[USE_POSE_CENTER_PRIOR] = True
            view_data[CENTER_WEIGHT] = [1.0, 1.0, 1.0]
            view_data[CENTER] = prior_t
            view_data[USE_POSE_ROTATION_PRIOR] = True
            view_data[ROTATION_WEIGHT] = 1.0
            view_data[ROTATION] = pose_data[ROTATION]
            extrinsics.append({KEY: global_timestamp, VALUE: pose_data})

        # it is assumed that this view is only encountered once
        # set the first bit of ptr_wrapper_id_current to 1
        view_wrapper = {ID: ptr_wrapper_id_current | NEW_ID_MASK,
                        DATA: view_data}
        ptr_wrapper_id_current += 1

        view[PTR_WRAPPER] = view_wrapper
        views.append({KEY: global_timestamp, VALUE: view})

        global_timestamp += 1

    return views, extrinsics
# --- Example #17 ---
def kapture_to_openmvg(kapture_data: kapture.Kapture, kapture_path: str,
                       image_action: TransferAction, openmvg_path: str) -> Dict:
    """
    Convert the kapture data into an openMVG dataset stored as a dictionary.
    The format is defined here:
    https://openmvg.readthedocs.io/en/latest/software/SfM/SfM_OutputFormat/

    Side effects: depending on image_action, image files may be copied, moved or
    symlinked under openmvg_path; if kapture_data contains rigs, they are removed
    in place from kapture_data (trajectories are rewritten per camera).

    :param kapture_data: the kapture data
    :param kapture_path: top directory of the kapture data and the images
    :param image_action: action to apply on images: link, copy, move or do nothing.
    :param openmvg_path: top directory of the openmvg data and images
    :return: an SfM_data, the openmvg structure, stored as a dictionary ready to be serialized
    :raises ValueError: if a camera uses a model not convertible to openMVG
    """

    assert kapture_data.cameras is not None
    assert kapture_data.records_camera is not None
    cameras = kapture_data.cameras
    # Check we don't have other sensors defined
    extra_sensor_number = len(kapture_data.sensors)-len(cameras)
    if extra_sensor_number > 0:
        logger.warning(f'We will ignore {extra_sensor_number} sensors that are not camera')
    records_camera = kapture_data.records_camera
    all_records_camera = list(kapture.flatten(records_camera))
    trajectories = kapture_data.trajectories

    # openmvg does not support rigs
    if kapture_data.rigs:
        logger.info('remove rigs notation.')
        rigs_remove_inplace(kapture_data.trajectories, kapture_data.rigs)
        kapture_data.rigs.clear()

    # Compute root path and camera used in records
    # (plain dicts are used as insertion-ordered sets here)
    sub_root_path: str = ''
    image_dirs = {}
    used_cameras = {}
    for _, cam_id, name in all_records_camera:
        used_cameras[cam_id] = cam_id
        img_dir = path.dirname(name)
        image_dirs[img_dir] = img_dir
    if len(image_dirs) > 1:
        # Find if they share a top path
        image_dirs_list = list(image_dirs.keys())
        sub_root_path = path.commonpath(image_dirs_list)
    elif len(image_dirs) == 1:
        sub_root_path = next(iter(image_dirs.keys()))
    if image_action == TransferAction.skip:
        # images stay where they are, reference the kapture tree
        root_path = kapture_path
    else:  # We will create a new hierarchy of images
        root_path = openmvg_path
    root_path = os.path.abspath(path.join(root_path, sub_root_path))
    if image_action == TransferAction.root_link:
        if not sub_root_path:
            # We can not link directly to the top destination openmvg directory
            # We need an additional level
            root_path = path.join(root_path, 'images')
        kapture_records_path = get_image_fullpath(kapture_path, sub_root_path)
        # Do a unique images directory link
        # openmvg_root_path -> kapture/<records_dir>/openmvg_top_images_directory
        # beware that the paths are reverted in the symlink call
        os.symlink(kapture_records_path, root_path)
    sfm_data = {SFM_DATA_VERSION: SFM_DATA_VERSION_NUMBER,
                ROOT_PATH: root_path}

    views = []
    intrinsics = []
    extrinsics = []
    # serialization bookkeeping: every type gets an id the first time it is
    # written (flagged with NEW_ID_MASK) and is referenced by that id afterwards
    # NOTE(review): this mirrors a cereal-style polymorphic-id scheme — confirm
    polymorphic_id_current = 1
    ptr_wrapper_id_current = 1
    polymorphic_id_types = {}

    # process all cameras
    for cam_id, camera in cameras.items():
        # Ignore not used cameras
        if not used_cameras.get(cam_id):
            logger.warning(f'Skipping camera definition {cam_id} {camera.name} without recorded images.')
            continue
        cam_type = camera.camera_type
        camera_params = camera.camera_params
        # map each kapture camera model onto the closest openMVG model,
        # faking the missing parameters when there is no exact equivalent
        if cam_type == kapture.CameraType.SIMPLE_PINHOLE:
            # w, h, f, cx, cy
            model_used = CameraModel.pinhole
            data = _get_intrinsic_pinhole(camera_params)
        elif cam_type == kapture.CameraType.PINHOLE:
            # w, h, f, cx, cy
            model_used = CameraModel.pinhole
            faked_params = [camera_params[0], camera_params[1],  # width height
                            (camera_params[2] + camera_params[3])/2,  # fx+fy/2 as f
                            camera_params[4], camera_params[5]]  # cx cy
            data = _get_intrinsic_pinhole(faked_params)
        elif cam_type == kapture.CameraType.SIMPLE_RADIAL:
            # w, h, f, cx, cy, k
            model_used = CameraModel.pinhole_radial_k1
            data = _get_intrinsic_pinhole_radial_k1(camera_params)
        elif cam_type == kapture.CameraType.RADIAL:
            # w, h, f, cx, cy, k1, k2, k3
            model_used = CameraModel.pinhole_radial_k3
            faked_params = [camera_params[0], camera_params[1],  # width height
                            camera_params[2],  # f
                            camera_params[3], camera_params[4],  # cx cy
                            camera_params[5], camera_params[6], 0  # k1, k2, k3
                            ]
            data = _get_intrinsic_pinhole_radial_k3(faked_params)
        elif cam_type == kapture.CameraType.FULL_OPENCV or cam_type == kapture.CameraType.OPENCV:
            # w, h, f, cx, cy, k1, k2, k3, t1, t2
            model_used = CameraModel.pinhole_brown_t2
            # FULL_OPENCV carries k3 at index 10; plain OPENCV does not
            k3 = camera_params[10] if len(camera_params) > 10 else 0
            faked_params = [camera_params[0], camera_params[1],  # width height
                            (camera_params[2] + camera_params[3])/2,  # fx+fy/2 as f
                            camera_params[4], camera_params[5],  # cx cy
                            camera_params[6], camera_params[7], k3,  # k1, k2, k3
                            camera_params[8], camera_params[9]  # p1, p2 (=t1, t2)
                            ]
            data = _get_intrinsic_pinhole_brown_t2(faked_params)
        elif cam_type == kapture.CameraType.OPENCV_FISHEYE:
            logger.warning('OpenCV fisheye model is not compatible with OpenMVG. Forcing distortion to 0')
            # w, h, f, cx, cy, k1, k2, k3, k4
            model_used = CameraModel.fisheye
            faked_params = [camera_params[0], camera_params[1],  # width height
                            (camera_params[2] + camera_params[3]) / 2,  # fx+fy/2 as f
                            camera_params[4], camera_params[5],  # cx cy
                            0, 0,  # k1, k2
                            0, 0  # k3, k4
                            ]
            data = _get_intrinsic_fisheye(faked_params)
        elif cam_type == kapture.CameraType.RADIAL_FISHEYE or cam_type == kapture.CameraType.SIMPLE_RADIAL_FISHEYE:
            logger.warning('OpenCV fisheye model is not compatible with OpenMVG. Forcing distortion to 0')
            # w, h, f, cx, cy, k1, k2, k3, k4
            model_used = CameraModel.fisheye
            faked_params = [camera_params[0], camera_params[1],  # width height
                            camera_params[2],  # f
                            camera_params[3], camera_params[4],  # cx cy
                            0, 0,  # k1, k2
                            0, 0  # k3, k4
                            ]
            data = _get_intrinsic_fisheye(faked_params)
        elif cam_type == kapture.CameraType.UNKNOWN_CAMERA:
            logger.info(f'Camera {cam_id}: Unknown camera model, using simple radial')
            # Choose simple radial model, to allow openMVG to determine distortion param
            # w, h, f, cx, cy, k
            model_used = CameraModel.pinhole_radial_k1
            faked_params = [camera_params[0], camera_params[1],  # width height
                            max(camera_params[0], camera_params[1])*DEFAULT_FOCAL_LENGTH_FACTOR,  # max(w,h)*1.2 as f
                            int(camera_params[0]/2), int(camera_params[1]/2),  # cx cy
                            0.0]  # k1
            data = _get_intrinsic_pinhole_radial_k1(faked_params)
        else:
            raise ValueError(f'Camera model {cam_type.value} not supported')

        intrinsic = {}
        if model_used not in polymorphic_id_types:
            # if this is the first time model_used is encountered
            # set the first bit of polymorphic_id_current to 1
            intrinsic[POLYMORPHIC_ID] = polymorphic_id_current | NEW_ID_MASK
            intrinsic[POLYMORPHIC_NAME] = model_used.name
            polymorphic_id_types[model_used] = polymorphic_id_current
            polymorphic_id_current += 1
        else:
            intrinsic[POLYMORPHIC_ID] = polymorphic_id_types[model_used]

        # it is assumed that this camera is only encountered once
        # set the first bit of ptr_wrapper_id_current to 1
        data_wrapper = {ID: ptr_wrapper_id_current | NEW_ID_MASK,
                        DATA: data}
        ptr_wrapper_id_current += 1

        intrinsic[PTR_WRAPPER] = data_wrapper
        intrinsics.append({KEY: cam_id, VALUE: intrinsic})

    global_timestamp = 0

    # process all images
    for timestamp, cam_id, kapture_name in all_records_camera:
        local_path = path.dirname(path.relpath(kapture_name, sub_root_path))
        filename = path.basename(kapture_name)
        if image_action != TransferAction.skip and image_action != TransferAction.root_link:
            # Process the image action
            src_path = get_image_fullpath(kapture_path, kapture_name)
            dst_path = path.join(root_path, local_path, filename)
            dst_dir = path.dirname(dst_path)
            if not path.isdir(dst_dir):
                os.makedirs(dst_dir, exist_ok=True)
            # Check if already exist
            if path.exists(dst_path):
                os.unlink(dst_path)
            # Create file or link
            if image_action == TransferAction.copy:
                shutil.copy2(src_path, dst_path)
            elif image_action == TransferAction.move:
                shutil.move(src_path, dst_path)
            else:
                # Link
                if image_action == TransferAction.link_relative:
                    # Compute relative path
                    src_path = path.relpath(src_path, dst_dir)
                os.symlink(src_path, dst_path)

        camera_params = cameras[cam_id].camera_params
        # global_timestamp doubles as both the view id and the pose id
        view_data = {LOCAL_PATH: local_path,
                     FILENAME: filename,
                     WIDTH: int(camera_params[0]),
                     HEIGHT: int(camera_params[1]),
                     ID_VIEW: global_timestamp,
                     ID_INTRINSIC: cam_id,
                     ID_POSE: global_timestamp}

        view = {}
        # retrieve image pose from trajectories
        if timestamp not in trajectories:
            # no pose: emit a plain view, no prior, no extrinsic entry
            view[POLYMORPHIC_ID] = VIEW_SPECIAL_POLYMORPHIC_ID
        else:
            # there is a pose for that timestamp
            # The poses are stored both as priors (in the 'views' table) and as known poses (in the 'extrinsics' table)
            assert cam_id in trajectories[timestamp]
            if VIEW_PRIORS not in polymorphic_id_types:
                # if this is the first time view_priors is encountered
                # set the first bit of polymorphic_id_current to 1
                view[POLYMORPHIC_ID] = polymorphic_id_current | NEW_ID_MASK
                view[POLYMORPHIC_NAME] = VIEW_PRIORS
                polymorphic_id_types[VIEW_PRIORS] = polymorphic_id_current
                polymorphic_id_current += 1
            else:
                view[POLYMORPHIC_ID] = polymorphic_id_types[VIEW_PRIORS]

            # NOTE(review): the center is taken from the inverse pose's translation,
            # which implies the stored pose is world-to-camera — confirm convention
            pose_tr = trajectories[timestamp].get(cam_id)
            prior_q = pose_tr.r
            prior_t = pose_tr.inverse().t_raw
            pose_data = {CENTER: prior_t,
                         ROTATION: quaternion.as_rotation_matrix(prior_q).tolist()}

            view_data[USE_POSE_CENTER_PRIOR] = True
            view_data[CENTER_WEIGHT] = [1.0, 1.0, 1.0]
            view_data[CENTER] = prior_t
            view_data[USE_POSE_ROTATION_PRIOR] = True
            view_data[ROTATION_WEIGHT] = 1.0
            view_data[ROTATION] = pose_data[ROTATION]
            extrinsics.append({KEY: global_timestamp, VALUE: pose_data})

        # it is assumed that this view is only encountered once
        # set the first bit of ptr_wrapper_id_current to 1
        view_wrapper = {ID: ptr_wrapper_id_current | NEW_ID_MASK,
                        DATA: view_data}
        ptr_wrapper_id_current += 1

        view[PTR_WRAPPER] = view_wrapper
        views.append({KEY: global_timestamp, VALUE: view})

        global_timestamp += 1

    sfm_data[VIEWS] = views
    sfm_data[INTRINSICS] = intrinsics
    sfm_data[EXTRINSICS] = extrinsics
    # structure and control points are left empty: only cameras/views/poses are exported
    sfm_data[STRUCTURE] = []
    sfm_data[CONTROL_POINTS] = []

    return sfm_data
예제 #18
0
def extract_kapture_global_features(kapture_root_path: str,
                                    net,
                                    global_features_type: str,
                                    trfs,
                                    pooling='mean',
                                    gemp=3,
                                    whiten=None,
                                    threads=8,
                                    batch_size=16):
    """ Extract features from trained model (network) on a given dataset.

    Writes one global feature file per image into the kapture dataset and
    creates/updates the corresponding kapture.GlobalFeatures definition
    ('dirtorch', metric 'L2'). Images that already have features of
    global_features_type are skipped.

    :param kapture_root_path: root path of the kapture dataset
    :param net: trained network; must expose .iscuda and, when whitening, .pca
    :param global_features_type: name of the global features type to write
    :param trfs: an image transform description string, or a list of them
                 (one extraction pass per transform, pooled afterwards)
    :param pooling: pooling mode passed to pool() to merge per-transform descriptors
    :param gemp: pooling exponent passed to pool() (presumably GeM power — confirm)
    :param whiten: optional kwargs for common.whiten_features; None disables whitening
    :param threads: number of data-loading threads
    :param batch_size: extraction batch size
    """
    print(f'loading {kapture_root_path}')
    # keypoints/descriptors/matches tars are opened read-only;
    # global features are opened in append mode since this function writes them
    with get_all_tar_handlers(kapture_root_path,
                              mode={
                                  kapture.Keypoints: 'r',
                                  kapture.Descriptors: 'r',
                                  kapture.GlobalFeatures: 'a',
                                  kapture.Matches: 'r'
                              }) as tar_handlers:
        kdata = kapture_from_dir(kapture_root_path,
                                 None,
                                 skip_list=[
                                     kapture.Keypoints, kapture.Descriptors,
                                     kapture.Matches, kapture.Points3d,
                                     kapture.Observations
                                 ],
                                 tar_handlers=tar_handlers)
        root = get_image_fullpath(kapture_root_path, image_filename=None)
        assert kdata.records_camera is not None
        imgs = [
            image_name
            for _, _, image_name in kapture.flatten(kdata.records_camera)
        ]
        if kdata.global_features is None:
            kdata.global_features = {}

        # skip images whose features of this type were already extracted
        if global_features_type in kdata.global_features:
            imgs = [
                image_name for image_name in imgs if image_name not in
                kdata.global_features[global_features_type]
            ]
        if len(imgs) == 0:
            print('All global features are already extracted')
            return

        dataset = ImageList(img_list_path=None, root=root, imgs=imgs)

        print(f'\nEvaluation on {dataset}')
        # extract DB feats
        bdescs = []
        trfs_list = [trfs] if isinstance(trfs, str) else trfs

        # NOTE(review): the loop variable shadows the `trfs` parameter; harmless
        # because trfs_list is already built, but fragile if code is moved
        for trfs in trfs_list:
            kw = dict(iscuda=net.iscuda,
                      threads=threads,
                      batch_size=batch_size,
                      same_size='Pad' in trfs or 'Crop' in trfs)
            bdescs.append(
                extract_image_features(dataset, trfs, net, desc="DB", **kw))

        # pool from multiple transforms (scales)
        bdescs = tonumpy(F.normalize(pool(bdescs, pooling, gemp), p=2, dim=1))

        if whiten is not None:
            bdescs = common.whiten_features(bdescs, net.pca, **whiten)

        print('writing extracted global features')
        os.umask(0o002)  # clear only the other-write bit: created files stay group-writable
        gfeat_dtype = bdescs.dtype
        gfeat_dsize = bdescs.shape[1]
        if global_features_type not in kdata.global_features:
            # first extraction of this type: create and persist its definition
            kdata.global_features[
                global_features_type] = kapture.GlobalFeatures(
                    'dirtorch', gfeat_dtype, gfeat_dsize, 'L2')
            global_features_config_absolute_path = get_feature_csv_fullpath(
                kapture.GlobalFeatures, global_features_type,
                kapture_root_path)
            global_features_to_file(
                global_features_config_absolute_path,
                kdata.global_features[global_features_type])
        else:
            # the existing definition must match what the network produced
            assert kdata.global_features[
                global_features_type].dtype == gfeat_dtype
            assert kdata.global_features[
                global_features_type].dsize == gfeat_dsize
            assert kdata.global_features[
                global_features_type].metric_type == 'L2'
        # write one feature file per image and register it in the kapture data
        for i in tqdm.tqdm(range(dataset.nimg)):
            image_name = dataset.get_key(i)
            global_feature_fullpath = get_global_features_fullpath(
                global_features_type, kapture_root_path, image_name,
                tar_handlers)
            gfeat_i = bdescs[i, :]
            assert gfeat_i.shape == (gfeat_dsize, )
            image_global_features_to_file(global_feature_fullpath, gfeat_i)
            kdata.global_features[global_features_type].add(image_name)
            del gfeat_i

        del bdescs

        if not global_features_check_dir(
                kdata.global_features[global_features_type],
                global_features_type, kapture_root_path, tar_handlers):
            print(
                'global feature extraction ended successfully but not all files were saved'
            )
        else:
            print('Features extracted.')
예제 #19
0
def merge_keep_ids(
        kapture_list: List[
            kapture.
            Kapture],  # noqa: C901: function a bit long but not too complex
        skip_list: List[Type],
        data_paths: List[str],
        kapture_path: str,
        images_import_method: TransferAction) -> kapture.Kapture:
    """
    Merge multiple kapture while keeping ids (sensor_id) identical in merged and inputs.

    :param kapture_list: list of kapture to merge.
    :param skip_list: optional types not to merge. sensors and rigs are unskippable
    :param data_paths: list of path to root path directory in same order as mentioned in kapture_list.
    :param kapture_path: directory root path to the merged kapture.
    :param images_import_method: method to transfer image files
    :return: merged kapture
    """
    merged = kapture.Kapture()

    # sensors and rigs can not be skipped: always take the union.
    # get_new_if_not_empty keeps the previous value (None) when the merge is empty.
    merged.sensors = get_new_if_not_empty(
        merge_sensors([kap.sensors for kap in kapture_list]), merged.sensors)
    merged.rigs = get_new_if_not_empty(
        merge_rigs([kap.rigs for kap in kapture_list]), merged.rigs)

    # every field below is merged only when its type is absent from skip_list;
    # when the merge evaluates to false, the property is left as None
    if kapture.Trajectories not in skip_list:
        merged.trajectories = get_new_if_not_empty(
            merge_trajectories([kap.trajectories for kap in kapture_list]),
            merged.trajectories)

    if kapture.RecordsCamera not in skip_list:
        merged.records_camera = get_new_if_not_empty(
            merge_records_camera([kap.records_camera for kap in kapture_list]),
            merged.records_camera)
        # transfer the actual image files along with the camera records
        image_names_per_kapture = [
            [name for _, _, name in kapture.flatten(kap.records_camera)]
            if kap.records_camera is not None else []
            for kap in kapture_list]
        image_roots = [get_image_fullpath(data_path, image_filename=None)
                       for data_path in data_paths]
        merge_records_data(image_names_per_kapture, image_roots,
                           kapture_path, images_import_method)

    if kapture.RecordsLidar not in skip_list:
        merged.records_lidar = get_new_if_not_empty(
            merge_records_lidar([kap.records_lidar for kap in kapture_list]),
            merged.records_lidar)
    if kapture.RecordsWifi not in skip_list:
        merged.records_wifi = get_new_if_not_empty(
            merge_records_wifi([kap.records_wifi for kap in kapture_list]),
            merged.records_wifi)
    if kapture.RecordsBluetooth not in skip_list:
        merged.records_bluetooth = get_new_if_not_empty(
            merge_records_bluetooth([kap.records_bluetooth for kap in kapture_list]),
            merged.records_bluetooth)
    if kapture.RecordsGnss not in skip_list:
        merged.records_gnss = get_new_if_not_empty(
            merge_records_gnss([kap.records_gnss for kap in kapture_list]),
            merged.records_gnss)
    if kapture.RecordsAccelerometer not in skip_list:
        merged.records_accelerometer = get_new_if_not_empty(
            merge_records_accelerometer([kap.records_accelerometer for kap in kapture_list]),
            merged.records_accelerometer)
    if kapture.RecordsGyroscope not in skip_list:
        merged.records_gyroscope = get_new_if_not_empty(
            merge_records_gyroscope([kap.records_gyroscope for kap in kapture_list]),
            merged.records_gyroscope)
    if kapture.RecordsMagnetic not in skip_list:
        merged.records_magnetic = get_new_if_not_empty(
            merge_records_magnetic([kap.records_magnetic for kap in kapture_list]),
            merged.records_magnetic)

    # reconstruction data: except points and observations, files are copied
    # on disk by the merge_* helpers; a falsy kapture_path skips the copies
    if kapture.Keypoints not in skip_list:
        keypoints_list = [kap.keypoints for kap in kapture_list]
        if any(keypoints is not None for keypoints in keypoints_list):
            merged.keypoints = get_new_if_not_empty(
                merge_keypoints(keypoints_list, data_paths, kapture_path),
                merged.keypoints)
    if kapture.Descriptors not in skip_list:
        descriptors_list = [kap.descriptors for kap in kapture_list]
        if any(descriptors is not None for descriptors in descriptors_list):
            merged.descriptors = get_new_if_not_empty(
                merge_descriptors(descriptors_list, data_paths, kapture_path),
                merged.descriptors)
    if kapture.GlobalFeatures not in skip_list:
        global_features_list = [kap.global_features for kap in kapture_list]
        if any(features is not None for features in global_features_list):
            merged.global_features = get_new_if_not_empty(
                merge_global_features(global_features_list, data_paths, kapture_path),
                merged.global_features)
    if kapture.Matches not in skip_list:
        merged.matches = get_new_if_not_empty(
            merge_matches([kap.matches for kap in kapture_list],
                          data_paths, kapture_path),
            merged.matches)

    # points3d and observations are merged jointly when both are requested,
    # so that observation indices stay aligned with the merged points
    if kapture.Points3d not in skip_list and kapture.Observations not in skip_list:
        points_obs_pairs = [(kap.points3d, kap.observations)
                            for kap in kapture_list]
        merged_points3d, merged_observations = merge_points3d_and_observations(
            points_obs_pairs)
        merged.points3d = get_new_if_not_empty(merged_points3d,
                                               merged.points3d)
        merged.observations = get_new_if_not_empty(merged_observations,
                                                   merged.observations)
    elif kapture.Points3d not in skip_list:
        merged.points3d = get_new_if_not_empty(
            merge_points3d([kap.points3d for kap in kapture_list]),
            merged.points3d)
    return merged
예제 #20
0
def extract_kapture_keypoints(args):
    """
    Extract r2d2 keypoints and descriptors directly into the kapture format.

    Images that already have both keypoints and descriptors are skipped.
    Results are written under args.kapture_root; the kapture keypoints and
    descriptors definitions are created lazily from the first extracted image.

    :param args: parsed command-line arguments; uses kapture_root, gpu, model,
                 reliability_thr, repeatability_thr, scale_f, min_scale,
                 max_scale, min_size, max_size and top_k
    """
    print('extract_kapture_keypoints...')
    # load the kapture dataset, skipping the data this extractor does not use
    kdata = kapture_from_dir(args.kapture_root,
                             matches_pairsfile_path=None,
                             skip_list=[
                                 kapture.GlobalFeatures, kapture.Matches,
                                 kapture.Points3d, kapture.Observations
                             ])

    assert kdata.records_camera is not None
    image_list = [
        filename for _, _, filename in kapture.flatten(kdata.records_camera)
    ]
    # only keep images missing either keypoints or descriptors
    if kdata.keypoints is not None and kdata.descriptors is not None:
        image_list = [
            name for name in image_list
            if name not in kdata.keypoints or name not in kdata.descriptors
        ]

    if len(image_list) == 0:
        print('All features were already extracted')
        return
    else:
        print(f'Extracting r2d2 features for {len(image_list)} images')

    iscuda = common.torch_set_gpu(args.gpu)

    # load the network...
    net = load_network(args.model)
    if iscuda:
        net = net.cuda()

    # create the non-maxima detector
    detector = NonMaxSuppression(rel_thr=args.reliability_thr,
                                 rep_thr=args.repeatability_thr)

    # dtype/dsize come from the existing kapture data when present,
    # otherwise they are initialized from the first extracted image below
    keypoints_dtype = None if kdata.keypoints is None else kdata.keypoints.dtype
    descriptors_dtype = None if kdata.descriptors is None else kdata.descriptors.dtype

    keypoints_dsize = None if kdata.keypoints is None else kdata.keypoints.dsize
    descriptors_dsize = None if kdata.descriptors is None else kdata.descriptors.dsize

    for image_name in image_list:
        img_path = get_image_fullpath(args.kapture_root, image_name)

        print(f"\nExtracting features for {img_path}")
        img = Image.open(img_path).convert('RGB')
        img = norm_RGB(img)[None]
        if iscuda:
            img = img.cuda()

        # extract keypoints/descriptors for a single image
        xys, desc, scores = extract_multiscale(net,
                                               img,
                                               detector,
                                               scale_f=args.scale_f,
                                               min_scale=args.min_scale,
                                               max_scale=args.max_scale,
                                               min_size=args.min_size,
                                               max_size=args.max_size,
                                               verbose=True)

        xys = xys.cpu().numpy()
        desc = desc.cpu().numpy()
        scores = scores.cpu().numpy()
        # keep the top_k best-scoring keypoints (all of them when top_k is falsy)
        idxs = scores.argsort()[-args.top_k or None:]

        xys = xys[idxs]
        desc = desc[idxs]
        if keypoints_dtype is None or descriptors_dtype is None:
            # first image: create and persist the feature definitions
            keypoints_dtype = xys.dtype
            descriptors_dtype = desc.dtype

            keypoints_dsize = xys.shape[1]
            descriptors_dsize = desc.shape[1]

            kdata.keypoints = kapture.Keypoints('r2d2', keypoints_dtype,
                                                keypoints_dsize)
            kdata.descriptors = kapture.Descriptors('r2d2', descriptors_dtype,
                                                    descriptors_dsize)

            keypoints_config_absolute_path = get_csv_fullpath(
                kapture.Keypoints, args.kapture_root)
            descriptors_config_absolute_path = get_csv_fullpath(
                kapture.Descriptors, args.kapture_root)

            keypoints_to_file(keypoints_config_absolute_path, kdata.keypoints)
            descriptors_to_file(descriptors_config_absolute_path,
                                kdata.descriptors)
        else:
            # subsequent images must match the established definitions
            assert kdata.keypoints.type_name == 'r2d2'
            assert kdata.descriptors.type_name == 'r2d2'
            assert kdata.keypoints.dtype == xys.dtype
            assert kdata.descriptors.dtype == desc.dtype
            assert kdata.keypoints.dsize == xys.shape[1]
            assert kdata.descriptors.dsize == desc.shape[1]

        keypoints_fullpath = get_keypoints_fullpath(args.kapture_root,
                                                    image_name)
        print(f"Saving {xys.shape[0]} keypoints to {keypoints_fullpath}")
        image_keypoints_to_file(keypoints_fullpath, xys)
        kdata.keypoints.add(image_name)

        descriptors_fullpath = get_descriptors_fullpath(
            args.kapture_root, image_name)
        print(f"Saving {desc.shape[0]} descriptors to {descriptors_fullpath}")
        image_descriptors_to_file(descriptors_fullpath, desc)
        kdata.descriptors.add(image_name)

    if not keypoints_check_dir(kdata.keypoints, args.kapture_root) or \
            not descriptors_check_dir(kdata.descriptors, args.kapture_root):
        print(
            'local feature extraction ended successfully but not all files were saved'
        )
def create_3D_model_from_depth_from_loaded_data(
        kdata: kapture.Kapture, input_path: str, tar_handlers: TarCollection,
        output_path: str, keypoints_type: Optional[str], depth_sensor_id: str,
        topk: int, method: Method, cellsizes: List[str], force: bool):
    """
    Create a 3D model (points3d + observations) from a kapture dataset that
    has registered depth data. Assumes the kapture data is already loaded.

    Each image keypoint is looked up in the corresponding depth map and
    back-projected to a 3D point, which is moved to world coordinates using
    the image pose; observations link the 3D points back to the keypoints.

    :param kdata: loaded kapture data; records_camera, keypoints and
        trajectories are required, records_depth is used to locate depth maps
    :param input_path: kapture root of the input data (images, depth, features)
    :param tar_handlers: tar collection used to read the keypoints
    :param output_path: kapture root where the resulting model is written
    :param keypoints_type: keypoints type to use; if None, it is taken from
        kdata.keypoints when exactly one type is present
    :param depth_sensor_id: sensor id of the depth maps; if None,
        '<camera_sensor_id>_depth' is assumed
    :param topk: only the first topk keypoints of each image are back-projected
    :param method: Method.voxelgrid deduplicates points via a voxel grid,
        Method.all keeps every back-projected point
    :param cellsizes: voxel grid cell sizes (only used with Method.voxelgrid)
    :param force: overwrite output_path silently when it already exists
    :return: -1 when output_path exists and force is False, None otherwise
    """
    logger.info('create 3D model using depth data')

    if os.path.exists(output_path) and not force:
        print('outpath already exists, use --force to overwrite')
        return -1

    # trajectories must be expressed per sensor, not per rig
    if kdata.rigs is not None:
        assert kdata.trajectories is not None
        kapture.rigs_remove_inplace(kdata.trajectories, kdata.rigs)

    if keypoints_type is None:
        keypoints_type = try_get_only_key_from_collection(kdata.keypoints)
    assert keypoints_type is not None
    assert kdata.keypoints is not None
    assert keypoints_type in kdata.keypoints

    vg = None
    if method == Method.voxelgrid:
        vg = VoxelGrid(cellsizes)

    # add all 3D points to map that correspond to a keypoint
    logger.info('adding points from scan to kapture')
    points3d = []
    observations = kapture.Observations()

    image_records = list(kapture.flatten(kdata.records_camera, is_sorted=True))
    progress_bar = tqdm(total=len(image_records),
                        disable=logger.level >= logging.CRITICAL)
    for timestamp, sensor_id, sensing_filepath in image_records:
        logger.info(
            f'total 3d points: {len(points3d)}, processing {sensing_filepath}')
        # check if images have a pose
        if timestamp not in kdata.trajectories:
            logger.info('{} does not have a pose. skipping ...'.format(
                sensing_filepath))
            progress_bar.update(1)
            continue

        # resolve the depth sensor id first: it is needed below to read the
        # depth map size even when this timestamp has no depth record (the
        # previous code left depth_id undefined in that case -> NameError)
        if depth_sensor_id is None:
            depth_id = sensor_id + '_depth'
        else:
            depth_id = depth_sensor_id
        # check if depth map exists
        depth_map_record = ''
        if timestamp in kdata.records_depth and \
                depth_id in kdata.records_depth[timestamp]:
            depth_map_record = kdata.records_depth[timestamp][depth_id]
        depth_map_size = tuple(
            int(x) for x in kdata.sensors[depth_id].camera_params[0:2])
        depth_path = get_depth_map_fullpath(input_path, depth_map_record)
        # an empty record would resolve to the records directory itself,
        # so test it explicitly in addition to the file existence
        if not depth_map_record or not os.path.exists(depth_path):
            logger.info('no 3D data found for {}. skipping ...'.format(
                sensing_filepath))
            progress_bar.update(1)
            continue
        depth_map = depth_map_from_file(depth_path, depth_map_size)
        img = Image.open(get_image_fullpath(input_path,
                                            sensing_filepath)).convert('RGB')
        # image and depth map are expected to be aligned pixel to pixel
        assert img.size[0] == depth_map_size[0]
        assert img.size[1] == depth_map_size[1]

        kps_raw = load_keypoints(keypoints_type, input_path, sensing_filepath,
                                 kdata.keypoints[keypoints_type].dtype,
                                 kdata.keypoints[keypoints_type].dsize,
                                 tar_handlers)

        # camera and depth sensor must share the same intrinsics
        _, camera_sensor_C, camera_dist = get_camera_matrix_from_kapture(
            np.zeros((1, 0, 2), dtype=np.float64), kdata.sensors[sensor_id])
        cv2_keypoints, depth_sensor_C, depth_dist = get_camera_matrix_from_kapture(
            kps_raw, kdata.sensors[depth_id])
        assert np.isclose(depth_sensor_C, camera_sensor_C).all()
        assert np.isclose(depth_dist, camera_dist).all()

        # undistort the keypoints when the camera model has distortion
        if np.count_nonzero(camera_dist) > 0:
            epsilon = np.finfo(np.float64).eps
            stop_criteria = (cv2.TERM_CRITERIA_MAX_ITER +
                             cv2.TERM_CRITERIA_EPS, 500, epsilon)
            undistorted_cv2_keypoints = cv2.undistortPointsIter(
                cv2_keypoints,
                camera_sensor_C,
                camera_dist,
                R=None,
                P=camera_sensor_C,
                criteria=stop_criteria)
        else:
            undistorted_cv2_keypoints = cv2_keypoints

        cv2_keypoints = cv2_keypoints.reshape((kps_raw.shape[0], 2))
        undistorted_cv2_keypoints = undistorted_cv2_keypoints.reshape(
            (kps_raw.shape[0], 2))

        points3d_img = []
        rgb_img = []
        kp_idxs = []
        for idx_kp, kp in enumerate(cv2_keypoints[0:topk]):
            # distorted coordinates index the depth map and the image,
            # undistorted coordinates drive the back-projection
            u = round(kp[0])
            v = round(kp[1])
            undist_kp = undistorted_cv2_keypoints[idx_kp]
            undist_u = round(undist_kp[0])
            undist_v = round(undist_kp[1])

            if 0 <= u < depth_map_size[0] and 0 <= v < depth_map_size[1]:
                if depth_map[v, u] == 0:
                    # no depth measurement available for that pixel
                    continue
                pt3d = project_kp_to_3D(undist_u, undist_v, depth_map[v, u],
                                        depth_sensor_C[0, 2],
                                        depth_sensor_C[1, 2],
                                        depth_sensor_C[0, 0],
                                        depth_sensor_C[1, 1])
                points3d_img.append(pt3d)
                rgb_img.append(img.getpixel((u, v)))
                kp_idxs.append(idx_kp)
        # transform to world coordinates (pt3d from a depth map is in camera coordinates)
        # we use sensor_id here because we assume that the image and the corresponding depthmap have the same pose
        # and sometimes, the pose might only be provided for the images
        cam_to_world = kdata.trajectories[timestamp][sensor_id].inverse()
        if len(points3d_img) == 0:
            progress_bar.update(1)
            continue
        points3d_img = cam_to_world.transform_points(np.array(points3d_img))
        for idx_kp, pt3d, rgb in zip(kp_idxs, points3d_img, rgb_img):
            if np.isnan(pt3d).any():
                continue
            if method == Method.voxelgrid:
                assert vg is not None
                if not vg.exists(pt3d):
                    # new voxel: add the 3D point and its first observation
                    points3d.append(list(pt3d) + list(rgb))
                    observations.add(len(points3d) - 1, keypoints_type,
                                     sensing_filepath, idx_kp)
                    vg.add(pt3d, len(points3d) - 1, sensing_filepath)
                else:
                    # occupied voxel: only register an extra observation
                    ret = vg.append(pt3d, sensing_filepath)
                    if ret is not None:
                        observations.add(ret[0], keypoints_type,
                                         sensing_filepath, idx_kp)
            elif method == Method.all:
                points3d.append(list(pt3d) + list(rgb))
                observations.add(len(points3d) - 1, keypoints_type,
                                 sensing_filepath, idx_kp)
        progress_bar.update(1)
    progress_bar.close()

    kdata.points3d = kapture.Points3d(np.array(points3d))
    kdata.observations = observations

    logger.info('saving ...')
    kapture_to_dir(output_path, kdata)

    logger.info('all done')
예제 #22
0
def import_silda(
    silda_dirpath: str,
    destination_kapture_dirpath: str,
    fallback_cam_model: str = 'FOV',
    do_split_cams: bool = False,
    corpus: Optional[str] = None,
    replace_pose_rig: bool = False,
    force_overwrite_existing: bool = False,
    images_import_strategy: TransferAction = TransferAction.link_absolute
) -> None:
    """
    Imports data from silda dataset.

    :param silda_dirpath: path to the silda top directory
    :param destination_kapture_dirpath: input path to kapture directory.
    :param fallback_cam_model: camera model to fallback when necessary ('FOV' or 'OPENCV_FISHEYE')
    :param do_split_cams: If true, re-organises and renames the image files to split apart cameras.
    :param corpus: the list of corpus to be imported, among 'mapping', 'query'.
    :param replace_pose_rig: if True, replaces poses of individual cameras with poses of the rig.
    :param force_overwrite_existing: if true, Silently overwrite kapture files if already exists.
    :param images_import_strategy: how to copy image files.
    """

    # sanity check
    silda_dirpath = path_secure(path.abspath(silda_dirpath))
    destination_kapture_dirpath = path_secure(
        path.abspath(destination_kapture_dirpath))
    if TransferAction.root_link == images_import_strategy and do_split_cams:
        raise ValueError(
            'impossible to only link images directory and applying split cam.')
    hide_progress_bars = logger.getEffectiveLevel() >= logging.INFO

    # prepare output directory
    kapture.io.structure.delete_existing_kapture_files(
        destination_kapture_dirpath, force_overwrite_existing)
    os.makedirs(destination_kapture_dirpath, exist_ok=True)

    # images ###########################################################################################################
    logger.info('Processing images ...')
    # silda-images
    #   ...
    #   ├── 1445_0.png
    #   ├── 1445_1.png
    #   ...
    silda_images_root_path = path.join(silda_dirpath, 'silda-images')
    # list all png files (its PNG in silda) using a generator.
    if corpus is not None:
        assert corpus in SILDA_CORPUS_SPLIT_FILENAMES
        # if corpus specified, filter by those which directory name match corpus.
        logger.debug(f'only importing {corpus} part.')
        corpus_filepath = path.join(silda_dirpath,
                                    SILDA_CORPUS_SPLIT_FILENAMES[corpus])
        with open(corpus_filepath, 'rt') as corpus_file:
            corpus_filenames = corpus_file.readlines()
            image_filenames_original = sorted(filename.strip()
                                              for filename in corpus_filenames)
    else:
        image_filenames_original = sorted(
            filename for dirpath, sd, fs in os.walk(silda_images_root_path)
            for filename in fs if filename.endswith('.png'))

    image_filenames_kapture = []
    snapshots = kapture.RecordsCamera()
    image_name_to_ids = {}  # '1445_0.png' -> (1445, 0)
    for image_filename_original in tqdm(image_filenames_original,
                                        disable=hide_progress_bars):
        # retrieve info from image filename
        shot_info = SILDA_IMAGE_NAME_PATTERN.match(image_filename_original)
        assert shot_info is not None
        shot_info = shot_info.groupdict()
        shot_info['timestamp'] = int(
            shot_info['timestamp']
        )  # To avoid warnings about type of the value
        # eg. file_info = {'filename': '1445_0.png', 'timestamp': 1445, 'cam_id': '0'}
        # create a path of the image into NLE dir
        if do_split_cams:
            # re-organise images with subfolders per corpus/camera/timestamp.png
            kapture_image_filename = path.join(
                shot_info['cam_id'],
                '{:04d}.png'.format(shot_info['timestamp']))
        else:
            # keep the original file hierarchy
            kapture_image_filename = image_filename_original

        image_filenames_kapture.append(kapture_image_filename)
        snapshots[shot_info['timestamp'],
                  shot_info['cam_id']] = kapture_image_filename
        image_name_to_ids[shot_info['filename']] = (shot_info['timestamp'],
                                                    shot_info['cam_id'])

    assert len(image_filenames_kapture) == len(image_filenames_original)
    # intrinsics #######################################################################################################
    logger.info('Processing sensors ...')
    cameras = kapture.Sensors()
    # use hard coded intrinsics
    # evaluated using colmap
    # 1 OPENCV_FISHEYE 1024 1024 393.299 394.815 512 512 -0.223483 0.117325 -0.0326138 0.00361082
    #                  fx, fy, cx, cy, omega
    # 1 FOV 1024 1024 300 300 512 512 0.899632
    cam_id_list = sorted(
        set(cam_id for _, cam_id, _ in kapture.flatten(snapshots)))
    for cam_id in cam_id_list:
        # pick a image for that cam id
        random_image_intrinsic = next(
            f'{timestamp}_{cam_id}.intrinsics'  # keep only filename (thats what silda expect)
            for timestamp, cid, filename in kapture.flatten(snapshots)
            if cid == cam_id)
        logger.debug(
            f'camera {cam_id} intrinsics : picking at random: ("{random_image_intrinsic}")'
        )
        intrinsic_filepath = path.join(silda_dirpath, 'camera-intrinsics',
                                       random_image_intrinsic)
        logger.debug(f'loading file: "{intrinsic_filepath}"')
        silda_proj_params = np.loadtxt(intrinsic_filepath)
        # only retrieve principal point from intrinsics,
        # because the rest correspond to a fisheye model not available in colmap.
        principal_point = (silda_proj_params[0:2] *
                           SILDA_IMAGE_SIZE).flatten().tolist()
        projection = fallback_cam_model
        if 'OPENCV_FISHEYE' == projection:
            focal_length = [393.299, 394.815]
            fisheye_coefficients = [
                -0.223483, 0.117325, -0.0326138, 0.00361082
            ]
            #          //    fx, fy, cx, cy, k1, k2, k3, k4
            proj_params = focal_length + principal_point + fisheye_coefficients
        elif 'FOV' == projection:
            # use hard coded intrinsics from Torsten reconstruction, ie. :
            #       217.294036, 217.214703, 512.000000, 507.897400, -0.769113
            focal_length = [217.294036, 217.214703]
            # principal_point = [512.000000, 507.897400]
            omega = [-0.769113]
            #                  fx, fy, cx, cy, omega
            proj_params = focal_length + principal_point + omega
        else:
            raise ValueError(
                'Only accepts OPENCV_FISHEYE, or FOV as projection model.')

        camera = kapture.Camera(projection,
                                SILDA_IMAGE_SIZE.tolist() + proj_params)
        cameras[cam_id] = camera

    # extrinsics #######################################################################################################
    logger.info('Processing trajectories ...')
    trajectories = kapture.Trajectories()
    with open(path.join(silda_dirpath, 'silda-train-poses.txt')) as file:
        lines = file.readlines()
        lines = (line.rstrip().split() for line in lines)
        # np.float64 (not the removed np.float alias): silda poses are plain
        # double precision values.
        extrinsics = {
            line[0]: np.array(line[1:8], dtype=np.float64)
            for line in lines
        }

    for silda_image_name, pose_params in tqdm(extrinsics.items(),
                                              disable=hide_progress_bars):
        # Silda poses are 7-dim vectors with the rotation quaternion,
        # and the translation vector. The order needs to be:
        # qw,qx,qy,qz,tx,ty,tz
        # The parameters should be described in terms of camera to world transformations
        if silda_image_name not in image_name_to_ids:
            # if this is not referenced: means its part of the corpus to be ignored.
            continue
        pose = kapture.PoseTransform(pose_params[0:4],
                                     pose_params[4:7]).inverse()
        timestamp, cam_id = image_name_to_ids[silda_image_name]
        trajectories[timestamp, cam_id] = pose

    # rigs
    logger.info('Making up a rig ...')
    rigs = kapture.Rigs()
    pose_babord = kapture.PoseTransform(t=[0, 0, 0],
                                        r=quaternion.from_rotation_vector(
                                            [0, -np.pi / 2, 0]))
    pose_tribord = kapture.PoseTransform(t=[0, 0, 0],
                                         r=quaternion.from_rotation_vector(
                                             [0, np.pi / 2, 0]))
    rigs['silda_rig', '0'] = pose_babord
    rigs['silda_rig', '1'] = pose_tribord
    if replace_pose_rig:
        logger.info('replacing camera poses with rig poses.')
        kapture.rigs_recover_inplace(trajectories, rigs)

    # pack it all together
    kapture_data = kapture.Kapture(sensors=cameras,
                                   records_camera=snapshots,
                                   trajectories=trajectories,
                                   rigs=rigs)

    logger.info('saving to Kapture  ...')
    kapture.io.csv.kapture_to_dir(destination_kapture_dirpath, kapture_data)

    # finally import images
    if images_import_strategy != TransferAction.skip:
        # importing image files
        logger.info(f'importing {len(image_filenames_original)} images ...')
        assert len(image_filenames_original) == len(image_filenames_kapture)
        image_filepaths_original = [
            path.join(silda_images_root_path, image_filename_kapture)
            for image_filename_kapture in image_filenames_original
        ]
        image_filepaths_kapture = [
            get_image_fullpath(destination_kapture_dirpath,
                               image_filename_kapture)
            for image_filename_kapture in image_filenames_kapture
        ]
        transfer_files_from_dir(image_filepaths_original,
                                image_filepaths_kapture,
                                images_import_strategy)
    logger.info('done.')
예제 #23
0
def export_openmvg_sfm_data(kapture_path: str,
                            kapture_data: kapture.Kapture,
                            openmvg_sfm_data_file_path: str,
                            openmvg_image_root_path: str,
                            image_action: TransferAction,
                            image_path_flatten: bool,
                            force: bool,
                            kapture_to_openmvg_view_ids: Optional[dict] = None) -> Dict:
    """
    Convert the kapture data into an openMVG dataset stored as a dictionary.
    The format is defined here:
    https://openmvg.readthedocs.io/en/latest/software/SfM/SfM_OutputFormat/

    :param kapture_data: the kapture data
    :param kapture_path: top directory of the kapture data and the images
    :param openmvg_sfm_data_file_path: input path to the SfM data file to be written.
    :param openmvg_image_root_path: input path to openMVG image directory to be created.
    :param image_action: action to apply on images: link, copy, move or do nothing.
    :param image_path_flatten: flatten image path (eg. to avoid image name collision in openMVG regions).
    :param force: if true, will remove existing openMVG data without prompting the user.
    :param kapture_to_openmvg_view_ids: input/output mapping of kapture image name to corresponding
        openmvg view id; a fresh mapping is created when None.
    :return: an SfM_data, the openmvg structure, stored as a dictionary ready to be serialized
    """
    # do NOT use a mutable default argument: the dict is mutated below and
    # would otherwise be shared across calls.
    if kapture_to_openmvg_view_ids is None:
        kapture_to_openmvg_view_ids = {}

    if kapture_data.cameras is None or kapture_data.records_camera is None:
        raise ValueError(
            'export_openmvg_sfm_data needs kapture camera and records_camera.')

    if image_action == TransferAction.root_link:
        raise NotImplementedError(
            'root link is not implemented, use skip instead.')

    # refer to the original image dir when skipping image transfer.
    if image_action == TransferAction.skip:
        openmvg_image_root_path = get_image_fullpath(kapture_path)

    if openmvg_image_root_path is None:
        raise ValueError(
            f'openmvg_image_root_path must be defined to be able to perform {image_action}.'
        )

    # make sure directory is ready to contain openmvg_sfm_data_file_path
    os.makedirs(path.dirname(openmvg_sfm_data_file_path), exist_ok=True)

    # Check we don't have other sensors defined
    if len(kapture_data.sensors) != len(kapture_data.cameras):
        extra_sensor_number = len(kapture_data.sensors) - len(
            kapture_data.cameras)
        logger.warning(
            f'We will ignore {extra_sensor_number} sensors that are not camera'
        )

    # openmvg does not support rigs
    if kapture_data.rigs:
        logger.info('remove rigs notation.')
        rigs_remove_inplace(kapture_data.trajectories, kapture_data.rigs)
        kapture_data.rigs.clear()

    # Compute root path and camera used in records
    kapture_to_openmvg_cam_ids = {}  # kapture_cam_id -> openmvg_cam_id
    for i, (_, _, kapture_image_name) in enumerate(
            kapture.flatten(kapture_data.records_camera)):
        if kapture_image_name not in kapture_to_openmvg_view_ids:
            kapture_to_openmvg_view_ids[kapture_image_name] = i

    # registries assigning cereal polymorphic/pointer ids while serializing
    polymorphic_registry = CerealPointerRegistry(
        id_key=JSON_KEY.POLYMORPHIC_ID, value_key=JSON_KEY.POLYMORPHIC_NAME)
    ptr_wrapper_registry = CerealPointerRegistry(id_key=JSON_KEY.ID,
                                                 value_key=JSON_KEY.DATA)

    logger.debug('exporting intrinsics ...')
    openmvg_sfm_data_intrinsics = export_openmvg_intrinsics(
        kapture_cameras=kapture_data.cameras,
        kapture_to_openmvg_cam_ids=kapture_to_openmvg_cam_ids,
        polymorphic_registry=polymorphic_registry,
        ptr_wrapper_registry=ptr_wrapper_registry,
    )

    logger.debug('exporting views ...')
    openmvg_sfm_data_views = export_openmvg_views(
        kapture_cameras=kapture_data.cameras,
        kapture_images=kapture_data.records_camera,
        kapture_trajectories=kapture_data.trajectories,
        kapture_to_openmvg_cam_ids=kapture_to_openmvg_cam_ids,
        kapture_to_openmvg_view_ids=kapture_to_openmvg_view_ids,
        polymorphic_registry=polymorphic_registry,
        ptr_wrapper_registry=ptr_wrapper_registry,
        image_path_flatten=image_path_flatten,
    )

    logger.debug('exporting poses ...')
    openmvg_sfm_data_poses = export_openmvg_poses(
        kapture_images=kapture_data.records_camera,
        kapture_trajectories=kapture_data.trajectories,
        kapture_to_openmvg_view_ids=kapture_to_openmvg_view_ids)

    # structure : correspond to kapture observations + 3D points
    logger.debug('exporting structure ...')
    openmvg_sfm_data_structure = export_openmvg_structure(
        kapture_points_3d=kapture_data.points3d,
        kapture_to_openmvg_view_ids=kapture_to_openmvg_view_ids,
        kapture_observations=kapture_data.observations,
        kapture_keypoints=kapture_data.keypoints,
        kapture_path=kapture_path)

    openmvg_sfm_data = {
        JSON_KEY.SFM_DATA_VERSION: OPENMVG_SFM_DATA_VERSION_NUMBER,
        JSON_KEY.ROOT_PATH: path.abspath(openmvg_image_root_path),
        JSON_KEY.INTRINSICS: openmvg_sfm_data_intrinsics,
        JSON_KEY.VIEWS: openmvg_sfm_data_views,
        JSON_KEY.EXTRINSICS: openmvg_sfm_data_poses,
        JSON_KEY.STRUCTURE: openmvg_sfm_data_structure,
        JSON_KEY.CONTROL_POINTS: [],
    }

    logger.debug(f'Saving to openmvg {openmvg_sfm_data_file_path}...')
    with open(openmvg_sfm_data_file_path, "w") as fid:
        json.dump(openmvg_sfm_data, fid, indent=4)

    # do the actual image transfer
    if not image_action == TransferAction.skip:
        job_copy = (
            (  # source path -> dest path
                get_image_fullpath(kapture_path, kapture_image_name),
                path.join(
                    openmvg_image_root_path,
                    get_openmvg_image_path(kapture_image_name,
                                           image_path_flatten))) for _, _,
            kapture_image_name in kapture.flatten(kapture_data.records_camera))
        source_filepath_list, destination_filepath_list = zip(*job_copy)
        transfer_files_from_dir(
            source_filepath_list=source_filepath_list,
            destination_filepath_list=destination_filepath_list,
            copy_strategy=image_action,
            force_overwrite=force)

    # the docstring and the -> Dict annotation promise the sfm_data structure
    return openmvg_sfm_data
예제 #24
0
def colmap_build_map_from_loaded_data(kapture_data: kapture.Kapture,
                                      kapture_path: str,
                                      colmap_path: str,
                                      colmap_binary: str,
                                      pairsfile_path: Optional[str],
                                      use_colmap_matches_importer: bool,
                                      point_triangulator_options: List[str],
                                      skip_list: List[str],
                                      force: bool) -> None:
    """
    Build a colmap model using custom features with the kapture data.

    Pipeline: export kapture to a colmap database, optionally run geometric
    verification, export pose priors, then triangulate (when trajectories are
    available) or run the full mapper (when they are not), and finally convert
    the reconstruction to txt.

    :param kapture_data: kapture data to use (records_camera, sensors, keypoints
        and matches are mandatory; trajectories/rigs are optional)
    :param kapture_path: path to the kapture to use
    :param colmap_path: path to the colmap build (working/output directory)
    :param colmap_binary: path to the colmap executable
    :param pairsfile_path: optional path to an image-pairs file
        (NOTE(review): not referenced in this function body — presumably
        consumed by the caller or kept for signature compatibility; confirm)
    :param use_colmap_matches_importer: if True, run colmap matches_importer for
        geometric verification instead of exporting two-view geometry directly
    :param skip_list: steps to skip, among 'delete_existing', 'colmap_db',
        'priors_for_reconstruction', 'triangulation', 'model_converter'
    :param point_triangulator_options: options for the point triangulator
        (also passed to the mapper in the no-trajectories branch)
    :param force: Silently overwrite kapture files if already exists.
    """
    os.makedirs(colmap_path, exist_ok=True)

    if not (kapture_data.records_camera and kapture_data.sensors and kapture_data.keypoints and kapture_data.matches):
        raise ValueError('records_camera, sensors, keypoints, matches are mandatory')
    if not kapture_data.trajectories:
        logger.info('there are no trajectories, running mapper instead of point_triangulator')

    # COLMAP does not fully support rigs.
    if kapture_data.rigs is not None and kapture_data.trajectories is not None:
        # make sure, rigs are not used in trajectories.
        logger.info('remove rigs notation.')
        rigs_remove_inplace(kapture_data.trajectories, kapture_data.rigs)
        kapture_data.rigs.clear()

    # Set fixed name for COLMAP database
    colmap_db_path = path.join(colmap_path, 'colmap.db')
    reconstruction_path = path.join(colmap_path, "reconstruction")
    priors_txt_path = path.join(colmap_path, "priors_for_reconstruction")
    if 'delete_existing' not in skip_list:
        # wipe any previous run output (prompts unless force is set)
        safe_remove_file(colmap_db_path, force)
        safe_remove_any_path(reconstruction_path, force)
        safe_remove_any_path(priors_txt_path, force)
    os.makedirs(reconstruction_path, exist_ok=True)

    if 'colmap_db' not in skip_list:
        logger.info('Using precomputed keypoints and matches')
        logger.info('Step 1: Export kapture format to colmap')

        colmap_db = COLMAPDatabase.connect(colmap_db_path)
        # descriptors are not needed for this export; drop them beforehand
        if kapture_data.descriptors is not None:
            kapture_data.descriptors.clear()
        database_extra.kapture_to_colmap(kapture_data, kapture_path, colmap_db,
                                         export_two_view_geometry=not use_colmap_matches_importer)
        # close db before running colmap processes in order to avoid locks
        colmap_db.close()

        if use_colmap_matches_importer:
            logger.info('Step 2: Run geometric verification')
            logger.debug('running colmap matches_importer...')
            colmap_lib.run_matches_importer_from_kapture(
                colmap_binary,
                colmap_use_cpu=True,
                colmap_gpu_index=None,
                colmap_db_path=colmap_db_path,
                kapture_data=kapture_data,
                force=force
            )
        else:
            # two-view geometry was already exported above in that case
            logger.info('Step 2: Run geometric verification - skipped')

    if kapture_data.trajectories is not None:
        # Generate priors for reconstruction
        os.makedirs(priors_txt_path, exist_ok=True)
        if 'priors_for_reconstruction' not in skip_list:
            logger.info('Step 3: Exporting priors for reconstruction.')
            colmap_db = COLMAPDatabase.connect(colmap_db_path)
            database_extra.generate_priors_for_reconstruction(kapture_data, colmap_db, priors_txt_path)
            colmap_db.close()

        # Point triangulator
        reconstruction_path = path.join(colmap_path, "reconstruction")
        os.makedirs(reconstruction_path, exist_ok=True)
        if 'triangulation' not in skip_list:
            logger.info("Step 4: Triangulation")
            colmap_lib.run_point_triangulator(
                colmap_binary,
                colmap_db_path,
                get_image_fullpath(kapture_path),
                priors_txt_path,
                reconstruction_path,
                point_triangulator_options
            )
    else:
        # no known poses: run the full SfM mapper instead of triangulation
        reconstruction_path = path.join(colmap_path, "reconstruction")
        os.makedirs(reconstruction_path, exist_ok=True)
        if 'triangulation' not in skip_list:
            logger.info("Step 4: Triangulation")
            colmap_lib.run_mapper(
                colmap_binary,
                colmap_db_path,
                get_image_fullpath(kapture_path),
                None,
                reconstruction_path,
                point_triangulator_options
            )
            # use reconstruction 0 as main
            first_reconstruction = os.path.join(reconstruction_path, '0')
            files = os.listdir(first_reconstruction)
            for f in files:
                shutil.move(os.path.join(first_reconstruction, f), os.path.join(reconstruction_path, f))
            shutil.rmtree(first_reconstruction)

    # run model_converter
    if 'model_converter' not in skip_list:
        logger.info("Step 5: Export reconstruction results to txt")
        colmap_lib.run_model_converter(
            colmap_binary,
            reconstruction_path,
            reconstruction_path
        )
def extract_kapture_global(kapture_root,
                           config,
                           output_dir='',
                           overwrite=False):
    """Extract NetVLAD global features for every image of a kapture dataset.

    :param kapture_root: path to the top kapture directory (images are read from there)
    :param config: extraction configuration dict; keys used here:
                   'grayscale' (bool), 'checkpoint' (str), 'pca_dim' (int)
    :param output_dir: root of the output directory for features;
                       defaults to kapture_root when empty
    :param overwrite: if True, discard previously extracted global features
                      instead of resuming from them
    """
    # BUGFIX: `config` was passed as a %-argument without a placeholder, so it
    # was silently dropped from the log message.
    logging.info('Extracting NetVLAD features with configuration:\n%s', config)
    # use kapture io to identify image paths and loop
    kdata = kapture_from_dir(kapture_root, matches_pairsfile_path=None,
                             skip_list=[kapture.Matches,
                                        kapture.Points3d,
                                        kapture.Observations,
                                        kapture.Keypoints,
                                        kapture.Descriptors])
    assert kdata.records_camera is not None

    export_dir = output_dir if output_dir else kapture_root  # root of output directory for features
    os.makedirs(export_dir, exist_ok=True)

    image_list = [filename for _, _, filename in kapture.flatten(kdata.records_camera)]

    # resume extraction if some features already exist
    try:
        # load features if there are any
        kdata.global_features = global_features_from_dir(export_dir, None)
        if kdata.global_features is not None and not overwrite:
            image_list = [name for name in image_list if name not in kdata.global_features]
    except FileNotFoundError:
        # nothing extracted yet: extract everything
        pass
    except Exception:
        # was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit propagate
        logging.exception("Error with importing existing global features.")

    # clear features first if overwriting
    if overwrite:
        delete_existing_kapture_files(export_dir, True, only=[kapture.GlobalFeatures])

    if not image_list:
        print('All features were already extracted')
        return
    print(f'Extracting NetVLAD features for {len(image_list)} images')

    # for the global descriptor type specification (None until first descriptor)
    global_dtype = None if kdata.global_features is None else kdata.global_features.dtype
    global_dsize = None if kdata.global_features is None else kdata.global_features.dsize

    # setup network (TF1-style graph + session, matching the checkpoint format)
    tf.reset_default_graph()
    channels = 1 if config['grayscale'] else 3
    tf_batch = tf.placeholder(dtype=tf.float32, shape=[None, None, None, channels])
    # load network and checkpoint
    net = nets.vgg16NetvladPca(tf_batch)
    saver = tf.train.Saver()
    sess = tf.Session()
    checkpoint = chkpt_path + '/' + config['checkpoint']
    saver.restore(sess, checkpoint)

    for image_name in image_list:
        img_path = get_image_fullpath(kapture_root, image_name)
        if img_path.endswith('.txt'):
            # BUGFIX: the original referenced an undefined `args` variable here
            # (leftover from a CLI script) and would crash with NameError.
            # A .txt record cannot be an image, so warn and skip it.
            logging.warning('skipping list file %s (not an image)', img_path)
            continue

        print(f"\nExtracting features for {img_path}")

        if config['grayscale']:
            image = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
            # shape (1, H, W, 1) to match the placeholder
            image = np.expand_dims(
                np.expand_dims(image, axis=0), axis=-1)
        else:
            # BUGFIX: cv2.COLOR_BGR2RGB is a cvtColor conversion code, not an
            # imread flag; the original call did NOT yield RGB data. Read the
            # image (BGR, OpenCV default) and convert it explicitly.
            image = cv2.imread(img_path, cv2.IMREAD_COLOR)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            image = np.expand_dims(image, axis=0)
        descriptor = sess.run(net, feed_dict={tf_batch: image})[:, :config['pca_dim']]
        descriptor = np.squeeze(descriptor)

        # first descriptor fixes the global descriptor type specification
        if global_dtype is None:
            global_dtype = descriptor.dtype
            global_dsize = len(descriptor)

            kdata.global_features = kapture.GlobalFeatures('netvlad', global_dtype, global_dsize)

            global_descriptors_config_abs_path = get_csv_fullpath(kapture.GlobalFeatures, export_dir)
            descriptors_to_file(global_descriptors_config_abs_path, kdata.global_features)
        else:
            # subsequent descriptors must match the declared specification
            assert kdata.global_features.type_name == "netvlad"
            assert kdata.global_features.dtype == descriptor.dtype
            assert kdata.global_features.dsize == len(descriptor)
        # write the descriptor file and register the image
        global_descriptors_abs_path = get_global_features_fullpath(export_dir, image_name)
        image_global_features_to_file(global_descriptors_abs_path, descriptor)
        kdata.global_features.add(image_name)
    # sess.close()  # close session before initializing again for next submap

    if not global_features_check_dir(kdata.global_features, export_dir):
        print('global feature extraction ended successfully but not all files were saved')