Code example #1
def merge_records_data(image_list: List[List[str]],
                       image_paths: List[str],
                       kapture_path: str,
                       images_import_method: TransferAction):
    """
    Merge several records data, keeping only the first occurrence of each image.

    :param image_list: list of lists of image names, one list per input kapture
    :param image_paths: list of image directory paths, one per input kapture
    :param kapture_path: root path of the merged kapture
    :param images_import_method: choose how to import actual image files
    """
    assert len(image_list) > 0
    assert len(image_list) == len(image_paths)

    added_images = set()
    for images_filenames, record_dirpath in zip(image_list, image_paths):
        images_filenames_to_add = {images_filename
                                   for images_filename in images_filenames
                                   if images_filename not in added_images}
        import_record_data_from_dir_auto(record_dirpath, kapture_path, images_filenames_to_add, images_import_method)
        diff = set(images_filenames).difference(images_filenames_to_add)
        if len(diff) > 0:
            getLogger().warning(f'Cannot import some images because they were already added: {diff}')
        added_images.update(images_filenames_to_add)
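
The loop above keeps only the first occurrence of each image name across the input kaptures: later duplicates are skipped and reported with a warning. A minimal standalone sketch of that deduplication logic (the file names below are made up):

# Standalone sketch of the first-occurrence deduplication used above; file names are hypothetical.
added_images = set()
for images_filenames in [['frame_000.jpg', 'frame_001.jpg'], ['frame_000.jpg', 'frame_002.jpg']]:
    images_filenames_to_add = {name for name in images_filenames if name not in added_images}
    diff = set(images_filenames).difference(images_filenames_to_add)
    print('import:', sorted(images_filenames_to_add), '| already added, skipped:', sorted(diff))
    added_images.update(images_filenames_to_add)
# first pass imports frame_000 and frame_001; second pass imports only frame_002.
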
Code example #2
def import_image_folder(
        images_path: str,
        kapture_path: str,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.skip) -> None:
    """
    Imports the images of a folder to a kapture. This creates only images and cameras.

    :param images_path: path to directory containing the images.
    :param kapture_path: path to kapture root directory.
    :param force_overwrite_existing: Silently overwrite kapture files if they already exist.
    :param images_import_method: choose how to import actual image files.
    """
    os.makedirs(kapture_path, exist_ok=True)
    delete_existing_kapture_files(kapture_path,
                                  force_erase=force_overwrite_existing)

    cameras = kapture.Sensors()
    images = kapture.RecordsCamera()

    file_list = [
        os.path.relpath(os.path.join(dirpath, filename), images_path)
        for dirpath, dirs, filenames in os.walk(images_path)
        for filename in filenames
    ]
    file_list = sorted(file_list)

    logger.info('starting conversion...')
    for n, filename in enumerate(file_list):
        # test if file is a valid image
        try:
            # lazy load
            with Image.open(path.join(images_path, filename)) as im:
                width, height = im.size
                model_params = [width, height]
        except (OSError, PIL.UnidentifiedImageError):
            # It is not a valid image: skip it
            logger.info(f'Skipping invalid image file {filename}')
            continue

        camera_id = f'sensor{n}'
        images[(n, camera_id)] = path_secure(filename)  # don't forget windows
        cameras[camera_id] = kapture.Camera(kapture.CameraType.UNKNOWN_CAMERA,
                                            model_params)

    # import (copy) image files.
    logger.info('import image files ...')
    filename_list = [f for _, _, f in kapture.flatten(images)]
    import_record_data_from_dir_auto(images_path, kapture_path, filename_list,
                                     images_import_method)

    # pack into kapture format
    imported_kapture = kapture.Kapture(sensors=cameras, records_camera=images)
    logger.info('writing imported data...')
    kapture_to_dir(kapture_path, imported_kapture)
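
A minimal usage sketch for the function above; the paths are placeholders and the TransferAction import location is an assumption based on the kapture package layout:

# Hypothetical usage: build a kapture from a flat folder of images, copying the files.
from kapture.io.records import TransferAction  # assumed import location

import_image_folder(
    images_path='/data/raw_images',
    kapture_path='/data/my_kapture',
    force_overwrite_existing=True,
    images_import_method=TransferAction.copy)
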
Code example #3
def import_7scenes(d7scenes_path: str,
                   kapture_dir_path: str,
                   force_overwrite_existing: bool = False,
                   images_import_method: TransferAction = TransferAction.skip,
                   partition: Optional[str] = None
                   ) -> None:
    """
    Imports the RGB-D 7-Scenes dataset and saves it as kapture.

    :param d7scenes_path: path to the 7scenes sequence root path
    :param kapture_dir_path: path to kapture top directory
    :param force_overwrite_existing: Silently overwrite kapture files if they already exist.
    :param images_import_method: choose how to import actual image files.
    :param partition: if specified, either 'mapping' or 'query'. Requires d7scenes_path/TestSplit.txt
                      or TrainSplit.txt to exist.
    """
    os.makedirs(kapture_dir_path, exist_ok=True)
    delete_existing_kapture_files(kapture_dir_path, force_erase=force_overwrite_existing)

    logger.info('loading all content ...')

    d7s_filename_re = re.compile(r'((?P<sequence>.+)/)?frame-(?P<frame_id>\d{6})\.(?P<suffix>\w*)\.(?P<ext>\w*)')

    # populate all relevant files
    d7s_filenames = (path_secure(path.relpath(path.join(dp, fn), d7scenes_path))
                     for dp, _, fs in os.walk(d7scenes_path) for fn in fs)

    logger.info('populating 7-scenes files ...')
    d7s_filenames = {filename: d7s_filename_re.search(filename).groupdict()
                     for filename in sorted(d7s_filenames)
                     if d7s_filename_re.search(filename)}

    # reorg as shot[seq, id] = {color: , depth: , pose: , ...}
    shots = {}
    for timestamp, (filename, file_attribs) in enumerate(d7s_filenames.items()):
        shot_id = (file_attribs.get('sequence'), file_attribs['frame_id'])
        shots.setdefault(shot_id, {})[file_attribs['suffix']] = filename

    # fake timestamps
    for timestamp, shot_id in enumerate(shots):
        shots[shot_id]['timestamp'] = timestamp

    # if given, filter partition
    if partition is not None:
        # read the authors split file
        partition_filepath = path.join(d7scenes_path, PARTITION_FILENAMES[partition])
        if not path.isfile(partition_filepath):
            raise FileNotFoundError(f'partition file is missing: {partition_filepath}.')
        with open(partition_filepath, 'rt') as file:
            split_sequences = [f'seq-{int(seq.strip()[len("sequence"):]):02}' for seq in file.readlines()]
        assert len(split_sequences) > 0
        # filter out
        shots = {(seq, frame): shot
                 for (seq, frame), shot in shots.items()
                 if seq in split_sequences}

    if len(shots) == 0:
        raise FileNotFoundError('no file found: make sure the path to 7scenes sequence is valid.')

    # eg. shots['seq-01', '000000'] =
    #       {
    #           'color': 'seq-01/frame-000000.color.jpg',
    #           'depth': 'seq-01/frame-000000.depth.png',
    #           'pose': 'seq-01/frame-000000.pose.txt',
    #           'timestamp': 0}

    # images + depth maps
    logger.info('populating image and depth maps files ...')
    snapshots = kapture.RecordsCamera()
    depth_maps = kapture.RecordsDepth()
    for shot in shots.values():
        snapshots[shot['timestamp'], RGB_SENSOR_ID] = shot['color']
        kapture_depth_map_filename = shot['depth'][:-len('.png')]  # kapture depth files are not png
        depth_maps[shot['timestamp'], DEPTH_SENSOR_ID] = kapture_depth_map_filename
        kapture_registered_depth_map_filename = shot['depth'][:-len('.png')] + '.reg'  # kapture depth files are not png
        depth_maps[shot['timestamp'], REG_DEPTH_SENSOR_ID] = kapture_registered_depth_map_filename

    # poses
    logger.info('import poses files ...')
    trajectories = kapture.Trajectories()
    for shot in shots.values():
        pose_filepath = path.join(d7scenes_path, shot['pose'])
        pose_mat = np.loadtxt(pose_filepath)  # camera-to-world, 4×4 matrix in homogeneous coordinates
        rotation_mat = pose_mat[0:3, 0:3]
        position_vec = pose_mat[0:3, 3]
        rotation_quat = quaternion.from_rotation_matrix(rotation_mat)
        pose_world_from_cam = kapture.PoseTransform(r=rotation_quat, t=position_vec)
        pose_cam_from_world = pose_world_from_cam.inverse()
        trajectories[shot['timestamp'], RGBD_SENSOR_ID] = pose_cam_from_world

    # sensors
    """
    From authors: The RGB and depth camera have not been calibrated and we can’t provide calibration parameters at the
    moment. The recorded frames correspond to the raw, uncalibrated camera images. In the KinectFusion pipeline we used
    the following default intrinsics for the depth camera: Principle point (320,240), Focal length (585,585).    
    ----
    We use the extr. kinect camera parameters from https://projet.liris.cnrs.fr/voir/activities-dataset/kinect-calibration.html. 
    """
    sensors = kapture.Sensors()
    # camera_type = kapture.CameraType.OPENCV
    # camera_params = [640, 480, 5.2161910696979987e+02, 5.2132946256749767e+02, 3.1755491910920682e+02, 2.5921654718027673e+02,
    #                  2.5673002693536984e-01, -9.3976085633794137e-01, -1.8605549188751580e-03, -2.2232238578189420e-03]  # w, h, f, cx, cy, k1, k2, p1, p2, k3
    camera_type = kapture.CameraType.SIMPLE_PINHOLE
    # camera_params = [640, 480, 5.2161910696979987e+02, 5.2132946256749767e+02, 3.1755491910920682e+02, 2.5921654718027673e+02]  # w, h, fx, fy, cx, cy
    camera_params = [640, 480, 525, 320, 240]  # w, h, f, cx, cy
    sensors[RGB_SENSOR_ID] = kapture.Camera(
        name=RGB_SENSOR_ID,
        camera_type=camera_type,
        camera_params=camera_params
    )
    # depth_camera_type = kapture.CameraType.OPENCV
    # depth_camera_params = [640, 480, 5.8818670481438744e+02, 5.8724220649505514e+02, 3.1076280589210484e+02, 2.2887144980135292e+02,
    #                        -1.8932947734719333e-01, 1.1358015104098631e+00, -4.4260345347128536e-03, -5.4869578635708153e-03, -2.2460143607712921e+00] # w, h, f, cx, cy, k1, k2, p1, p2, k3
    depth_camera_type = kapture.CameraType.SIMPLE_PINHOLE
    # depth_camera_params = [640, 480, 5.8818670481438744e+02, 5.8724220649505514e+02, 3.1076280589210484e+02, 2.2887144980135292e+02] # w, h, fx, fy, cx, cy
    depth_camera_params = [640, 480, 585, 320, 240]  # w, h, f, cx, cy
    sensors[DEPTH_SENSOR_ID] = kapture.Camera(
        name=DEPTH_SENSOR_ID,
        camera_type=depth_camera_type,
        camera_params=depth_camera_params,
        sensor_type='depth'
    )
    sensors[REG_DEPTH_SENSOR_ID] = kapture.Camera(
        name=REG_DEPTH_SENSOR_ID,
        camera_type=depth_camera_type,
        camera_params=camera_params,
        sensor_type='depth'
    )

    # bind camera and depth sensor into a rig
    R = np.array([[9.9996518012567637e-01, 2.6765126468950343e-03, -7.9041012313000904e-03],
                  [-2.7409311281316700e-03, 9.9996302803027592e-01, -8.1504520778013286e-03],
                  [7.8819942130445332e-03, 8.1718328771890631e-03, 9.9993554558014031e-01]])
    T = np.array([-2.5558943178152542e-02, 1.0109636268061706e-04, 2.0318321729487039e-03])
    Rt = np.vstack((np.hstack((R, T.reshape(3, 1))), np.array([0, 0, 0, 1])))
    logger.info('building rig with camera and depth sensor ...')
    rigs = kapture.Rigs()
    rigs[RGBD_SENSOR_ID, RGB_SENSOR_ID] = kapture.PoseTransform(quaternion.from_rotation_matrix(R), T)
    rigs[RGBD_SENSOR_ID, REG_DEPTH_SENSOR_ID] = kapture.PoseTransform(quaternion.from_rotation_matrix(R), T)
    rigs[RGBD_SENSOR_ID, DEPTH_SENSOR_ID] = kapture.PoseTransform()

    # import (copy) image files.
    logger.info('copying image files ...')
    image_filenames = [f for _, _, f in kapture.flatten(snapshots)]
    import_record_data_from_dir_auto(d7scenes_path, kapture_dir_path, image_filenames, images_import_method)

    # import (copy) depth map files.
    logger.info('converting depth files ...')
    depth_map_filenames = kapture.io.records.records_to_filepaths(depth_maps, kapture_dir_path)
    hide_progress = logger.getEffectiveLevel() > logging.INFO
    for depth_map_filename, depth_map_filepath_kapture in tqdm(depth_map_filenames.items(), disable=hide_progress):
        if '.reg' in depth_map_filename:
            continue
        depth_map_filepath_7scenes = path.join(d7scenes_path, depth_map_filename + '.png')
        depth_map = np.array(Image.open(depth_map_filepath_7scenes))
        # change invalid depth from 65535 to 0
        depth_map[depth_map == 65535] = 0
        # depth maps are in mm in 7scenes, convert to meters
        depth_map = depth_map.astype(np.float32) * 1.0e-3
        kapture.io.records.records_depth_to_file(depth_map_filepath_kapture, depth_map)
        # register depth to rgb
        reg_depth_map = register_depth(get_K(depth_camera_type, depth_camera_params), get_K(camera_type, camera_params),
                                       Rt, depth_map, camera_params[0], camera_params[1])
        kapture.io.records.records_depth_to_file(depth_map_filepath_kapture + '.reg', reg_depth_map)

    # pack into kapture format
    imported_kapture = kapture.Kapture(
        records_camera=snapshots,
        records_depth=depth_maps,
        rigs=rigs,
        trajectories=trajectories,
        sensors=sensors)

    logger.info('writing imported data ...')
    kapture_to_dir(kapture_dir_path, imported_kapture)
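
register_depth above needs 3x3 intrinsics matrices for both sensors, built by get_K from the parameter lists. A minimal sketch of what such a matrix looks like for the SIMPLE_PINHOLE parameters [w, h, f, cx, cy] used here (this illustrates the pinhole model; it is an assumption about get_K, not its actual source):

import numpy as np

def simple_pinhole_K(camera_params):
    # camera_params = [w, h, f, cx, cy]; width and height do not enter K itself
    _, _, f, cx, cy = camera_params
    return np.array([[f, 0., cx],
                     [0., f, cy],
                     [0., 0., 1.]])

K_depth = simple_pinhole_K([640, 480, 585, 320, 240])  # depth sensor above
K_rgb = simple_pinhole_K([640, 480, 525, 320, 240])    # RGB sensor above
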
Code example #4
def import_robotcar_seasons(
        robotcar_path: str,  # noqa: C901: function a bit long but not too complex
        kapture_path: str,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.skip,
        import_feature_db: bool = False,
        skip_reconstruction: bool = False,
        rig_collapse: bool = False,
        use_colmap_intrinsics: bool = False,
        import_v1: bool = False) -> None:
    """
    Reads the RobotCar Seasons data and creates several kaptures with training and query data.

    :param robotcar_path: path to the robotcar top directory
    :param kapture_path: path to the kapture top directory
    :param force_overwrite_existing: Silently overwrite kapture files if they already exist.
    :param images_import_method: choose how to import actual image files.
    :param import_feature_db: if True, will import the features from the database
    :param skip_reconstruction: if True, will skip the reconstruction part from the training data
    :param rig_collapse: if True, will replace camera poses with rig poses
    :param use_colmap_intrinsics: if True, will use the colmap intrinsics
    :param import_v1: if True, will use version 1 of the format
    """

    os.makedirs(kapture_path, exist_ok=True)

    cameras = import_robotcar_cameras(path.join(robotcar_path, 'intrinsics'))
    rigs = import_robotcar_rig(path.join(robotcar_path, 'extrinsics'))

    logger.info("Importing test data")
    # Test data
    image_pattern = re.compile(
        r'(?P<condition>.+)/(?P<camera>\w+)/(?P<timestamp>\d+)\.jpg')
    queries_path = path.join(robotcar_path, '3D-models', 'individual',
                             'queries_per_location')
    kapture_imported_query = {}
    for root, dirs, files in os.walk(queries_path):
        for query_file in files:
            records_camera = kapture.RecordsCamera()
            # Get list of query images
            with open(path.join(queries_path, query_file)) as f:
                for line in f:
                    matches = image_pattern.match(line)
                    image_path = line.strip()
                    if not matches:
                        logger.warning(f"Error matching line in {image_path}")
                        continue
                    matches = matches.groupdict()
                    timestamp = int(matches['timestamp'])
                    camera = str(matches['camera'])
                    # condition = str(matches['condition']) : not used ?
                    records_camera[timestamp, camera] = image_path

                (query_name, _) = query_file.split('.')
                kapture_test = kapture.Kapture(sensors=cameras,
                                               rigs=rigs,
                                               records_camera=records_camera)
                kapture_imported_query[int(
                    query_name.split('_')[-1])] = kapture_test

    # Training data
    logger.info("Importing training data")
    colmap_reconstructions_path = path.join(robotcar_path, '3D-models',
                                            'individual',
                                            'colmap_reconstructions')
    kapture_imported_training = {}
    for root, dirs, files in os.walk(colmap_reconstructions_path):
        for colmap_reconstruction in dirs:
            (loc_id, _) = colmap_reconstruction.split('_')
            kapture_reconstruction_dir = path.join(kapture_path,
                                                   f"{int(loc_id):02d}",
                                                   "mapping")
            delete_existing_kapture_files(kapture_reconstruction_dir,
                                          force_overwrite_existing)
            logger.info(f'Converting reconstruction {loc_id} to kapture  ...')
            kapture_reconstruction_data = import_robotcar_colmap_location(
                robotcar_path,
                path.join(colmap_reconstructions_path, colmap_reconstruction),
                kapture_reconstruction_dir, rigs, skip_reconstruction)
            # replace intrinsics with the ones found in the text files
            if not use_colmap_intrinsics:
                kapture_reconstruction_data.sensors = cameras
            kapture_imported_training[int(
                loc_id)] = kapture_reconstruction_data

    if not import_v1:
        _import_robotcar_v2_train(robotcar_path, kapture_imported_query,
                                  kapture_imported_training, image_pattern)

    # apply rig collapse
    if rig_collapse:
        logger.info('replacing camera poses with rig poses.')
        for kapture_mapping in kapture_imported_training.values():
            kapture.rigs_recover_inplace(kapture_mapping.trajectories, rigs,
                                         ['rear'])

    # IO operations
    robotcar_image_path = path.join(robotcar_path, "images")
    for loc_id, kapture_query in kapture_imported_query.items():
        loc_id_str = f"{loc_id:02d}"
        logger.info(f'writing test data: {loc_id_str}')
        kapture_test_dir = path.join(kapture_path, loc_id_str, "query")
        delete_existing_kapture_files(kapture_test_dir,
                                      force_overwrite_existing)
        if not kapture_query.records_camera:  # all images were removed
            continue
        kapture_to_dir(kapture_test_dir, kapture_query)
        query_images = [
            f for _, _, f in kapture.flatten(kapture_query.records_camera)
        ]
        import_record_data_from_dir_auto(robotcar_image_path, kapture_test_dir,
                                         query_images, images_import_method)

    for loc_id, kapture_mapping in kapture_imported_training.items():
        loc_id_str = f"{loc_id:02d}"
        logger.info(f'writing mapping data: {loc_id_str}')
        kapture_reconstruction_dir = path.join(kapture_path, f"{loc_id:02d}",
                                               "mapping")
        kapture_to_dir(kapture_reconstruction_dir, kapture_mapping)
        mapping_images = [
            f for _, _, f in kapture.flatten(kapture_mapping.records_camera)
        ]
        import_record_data_from_dir_auto(robotcar_image_path,
                                         kapture_reconstruction_dir,
                                         mapping_images, images_import_method)

    if import_feature_db:
        _import_colmap_overcast_reference(robotcar_path, kapture_path,
                                          force_overwrite_existing)
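
The query image lists are parsed with the image_pattern regular expression defined above. A small sketch of what it extracts from one line (the example line is made up but follows the condition/camera/timestamp.jpg layout):

import re

image_pattern = re.compile(r'(?P<condition>.+)/(?P<camera>\w+)/(?P<timestamp>\d+)\.jpg')
line = 'overcast-summer/rear/1432293785208713.jpg'  # hypothetical query line
matches = image_pattern.match(line).groupdict()
print(matches)  # {'condition': 'overcast-summer', 'camera': 'rear', 'timestamp': '1432293785208713'}
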
Code example #5
def import_extended_cmu_seasons(
        cmu_path: str,
        top_kaptures_path: str,
        slice_range: List[int],
        import_all_files: bool = False,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.skip) -> None:
    """
    Import extended CMU data to kapture. Will make training and query kaptures for every CMU slice.

    :param cmu_path: path to the top directory of the CMU dataset files
    :param top_kaptures_path: top directory for the kaptures to create
    :param slice_range: range of CMU slices to import
    :param import_all_files: if True, will import all files
    :param force_overwrite_existing: Silently overwrite kapture files if they already exist.
    :param images_import_method: choose how to import actual image files.
    """

    os.makedirs(top_kaptures_path, exist_ok=True)

    cameras = import_extended_cmu_seasons_intrinsics(
        path.join(cmu_path, 'intrinsics.txt'))

    for slice_n in slice_range:
        # prepare paths
        slice_path = os.path.join(cmu_path, f'slice{slice_n}')
        training_images_path = os.path.join(slice_path, 'database')
        query_images_path = os.path.join(slice_path, 'query')
        gt_trajectories_path = os.path.join(
            slice_path, f'ground-truth-database-images-slice{slice_n}.txt')
        query_image_list = os.path.join(slice_path,
                                        f'test-images-slice{slice_n}.txt')
        query_gt_path = os.path.join(slice_path, 'camera-poses')
        query_gt_list = [
            os.path.join(query_gt_path, f) for f in os.listdir(query_gt_path)
        ]

        # Import training images
        kapture_training_path = path.join(top_kaptures_path, f'slice{slice_n}',
                                          "mapping")
        delete_existing_kapture_files(kapture_training_path,
                                      force_overwrite_existing)
        training_records_camera, training_trajectories = import_extended_cmu_seasons_images(
            gt_trajectories_path)
        training_kapture = kapture.Kapture(
            sensors=cameras,
            records_camera=training_records_camera,
            trajectories=training_trajectories)
        if import_all_files:
            _add_images_from_folder(training_images_path, training_kapture)
        kapture_to_dir(kapture_training_path, training_kapture)
        # finally import images
        if images_import_method != TransferAction.skip:
            filename_list = [
                f
                for _, _, f in kapture.flatten(training_kapture.records_camera)
            ]
            logger.info(f'importing {len(filename_list)} image files ...')
            import_record_data_from_dir_auto(training_images_path,
                                             kapture_training_path,
                                             filename_list,
                                             images_import_method)
        # Import query images
        kapture_query_path = path.join(top_kaptures_path, f'slice{slice_n}',
                                       "query")
        delete_existing_kapture_files(kapture_query_path,
                                      force_erase=force_overwrite_existing)
        query_records_camera, query_trajectories = import_extended_cmu_seasons_images(
            query_image_list)
        query_kapture = kapture.Kapture(sensors=cameras,
                                        records_camera=query_records_camera,
                                        trajectories=query_trajectories)

        # import query gt when possible
        query_gt_kapture = []
        for query_gt_path in query_gt_list:
            query_gt_records_camera, query_gt_trajectories = import_extended_cmu_seasons_images(
                query_gt_path)
            query_gt_kapture.append(
                kapture.Kapture(sensors=cameras,
                                records_camera=query_gt_records_camera,
                                trajectories=query_gt_trajectories))
        data_to_merge = [query_kapture] + query_gt_kapture
        query_kapture = merge_keep_ids(
            data_to_merge,
            skip_list=[],
            data_paths=["" for _ in range(len(data_to_merge))],
            kapture_path="",
            images_import_method=TransferAction.skip)
        if import_all_files:
            _add_images_from_folder(query_images_path, query_kapture)
        kapture_to_dir(kapture_query_path, query_kapture)
        # finally import images
        if images_import_method != TransferAction.skip:
            filename_list = [
                f for _, _, f in kapture.flatten(query_kapture.records_camera)
            ]
            logger.info(f'importing {len(filename_list)} image files ...')
            import_record_data_from_dir_auto(query_images_path,
                                             kapture_query_path, filename_list,
                                             images_import_method)
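
A minimal usage sketch for the function above; the paths and slice range are placeholders, and the TransferAction import location is an assumption:

# Hypothetical usage: import slices 2 to 6 of the Extended CMU Seasons dataset.
from kapture.io.records import TransferAction  # assumed import location

import_extended_cmu_seasons(
    cmu_path='/data/Extended-CMU-Seasons',
    top_kaptures_path='/data/cmu_kaptures',
    slice_range=list(range(2, 7)),
    import_all_files=False,
    force_overwrite_existing=True,
    images_import_method=TransferAction.link_absolute)
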
Code example #6
def import_7scenes(d7scenes_path: str,
                   kapture_dir_path: str,
                   force_overwrite_existing: bool = False,
                   images_import_method: TransferAction = TransferAction.skip,
                   partition: Optional[str] = None
                   ) -> None:
    """
    Imports the RGB-D 7-Scenes dataset and saves it as kapture.

    :param d7scenes_path: path to the 7scenes sequence root path
    :param kapture_dir_path: path to kapture top directory
    :param force_overwrite_existing: Silently overwrite kapture files if they already exist.
    :param images_import_method: choose how to import actual image files.
    :param partition: if specified, either 'mapping' or 'query'. Requires d7scenes_path/TestSplit.txt
                      or TrainSplit.txt to exist.
    """
    os.makedirs(kapture_dir_path, exist_ok=True)
    delete_existing_kapture_files(kapture_dir_path, force_erase=force_overwrite_existing)

    logger.info('loading all content ...')

    d7s_filename_re = re.compile(r'((?P<sequence>.+)/)?frame-(?P<frame_id>\d{6})\.(?P<suffix>\w*)\.(?P<ext>\w*)')

    # populate all relevant files
    d7s_filenames = (path_secure(path.relpath(path.join(dp, fn), d7scenes_path))
                     for dp, _, fs in os.walk(d7scenes_path) for fn in fs)

    logger.info('populating 7-scenes files ...')
    d7s_filenames = {filename: d7s_filename_re.search(filename).groupdict()
                     for filename in sorted(d7s_filenames)
                     if d7s_filename_re.search(filename)}

    # reorg as shot[seq, id] = {color: , depth: , pose: , ...}
    shots = {}
    for timestamp, (filename, file_attribs) in enumerate(d7s_filenames.items()):
        shot_id = (file_attribs.get('sequence'), file_attribs['frame_id'])
        shots.setdefault(shot_id, {})[file_attribs['suffix']] = filename

    # fake timestamps
    for timestamp, shot_id in enumerate(shots):
        shots[shot_id]['timestamp'] = timestamp

    # if given, filter partition
    if partition is not None:
        # read the authors split file
        partition_filepath = path.join(d7scenes_path, PARTITION_FILENAMES[partition])
        if not path.isfile(partition_filepath):
            raise FileNotFoundError(f'partition file is missing: {partition_filepath}.')
        with open(partition_filepath, 'rt') as file:
            split_sequences = [f'seq-{int(seq.strip()[len("sequence"):]):02}' for seq in file.readlines()]
        assert len(split_sequences) > 0
        # filter out
        shots = {(seq, frame): shot
                 for (seq, frame), shot in shots.items()
                 if seq in split_sequences}

    if len(shots) == 0:
        raise FileNotFoundError('no file found: make sure the path to 7scenes sequence is valid.')

    # eg. shots['seq-01', '000000'] =
    #       {
    #           'color': 'seq-01/frame-000000.color.jpg',
    #           'depth': 'seq-01/frame-000000.depth.png',
    #           'pose': 'seq-01/frame-000000.pose.txt',
    #           'timestamp': 0}

    # images + depth maps
    logger.info('populating image and depth maps files ...')
    snapshots = kapture.RecordsCamera()
    depth_maps = kapture.RecordsDepth()
    for shot in shots.values():
        snapshots[shot['timestamp'], RGB_SENSOR_ID] = shot['color']
        kapture_depth_map_filename = shot['depth'][:-len('.png')]  # kapture depth files are not png
        depth_maps[shot['timestamp'], DEPTH_SENSOR_ID] = kapture_depth_map_filename

    # poses
    logger.info('import poses files ...')
    trajectories = kapture.Trajectories()
    for shot in shots.values():
        pose_filepath = path.join(d7scenes_path, shot['pose'])
        pose_mat = np.loadtxt(pose_filepath)  # camera-to-world, 4×4 matrix in homogeneous coordinates
        rotation_mat = pose_mat[0:3, 0:3]
        position_vec = pose_mat[0:3, 3]
        rotation_quat = quaternion.from_rotation_matrix(rotation_mat)
        pose_world_from_cam = kapture.PoseTransform(r=rotation_quat, t=position_vec)
        pose_cam_from_world = pose_world_from_cam.inverse()
        trajectories[shot['timestamp'], RGBD_SENSOR_ID] = pose_cam_from_world

    # sensors
    """
    From authors: The RGB and depth camera have not been calibrated and we can’t provide calibration parameters at the
    moment. The recorded frames correspond to the raw, uncalibrated camera images. In the KinectFusion pipeline we used
    the following default intrinsics for the depth camera: Principle point (320,240), Focal length (585,585).
    """
    sensors = kapture.Sensors()
    camera_type = kapture.CameraType.SIMPLE_PINHOLE
    camera_params = [640, 480, 585, 320, 240]  # w, h, f, cx, cy
    sensors[RGB_SENSOR_ID] = kapture.Camera(
        name=RGB_SENSOR_ID,
        camera_type=camera_type,
        camera_params=camera_params
    )
    sensors[DEPTH_SENSOR_ID] = kapture.Camera(
        name=DEPTH_SENSOR_ID,
        camera_type=camera_type,
        camera_params=camera_params,
        sensor_type='depth'
    )

    # bind camera and depth sensor into a rig
    logger.info('building rig with camera and depth sensor ...')
    rigs = kapture.Rigs()
    rigs[RGBD_SENSOR_ID, RGB_SENSOR_ID] = kapture.PoseTransform()
    rigs[RGBD_SENSOR_ID, DEPTH_SENSOR_ID] = kapture.PoseTransform()

    # import (copy) image files.
    logger.info('copying image files ...')
    image_filenames = [f for _, _, f in kapture.flatten(snapshots)]
    import_record_data_from_dir_auto(d7scenes_path, kapture_dir_path, image_filenames, images_import_method)

    # import (copy) depth map files.
    logger.info('converting depth files ...')
    depth_map_filenames = kapture.io.records.records_to_filepaths(depth_maps, kapture_dir_path)
    hide_progress = logger.getEffectiveLevel() > logging.INFO
    for depth_map_filename, depth_map_filepath_kapture in tqdm(depth_map_filenames.items(), disable=hide_progress):
        depth_map_filepath_7scenes = path.join(d7scenes_path, depth_map_filename + '.png')
        depth_map = np.array(Image.open(depth_map_filepath_7scenes))
        # change invalid depth from 65535 to 0
        depth_map[depth_map == 65535] = 0
        # depth maps are in mm in 7scenes, convert to meters
        depth_map = depth_map.astype(np.float32) * 1.0e-3
        kapture.io.records.records_depth_to_file(depth_map_filepath_kapture, depth_map)

    # pack into kapture format
    imported_kapture = kapture.Kapture(
        records_camera=snapshots,
        records_depth=depth_maps,
        rigs=rigs,
        trajectories=trajectories,
        sensors=sensors)

    logger.info('writing imported data ...')
    kapture_to_dir(kapture_dir_path, imported_kapture)
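
The 7-Scenes pose files store camera-to-world matrices, while kapture trajectories store poses in the opposite direction, hence the inverse() call above. A minimal numpy sketch of that inversion for a rigid 4x4 pose (for illustration only, not the kapture implementation):

import numpy as np

def invert_rigid(pose_world_from_cam):
    # pose_world_from_cam: 4x4 homogeneous matrix [R | t; 0 0 0 1]
    R = pose_world_from_cam[:3, :3]
    t = pose_world_from_cam[:3, 3]
    pose_cam_from_world = np.eye(4)
    pose_cam_from_world[:3, :3] = R.T        # inverse rotation
    pose_cam_from_world[:3, 3] = -R.T @ t    # inverse translation
    return pose_cam_from_world
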
Code example #7
def import_colmap(kapture_dirpath: Optional[str],
                  colmap_database_filepath: str = None,
                  colmap_reconstruction_dirpath: str = None,
                  colmap_images_dirpath: str = None,
                  colmap_rig_filepath: str = None,
                  no_geometric_filtering: bool = False,
                  skip_reconstruction: bool = False,
                  force_overwrite_existing: bool = False,
                  images_import_strategy: TransferAction = TransferAction.link_absolute
                  ) -> kapture.Kapture:
    """
    Converts colmap files to a kapture object.

    :param kapture_dirpath: path to kapture directory. Is used to store keypoints, descriptors and matches files.
                            If not given (None), keypoints, descriptors and matches are skipped.
    :param colmap_database_filepath: optional path to colmap database file.
    :param colmap_reconstruction_dirpath: optional path to colmap reconstruction directory.
    :param colmap_images_dirpath: directory path to colmap images. If given, a link to it will be created.
    :param colmap_rig_filepath: optional path to colmap rig file.
    :param no_geometric_filtering: if True, do not apply geometric filtering to the matches
    :param skip_reconstruction: skip the import of the kapture/reconstruction part,
                                i.e. Keypoints, Descriptors, Matches, Points3d, Observations.
    :param force_overwrite_existing: Silently overwrite kapture files if they already exist.
    :param images_import_strategy: choose how to import actual image files.
    :return: kapture object
    """

    # sanity checks
    if kapture_dirpath and colmap_images_dirpath and images_import_strategy == TransferAction.skip:
        logger.warning(f'Images from colmap will not be copied (skip).')

    # prepare output directory
    if kapture_dirpath:
        kapture.io.structure.delete_existing_kapture_files(kapture_dirpath, force_erase=force_overwrite_existing)
        os.makedirs(kapture_dirpath, exist_ok=True)

    # 1: import database
    kapture_from_database = None
    if colmap_database_filepath:
        logger.debug(f'importing from database "{colmap_database_filepath}"')
        kapture_from_database = import_colmap_database(
            colmap_database_filepath, kapture_dirpath, no_geometric_filtering, skip_reconstruction)

    # 2: import reconstruction text files.
    kapture_data_reconstructed = None
    if colmap_reconstruction_dirpath:
        # do not overwrite keypoints files if any from database import
        what_to_skip_during_import_txt = set()
        # if keypoints already loaded from DB,
        # do not load them again and overwrite them
        # because keypoints from txt have less info:
        if kapture_from_database and kapture_from_database.keypoints:
            what_to_skip_during_import_txt.add(kapture.Keypoints)
        # skip_reconstruction=skip keypoints, Points3d, Observations
        if skip_reconstruction:
            what_to_skip_during_import_txt |= {kapture.Keypoints, kapture.Points3d, kapture.Observations}
        logger.debug(f'importing from reconstruction "{colmap_reconstruction_dirpath}"')
        kapture_data_reconstructed = import_colmap_from_reconstruction_files(
            colmap_reconstruction_dirpath, kapture_dirpath, what_to_skip_during_import_txt)

    # Merge data from database and reconstruction files
    if colmap_database_filepath and colmap_reconstruction_dirpath:
        # if both are present:
        # - kapture_data.sensors: merge both, with priority to reconstruction.
        # - kapture_data.trajectories: keep only reconstruction trajectories.
        # - kapture_data.observations: keep only reconstruction observations.
        # - kapture_data.points3d: only exists in colmap reconstruction
        # - kapture_data.*: anything else, keep only database

        # by default take all from database
        kapture_data = kapture_from_database
        # just replace trajectories, observations, points3d
        kapture_data.trajectories = kapture_data_reconstructed.trajectories
        kapture_data.observations = kapture_data_reconstructed.observations
        kapture_data.points3d = kapture_data_reconstructed.points3d
        # do a merge for sensors. If conflict prefer reconstruction:
        kapture_data.sensors.update(kapture_data_reconstructed.sensors)

    elif colmap_database_filepath:
        kapture_data = kapture_from_database
    elif colmap_reconstruction_dirpath:
        kapture_data = kapture_data_reconstructed
    else:
        raise ValueError('Neither database nor reconstruction files were given.')

    # if there is a rig, restore it, and also restore the timestamps
    if colmap_rig_filepath:
        rigs, records_camera, trajectories = import_colmap_rig(
            colmap_rig_filepath,
            kapture_data.records_camera,
            kapture_data.trajectories)

        kapture_data.rigs = rigs
        if records_camera:
            if not len(list(kapture.flatten(kapture_data.records_camera))) == len(
                    list(kapture.flatten(records_camera))):
                raise ValueError('inconsistent timestamp reconstruction in images')
            kapture_data.records_camera = records_camera

        if trajectories:
            if not len(list(kapture.flatten(kapture_data.trajectories))) == len(list(kapture.flatten(trajectories))):
                raise ValueError('inconsistent timestamp reconstruction in trajectories')
            kapture_data.trajectories = trajectories

    # finally import images
    if kapture_dirpath and colmap_images_dirpath and images_import_strategy != TransferAction.skip:
        filename_list = [f for _, _, f in kapture.flatten(kapture_data.records_camera)]
        logger.info(f'importing {len(filename_list)} image files ...')
        import_record_data_from_dir_auto(
            colmap_images_dirpath,
            kapture_dirpath,
            filename_list,
            images_import_strategy
        )

    return kapture_data
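
A minimal usage sketch for a typical COLMAP project layout; every path below is a placeholder and the TransferAction import location is an assumption:

# Hypothetical usage: import a COLMAP database together with a text-format reconstruction folder.
from kapture.io.records import TransferAction  # assumed import location

kapture_data = import_colmap(
    kapture_dirpath='/data/my_kapture',
    colmap_database_filepath='/data/colmap_project/database.db',
    colmap_reconstruction_dirpath='/data/colmap_project/reconstruction_txt',  # cameras.txt / images.txt / points3D.txt
    colmap_images_dirpath='/data/colmap_project/images',
    images_import_strategy=TransferAction.link_absolute)
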
Code example #8
def import_opensfm(
        opensfm_rootdir: str,
        kapture_rootdir: str,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.copy) -> None:
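    """
    Convert an OpenSfM structure to a kapture on disk. Also copy, move or link the image files if necessary.

    :param opensfm_rootdir: path to the OpenSfM top directory
    :param kapture_rootdir: top directory of the kapture to create
    :param force_overwrite_existing: Silently overwrite kapture files if they already exist.
    :param images_import_method: choose how to import actual image files.
    """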
    disable_tqdm = logger.getEffectiveLevel() != logging.INFO
    # load reconstruction
    opensfm_reconstruction_filepath = path.join(opensfm_rootdir,
                                                'reconstruction.json')
    with open(opensfm_reconstruction_filepath, 'rt') as f:
        opensfm_reconstruction = json.load(f)
    # remove the single list @ root
    opensfm_reconstruction = opensfm_reconstruction[0]

    # prepare space for output
    os.makedirs(kapture_rootdir, exist_ok=True)
    delete_existing_kapture_files(kapture_rootdir,
                                  force_erase=force_overwrite_existing)

    # import cameras
    kapture_sensors = kapture.Sensors()
    assert 'cameras' in opensfm_reconstruction
    # import cameras
    for osfm_camera_id, osfm_camera in opensfm_reconstruction['cameras'].items():
        camera = import_camera(osfm_camera, name=osfm_camera_id)
        kapture_sensors[osfm_camera_id] = camera

    # import shots
    logger.info('importing images and trajectories ...')
    kapture_images = kapture.RecordsCamera()
    kapture_trajectories = kapture.Trajectories()
    opensfm_image_dirpath = path.join(opensfm_rootdir, 'images')
    assert 'shots' in opensfm_reconstruction
    # used later to retrieve the timestamp of an image.
    image_timestamps, image_sensors = {}, {}
    for timestamp, (image_filename, shot) in enumerate(
            opensfm_reconstruction['shots'].items()):
        sensor_id = shot['camera']
        image_timestamps[image_filename] = timestamp
        image_sensors[image_filename] = sensor_id
        # in OpenSfm, (sensor, timestamp) is not unique.
        rotation_vector = shot['rotation']
        q = quaternion.from_rotation_vector(rotation_vector)
        translation = shot['translation']
        # capture_time = shot['capture_time'] # may be invalid
        # gps_position = shot['gps_position']
        kapture_images[timestamp, sensor_id] = image_filename
        kapture_trajectories[timestamp, sensor_id] = kapture.PoseTransform(r=q, t=translation)

    # copy image files
    filename_list = [f for _, _, f in kapture.flatten(kapture_images)]
    import_record_data_from_dir_auto(
        source_record_dirpath=opensfm_image_dirpath,
        destination_kapture_dirpath=kapture_rootdir,
        filename_list=filename_list,
        copy_strategy=images_import_method)

    # gps from pre-extracted exif, in exif/image_name.jpg.exif
    kapture_gnss = None
    opensfm_exif_dirpath = path.join(opensfm_rootdir, 'exif')
    opensfm_exif_suffix = '.exif'
    if path.isdir(opensfm_exif_dirpath):
        logger.info('importing GNSS from exif ...')
        camera_ids = set(image_sensors.values())
        # add a gps sensor for each camera
        map_cam_to_gnss_sensor = {
            cam_id: 'GPS_' + cam_id
            for cam_id in camera_ids
        }
        for gnss_id in map_cam_to_gnss_sensor.values():
            kapture_sensors[gnss_id] = kapture.Sensor(
                sensor_type='gnss', sensor_params=['EPSG:4326'])
        # build epsg_code for all cameras
        kapture_gnss = kapture.RecordsGnss()
        opensfm_exif_filepath_list = (
            path.join(dirpath, filename)
            for dirpath, _, filename_list in os.walk(opensfm_exif_dirpath)
            for filename in filename_list
            if filename.endswith(opensfm_exif_suffix))
        for opensfm_exif_filepath in tqdm(opensfm_exif_filepath_list,
                                          disable=disable_tqdm):
            image_filename = path.relpath(
                opensfm_exif_filepath,
                opensfm_exif_dirpath)[:-len(opensfm_exif_suffix)]
            image_timestamp = image_timestamps[image_filename]
            image_sensor_id = image_sensors[image_filename]
            gnss_timestamp = image_timestamp
            gnss_sensor_id = map_cam_to_gnss_sensor[image_sensor_id]
            with open(opensfm_exif_filepath, 'rt') as f:
                js_root = json.load(f)
                if 'gps' not in js_root:
                    logger.warning(f'NO GPS data in "{opensfm_exif_filepath}"')
                    continue

                gps_coords = {
                    'x': js_root['gps']['longitude'],
                    'y': js_root['gps']['latitude'],
                    'z': js_root['gps'].get('altitude', 0.0),
                    'dop': js_root['gps'].get('dop', 0),
                    'utc': 0,
                }
                logger.debug(
                    f'found GPS data for ({gnss_timestamp}, {gnss_sensor_id}) in "{opensfm_exif_filepath}"'
                )
                kapture_gnss[gnss_timestamp,
                             gnss_sensor_id] = kapture.RecordGnss(**gps_coords)

    # import features (keypoints + descriptors)
    kapture_keypoints = None  # kapture.Keypoints(type_name='opensfm', dsize=4, dtype=np.float64)
    kapture_descriptors = None  # kapture.Descriptors(type_name='opensfm', dsize=128, dtype=np.uint8)
    opensfm_features_dirpath = path.join(opensfm_rootdir, 'features')
    opensfm_features_suffix = '.features.npz'
    if path.isdir(opensfm_features_dirpath):
        logger.info('importing keypoints and descriptors ...')
        opensfm_features_file_list = (path.join(
            dp, fn) for dp, _, fs in os.walk(opensfm_features_dirpath)
                                      for fn in fs)
        opensfm_features_file_list = (
            filepath for filepath in opensfm_features_file_list
            if filepath.endswith(opensfm_features_suffix))
        for opensfm_feature_filename in tqdm(opensfm_features_file_list,
                                             disable=disable_tqdm):
            image_filename = path.relpath(
                opensfm_feature_filename,
                opensfm_features_dirpath)[:-len(opensfm_features_suffix)]
            opensfm_image_features = np.load(opensfm_feature_filename)
            opensfm_image_keypoints = opensfm_image_features['points']
            opensfm_image_descriptors = opensfm_image_features['descriptors']
            logger.debug(
                f'parsing keypoints and descriptors in {opensfm_feature_filename}'
            )
            if kapture_keypoints is None:
                # print(type(opensfm_image_keypoints.dtype))
                # HAHOG = Hessian Affine feature point detector + HOG descriptor
                kapture_keypoints = kapture.Keypoints(
                    type_name='HessianAffine',
                    dsize=opensfm_image_keypoints.shape[1],
                    dtype=opensfm_image_keypoints.dtype)
            if kapture_descriptors is None:
                kapture_descriptors = kapture.Descriptors(
                    type_name='HOG',
                    dsize=opensfm_image_descriptors.shape[1],
                    dtype=opensfm_image_descriptors.dtype)

            # convert keypoints file
            keypoint_filpath = kapture.io.features.get_features_fullpath(
                data_type=kapture.Keypoints,
                kapture_dirpath=kapture_rootdir,
                image_filename=image_filename)
            kapture.io.features.image_keypoints_to_file(
                filepath=keypoint_filpath,
                image_keypoints=opensfm_image_keypoints)
            # register the file
            kapture_keypoints.add(image_filename)

            # convert descriptors file
            descriptor_filpath = kapture.io.features.get_features_fullpath(
                data_type=kapture.Descriptors,
                kapture_dirpath=kapture_rootdir,
                image_filename=image_filename)
            kapture.io.features.image_descriptors_to_file(
                filepath=descriptor_filpath,
                image_descriptors=opensfm_image_descriptors)
            # register the file
            kapture_descriptors.add(image_filename)

    # import matches
    kapture_matches = kapture.Matches()
    opensfm_matches_suffix = '_matches.pkl.gz'
    opensfm_matches_dirpath = path.join(opensfm_rootdir, 'matches')
    if path.isdir(opensfm_matches_dirpath):
        logger.info('importing matches ...')
        opensfm_matches_file_list = (path.join(
            dp, fn) for dp, _, fs in os.walk(opensfm_matches_dirpath)
                                     for fn in fs)
        opensfm_matches_file_list = (
            filepath for filepath in opensfm_matches_file_list
            if filepath.endswith(opensfm_matches_suffix))

        for opensfm_matches_filename in tqdm(opensfm_matches_file_list,
                                             disable=disable_tqdm):
            image_filename_1 = path.relpath(
                opensfm_matches_filename,
                opensfm_matches_dirpath)[:-len(opensfm_matches_suffix)]
            logger.debug(f'parsing matches in {image_filename_1}')
            with gzip.open(opensfm_matches_filename, 'rb') as f:
                opensfm_matches = pickle.load(f)
                for image_filename_2, opensfm_image_matches in opensfm_matches.items():
                    image_pair = (image_filename_1, image_filename_2)
                    # register the pair to kapture
                    kapture_matches.add(*image_pair)
                    # convert the bin file to kapture
                    kapture_matches_filepath = kapture.io.features.get_matches_fullpath(
                        image_filename_pair=image_pair,
                        kapture_dirpath=kapture_rootdir)
                    kapture_image_matches = np.hstack([
                        opensfm_image_matches.astype(np.float64),
                        # no matches scoring available: assume all equal to one
                        np.ones(shape=(opensfm_image_matches.shape[0], 1),
                                dtype=np.float64)
                    ])
                    kapture.io.features.image_matches_to_file(
                        kapture_matches_filepath, kapture_image_matches)

    # import 3-D points
    if 'points' in opensfm_reconstruction:
        logger.info('importing points 3-D')
        opensfm_points = opensfm_reconstruction['points']
        points_data = []
        for point_id in sorted(opensfm_points):
            point_data = opensfm_points[point_id]
            point_data = point_data['coordinates'] + point_data['color']
            points_data.append(point_data)
        kapture_points = kapture.Points3d(points_data)
    else:
        kapture_points = None

    # saving kapture csv files
    logger.info('saving kapture files')
    kapture_data = kapture.Kapture(sensors=kapture_sensors,
                                   records_camera=kapture_images,
                                   records_gnss=kapture_gnss,
                                   trajectories=kapture_trajectories,
                                   keypoints=kapture_keypoints,
                                   descriptors=kapture_descriptors,
                                   matches=kapture_matches,
                                   points3d=kapture_points)
    kapture.io.csv.kapture_to_dir(dirpath=kapture_rootdir,
                                  kapture_data=kapture_data)
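
OpenSfM stores each shot's rotation as an axis-angle vector; the import above converts it to a quaternion with the numpy-quaternion package before building the PoseTransform. A minimal sketch with a made-up shot entry:

import numpy as np
import quaternion  # numpy-quaternion package

# hypothetical OpenSfM shot: 90-degree rotation around z, 1 m translation along x
shot = {'camera': 'cam0', 'rotation': [0.0, 0.0, np.pi / 2], 'translation': [1.0, 0.0, 0.0]}
q = quaternion.from_rotation_vector(shot['rotation'])
print(q)  # approximately quaternion(0.7071, 0, 0, 0.7071)
# q and shot['translation'] are then passed to kapture.PoseTransform(r=q, t=shot['translation'])
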
Code example #9
def import_bundler(
        bundler_path: str,
        image_list_path: str,
        image_dir_path: str,
        kapture_dir_path: str,
        ignore_trajectories: bool,
        add_reconstruction: bool,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.skip) -> None:
    """
    Imports bundler data and saves it as kapture.

    :param bundler_path: path to the bundler model file
    :param image_list_path: path to the file containing the list of image names
    :param image_dir_path: input path to bundler image directory.
    :param kapture_dir_path: path to kapture top directory
    :param ignore_trajectories: if True, will not import the trajectories
    :param add_reconstruction: if True, will create 3D points and observations
    :param force_overwrite_existing: Silently overwrite kapture files if they already exist.
    :param images_import_method: choose how to import actual image files.
    """
    os.makedirs(kapture_dir_path, exist_ok=True)
    delete_existing_kapture_files(kapture_dir_path,
                                  force_erase=force_overwrite_existing)

    logger.info('loading all content...')
    # if there is a filter list, parse it
    with open(image_list_path) as file:
        file_content = file.readlines()
    # remove end line char and empty lines
    image_list = [line.rstrip() for line in file_content if line != '\n']

    with open(bundler_path) as file:
        bundler_content = file.readlines()
    # remove end line char and empty lines
    bundler_content = [
        line.rstrip() for line in bundler_content if line != '\n'
    ]
    assert bundler_content[0] == "# Bundle file v0.3"
    # <num_cameras> <num_points>
    line_1 = bundler_content[1].split()
    number_of_cameras = int(line_1[0])
    number_of_points = int(line_1[1])
    offset = 2
    number_of_lines_per_camera = 5  # 1 camera + 3 rotation + 1 translation

    cameras = kapture.Sensors()
    images = kapture.RecordsCamera()
    trajectories = kapture.Trajectories() if not ignore_trajectories else None
    points3d = [] if add_reconstruction else None
    keypoints = kapture.Keypoints('sift', np.float32,
                                  2) if add_reconstruction else None
    observations = kapture.Observations() if add_reconstruction else None
    image_mapping = []  # bundler camera_id -> (name, width, height)
    for i in range(0, number_of_cameras):
        start_index = i * number_of_lines_per_camera + offset
        file_name = image_list[i]

        # process camera info
        line_camera = bundler_content[start_index].split()
        focal_length = float(line_camera[0])
        k1 = float(line_camera[1])
        k2 = float(line_camera[2])

        # lazy open
        with Image.open(path.join(image_dir_path, file_name)) as im:
            width, height = im.size

        image_mapping.append((file_name, width, height))
        camera = kapture.Camera(
            MODEL,
            [width, height, focal_length, width / 2, height / 2, k1, k2])
        camera_id = f'sensor{i}'
        cameras[camera_id] = camera

        # process extrinsics
        rotation_matrix = [[float(v) for v in line.split()]
                           for line in bundler_content[start_index +
                                                       1:start_index + 4]]

        quaternion_wxyz = quaternion.from_rotation_matrix(rotation_matrix)
        translation = np.array(
            [float(v) for v in bundler_content[start_index + 4].split()])
        pose = kapture.PoseTransform(quaternion_wxyz, translation)

        # The Bundler model uses a coordinate system that differs from the *computer vision camera
        #  coordinate system*. More specifically, they use the camera coordinate system typically used
        #  in *computer graphics*. In this camera coordinate system, the camera is looking down the
        #  `-z`-axis, with the `x`-axis pointing to the right and the `y`-axis pointing upwards.
        # rotation Pi around the x axis to get the *computer vision camera
        #  coordinate system*
        rotation_around_x = quaternion.quaternion(0.0, 1.0, 0.0, 0.0)
        transformation = kapture.PoseTransform(rotation_around_x,
                                               np.array([0, 0, 0]))

        images[(i, camera_id)] = file_name
        if trajectories is not None:
            # transformation.inverse() is equal to transformation (rotation around -Pi or Pi around X is the same)
            trajectories[(i, camera_id)] = kapture.PoseTransform.compose(
                [transformation, pose, transformation])

    if points3d is not None and number_of_points > 0:
        assert keypoints is not None
        assert observations is not None
        offset += number_of_cameras * number_of_lines_per_camera
        number_of_lines_per_point = 3  # position color viewlist

        # (image_name, bundler_keypoint_id ) -> keypoint_id
        known_keypoints = {}
        local_keypoints = {}

        for i in range(0, number_of_points):
            start_index = i * number_of_lines_per_point + offset
            position = [float(v) for v in bundler_content[start_index].split()]
            # apply transformation
            position = [position[0], -position[1], -position[2]]
            color = [
                float(v) for v in bundler_content[start_index + 1].split()
            ]

            # <view list>: length of the list + [<camera> <key> <x> <y>]
            # x, y origin is the center of the image
            view_list = bundler_content[start_index + 2].split()
            number_of_observations = int(view_list[0])

            for j in range(number_of_observations):
                camera_id = int(view_list[1 + 4 * j + 0])
                keypoint_id = int(view_list[1 + 4 * j + 1])
                x = float(view_list[1 + 4 * j + 2])
                y = float(view_list[1 + 4 * j + 3])

                file_name, width, height = image_mapping[camera_id]
                # put (0,0) in upper left corner
                x += (width / 2)
                y += (height / 2)

                # init local_keypoints if needed
                if file_name not in local_keypoints:
                    local_keypoints[file_name] = []
                # do not add the same keypoint twice
                if (file_name, keypoint_id) not in known_keypoints:
                    # in the kapture format, keypoint id is different. Note that it starts from 0
                    known_keypoints[(file_name, keypoint_id)] = len(
                        local_keypoints[file_name])
                    local_keypoints[file_name].append([x, y])
                keypoint_idx = known_keypoints[(file_name, keypoint_id)]
                observations.add(i, file_name, keypoint_idx)
            points3d.append(position + color)
        points3d = np.array(points3d)

        # finally, convert local_keypoints to np.ndarray and add them to the global keypoints variable
        keypoints = kapture.Keypoints('sift', np.float32, 2)
        for image_filename, keypoints_array in local_keypoints.items():
            keypoints_np_array = np.array(keypoints_array).astype(np.float32)
            keypoints_out_path = kapture.io.features.get_keypoints_fullpath(
                kapture_dir_path, image_filename)
            kapture.io.features.image_keypoints_to_file(
                keypoints_out_path, keypoints_np_array)
            keypoints.add(image_filename)

    if points3d is not None:
        points3d = kapture.Points3d(points3d)

    # import (copy) image files.
    logger.info('import image files ...')
    filename_list = [f for _, _, f in kapture.flatten(images)]
    import_record_data_from_dir_auto(image_dir_path, kapture_dir_path,
                                     filename_list, images_import_method)

    # pack into kapture format
    imported_kapture = kapture.Kapture(sensors=cameras,
                                       records_camera=images,
                                       trajectories=trajectories,
                                       points3d=points3d,
                                       keypoints=keypoints,
                                       observations=observations)
    logger.info('writing imported data...')
    kapture_to_dir(kapture_dir_path, imported_kapture)
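
The view-list parsing above follows the Bundler text layout described in the comments: each 3-D point occupies three lines (position, color, view list), and every observation in the view list is a <camera> <key> <x> <y> quadruple whose x/y are measured from the image center. Below is a minimal, self-contained sketch of that per-point parsing; the sample values are made up for illustration only.

# one made-up three-line point block in the Bundler layout assumed above
point_block = [
    "1.0 2.0 3.0",                          # X Y Z position
    "128 64 32",                            # R G B color
    "2  0 5 -12.5 8.0  1 9 4.25 -3.5",      # N observations: <camera> <key> <x> <y> ...
]

position = [float(v) for v in point_block[0].split()]
position = [position[0], -position[1], -position[2]]  # same axis flip as in the importer
color = [float(v) for v in point_block[1].split()]

view_list = point_block[2].split()
observations_sketch = []
for j in range(int(view_list[0])):
    camera_id = int(view_list[1 + 4 * j + 0])
    keypoint_id = int(view_list[1 + 4 * j + 1])
    x = float(view_list[1 + 4 * j + 2])     # still relative to the image center here
    y = float(view_list[1 + 4 * j + 3])
    observations_sketch.append((camera_id, keypoint_id, x, y))

print(position + color, observations_sketch)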
Code example #10
0
def import_image_list(images_list_filenames: List[str],
                      images_dirpath: str,
                      kapture_path: str,
                      force_overwrite_existing: bool = False,
                      images_import_method: TransferAction = TransferAction.skip) -> None:
    """
    Imports the list of images to a kapture. This creates only images and cameras.

    :param images_list_filenames: list of text files containing image file names
    :param images_dirpath: path to images directory.
    :param kapture_path: path to kapture root directory.
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    """
    assert isinstance(images_list_filenames, list)
    os.makedirs(kapture_path, exist_ok=True)
    delete_existing_kapture_files(kapture_path, force_erase=force_overwrite_existing)

    cameras = kapture.Sensors()
    images = kapture.RecordsCamera()

    offset = 0
    logger.info('starting conversion...')
    for images_list_filename in images_list_filenames:
        logger.info(f'loading {images_list_filename}')
        with open(images_list_filename) as file:
            images_list = file.readlines()
            # remove end line char and empty lines
            images_list = [line.rstrip() for line in images_list if line != '\n']

            for i in range(0, len(images_list)):
                line = images_list[i].split()
                image_file_name = line[0]
                if len(line) > 1:
                    model = line[1]
                    model_params = line[2:]
                else:
                    model = kapture.CameraType.UNKNOWN_CAMERA.value
                    try:
                        # lazy open
                        with Image.open(path.join(images_dirpath, image_file_name)) as im:
                            width, height = im.size
                            model_params = [width, height]
                    except (OSError, PIL.UnidentifiedImageError):
                        # It is not a valid image: skip it
                        logger.info(f'Skipping invalid image file {image_file_name}')
                        continue

                camera_id = f'sensor{i + offset}'
                cameras[camera_id] = kapture.Camera(model, model_params)
                images[(i + offset, camera_id)] = image_file_name
            offset += len(images_list)

    # import (copy) image files.
    logger.info('import image files ...')
    filename_list = [f for _, _, f in kapture.flatten(images)]
    import_record_data_from_dir_auto(images_dirpath, kapture_path, filename_list, images_import_method)

    # pack into kapture format
    imported_kapture = kapture.Kapture(sensors=cameras, records_camera=images)
    logger.info('writing imported data...')
    kapture_to_dir(kapture_path, imported_kapture)
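
For reference, a hypothetical call to import_image_list. Every file name, directory and camera parameter below is a placeholder: each line of a list file is either a bare image file name (the camera model is then guessed from the image size) or an image file name followed by a kapture camera model and its parameters.

import os

os.makedirs('example', exist_ok=True)
with open('example/images_list.txt', 'w') as f:
    f.write('seq1/frame_000.jpg\n')                                     # model guessed from the image
    f.write('seq1/frame_001.jpg SIMPLE_PINHOLE 640 480 585 320 240\n')  # explicit model + w h f cx cy

# assuming import_image_list and TransferAction are in scope (defined/imported as above)
import_image_list(images_list_filenames=['example/images_list.txt'],
                  images_dirpath='example/images',
                  kapture_path='example/kapture',
                  force_overwrite_existing=True,
                  images_import_method=TransferAction.skip)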
Code example #11
0
File: import_opensfm.py  Project: zsqiang001/kapture
def import_opensfm(
        opensfm_root_dir: str,
        kapture_root_dir: str,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.copy) -> None:
    """
    Convert an openSfM structure to a kapture on disk. Also copy, move or link the image files if necessary.

    :param opensfm_root_dir: the openSfM top directory
    :param kapture_root_dir: top directory of the created kapture
    :param force_overwrite_existing: if true, will remove existing kapture data without prompting the user
    :param images_import_method: action to apply on images: link, copy, move or do nothing.
    """
    disable_tqdm = logger.getEffectiveLevel() != logging.INFO
    # load reconstruction
    opensfm_reconstruction_filepath = path.join(opensfm_root_dir,
                                                'reconstruction.json')
    with open(opensfm_reconstruction_filepath, 'rt') as f:
        opensfm_reconstruction = json.load(f)
    # remove the single list @ root
    opensfm_reconstruction = opensfm_reconstruction[0]

    # prepare space for output
    os.makedirs(kapture_root_dir, exist_ok=True)
    delete_existing_kapture_files(kapture_root_dir,
                                  force_erase=force_overwrite_existing)

    # import cameras
    kapture_sensors = kapture.Sensors()
    assert 'cameras' in opensfm_reconstruction
    for osfm_camera_id, osfm_camera in opensfm_reconstruction['cameras'].items():
        camera = import_camera(osfm_camera, name=osfm_camera_id)
        kapture_sensors[osfm_camera_id] = camera

    # import shots
    logger.info('importing images and trajectories ...')
    kapture_images = kapture.RecordsCamera()
    kapture_trajectories = kapture.Trajectories()
    opensfm_image_dir_path = path.join(opensfm_root_dir, 'images')
    assert 'shots' in opensfm_reconstruction
    # used later to retrieve the timestamp of an image
    image_timestamps, image_sensors = {}, {}
    for timestamp, (image_filename, shot) in enumerate(
            opensfm_reconstruction['shots'].items()):
        sensor_id = shot['camera']
        image_timestamps[image_filename] = timestamp
        image_sensors[image_filename] = sensor_id
        # in OpenSfm, (sensor, timestamp) is not unique.
        rotation_vector = shot['rotation']
        q = quaternion.from_rotation_vector(rotation_vector)
        translation = shot['translation']
        # capture_time = shot['capture_time'] # may be invalid
        # gps_position = shot['gps_position']
        kapture_images[timestamp, sensor_id] = image_filename
        kapture_trajectories[timestamp, sensor_id] = kapture.PoseTransform(r=q, t=translation)

    # copy image files
    filename_list = [f for _, _, f in kapture.flatten(kapture_images)]
    import_record_data_from_dir_auto(
        source_record_dirpath=opensfm_image_dir_path,
        destination_kapture_dirpath=kapture_root_dir,
        filename_list=filename_list,
        copy_strategy=images_import_method)

    # Imports Gnss
    kapture_gnss = _import_gnss(opensfm_root_dir, kapture_sensors,
                                image_sensors, image_timestamps, disable_tqdm)
    # Imports descriptors, keypoints and matches
    kapture_descriptors, kapture_keypoints, kapture_matches = _import_features_and_matches(
        opensfm_root_dir, kapture_root_dir, disable_tqdm)

    # import 3-D points
    if 'points' in opensfm_reconstruction:
        logger.info('importing points 3-D')
        opensfm_points = opensfm_reconstruction['points']
        points_data = []
        for point_id in sorted(opensfm_points):
            point_data = opensfm_points[point_id]
            point_data = point_data['coordinates'] + point_data['color']
            points_data.append(point_data)
        kapture_points = kapture.Points3d(points_data)
    else:
        kapture_points = None

    # saving kapture csv files
    logger.info('saving kapture files')
    kapture_data = kapture.Kapture(sensors=kapture_sensors,
                                   records_camera=kapture_images,
                                   records_gnss=kapture_gnss,
                                   trajectories=kapture_trajectories,
                                   keypoints=kapture_keypoints,
                                   descriptors=kapture_descriptors,
                                   matches=kapture_matches,
                                   points3d=kapture_points)
    kapture_to_dir(kapture_root_dir, kapture_data)
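
For clarity, the subset of reconstruction.json that the importer above actually reads can be summarised as the structure below. The field names come from the code above; the concrete values and the camera id are invented placeholders.

# minimal shape of reconstruction.json as consumed by import_opensfm (values are placeholders)
minimal_reconstruction = [{                  # the file holds a list; only element [0] is used
    'cameras': {
        'some_camera_id': {},                # each entry is handed to import_camera()
    },
    'shots': {
        'frame_000.jpg': {
            'camera': 'some_camera_id',
            'rotation': [0.0, 0.0, 0.0],     # axis-angle, converted with quaternion.from_rotation_vector
            'translation': [0.0, 0.0, 0.0],
        },
    },
    'points': {                              # optional; becomes kapture.Points3d
        '42': {'coordinates': [1.0, 2.0, 3.0], 'color': [255, 255, 255]},
    },
}]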
Code example #12
0
def import_12scenes(d12scenes_path: str,
                    kapture_dir_path: str,
                    force_overwrite_existing: bool = False,
                    images_import_method: TransferAction = TransferAction.skip,
                    partition: Optional[str] = None
                    ) -> None:
    """
    Imports RGB-D Dataset 12-Scenes dataset and save them as kapture.

    :param d12scenes_path: path to the 12scenes sequence root path
    :param kapture_dir_path: path to kapture top directory
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    :param partition: if specified, either 'mapping' or 'query'.
    """
    os.makedirs(kapture_dir_path, exist_ok=True)
    delete_existing_kapture_files(kapture_dir_path, force_erase=force_overwrite_existing)

    logger.info('loading all content ...')

    d7s_filename_re = re.compile(r'frame-(?P<frame_id>\d{6})\.(?P<suffix>\w*)\.(?P<ext>\w*)')

    # populate all relevant files
    d12images_path = os.path.join(d12scenes_path, 'data')
    d7s_filenames = (path_secure(path.relpath(path.join(dp, fn), d12images_path))
                     for dp, _, fs in os.walk(d12images_path) for fn in fs)

    logger.info('populating 12-scenes files ...')
    d7s_filenames = {filename: d7s_filename_re.search(filename).groupdict()
                     for filename in sorted(d7s_filenames)
                     if d7s_filename_re.search(filename)}

    # reorg as shot[seq, id] = {color: , depth: , pose: , ...}
    shots = {}
    for timestamp, (filename, file_attribs) in enumerate(d7s_filenames.items()):
        shot_id = int(file_attribs['frame_id'])
        shots.setdefault(shot_id, {})[file_attribs['suffix']] = filename

    # fake timestamps
    for timestamp, shot_id in enumerate(shots):
        shots[shot_id]['timestamp'] = timestamp

    # if given, filter partition
    if partition is not None:
        # read the authors split file
        partition_filepath = path.join(d12scenes_path, 'split.txt')
        if not path.isfile(partition_filepath):
            raise FileNotFoundError(f'partition file is missing: {partition_filepath}.')

        with open(partition_filepath, 'rt') as file:
            # note from dsac++; the first sequence is used for testing, everything else for training
            d7s_split_exp = r'^sequence(?P<sequence>\d+) \[frames=(?P<count>\d+)\]  \[start=(?P<start_frame>\d+) ;' \
                            r' end=(?P<end_frame>\d+)\]$'
            d7s_split_re = re.compile(d7s_split_exp)
            split_sequences = [re.match(d7s_split_re, line) for line in file.readlines()]
            if len(split_sequences) < 1 or not split_sequences[0]:
                raise ValueError('failed to parse split.txt file')
            test_split = (int(split_sequences[0].group('start_frame')), int(split_sequences[0].group('end_frame')))

            # filter out
            if partition == "query":
                shots = {frame: shot
                         for frame, shot in shots.items()
                         if test_split[0] <= frame <= test_split[1]
                         }
            elif partition == "mapping":
                shots = {frame: shot
                         for frame, shot in shots.items()
                         if frame < test_split[0] or frame > test_split[1]
                         }
            else:
                raise ValueError('invalid partition name')

    if len(shots) == 0:
        raise FileNotFoundError('no file found: make sure the path to 12scenes sequence is valid.')

    # eg. shots['000000'] =
    #       {
    #           'color': 'seq-01/frame-000000.color.jpg',
    #           'depth': 'seq-01/frame-000000.depth.png',
    #           'pose': 'seq-01/frame-000000.pose.txt',
    #           'timestamp': 0}

    # images + depth maps
    logger.info('populating image and depth maps files ...')
    snapshots = kapture.RecordsCamera()
    depth_maps = kapture.RecordsDepth()
    for shot in shots.values():
        snapshots[shot['timestamp'], RGB_SENSOR_ID] = shot['color']
        kapture_depth_map_filename = shot['depth'][:-len('.png')]  # kapture depth files are not png
        depth_maps[shot['timestamp'], DEPTH_SENSOR_ID] = kapture_depth_map_filename
        kapture_registered_depth_map_filename = shot['depth'][:-len('.png')] + '.reg'  # kapture depth files are not png
        depth_maps[shot['timestamp'], REG_DEPTH_SENSOR_ID] = kapture_registered_depth_map_filename

    # poses
    logger.info('import poses files ...')
    trajectories = kapture.Trajectories()
    for shot in shots.values():
        pose_filepath = path.join(d12images_path, shot['pose'])
        pose_mat = np.loadtxt(pose_filepath)  # camera-to-world, 4×4 matrix in homogeneous coordinates
        with open(pose_filepath, 'r') as file:
            if 'INF' in file.read():
                timestamp = shot['timestamp']
                image_name = shot['color']
                logger.debug(f'ts={timestamp}, name={image_name}: ignored inf pose')
                continue
        rotation_mat = pose_mat[0:3, 0:3]
        position_vec = pose_mat[0:3, 3]
        rotation_quat = quaternion.from_rotation_matrix(rotation_mat)
        pose_world_from_cam = kapture.PoseTransform(r=rotation_quat, t=position_vec)
        pose_cam_from_world = pose_world_from_cam.inverse()
        trajectories[shot['timestamp'], RGBD_SENSOR_ID] = pose_cam_from_world

    # sensors: read info.txt
    info_filepath = path.join(d12scenes_path, 'info.txt')
    if not path.isfile(info_filepath):
        raise FileNotFoundError(f'info file is missing: {info_filepath}.')

    with open(info_filepath, 'rt') as file:
        info_dict = {}
        for line in file.readlines():
            line_splits = line.rstrip().split(' = ')
            info_dict[line_splits[0]] = line_splits[1]

    sensors = kapture.Sensors()
    camera_type = kapture.CameraType.PINHOLE
    assert 'm_calibrationColorIntrinsic' in info_dict
    assert 'm_colorWidth' in info_dict
    assert 'm_colorHeight' in info_dict
    rgb_intrinsics = [float(v) for v in info_dict['m_calibrationColorIntrinsic'].split(' ')]
    # w, h, fx, fy, cx, cy
    rgb_camera_params = [int(info_dict['m_colorWidth']), int(info_dict['m_colorHeight']),
                         rgb_intrinsics[0], rgb_intrinsics[5], rgb_intrinsics[2], rgb_intrinsics[6]]
    sensors[RGB_SENSOR_ID] = kapture.Camera(
        name=RGB_SENSOR_ID,
        camera_type=camera_type,
        camera_params=rgb_camera_params
    )

    assert 'm_calibrationDepthIntrinsic' in info_dict
    assert 'm_depthWidth' in info_dict
    assert 'm_depthHeight' in info_dict
    depth_intrinsics = [float(v) for v in info_dict['m_calibrationDepthIntrinsic'].split(' ')]
    # w, h, fx, fy, cx, cy
    depth_camera_params = [int(info_dict['m_depthWidth']), int(info_dict['m_depthHeight']),
                           depth_intrinsics[0], depth_intrinsics[5], depth_intrinsics[2], depth_intrinsics[6]]
    sensors[DEPTH_SENSOR_ID] = kapture.Camera(
        name=DEPTH_SENSOR_ID,
        camera_type=camera_type,
        camera_params=depth_camera_params,
        sensor_type='depth'
    )

    sensors[REG_DEPTH_SENSOR_ID] = kapture.Camera(
        name=REG_DEPTH_SENSOR_ID,
        camera_type=camera_type,
        camera_params=rgb_camera_params,
        sensor_type='depth'
    )

    # bind camera and depth sensor into a rig
    logger.info('building rig with camera and depth sensor ...')
    rigs = kapture.Rigs()
    rigs[RGBD_SENSOR_ID, RGB_SENSOR_ID] = kapture.PoseTransform()
    rigs[RGBD_SENSOR_ID, DEPTH_SENSOR_ID] = kapture.PoseTransform()
    rigs[RGBD_SENSOR_ID, REG_DEPTH_SENSOR_ID] = kapture.PoseTransform()

    # import (copy) image files.
    logger.info('copying image files ...')
    image_filenames = [f for _, _, f in kapture.flatten(snapshots)]
    import_record_data_from_dir_auto(d12images_path, kapture_dir_path, image_filenames, images_import_method)

    # import (copy) depth map files.
    logger.info('converting depth files ...')
    depth_map_filenames = kapture.io.records.records_to_filepaths(depth_maps, kapture_dir_path)
    hide_progress = logger.getEffectiveLevel() > logging.INFO
    for depth_map_filename, depth_map_filepath_kapture in tqdm(depth_map_filenames.items(), disable=hide_progress):
        if '.reg' in depth_map_filename:
            continue
        depth_map_filepath_12scenes = path.join(d12images_path, depth_map_filename + '.png')
        depth_map = np.array(Image.open(depth_map_filepath_12scenes))
        # depth maps is in mm in 12scenes, convert it to meters
        depth_map = depth_map.astype(np.float32) * 1.0e-3
        kapture.io.records.records_depth_to_file(depth_map_filepath_kapture, depth_map)
        # register depth to rgb
        reg_depth_map = register_depth(get_K(camera_type, depth_camera_params), get_K(camera_type, rgb_camera_params),
                                       np.eye(4), depth_map, rgb_camera_params[0], rgb_camera_params[1])
        kapture.io.records.records_depth_to_file(depth_map_filepath_kapture + '.reg', reg_depth_map)

    # pack into kapture format
    imported_kapture = kapture.Kapture(
        records_camera=snapshots,
        records_depth=depth_maps,
        rigs=rigs,
        trajectories=trajectories,
        sensors=sensors)

    logger.info('writing imported data ...')
    kapture_to_dir(kapture_dir_path, imported_kapture)
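
A hedged usage sketch: importing one 12-Scenes sequence and keeping only the mapping split. The paths are placeholders and assume a sequence directory laid out with data/, info.txt and split.txt as read above; depth maps are converted from millimeters to meters and registered to the RGB camera automatically.

# hypothetical call; assuming import_12scenes and TransferAction are in scope as above
import_12scenes(d12scenes_path='datasets/12scenes/apt1/kitchen',
                kapture_dir_path='kapture/12scenes/apt1/kitchen/mapping',
                force_overwrite_existing=True,
                images_import_method=TransferAction.skip,
                partition='mapping')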
Code example #13
0
def import_nvm(nvm_file_path: str,
               nvm_images_path: str,
               kapture_path: str,
               filter_list_path: Optional[str],
               ignore_trajectories: bool,
               add_reconstruction: bool,
               force_overwrite_existing: bool = False,
               images_import_method: TransferAction = TransferAction.skip) -> None:
    """
    Imports nvm data to kapture format.

    :param nvm_file_path: path to nvm file
    :param nvm_images_path: path to NVM images directory.
    :param kapture_path: path to kapture root directory.
    :param filter_list_path: path to the optional file containing a list of images to process
    :param ignore_trajectories: if True, will not create trajectories
    :param add_reconstruction: if True, will add observations, keypoints and 3D points.
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    """

    # TODO implement [optional calibration]
    # doc : http://ccwu.me/vsfm/doc.html#nvm
    os.makedirs(kapture_path, exist_ok=True)
    delete_existing_kapture_files(kapture_path, force_erase=force_overwrite_existing)

    logger.info('loading all content...')
    # if there is a filter list, parse it
    # keep it as Set[str] to easily find images
    if filter_list_path:
        with open(filter_list_path) as file:
            file_content = file.readlines()
        # remove end line char and empty lines
        filter_list = {line.rstrip() for line in file_content if line != '\n'}
    else:
        filter_list = None

    # now do the nvm
    with open(nvm_file_path) as file:
        nvm_content = file.readlines()
    # remove end line char and empty lines
    nvm_content = [line.rstrip() for line in nvm_content if line != '\n']
    # only NVM_V3 is supported
    assert nvm_content[0] == "NVM_V3"
    # offset represents the line pointer
    offset = 1
    # camera_id_offset keeps tracks of used camera_id in case of multiple reconstructed models
    camera_id_offset = 0
    # point_id_offset keeps tracks of used point_id in case of multiple reconstructed models
    point_id_offset = 0

    cameras = kapture.Sensors()
    images = kapture.RecordsCamera()
    trajectories = kapture.Trajectories() if not ignore_trajectories else None
    observations = kapture.Observations() if add_reconstruction else None
    keypoints = kapture.Keypoints('sift', np.float32, 2) if add_reconstruction else None
    points3d = [] if add_reconstruction else None

    # break if number of cameras == 0 or reached end of file
    while True:
        # <Model1> <Model2> ...
        # Each reconstructed <model> contains the following
        # <Number of cameras> <List of cameras>
        # <Number of 3D points> <List of points>
        # In practice,
        # <Number of cameras>
        # <List of cameras>, one per line
        # <Number of 3D points>
        # <List of points>, one per line
        number_of_cameras = int(nvm_content[offset])
        offset += 1
        if number_of_cameras == 0:  # a line with <0> signifies the end of models
            break

        logger.debug('importing model cameras...')
        # parse all cameras for current model
        image_idx_to_image_name = parse_cameras(number_of_cameras,
                                                nvm_content,
                                                offset,
                                                camera_id_offset,
                                                filter_list,
                                                nvm_images_path,
                                                cameras,
                                                images,
                                                trajectories)
        offset += number_of_cameras
        camera_id_offset += number_of_cameras

        # parse all points3d
        number_of_points = int(nvm_content[offset])
        offset += 1
        if points3d is not None and number_of_points > 0:
            assert keypoints is not None
            assert observations is not None
            logger.debug('importing model points...')
            parse_points3d(kapture_path,
                           number_of_points,
                           nvm_content,
                           offset,
                           point_id_offset,
                           image_idx_to_image_name,
                           filter_list,
                           points3d,
                           keypoints,
                           observations)

        point_id_offset += number_of_points
        offset += number_of_points
        # reached end of file?
        if offset >= len(nvm_content):
            break

    # do not export values if none were found.
    if points3d is not None:
        points3d = kapture.Points3d(points3d)

    # import (copy) image files.
    logger.info('import image files ...')
    images_filenames = [f for _, _, f in kapture.flatten(images)]
    import_record_data_from_dir_auto(nvm_images_path, kapture_path, images_filenames, images_import_method)

    # pack into kapture format
    imported_kapture = kapture.Kapture(sensors=cameras, records_camera=images, trajectories=trajectories,
                                       points3d=points3d, keypoints=keypoints, observations=observations)
    logger.info('writing imported data...')
    kapture_to_dir(kapture_path, imported_kapture)
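
The parser above steps through the NVM_V3 text layout sketched in its comments. A tiny made-up skeleton (two cameras, one 3-D point, terminating 0) shows what each offset increment skips over; every value is a placeholder.

# made-up NVM_V3 skeleton matching the parsing loop above (empty lines are stripped before parsing)
nvm_skeleton = [
    "NVM_V3",                                               # header, checked first
    "2",                                                    # <Number of cameras>    -> offset += 1
    "img_000.jpg 800 0.1 0.2 0.3 0.4 1.0 2.0 3.0 0 0",      # one camera per line    -> offset += number_of_cameras
    "img_001.jpg 800 0.5 0.6 0.7 0.8 4.0 5.0 6.0 0 0",
    "1",                                                    # <Number of 3D points>  -> offset += 1
    "1.0 2.0 3.0 128 64 32 2 0 0 -12.5 8.0 1 0 4.25 -3.5",  # XYZ RGB + view list    -> offset += number_of_points
    "0",                                                    # a model with 0 cameras ends the file
]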
Code example #14
0
def import_idl_dataset_cvpr17(idl_dataset_path: str,
                              gt_path: Union[str, None],
                              kapture_path: str,
                              force_overwrite_existing: bool = False,
                              images_import_method: TransferAction = TransferAction.skip) -> None:
    """
    Reads the IDL dataset and copy it to a kapture.

    :param idl_dataset_path: path to the IDL dataset
    :param gt_path: ground truth data path
    :param kapture_path: path to the kapture top directory to create
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    """

    os.makedirs(kapture_path, exist_ok=True)
    delete_existing_kapture_files(kapture_path, force_erase=force_overwrite_existing)

    cameras = kapture.Sensors()
    images = kapture.RecordsCamera()
    trajectories = kapture.Trajectories()

    file_list = [os.path.relpath(os.path.join(dirpath, filename), idl_dataset_path)
                 for dirpath, dirs, filenames in os.walk(idl_dataset_path)
                 for filename in filenames]
    file_list = sorted(file_list)

    logger.info('starting conversion...')
    for n, filename in enumerate(file_list):
        # test if file is a valid image
        try:
            # lazy load
            with Image.open(path.join(idl_dataset_path, filename)) as im:
                width, height = im.size
                model_params = [width, height]
        except Exception:
            # not a valid image: skip it
            logger.info(f'Skipping invalid image file {filename}')
            continue

        camera_id = f'sensor{n}'
        images[(n, camera_id)] = path_secure(filename)  # don't forget windows
        model = kapture.CameraType.UNKNOWN_CAMERA
        if gt_path is not None:
            # replace image extension with .camera
            file_gt_path = os.path.splitext(os.path.join(gt_path, filename))[0] + ".camera"

            if os.path.isfile(file_gt_path):
                with open(file_gt_path) as fin:
                    lines = fin.readlines()
                    lines = (line.rstrip().split() for line in lines)  # split fields
                    lines = list(lines)
                fx = float(lines[0][0])
                cx = float(lines[0][2])
                fy = float(lines[1][1])
                cy = float(lines[1][2])
                width_file = float(lines[8][0])
                height_file = float(lines[8][1])
                assert (width_file == width)
                assert (height_file == height)
                model = kapture.CameraType.PINHOLE
                model_params = [width, height, fx, fy, cx, cy]

                rotation_matrix = [[float(v) for v in line] for line in lines[4:7]]
                rotation = quaternion.from_rotation_matrix(rotation_matrix)
                center_of_projection = [float(v) for v in lines[7]]
                pose = kapture.PoseTransform(rotation, center_of_projection).inverse()
                trajectories[(n, camera_id)] = pose
        cameras[camera_id] = kapture.Camera(model, model_params)

    # if no trajectory were added, no need to create the file
    if not trajectories:
        trajectories = None

    # import (copy) image files.
    logger.info('import image files ...')
    filename_list = [f for _, _, f in kapture.flatten(images)]
    import_record_data_from_dir_auto(idl_dataset_path, kapture_path, filename_list, images_import_method)

    # pack into kapture format
    imported_kapture = kapture.Kapture(sensors=cameras, records_camera=images, trajectories=trajectories)
    logger.info('writing imported data...')
    kapture_to_dir(kapture_path, imported_kapture)
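
The index accesses above imply the following layout for a .camera ground-truth file: line 0 carries fx and cx, line 1 carries fy and cy, lines 4 to 6 hold the 3x3 rotation matrix, line 7 the center of projection and line 8 the image size; lines 2 and 3 are not read by the importer. A standalone sketch with invented numbers parses one such file the same way.

# invented .camera content laid out as the parser above expects it
camera_file_text = """585 0 320
0 585 240
0 0 1
0
1 0 0
0 1 0
0 0 1
0.5 1.5 2.5
640 480
"""

lines = [line.split() for line in camera_file_text.splitlines()]
fx, cx = float(lines[0][0]), float(lines[0][2])
fy, cy = float(lines[1][1]), float(lines[1][2])
rotation_matrix = [[float(v) for v in row] for row in lines[4:7]]
center_of_projection = [float(v) for v in lines[7]]
width, height = float(lines[8][0]), float(lines[8][1])
print(fx, fy, cx, cy, width, height)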
Code example #15
0
def import_robotcar_seasons(
        robotcar_path: str,
        kapture_path: str,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.skip,
        skip_reconstruction: bool = False,
        rig_collapse: bool = False,
        use_colmap_intrinsics: bool = False,
        import_v1: bool = False) -> None:
    """
    Reads the RobotCar Seasons data and creates several kaptures with training and query data.

    :param robotcar_path: path to the robotcar top directory
    :param kapture_path: path to the kapture top directory
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    :param skip_reconstruction: if True, will skip the reconstruction part from the training data
    :param rig_collapse: if True, will collapse the rig
    :param use_colmap_intrinsics: if True, keep the intrinsics of the colmap reconstructions
                                  instead of the ones found in the intrinsics text files
    :param import_v1: if True, do not read robotcar_v2_train.txt (the v2 training split is not built)
    """

    kapture_path = path.join(kapture_path, "base")
    os.makedirs(kapture_path, exist_ok=True)

    cameras = import_robotcar_cameras(path.join(robotcar_path, 'intrinsics'))
    rigs = import_robotcar_rig(path.join(robotcar_path, 'extrinsics'))

    logger.info("Importing test data")
    # Test data
    image_pattern = re.compile(
        r'(?P<condition>.+)/(?P<camera>\w+)/(?P<timestamp>\d+)\.jpg')
    queries_path = path.join(robotcar_path, '3D-models', 'individual',
                             'queries_per_location')
    kapture_imported_query = {}
    for root, dirs, files in os.walk(queries_path):
        for query_file in files:
            records_camera = kapture.RecordsCamera()
            # Get list of query images
            with open(path.join(queries_path, query_file)) as f:
                for line in f:
                    matches = image_pattern.match(line)
                    image_path = line.strip()
                    if not matches:
                        logger.warning(f"Error matching line in {image_path}")
                        continue
                    matches = matches.groupdict()
                    timestamp = int(matches['timestamp'])
                    camera = str(matches['camera'])
                    condition = str(matches['condition'])
                    records_camera[timestamp, camera] = image_path

                (query_name, _) = query_file.split('.')
                kapture_test = kapture.Kapture(sensors=cameras,
                                               rigs=rigs,
                                               records_camera=records_camera)
                kapture_imported_query[int(query_name.split('_')[-1])] = kapture_test

    # Reference map data
    logger.info("Importing reference map")
    colmap_reconstructions_path = path.join(robotcar_path, '3D-models',
                                            'individual',
                                            'colmap_reconstructions')
    kapture_imported_mapping = {}
    for root, dirs, files in os.walk(colmap_reconstructions_path):
        for colmap_reconstruction in dirs:
            (loc_id, _) = colmap_reconstruction.split('_')
            kapture_reconstruction_dir = path.join(kapture_path,
                                                   f"{int(loc_id):02d}",
                                                   "mapping")
            delete_existing_kapture_files(kapture_reconstruction_dir,
                                          force_erase=force_overwrite_existing)
            logger.info(f'Converting reconstruction {loc_id} to kapture  ...')
            kapture_reconstruction_data = import_robotcar_colmap_location(
                robotcar_path,
                path.join(colmap_reconstructions_path, colmap_reconstruction),
                kapture_reconstruction_dir, rigs, skip_reconstruction)
            # replace intrinsics with the ones found in the text files
            if not use_colmap_intrinsics:
                kapture_reconstruction_data.sensors = cameras
            kapture_imported_mapping[int(loc_id)] = kapture_reconstruction_data

    kapture_imported_training = {}  # stores kapture for each submap; stays empty when import_v1 is True
    if not import_v1:
        queries_per_location = {
            image_name: (ts, cam_id, loc_id)
            for loc_id, kdata_test in kapture_imported_query.items()
            for ts, cam_id, image_name in kapture.flatten(kdata_test.records_camera)
        }
        # read robotcar_v2_train.txt
        v2_train_data = read_robotcar_v2_train(robotcar_path)
        for image_name, pose in v2_train_data.items():
            ts, cam_id, loc_id = queries_per_location[image_name]
            assert cam_id == 'rear'
            # create kapture object for submap if it doesn't exist
            if loc_id not in kapture_imported_training:
                kapture_loc_id = kapture.Kapture(sensors=cameras, rigs=rigs)
                kapture_loc_id.records_camera = kapture.RecordsCamera()
                kapture_loc_id.trajectories = kapture.Trajectories()
                kapture_imported_training[loc_id] = kapture_loc_id
            kapture_imported_training[loc_id].records_camera[ts, cam_id] = image_name
            kapture_imported_training[loc_id].trajectories[ts, cam_id] = pose
            matches = image_pattern.match(image_name)
            if not matches:
                logger.warning(f"Error matching line in {image_name}")
                continue
            matches = matches.groupdict()
            condition = str(matches['condition'])
            timestamp = str(matches['timestamp'])
            camera = str(matches['camera'])
            # add left and right images to records_camera
            left_image_name = condition + '/' + 'left' + '/' + timestamp + '.jpg'
            right_image_name = condition + '/' + 'right' + '/' + timestamp + '.jpg'
            kapture_imported_training[loc_id].records_camera[ts, 'left'] = left_image_name
            kapture_imported_training[loc_id].records_camera[ts, 'right'] = right_image_name

            # remove entries from query
            del kapture_imported_query[loc_id].records_camera[ts][cam_id]
            del kapture_imported_query[loc_id].records_camera[ts]['left']
            del kapture_imported_query[loc_id].records_camera[ts]['right']
            del kapture_imported_query[loc_id].records_camera[ts]

        # all remaining query images are kept; reading robotcar_v2_test.txt is not necessary

    # apply rig collapse
    if rig_collapse:
        logger.info('replacing camera poses with rig poses.')
        for kdata_mapping in kapture_imported_mapping.values():
            kapture.rigs_recover_inplace(kdata_mapping.trajectories, rigs,
                                         'rear')
        for kdata_training in kapture_imported_training.values():
            kapture.rigs_recover_inplace(kdata_training.trajectories, rigs,
                                         'rear')

    # IO operations
    robotcar_image_path = path.join(robotcar_path, "images")
    for loc_id, kdata_query in kapture_imported_query.items():
        loc_id_str = f"{loc_id:02d}"
        logger.info(f'writing test data: {loc_id_str}')
        kapture_test_dir = path.join(kapture_path, loc_id_str, "query")
        delete_existing_kapture_files(kapture_test_dir,
                                      force_erase=force_overwrite_existing)
        if not kdata_query.records_camera:  # all images were removed
            continue
        kapture_to_dir(kapture_test_dir, kdata_query)
        query_images = [
            f for _, _, f in kapture.flatten(kdata_query.records_camera)
        ]
        import_record_data_from_dir_auto(robotcar_image_path, kapture_test_dir,
                                         query_images, images_import_method)

    for loc_id, kdata_mapping in kapture_imported_mapping.items():
        loc_id_str = f"{loc_id:02d}"
        logger.info(f'writing mapping data: {loc_id_str}')
        kapture_reconstruction_dir = path.join(kapture_path, f"{loc_id:02d}",
                                               "mapping")
        delete_existing_kapture_files(kapture_reconstruction_dir,
                                      force_erase=force_overwrite_existing)
        kapture_to_dir(kapture_reconstruction_dir, kdata_mapping)
        mapping_images = [
            f for _, _, f in kapture.flatten(kdata_mapping.records_camera)
        ]
        import_record_data_from_dir_auto(robotcar_image_path,
                                         kapture_reconstruction_dir,
                                         mapping_images, images_import_method)

    for loc_id, kdata_training in kapture_imported_training.items():
        loc_id_str = f"{loc_id:02d}"
        logger.info(f'writing training data: {loc_id_str}')
        kapture_training_dir = path.join(kapture_path, f"{loc_id:02d}",
                                         "training")
        delete_existing_kapture_files(kapture_training_dir,
                                      force_erase=force_overwrite_existing)
        kapture_to_dir(kapture_training_dir, kdata_training)
        mapping_images = [
            f for _, _, f in kapture.flatten(kdata_training.records_camera)
        ]
        import_record_data_from_dir_auto(robotcar_image_path,
                                         kapture_training_dir, mapping_images,
                                         images_import_method)
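
A hedged invocation sketch: the path below is a placeholder for a RobotCar Seasons download containing the intrinsics/, extrinsics/, 3D-models/ and images/ directories plus robotcar_v2_train.txt that the importer reads.

# hypothetical call; assuming import_robotcar_seasons and TransferAction are in scope as above
import_robotcar_seasons(robotcar_path='datasets/RobotCar-Seasons',
                        kapture_path='kapture/robotcar',
                        force_overwrite_existing=True,
                        images_import_method=TransferAction.skip,
                        skip_reconstruction=False,
                        rig_collapse=True)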
Code example #16
0
def import_virtual_gallery(input_root_path: str,
                           configuration: str,
                           light_range: List[int],
                           loop_range: List[int],
                           camera_range: List[int],
                           occlusion_range: List[int],
                           as_rig: bool,
                           images_import_method: TransferAction,
                           kapture_path: str,
                           force_overwrite_existing: bool = False) -> None:
    """
    Creates a kapture with a virtual gallery.

    :param input_root_path: root path of virtual gallery
    :param configuration: training, testing or all (both)
    :param light_range: list of lights to include
    :param loop_range: list of training loops to include
    :param camera_range: list of training cameras to include
    :param occlusion_range: list of testing occlusion levels to include
    :param as_rig: in training trajectories, writes the position of the rig instead of individual cameras
    :param images_import_method: choose how to import actual image files.
    :param kapture_path: path to kapture top directory
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    """
    # Check for existing files
    os.makedirs(kapture_path, exist_ok=True)
    delete_existing_kapture_files(kapture_path, force_overwrite_existing)

    offset = 0
    cameras = kapture.Sensors()
    images = kapture.RecordsCamera()
    trajectories = kapture.Trajectories()
    rigs = kapture.Rigs()

    # Process all training data
    if configuration == "training" or configuration == "all":
        logger.info("Reading training files")
        camera_range_set = set(camera_range)
        training_intrinsics = import_training_intrinsics(input_root_path, light_range, loop_range, camera_range_set)
        training_extrinsics = import_training_extrinsics(input_root_path, light_range, loop_range, camera_range_set)

        convert_training_intrinsics(training_intrinsics, cameras)
        convert_training_extrinsics(offset, training_extrinsics, images, trajectories, as_rig)
        rigs.update(training_rig_config)

        offset += len(training_extrinsics)
    # Process all testing data
    if configuration == "testing" or configuration == "all":
        logger.info("Reading testing files")
        testing_intrinsics = import_testing_intrinsics(input_root_path, light_range, occlusion_range)
        testing_extrinsics = import_testing_extrinsics(input_root_path, light_range, occlusion_range)

        convert_testing_intrinsics(testing_intrinsics, cameras)
        convert_testing_extrinsics(offset, testing_extrinsics, images, trajectories)

        offset += len(testing_extrinsics)

    logger.info("Writing imported data to disk")
    kapture_data = kapture.Kapture(sensors=cameras, records_camera=images, trajectories=trajectories, rigs=rigs or None)
    # import images
    image_list = [name for _, _, name in kapture.flatten(kapture_data.records_camera)]
    import_record_data_from_dir_auto(input_root_path, kapture_path, image_list, images_import_method)
    kapture_to_dir(kapture_path, kapture_data)
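
A hedged usage sketch for the virtual gallery importer; the root path and the light/loop/camera/occlusion ranges below are placeholders, not a recommended selection.

# hypothetical call; assuming import_virtual_gallery and TransferAction are in scope as above
import_virtual_gallery(input_root_path='datasets/virtual_gallery',
                       configuration='all',
                       light_range=[1, 2],
                       loop_range=[1],
                       camera_range=list(range(6)),
                       occlusion_range=[1],
                       as_rig=False,
                       images_import_method=TransferAction.copy,
                       kapture_path='kapture/virtual_gallery',
                       force_overwrite_existing=True)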
Code example #17
0
def import_7scenes(
        d7scenes_path: str,
        kapture_dir_path: str,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.skip) -> None:
    """
    Imports RGB-D Dataset 7-Scenes dataset and save them as kapture.

    :param d7scenes_path: path to the 7scenes sequence root path
    :param kapture_dir_path: path to kapture top directory
    :param force_overwrite_existing: Silently overwrite kapture files if already exists.
    :param images_import_method: choose how to import actual image files.
    """
    os.makedirs(kapture_dir_path, exist_ok=True)
    delete_existing_kapture_files(kapture_dir_path,
                                  force_erase=force_overwrite_existing)

    logger.info('loading all content ...')
    POSE_SUFFIX = 'pose'
    RGB_SUFFIX = 'color'
    DEPTH_SUFFIX = 'depth'
    CAMERA_ID = 'kinect'
    d7s_filename_re = re.compile(
        r'frame-(?P<timestamp>\d{6})\.(?P<suffix>\w*)\.(?P<ext>\w*)')

    # populate
    d7s_filenames = (path.basename(path.join(dp, fn))
                     for dp, _, fs in os.walk(d7scenes_path) for fn in fs)
    d7s_filenames = {
        filename: d7s_filename_re.match(filename).groupdict()
        for filename in d7s_filenames if d7s_filename_re.match(filename)
    }
    # d7s_filenames -> timestamp, suffix, ext
    if not d7s_filenames:
        raise ValueError(
            'no pose file found: make sure the path to 7scenes sequence is valid.'
        )

    # images
    logger.info('populating image files ...')
    d7s_filenames_images = ((int(v['timestamp']), filename)
                            for filename, v in d7s_filenames.items()
                            if v['suffix'] == RGB_SUFFIX)
    snapshots = kapture.RecordsCamera()
    for timestamp, image_filename in sorted(d7s_filenames_images):
        snapshots[timestamp, CAMERA_ID] = image_filename

    # poses
    logger.info('import poses files ...')
    d7s_filenames_poses = ((int(v['timestamp']), filename)
                           for filename, v in d7s_filenames.items()
                           if v['suffix'] == POSE_SUFFIX)
    trajectories = kapture.Trajectories()
    for timestamp, pose_filename in d7s_filenames_poses:
        pose_filepath = path.join(d7scenes_path, pose_filename)
        pose_mat = np.loadtxt(pose_filepath)  # camera-to-world, 4×4 matrix in homogeneous coordinates
        rotation_mat = pose_mat[0:3, 0:3]
        position_vec = pose_mat[0:3, 3]
        rotation_quat = quaternion.from_rotation_matrix(rotation_mat)
        pose_world_from_cam = kapture.PoseTransform(r=rotation_quat,
                                                    t=position_vec)
        pose_cam_from_world = pose_world_from_cam.inverse()
        trajectories[timestamp, CAMERA_ID] = pose_cam_from_world

    # sensors
    """
    From authors: The RGB and depth camera have not been calibrated and we can’t provide calibration parameters at the 
    moment. The recorded frames correspond to the raw, uncalibrated camera images. In the KinectFusion pipeline we used 
    the following default intrinsics for the depth camera: Principle point (320,240), Focal length (585,585).
    """
    sensors = kapture.Sensors()
    sensors[CAMERA_ID] = kapture.Camera(
        name='kinect',
        camera_type=kapture.CameraType.SIMPLE_PINHOLE,
        camera_params=[640, 480, 585, 320, 240]  # w, h, f, cx, cy
    )

    # import (copy) image files.
    logger.info('copying image files ...')
    image_filenames = [f for _, _, f in kapture.flatten(snapshots)]
    import_record_data_from_dir_auto(d7scenes_path, kapture_dir_path,
                                     image_filenames, images_import_method)

    # pack into kapture format
    imported_kapture = kapture.Kapture(records_camera=snapshots,
                                       trajectories=trajectories,
                                       sensors=sensors)

    logger.info('writing imported data ...')
    kapture_to_dir(kapture_dir_path, imported_kapture)
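
A hedged usage sketch: importing a single 7-Scenes sequence directory containing the frame-XXXXXX.color/.depth/.pose files matched above; the paths are placeholders.

# hypothetical call; assuming import_7scenes and TransferAction are in scope as above
import_7scenes(d7scenes_path='datasets/7scenes/chess/seq-01',
               kapture_dir_path='kapture/7scenes/chess/seq-01',
               force_overwrite_existing=True,
               images_import_method=TransferAction.skip)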