Example #1
 def test_rig_remove_inplace(self):
     trajectories = deepcopy(self._trajectories_rigs)
     rigs = deepcopy(self._rigs)
     kapture.rigs_remove_inplace(trajectories, rigs)
     self.assertTrue(
         equal_trajectories(trajectories, self._trajectories_cams))
     self.assertTrue(equal_rigs(rigs, self._rigs))
Example #2
 def test_rig_remove_inplace_consistency(self):
     # compare inplace and not inplace
     trajectories_inplace = deepcopy(self._trajectories_rigs)
     rigs_inplace = deepcopy(self._rigs)
     kapture.rigs_remove_inplace(trajectories_inplace, rigs_inplace)
     trajectories_not_inplace = kapture.rigs_remove(self._trajectories_rigs, self._rigs)
     self.assertTrue(equal_trajectories(trajectories_inplace, trajectories_not_inplace))
     # make sure rigs are untouched.
     self.assertTrue(equal_rigs(rigs_inplace, self._rigs))
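Both tests exercise kapture.rigs_remove_inplace, which rewrites a trajectory keyed on rig ids into one keyed on the individual camera ids by composing each rig pose with the rig calibration, leaving the rigs themselves untouched. Below is a minimal sketch of that behavior; the ids 'rig0'/'cam0' are made up, and it assumes kapture.PoseTransform defaults to the identity pose:

import kapture
from kapture import PoseTransform

# a rig holding a single camera at the rig origin (hypothetical ids)
rigs = kapture.Rigs()
rigs['rig0', 'cam0'] = PoseTransform()

# one timestamped pose, expressed on the rig
trajectories = kapture.Trajectories()
trajectories[0, 'rig0'] = PoseTransform(t=[1.0, 0.0, 0.0])

kapture.rigs_remove_inplace(trajectories, rigs)

# the rig entry is replaced by a per-camera entry; with an identity
# calibration the camera pose equals the former rig pose
assert (0, 'cam0') in trajectories
assert (0, 'rig0') not in trajectories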
def compute_distance_pairs(mapping_path: str, query_path: Optional[str],
                           output_path: str, topk: int, block_size: int,
                           min_distance: float, max_distance: float,
                           max_angle: float, keep_rejected: bool):
    """
    compute image pairs from distance, and write the result in a text file
    """
    skip_heavy = [
        kapture.RecordsLidar, kapture.RecordsWifi, kapture.Keypoints,
        kapture.Descriptors, kapture.GlobalFeatures, kapture.Matches,
        kapture.Points3d, kapture.Observations
    ]

    logger.info(f'compute_distance_pairs. loading mapping: {mapping_path}')
    kdata = kapture_from_dir(mapping_path, skip_list=skip_heavy)
    assert kdata.sensors is not None
    assert kdata.records_camera is not None
    assert kdata.trajectories is not None

    if query_path is None or mapping_path == query_path:
        logger.info('computing mapping pairs from distance...')
        kdata_query = None
    else:
        logger.info('computing query pairs from distance...')
        kdata_query = kapture_from_dir(query_path, skip_list=skip_heavy)
        assert kdata_query.sensors is not None
        assert kdata_query.records_camera is not None
        assert kdata_query.trajectories is not None

    os.umask(0o002)  # make the output files group-writable
    p = pathlib.Path(output_path)
    os.makedirs(str(p.parent.resolve()), exist_ok=True)

    with open(output_path, 'w') as fid:
        if kdata_query is None:
            kdata_query = kdata
        if kdata_query.rigs is not None:
            assert kdata_query.trajectories is not None  # for ide
            kapture.rigs_remove_inplace(kdata_query.trajectories,
                                        kdata_query.rigs)
        records_camera_list = sorted(kapture.flatten(kdata_query.records_camera),
                                     key=lambda x: x[2])
        number_of_iterations = math.ceil(len(records_camera_list) / block_size)
        table_to_file(fid, [], header='# query_image, map_image, score')
        for i in tqdm(range(number_of_iterations),
                      disable=logging.getLogger().level >= logging.CRITICAL):
            sliced_records = kapture.RecordsCamera()
            block = records_camera_list[i * block_size:(i + 1) * block_size]
            for ts, sensor_id, img_name in block:
                # images without a pose cannot be paired by distance
                if (ts, sensor_id) not in kdata_query.trajectories:
                    continue
                sliced_records[(ts, sensor_id)] = img_name
            kdata_slice_query = kapture.Kapture(
                sensors=kdata_query.sensors,
                records_camera=sliced_records,
                trajectories=kdata_query.trajectories)
            image_pairs = get_pairs_distance(kdata, kdata_slice_query, topk,
                                             min_distance, max_distance,
                                             max_angle, keep_rejected)
            table_to_file(fid, image_pairs)
    logger.info('all done')
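A hypothetical invocation of the function above (every path and threshold below is made up for illustration; the snippet itself relies on the module-level imports of its source file: os, math, pathlib, logging, tqdm, kapture, kapture_from_dir, table_to_file, get_pairs_distance):

compute_distance_pairs(mapping_path='kapture_dataset/mapping',
                       query_path='kapture_dataset/query',
                       output_path='pairs/pairs_distance.txt',
                       topk=20,
                       block_size=5000,
                       min_distance=0.0,
                       max_distance=25.0,
                       max_angle=45.0,
                       keep_rejected=False)

Each line of the resulting text file pairs a query image with a map image and a score, matching the '# query_image, map_image, score' header written above.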
def create_3D_model_from_depth_from_loaded_data(
        kdata: kapture.Kapture, input_path: str, tar_handlers: TarCollection,
        output_path: str, keypoints_type: Optional[str], depth_sensor_id: str,
        topk: int, method: Method, cellsizes: List[str], force: bool):
    """
    Create 3D model from a kapture dataset that has registered depth data
    Assumes the kapture data is already loaded
    """
    logger.info('create 3D model using depth data')

    if os.path.exists(output_path) and not force:
        print('output path already exists, use --force to overwrite')
        return -1

    if kdata.rigs is not None:
        assert kdata.trajectories is not None
        kapture.rigs_remove_inplace(kdata.trajectories, kdata.rigs)

    if keypoints_type is None:
        keypoints_type = try_get_only_key_from_collection(kdata.keypoints)
    assert keypoints_type is not None
    assert kdata.keypoints is not None
    assert keypoints_type in kdata.keypoints

    if method == Method.voxelgrid:
        vg = VoxelGrid(cellsizes)
    else:
        vg = None  # only used by the voxelgrid method

    # add every 3D point that corresponds to a keypoint to the map
    logger.info('adding points from scan to kapture')
    points3d = []
    observations = kapture.Observations()

    progress_bar = tqdm(total=len(list(kapture.flatten(kdata.records_camera,
                                                       is_sorted=True))),
                        disable=logger.level >= logging.CRITICAL)
    for timestamp, sensor_id, sensing_filepath in kapture.flatten(
            kdata.records_camera, is_sorted=True):
        logger.info(
            f'total 3d points: {len(points3d)}, processing {sensing_filepath}')
        # check if the image has a pose
        if (timestamp, sensor_id) not in kdata.trajectories:
            logger.info(f'{sensing_filepath} does not have a pose. skipping ...')
            continue

        # check if a depth map is registered for this frame
        depth_map_record = ''
        if timestamp in kdata.records_depth:
            if depth_sensor_id is None:
                depth_id = sensor_id + '_depth'
            else:
                depth_id = depth_sensor_id
            if depth_id in kdata.records_depth[timestamp]:
                depth_map_record = kdata.records_depth[timestamp][depth_id]
        if not depth_map_record:
            # skip early: without a depth record, depth_id may be undefined below
            logger.info(f'no 3D data found for {sensing_filepath}. skipping ...')
            continue
        depth_map_size = tuple(int(x) for x in kdata.sensors[depth_id].camera_params[0:2])
        depth_path = get_depth_map_fullpath(input_path, depth_map_record)
        if not os.path.exists(depth_path):
            logger.info(f'no 3D data found for {sensing_filepath}. skipping ...')
            continue
        depth_map = depth_map_from_file(depth_path, depth_map_size)
        img = Image.open(get_image_fullpath(input_path,
                                            sensing_filepath)).convert('RGB')

        assert img.size[0] == depth_map_size[0]
        assert img.size[1] == depth_map_size[1]

        kps_raw = load_keypoints(keypoints_type, input_path, sensing_filepath,
                                 kdata.keypoints[keypoints_type].dtype,
                                 kdata.keypoints[keypoints_type].dsize,
                                 tar_handlers)

        _, camera_sensor_C, camera_dist = get_camera_matrix_from_kapture(
            np.zeros((1, 0, 2), dtype=np.float64), kdata.sensors[sensor_id])
        cv2_keypoints, depth_sensor_C, depth_dist = get_camera_matrix_from_kapture(
            kps_raw, kdata.sensors[depth_id])
        assert np.isclose(depth_sensor_C, camera_sensor_C).all()
        assert np.isclose(depth_dist, camera_dist).all()

        if np.count_nonzero(camera_dist) > 0:
            # iteratively undistort the keypoints so they can be back-projected
            # with a plain pinhole model
            epsilon = np.finfo(np.float64).eps
            stop_criteria = (cv2.TERM_CRITERIA_MAX_ITER +
                             cv2.TERM_CRITERIA_EPS, 500, epsilon)
            undistorted_cv2_keypoints = cv2.undistortPointsIter(
                cv2_keypoints,
                camera_sensor_C,
                camera_dist,
                R=None,
                P=camera_sensor_C,
                criteria=stop_criteria)
        else:
            undistorted_cv2_keypoints = cv2_keypoints

        cv2_keypoints = cv2_keypoints.reshape((kps_raw.shape[0], 2))
        undistorted_cv2_keypoints = undistorted_cv2_keypoints.reshape(
            (kps_raw.shape[0], 2))

        points3d_img = []
        rgb_img = []
        kp_idxs = []
        for idx_kp, kp in enumerate(cv2_keypoints[0:topk]):
            u = round(kp[0])
            v = round(kp[1])

            undist_kp = undistorted_cv2_keypoints[idx_kp]
            undist_u = round(undist_kp[0])
            undist_v = round(undist_kp[1])

            if 0 <= u < depth_map_size[0] and 0 <= v < depth_map_size[1]:
                if depth_map[v, u] == 0:
                    # no depth measurement at this pixel
                    continue
                pt3d = project_kp_to_3D(undist_u, undist_v, depth_map[v, u],
                                        depth_sensor_C[0, 2], depth_sensor_C[1, 2],
                                        depth_sensor_C[0, 0], depth_sensor_C[1, 1])
                points3d_img.append(pt3d)
                rgb_img.append(img.getpixel((u, v)))
                kp_idxs.append(idx_kp)
        # transform to world coordinates (pt3d from a depth map is in camera coordinates)
        # we use sensor_id here because we assume that the image and the corresponding depthmap have the same pose
        # and sometimes, the pose might only be provided for the images
        cam_to_world = kdata.trajectories[timestamp][sensor_id].inverse()
        if len(points3d_img) == 0:
            continue
        points3d_img = cam_to_world.transform_points(np.array(points3d_img))
        for idx_kp, pt3d, rgb in zip(kp_idxs, points3d_img, rgb_img):
            if not np.isnan(pt3d).any():
                if method == Method.voxelgrid:
                    assert vg is not None
                    if not vg.exists(pt3d):
                        # add 3D point
                        points3d.append(list(pt3d) + list(rgb))
                        # add observation
                        observations.add(
                            len(points3d) - 1, keypoints_type,
                            sensing_filepath, idx_kp)
                        vg.add(pt3d, len(points3d) - 1, sensing_filepath)
                    else:
                        ret = vg.append(pt3d, sensing_filepath)
                        if ret is not None:
                            observations.add(ret[0], keypoints_type,
                                             sensing_filepath, idx_kp)
                elif method == Method.all:
                    # add 3D point
                    points3d.append(list(pt3d) + list(rgb))
                    # add observation
                    observations.add(
                        len(points3d) - 1, keypoints_type, sensing_filepath,
                        idx_kp)
        # save_3Dpts_to_ply(points3d, os.path.join(output_path, 'map.ply'))
        progress_bar.update(1)
    progress_bar.close()

    kdata.points3d = kapture.Points3d(np.array(points3d))
    kdata.observations = observations

    logger.info('saving ...')
    kapture_to_dir(output_path, kdata)
    # save_3Dpts_to_ply(points3d, os.path.join(output_path, 'map.ply'))

    logger.info('all done')
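Judging from the call site above, where depth_sensor_C[0, 2], depth_sensor_C[1, 2], depth_sensor_C[0, 0] and depth_sensor_C[1, 1] are the principal point (cx, cy) and focal lengths (fx, fy), project_kp_to_3D presumably performs a standard pinhole back-projection. A minimal sketch of that operation (the helper name and formula are inferred from the call site, not taken from the source):

import numpy as np

def pinhole_backproject(u, v, depth, cx, cy, fx, fy):
    """Back-project an undistorted pixel (u, v) with metric depth into camera
    coordinates: X = (u - cx) * depth / fx, Y = (v - cy) * depth / fy, Z = depth."""
    return np.array([(u - cx) * depth / fx,
                     (v - cy) * depth / fy,
                     depth])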