def get_interpolated_pose(kdata_map: kapture.Kapture,
                          kdata_query: kapture.Kapture,
                          weights: Dict[str, List[Tuple[str, float]]]):
    """
    compute the approximated pose for all query images given the precomputed weights

    :param kdata_map: map images + their poses as kapture data
    :type kdata_map: kapture.Kapture
    :param kdata_query: query images as kapture data
    :type kdata_query: kapture.Kapture
    :param weights: weights for the pose interpolation
    :type weights: Dict[str, List[Tuple[str, float]]]
    """
    output_trajectories = kapture.Trajectories()
    assert kdata_map.trajectories is not None
    assert kdata_map.records_camera is not None
    reverse_map_records_camera = {image_name: (timestamp, sensor_id)
                                  for timestamp, sensor_id, image_name
                                  in kapture.flatten(kdata_map.records_camera)}
    if kdata_map.rigs is not None:
        input_trajectories = kapture.rigs_remove(kdata_map.trajectories, kdata_map.rigs)
    else:
        input_trajectories = kdata_map.trajectories
    assert kdata_query.records_camera is not None
    reverse_query_records_camera = {image_name: (timestamp, sensor_id)
                                    for timestamp, sensor_id, image_name
                                    in kapture.flatten(kdata_query.records_camera)}
    for query_image_name, weighted_map_images in weights.items():
        pose_inv_list = [input_trajectories[reverse_map_records_camera[name]].inverse()
                         for name, _ in weighted_map_images]
        weight_list = [w for _, w in weighted_map_images]
        output_trajectories[reverse_query_records_camera[query_image_name]] = \
            average_pose_transform_weighted(pose_inv_list, weight_list).inverse()
    return output_trajectories
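
# Hedged usage sketch (illustrative, not part of the library): build a tiny map
# kapture with two posed images and a query kapture with one image, then
# interpolate the query pose from hand-written weights. All image names,
# sensor ids and weight values below are hypothetical.
def _example_interpolated_pose():
    import kapture
    map_data = kapture.Kapture(sensors=kapture.Sensors(),
                               records_camera=kapture.RecordsCamera(),
                               trajectories=kapture.Trajectories())
    map_data.sensors['cam0'] = kapture.Camera(kapture.CameraType.SIMPLE_PINHOLE, [640, 480, 500, 320, 240])
    map_data.records_camera[0, 'cam0'] = 'mapping/0000.jpg'
    map_data.records_camera[1, 'cam0'] = 'mapping/0001.jpg'
    map_data.trajectories[0, 'cam0'] = kapture.PoseTransform(r=[1, 0, 0, 0], t=[0, 0, 0])
    map_data.trajectories[1, 'cam0'] = kapture.PoseTransform(r=[1, 0, 0, 0], t=[0, 0, 10])

    query_data = kapture.Kapture(sensors=map_data.sensors,
                                 records_camera=kapture.RecordsCamera())
    query_data.records_camera[0, 'cam0'] = 'query/0000.jpg'

    # equal weights on both map images -> interpolated pose roughly halfway between them
    weights = {'query/0000.jpg': [('mapping/0000.jpg', 0.5), ('mapping/0001.jpg', 0.5)]}
    return get_interpolated_pose(map_data, query_data, weights)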
def test_rig_remove(self):
    rigs = kapture.Rigs()
    rigs['rig0', 'cam0'] = kapture.PoseTransform(r=[1, 0, 0, 0], t=[100, 0, 0])
    rigs['rig0', 'cam1'] = kapture.PoseTransform(r=[1, 0, 0, 0], t=[-100, 0, 0])
    trajectories = kapture.Trajectories()
    trajectories[0, 'rig0'] = kapture.PoseTransform(r=[1, 0, 0, 0], t=[0, 0, 0])
    trajectories[1, 'rig0'] = kapture.PoseTransform(r=[1, 0, 0, 0], t=[0, 0, 10])
    trajectories[2, 'rig0'] = kapture.PoseTransform(r=[1, 0, 0, 0], t=[0, 0, 20])
    trajectories_ = kapture.rigs_remove(trajectories, rigs)
    # timestamps should be unchanged
    self.assertEqual(trajectories_.keys(), trajectories.keys())
    self.assertNotEqual(trajectories_.key_pairs(), trajectories.key_pairs())
    self.assertEqual(len(trajectories_.key_pairs()),
                     len(trajectories.key_pairs()) * len(rigs.key_pairs()))
    self.assertIn((0, 'cam0'), trajectories_.key_pairs())
    self.assertIn((0, 'cam1'), trajectories_.key_pairs())
    self.assertIn((2, 'cam0'), trajectories_.key_pairs())
    self.assertIn((2, 'cam1'), trajectories_.key_pairs())
    # cam1 pose at timestamp 2 = rig pose (t=[0, 0, 20]) composed with the rig->cam1 offset (t=[-100, 0, 0])
    self.assertAlmostEqual(trajectories_[2, 'cam1'].t_raw, [-100.0, 0.0, 20.0])
    self.assertAlmostEqual(trajectories_[2, 'cam1'].r_raw, [1.0, 0.0, 0.0, 0.0])
def get_poses(k_data: kapture.Kapture,
              image_set: Union[Set[str], List[str]]) -> List[Tuple[str, kapture.PoseTransform]]:
    """
    Computes the poses for a set of images within a kapture.

    :param k_data: the kapture
    :param image_set: set of image names
    :return: list of (image name, pose)
    """
    assert k_data.trajectories is not None
    if isinstance(image_set, list):
        image_set = set(image_set)
    assert isinstance(image_set, set)
    assert isinstance(k_data, kapture.Kapture)
    # apply rigs to trajectories
    if k_data.rigs is not None:
        trajectories = kapture.rigs_remove(k_data.trajectories, k_data.rigs)
    else:
        trajectories = k_data.trajectories
    poses = []
    for timestamp, device_id, filename in kapture.flatten(k_data.records_camera, is_sorted=True):
        if filename in image_set and (timestamp, device_id) in trajectories:
            pose = trajectories[(timestamp, device_id)]
            poses.append((filename, pose))
    return poses
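
# Hedged usage sketch: assuming `kdata` is a kapture.Kapture loaded elsewhere
# (e.g. with csv.kapture_from_dir, as in the tools below), fetch the poses of a
# hand-picked subset of images; the image names are hypothetical.
def _example_get_poses(kdata: kapture.Kapture):
    wanted_images = ['mapping/0000.jpg', 'mapping/0001.jpg']
    for image_name, pose in get_poses(kdata, wanted_images):
        # each pose is the kapture.PoseTransform stored in the (rig-free) trajectories
        print(image_name, pose.t_raw, pose.r_raw)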
def test_rig_remove_inplace_consistency(self):
    # compare inplace and not inplace
    trajectories_inplace = deepcopy(self._trajectories_rigs)
    rigs_inplace = deepcopy(self._rigs)
    kapture.rigs_remove_inplace(trajectories_inplace, rigs_inplace)
    trajectories_not_inplace = kapture.rigs_remove(self._trajectories_rigs, self._rigs)
    self.assertTrue(equal_trajectories(trajectories_inplace, trajectories_not_inplace))
    # make sure rigs are untouched.
    self.assertTrue(equal_rigs(rigs_inplace, self._rigs))
def test_rig_remove(self):
    trajectories_wo_rigs = kapture.rigs_remove(self._trajectories_rigs, self._rigs)
    self.assertTrue(equal_trajectories(trajectories_wo_rigs, self._trajectories_cams))
def get_pairs_distance(kdata: kapture.Kapture,
                       kdata_query: kapture.Kapture,
                       topk: Optional[int],
                       min_distance: float,
                       max_distance: float,
                       max_angle: float,
                       keep_rejected: bool):
    """
    Get pairs as a list from pose distance.

    :param kdata: mapping data as kapture
    :param kdata_query: query data as kapture
    :param topk: if not None, keep at most topk pairs per query image
    :param min_distance: position distance below which a pair is rejected
    :param max_distance: position distance above which a pair is rejected
    :param max_angle: rotation distance (degrees) above which a pair is rejected
    :param keep_rejected: keep rejected pairs in the output list
    """
    if kdata.rigs is None:
        map_trajectories = kdata.trajectories
    else:
        map_trajectories = kapture.rigs_remove(kdata.trajectories, kdata.rigs)
    imgs_map = [(img, map_trajectories[ts, sensor_id].inverse())
                for ts, sensor_id, img in kapture.flatten(kdata.records_camera)
                if (ts, sensor_id) in map_trajectories]
    if kdata_query.rigs is None:
        query_trajectories = kdata_query.trajectories
    else:
        query_trajectories = kapture.rigs_remove(kdata_query.trajectories, kdata_query.rigs)
    imgs_query = [(img, query_trajectories[ts, sensor_id].inverse())
                  for ts, sensor_id, img in kapture.flatten(kdata_query.records_camera)
                  if (ts, sensor_id) in query_trajectories]
    positions_scores = get_position_diff(imgs_query, imgs_map)
    rotation_scores = get_rotations_diff(imgs_query, imgs_map)

    # is_rejected = (distance < min_distance or distance > max_distance or rotation_distance > max_angle)
    ones = np.ones(positions_scores.shape)
    invalid = (positions_scores < (ones * min_distance)) | \
              (positions_scores > (ones * max_distance)) | \
              (rotation_scores > (ones * max_angle))
    score = (ones * 2.0) - \
            (np.minimum(positions_scores, ones * max_distance) / max_distance +
             np.minimum(rotation_scores, ones * max_angle) / max_angle)

    similarity_dict = {}
    for i, line in enumerate(score):
        scores = line
        indexes = np.argsort(-scores)
        query_name = imgs_query[i][0]
        pairs = []
        k = 0
        for j in indexes:
            if topk is not None and k >= topk:
                break
            if not keep_rejected and invalid[i, j]:
                continue
            map_name = imgs_map[j][0]
            if query_name == map_name:
                continue
            pairs.append((map_name, scores[j]))
            k += 1
        similarity_dict[query_name] = pairs

    image_pairs = []
    for query_image_name, images_to_match in sorted(similarity_dict.items()):
        for mapping_image_name, score in images_to_match:
            image_pairs.append([query_image_name, mapping_image_name, score])
    return image_pairs
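
# Hedged usage sketch: compute distance-based pairs between a query kapture and
# a map kapture, keep at most 5 neighbours per query image, and dump them as a
# plain "query, map, score" text file. Paths and thresholds are illustrative.
def _example_pairs_distance(kdata_map, kdata_query, output_path='pairs_distance.txt'):
    image_pairs = get_pairs_distance(kdata_map, kdata_query,
                                     topk=5,
                                     min_distance=0.0,
                                     max_distance=25.0,
                                     max_angle=45.0,
                                     keep_rejected=False)
    with open(output_path, 'w') as fid:
        fid.write('# query_image, map_image, score\n')
        for query_name, map_name, score in image_pairs:
            fid.write(f'{query_name}, {map_name}, {score}\n')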
def pose_approximation_from_pairsfile(input_path: str,
                                      pairsfile_path: str,
                                      output_path: str,
                                      query_path: Optional[str],
                                      topk: Optional[int],
                                      method: str,
                                      additional_parameters: dict,
                                      force: bool):
    """
    Localize from a pairsfile: approximate each query pose as a weighted average
    of the poses of its retrieved mapping images.
    """
    os.makedirs(output_path, exist_ok=True)
    delete_existing_kapture_files(output_path, force_erase=force)

    logger.info(f'pose_approximation. loading mapping: {input_path}')
    kdata = kapture_from_dir(input_path, None, skip_list=[kapture.Keypoints,
                                                          kapture.Descriptors,
                                                          kapture.GlobalFeatures,
                                                          kapture.Matches,
                                                          kapture.Points3d,
                                                          kapture.Observations])
    if query_path is not None:
        logger.info(f'pose_approximation. loading query: {query_path}')
        kdata_query = kapture_from_dir(query_path, skip_list=[kapture.Keypoints,
                                                              kapture.Descriptors,
                                                              kapture.GlobalFeatures,
                                                              kapture.Matches,
                                                              kapture.Points3d,
                                                              kapture.Observations])
    else:
        kdata_query = kdata

    logger.info(f'pose_approximation. loading pairs: {pairsfile_path}')
    similarity_dict = get_ordered_pairs_from_file(pairsfile_path,
                                                  kdata_query.records_camera,
                                                  kdata.records_camera,
                                                  topk)
    query_images = set(similarity_dict.keys())

    kdata_result = kapture.Kapture(sensors=kapture.Sensors(),
                                   records_camera=kapture.RecordsCamera(),
                                   trajectories=kapture.Trajectories())
    for timestamp, cam_id, image_name in kapture.flatten(kdata_query.records_camera):
        if image_name not in query_images:
            continue
        if cam_id not in kdata_result.sensors:
            kdata_result.sensors[cam_id] = kdata_query.sensors[cam_id]
        kdata_result.records_camera[(timestamp, cam_id)] = image_name

    if kdata.rigs is None:
        map_trajectories = kdata.trajectories
    else:
        map_trajectories = kapture.rigs_remove(kdata.trajectories, kdata.rigs)
    training_trajectories_reversed = {image_name: map_trajectories[(timestamp, cam_id)]
                                      for timestamp, cam_id, image_name in kapture.flatten(kdata.records_camera)
                                      if (timestamp, cam_id) in map_trajectories}
    records_camera_reversed = {image_name: (timestamp, cam_id)
                               for timestamp, cam_id, image_name in kapture.flatten(kdata_result.records_camera)}

    for image_name, similar_images in similarity_dict.items():
        pose_inv_list = [training_trajectories_reversed[k].inverse() for k, _ in similar_images]
        timestamp = records_camera_reversed[image_name][0]
        cam_id = records_camera_reversed[image_name][1]
        if method == 'equal_weighted_barycenter':
            weight_list = [1.0 / len(pose_inv_list) for _ in range(len(pose_inv_list))]
        else:
            # weights from the similarity scores, sharpened by alpha and normalized to sum to 1
            assert 'alpha' in additional_parameters
            alpha = additional_parameters['alpha']
            weights = np.zeros((len(pose_inv_list),))
            for i, (_, score) in enumerate(similar_images):
                weights[i] = score
            weights[:] = weights[:] ** alpha
            weights[:] = weights[:] / np.sum(weights[:])
            weight_list = weights.tolist()
        final_pose = average_pose_transform_weighted(pose_inv_list, weight_list).inverse()
        kdata_result.trajectories[(timestamp, cam_id)] = final_pose

    kapture_to_dir(output_path, kdata_result)
    logger.info('all done')
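
# Hedged usage sketch: approximate query poses from a precomputed pairs file.
# Paths are illustrative. With method='equal_weighted_barycenter' every retrieved
# neighbour counts equally; any other method string falls into the score-weighted
# branch above and then requires an 'alpha' exponent in additional_parameters.
def _example_pose_approximation():
    pose_approximation_from_pairsfile(input_path='kapture_data/mapping',
                                      pairsfile_path='pairs_topk20.txt',
                                      output_path='pose_approximation_output',
                                      query_path='kapture_data/query',
                                      topk=20,
                                      method='equal_weighted_barycenter',
                                      additional_parameters={},
                                      force=True)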
def evaluate_command_line() -> None:
    """
    Do the evaluation using the parameters given on the command line.
    """
    parser = argparse.ArgumentParser(description='Evaluation script for kapture data.')
    parser_verbosity = parser.add_mutually_exclusive_group()
    parser_verbosity.add_argument('-v', '--verbose', nargs='?', default=logging.WARNING, const=logging.INFO,
                                  action=kapture.utils.logging.VerbosityParser,
                                  help='verbosity level (debug, info, warning, critical, ... or int value) [warning]')
    parser_verbosity.add_argument('-q', '--silent', '--quiet',
                                  action='store_const', dest='verbose', const=logging.CRITICAL)
    parser.add_argument('-i', '--inputs', nargs='+',
                        help='input path to kapture data root directory. You can compare multiple models')
    parser.add_argument('--labels', nargs='+', default=[],
                        help='labels for inputs. must be of same length as inputs')
    parser.add_argument('-gt', '--ground-truth', required=True,
                        help='input path to data ground truth root directory in kapture format')
    parser.add_argument('-o', '--output', help='output directory.', required=True)
    parser.add_argument('-l', '--image-list', default="",
                        help='optional, path to a text file containing the list of images to consider'
                             ' (1 line per image or a pairsfile). if not present, all gt images are used')
    parser.add_argument('--bins', nargs='+', default=["0.25 2", "0.5 5", "5 10"],
                        help='the desired positions/rotations thresholds for bins. '
                             'format is string : position_threshold_in_m space rotation_threshold_in_degree')
    parser.add_argument('-p', '--plot-rotation-threshold', default=-1, type=float,
                        help='rotation threshold for position error threshold plot. negative values -> ignore rotation')
    parser.add_argument('--plot-max', default=100, type=int,
                        help='maximum distance in cm shown in plot')
    parser.add_argument('--plot-title', default="",
                        help='title for position error threshold plot')
    parser.add_argument('--plot-loc', default="best",
                        choices=['best', 'upper right', 'upper left', 'lower left', 'lower right', 'right',
                                 'center left', 'center right', 'lower center', 'upper center', 'center'],
                        help='position of plot legend. loc param for plt.legend.')
    parser.add_argument('--plot-font-size', default=15, type=int,
                        help='value for plt.rcParams[\'font.size\']')
    parser.add_argument('--plot-legend-font-size', default=8, type=int,
                        help='value for plt.rcParams[\'legend.fontsize\']')
    parser.add_argument('-f', '-y', '--force', action='store_true', default=False,
                        help='Force delete output directory if already exists')
    args = parser.parse_args()

    logger.setLevel(args.verbose)
    if args.verbose <= logging.DEBUG:
        # also let kapture express its logs
        kapture.utils.logging.getLogger().setLevel(args.verbose)
        kapture_localization.utils.logging.getLogger().setLevel(args.verbose)

    assert len(args.inputs) > 0
    if len(args.labels) == 0:
        args.labels = [f'input{i}' for i in range(1, len(args.inputs) + 1)]
    assert len(args.labels) == len(args.inputs)

    try:
        logger.debug(''.join(['\n\t{:13} = {}'.format(k, v) for k, v in vars(args).items()]))
        os.makedirs(args.output, exist_ok=True)

        logger.debug('loading: {}'.format(args.inputs))
        all_kapture_to_eval = [csv.kapture_from_dir(folder) for folder in args.inputs]

        logger.info('loading ground truth data')
        gt_kapture = csv.kapture_from_dir(args.ground_truth)
        assert gt_kapture.records_camera is not None
        assert gt_kapture.trajectories is not None

        if args.image_list:
            with open(args.image_list, 'r') as fid:
                table = table_from_file(fid)
                image_set = {line[0] for line in table}
        else:
            if gt_kapture.rigs is not None:
                gt_trajectories = kapture.rigs_remove(gt_kapture.trajectories, gt_kapture.rigs)
            else:
                gt_trajectories = gt_kapture.trajectories
            image_set = set(image_name
                            for ts, sensor_id, image_name in kapture.flatten(gt_kapture.records_camera)
                            if (ts, sensor_id) in gt_trajectories)
        if len(image_set) == 0:
            logger.info('image_set is empty, for some reason, I could not find images to evaluate')
            exit(0)

        results = [evaluate(kapture_to_eval, gt_kapture, image_set)
                   for kapture_to_eval in all_kapture_to_eval]

        save_evaluation(results, args.output, args.labels, args.bins,
                        args.plot_rotation_threshold, args.plot_max,
                        args.plot_title, args.plot_loc,
                        args.plot_font_size, args.plot_legend_font_size,
                        args.force)
    except Exception as e:
        logger.critical(e)
        if args.verbose > 1:
            raise
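
# Hedged usage sketch: drive the evaluation parser programmatically by setting
# sys.argv before calling evaluate_command_line(). The program name and all
# paths are hypothetical; only flags defined by the parser above are used.
def _example_evaluate():
    import sys
    sys.argv = ['kapture_evaluate',
                '-i', 'results/pose_approx', 'results/localized',
                '--labels', 'approx', 'localized',
                '-gt', 'kapture_data/query_gt',
                '-o', 'evaluation_output',
                '--bins', '0.25 2', '0.5 5', '5 10',
                '-f']
    evaluate_command_line()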