def compute_num_inliers(res_dict, cfg):
    '''Compile match numbers at different stages into the dictionary.'''

    # if cfg.method_dict['config_{}_{}'.format(cfg.dataset,
    #                                          cfg.task)]['use_custom_matches']:
    #     raise NotImplementedError(
    #         'Probably read only once? What to do with runs?')

    # Load pre-computed pairs with the new visibility criteria
    data_dir = get_data_path(cfg)
    pairs_per_th = get_pairs_per_threshold(data_dir)
    stereo_thresholds = list(pairs_per_th.keys())
    epipolar_err_dict = {}

    # Load the epipolar error files for every stage
    for th in [None] + stereo_thresholds:
        epipolar_err_dict['matcher'] = load_h5(
            get_stereo_epipolar_pre_match_file(cfg, th))
        epipolar_err_dict['filter'] = load_h5(
            get_stereo_epipolar_refined_match_file(cfg, th))
        epipolar_err_dict['geom'] = load_h5(
            get_stereo_epipolar_final_match_file(cfg, th))

        for key_stage, values1 in epipolar_err_dict.items():
            # Simply average over all pairs
            num_matches = []
            for key, values2 in values1.items():
                num_matches.append(len(values2))

            # Save the average number of matches at this stage
            vis_label = '' if th is None else '_th_{}'.format(th)
            res_dict['num_matches_{}{}'.format(key_stage, vis_label)] = float(
                np.mean(num_matches) if len(num_matches) > 0 else 0)
def compute_matching_scores_depth_projection(res_dict, cfg):
    '''Compute matching scores (with depth) and add them to the dictionary.'''

    px_th_list = cfg.matching_score_and_repeatability_px_threshold

    # Load pre-computed pairs with the new visibility criteria
    data_dir = get_data_path(cfg)
    pairs_per_th = get_pairs_per_threshold(data_dir)
    stereo_thresholds = list(pairs_per_th.keys())
    reprojection_err_dict = {}

    # Load the depth-projection (reprojection) error files for every stage
    for th in [None] + stereo_thresholds:
        reprojection_err_dict['pre_match'] = load_h5(
            get_stereo_depth_projection_pre_match_file(cfg, th))
        reprojection_err_dict['refined_match'] = load_h5(
            get_stereo_depth_projection_refined_match_file(cfg, th))
        reprojection_err_dict['final_match'] = load_h5(
            get_stereo_depth_projection_final_match_file(cfg, th))

        for key_stage, values1 in reprojection_err_dict.items():
            acc = []
            for px_th in px_th_list:
                ms = []
                # Simply average over all pairs
                for key, values2 in values1.items():
                    if len(values2) > 0:
                        ms += [np.mean(values2 < px_th)]
                    else:
                        ms += [0]
                acc += [float(np.mean(ms) if len(ms) > 0 else 0)]

            # Save one score per pixel threshold
            vis_label = '' if th is None else '_th_{}'.format(th)
            res_dict['matching_scores_depth_projection_{}{}'.format(
                key_stage, vis_label)] = acc
def compute_repeatability(res_dict, cfg):
    '''Compute repeatability and add it to the dictionary.'''

    px_th_list = cfg.matching_score_and_repeatability_px_threshold

    # Load pre-computed pairs with the new visibility criteria
    data_dir = get_data_path(cfg)
    pairs_per_th = get_pairs_per_threshold(data_dir)
    stereo_thresholds = list(pairs_per_th.keys())

    # Load the repeatability score file for every threshold
    for th in [None] + stereo_thresholds:
        ms_list_list = [[] for i in range(len(px_th_list))]
        repeatability_dict = load_h5(get_repeatability_score_file(cfg, th))

        # Gather scores, one list per pixel threshold
        for key, values in repeatability_dict.items():
            for idx in range(len(px_th_list)):
                ms_list_list[idx] += [values[idx]]

        # Average over all pairs for each pixel threshold
        acc = []
        for px_th, ms_list in zip(px_th_list, ms_list_list):
            acc += [float(np.mean(ms_list) if len(ms_list) > 0 else 0)]

        vis_label = '' if th is None else '_th_{}'.format(th)
        res_dict['repeatability{}'.format(vis_label)] = acc
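# Illustrative sketch (editor's example, not part of the benchmark): toy data
# showing what compute_repeatability() above averages. Each pair contributes
# one repeatability value per pixel threshold; the result is the column-wise
# mean across pairs. All names and numbers here are made up.
def _example_repeatability_average():
    import numpy as np
    px_th_list = [1, 2, 3]                                 # pixel thresholds
    repeatability_dict = {'img_a-img_b': [0.2, 0.4, 0.5],  # one value per threshold
                          'img_a-img_c': [0.1, 0.3, 0.6]}
    acc = [float(np.mean([v[idx] for v in repeatability_dict.values()]))
           for idx in range(len(px_th_list))]
    return acc  # -> [0.15, 0.35, 0.55]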
def main(cfg):
    '''Main function to compute matches.

    Parameters
    ----------
    cfg: Namespace
        Configurations for running this part of the code.
    '''

    if os.path.exists(get_match_file(cfg)):
        print(' -- already exists, skipping match computation')
        return

    # Get data directory
    data_dir = get_data_path(cfg)

    # Load pre-computed pairs with the new visibility criteria
    print('Reading list of all possible pairs')
    pairs = get_pairs_per_threshold(data_dir)['0.0']
    print('{} pre-computed pairs'.format(len(pairs)))

    # Load descriptors and keypoints
    descriptors_dict = load_h5(get_desc_file(cfg))
    keypoints_dict = load_h5(get_kp_file(cfg))

    # Feature Matching
    print('Computing matches')
    num_cores = cfg.num_opencv_threads if cfg.num_opencv_threads > 0 else int(
        len(os.sched_getaffinity(0)) * 0.9)
    if WITH_FAISS:
        num_cores = min(4, num_cores)
    result = Parallel(n_jobs=num_cores)(
        delayed(compute_matches)(np.asarray(descriptors_dict[pair.split('-')[0]]),
                                 np.asarray(descriptors_dict[pair.split('-')[1]]),
                                 cfg,
                                 np.asarray(keypoints_dict[pair.split('-')[0]]),
                                 np.asarray(keypoints_dict[pair.split('-')[1]]))
        for pair in tqdm(pairs))

    # Make match dictionary
    matches_dict = {}
    timings_list = []
    for i, pair in enumerate(pairs):
        matches_dict[pair] = result[i][0]
        timings_list.append(result[i][1])

    # Check match directory
    if not os.path.exists(get_match_path(cfg)):
        os.makedirs(get_match_path(cfg))

    # Finally save packed matches
    save_h5(matches_dict, get_match_file(cfg))

    # Save computational cost
    save_h5({'cost': np.mean(timings_list)}, get_match_cost_file(cfg))
    print('Matching cost (averaged over image pairs): {:0.2f} sec'.format(
        np.mean(timings_list)))
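# Readability sketch (editor's example, not part of the benchmark): the
# per-pair call made inside the Parallel loop above, spelled out using the
# 'name1-name2' pair-key convention used throughout these scripts. The helper
# name _match_one_pair is hypothetical.
def _match_one_pair(pair, descriptors_dict, keypoints_dict, cfg):
    '''Return compute_matches() output for a single 'name1-name2' pair key.'''
    name1, name2 = pair.split('-')
    return compute_matches(np.asarray(descriptors_dict[name1]),
                           np.asarray(descriptors_dict[name2]),
                           cfg,
                           np.asarray(keypoints_dict[name1]),
                           np.asarray(keypoints_dict[name2]))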
def compute_qt_auc(res_dict, deprecated_images, cfg):
    '''Compute pose accuracy (stereo) and add it to the dictionary.'''

    # Load pre-computed pairs with the new visibility criteria
    data_dir = get_data_path(cfg)
    pairs_per_th = get_pairs_per_threshold(data_dir)
    stereo_thresholds = list(pairs_per_th.keys())

    # Load pose errors for stereo
    for th in [None] + stereo_thresholds:
        pose_err_dict = load_h5_valid_image(get_stereo_pose_file(cfg, th),
                                            deprecated_images)

        # Gather err_q, err_t
        err_qt = []
        for key, value in pose_err_dict.items():
            err_qt += [value]

        if len(err_qt) > 0:
            err_qt = np.asarray(err_qt)
            # Take the maximum among q and t errors
            err_qt = np.max(err_qt, axis=1)
            # Convert to degrees
            err_qt = err_qt * 180.0 / np.pi
            # Set infs to a large value so that np.histogram can be used
            err_qt[err_qt == np.inf] = 1e6

            # Create histogram with 1-degree bins from 0 to 10
            bars = np.arange(11)
            qt_hist, _ = np.histogram(err_qt, bars)
            # Normalize histogram by the number of evaluated pairs
            num_pair = float(len(err_qt))
            qt_hist = qt_hist.astype(float) / num_pair

            # Make cumulative
            qt_acc = np.cumsum(qt_hist)
        else:
            # No valid pairs: report zeros (qt_hist must still be defined below)
            qt_hist = np.zeros(10)
            qt_acc = np.zeros(10)

        # Save to dictionary
        label = '' if th is None else '_th_{}'.format(th)
        res_dict['qt_01_10{}'.format(label)] = qt_hist.tolist()
        res_dict['qt_auc_05{}'.format(label)] = np.mean(qt_acc[:5])
        res_dict['qt_auc_10{}'.format(label)] = np.mean(qt_acc)
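# Illustrative sketch (editor's example, not part of the benchmark): how the
# histogram built in compute_qt_auc() above turns into the mAA numbers stored
# in res_dict, using synthetic pose errors given in radians.
def _example_qt_auc():
    import numpy as np
    err_qt = np.array([[0.01, 0.02],   # (err_q, err_t) per pair, in radians
                       [0.05, 0.20],
                       [np.inf, 0.10]])
    err = np.max(err_qt, axis=1) * 180.0 / np.pi    # worst of q/t, in degrees
    err[err == np.inf] = 1e6                        # keep np.histogram happy
    hist, _ = np.histogram(err, np.arange(11))      # 1-degree bins, 0..10 deg
    acc = np.cumsum(hist.astype(float) / len(err))  # cumulative accuracy curve
    return np.mean(acc[:5]), np.mean(acc)           # mAA at 5 and 10 degrees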
def is_stereo_complete(cfg):
    '''Checks if stereo evaluation is complete.'''

    # Load pre-computed pairs with the new visibility criteria
    data_dir = get_data_path(cfg)
    pairs_per_th = get_pairs_per_threshold(data_dir)

    # Check if all files exist
    files = []
    for th in [None] + list(pairs_per_th.keys()):
        files += [get_stereo_pose_file(cfg, th)]

    any_missing = False
    for f in files:
        if not os.path.exists(f):
            any_missing = True
            break

    return not any_missing
def compute_matching_scores_epipolar(res_dict, cfg):
    '''Compute matching scores (with calib) and add them to the dictionary.'''

    # Load pre-computed pairs with the new visibility criteria
    data_dir = get_data_path(cfg)
    pairs_per_th = get_pairs_per_threshold(data_dir)
    stereo_thresholds = list(pairs_per_th.keys())
    epipolar_err_dict = {}

    # Load the epipolar error files for every stage
    values = {}
    for th in [None] + stereo_thresholds:
        epipolar_err_dict['pre_match'] = load_h5(
            get_stereo_epipolar_pre_match_file(cfg, th))
        epipolar_err_dict['refined_match'] = load_h5(
            get_stereo_epipolar_refined_match_file(cfg, th))
        epipolar_err_dict['final_match'] = load_h5(
            get_stereo_epipolar_final_match_file(cfg, th))

        for key_stage, values1 in epipolar_err_dict.items():
            # Init empty list per stage
            if key_stage not in values:
                values[key_stage] = []

            # Simply average over all pairs
            ms_list = []
            for key, values2 in values1.items():
                if len(values2) > 0:
                    ms_list += [
                        np.mean(values2 < cfg.matching_score_epipolar_threshold)
                    ]
                else:
                    ms_list += [0]

            # Store one score per visibility threshold
            vis_label = '' if th is None else '_th_{}'.format(th)
            values[key_stage].append(
                float(np.mean(ms_list) if len(ms_list) > 0 else 0))
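# Illustrative sketch (editor's example, not part of the benchmark): the
# matching score for one stage is the fraction of matches whose epipolar error
# falls below the threshold, averaged over all pairs, as in the loop above.
# The pair keys and error values here are hypothetical.
def _example_epipolar_matching_score(th=1e-3):
    import numpy as np
    epipolar_err = {'img_a-img_b': np.array([1e-4, 5e-3, 2e-4]),
                    'img_a-img_c': np.array([])}  # pairs without matches count as 0
    ms_list = [np.mean(v < th) if len(v) > 0 else 0 for v in epipolar_err.values()]
    return float(np.mean(ms_list))  # (2/3 + 0) / 2 = 1/3 here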
def main(cfg):
    '''Main function to compute model.

    Parameters
    ----------
    cfg: Namespace
        Configurations for running this part of the code.
    '''

    if os.path.exists(get_geom_file(cfg)):
        print(' -- already exists, skipping model computation')
        return

    # Load keypoints and matches
    keypoints_dict = load_h5(get_kp_file(cfg))
    matches_dict = load_h5(get_filter_match_file_for_computing_model(cfg))

    # Compute geometric models
    print('Computing model')
    num_cores = cfg.num_opencv_threads if cfg.num_opencv_threads > 0 else int(
        len(os.sched_getaffinity(0)) * 0.9)

    # Load camera information
    data_dir = get_data_path(cfg)
    images_list = get_fullpath_list(data_dir, 'images')
    image_names = get_item_name_list(images_list)
    calib_list = get_fullpath_list(data_dir, 'calibration')
    calib_dict = load_calib(calib_list)
    pairs_per_th = get_pairs_per_threshold(data_dir)

    # Load optional per-keypoint data (descriptors, affine shapes, orientations,
    # scales), falling back to empty defaultdicts if a file is missing
    try:
        desc_dict = defaultdict(list)
        desc_dict1 = load_h5(get_desc_file(cfg))
        for k, v in desc_dict1.items():
            desc_dict[k] = v
    except Exception:
        desc_dict = defaultdict(list)

    try:
        aff_dict = defaultdict(list)
        aff_dict1 = load_h5(get_affine_file(cfg))
        for k, v in aff_dict1.items():
            aff_dict[k] = v
    except Exception:
        aff_dict = defaultdict(list)

    try:
        ori_dict = defaultdict(list)
        ori_dict1 = load_h5(get_angle_file(cfg))
        for k, v in ori_dict1.items():
            ori_dict[k] = v
    except Exception:
        ori_dict = defaultdict(list)

    try:
        scale_dict = defaultdict(list)
        scale_dict1 = load_h5(get_scale_file(cfg))
        for k, v in scale_dict1.items():
            scale_dict[k] = v
    except Exception:
        scale_dict = defaultdict(list)

    random.shuffle(pairs_per_th['0.0'])
    result = Parallel(n_jobs=num_cores)(delayed(compute_model)(
        cfg, np.asarray(matches_dict[pair]),
        np.asarray(keypoints_dict[pair.split('-')[0]]),
        np.asarray(keypoints_dict[pair.split('-')[1]]),
        calib_dict[pair.split('-')[0]], calib_dict[pair.split('-')[1]],
        images_list[image_names.index(pair.split('-')[0])],
        images_list[image_names.index(pair.split('-')[1])],
        np.asarray(scale_dict[pair.split('-')[0]]),
        np.asarray(scale_dict[pair.split('-')[1]]),
        np.asarray(ori_dict[pair.split('-')[0]]),
        np.asarray(ori_dict[pair.split('-')[1]]),
        np.asarray(aff_dict[pair.split('-')[0]]),
        np.asarray(aff_dict[pair.split('-')[1]]),
        np.asarray(desc_dict[pair.split('-')[0]]),
        np.asarray(desc_dict[pair.split('-')[1]]))
        for pair in tqdm(pairs_per_th['0.0']))

    # Make model dictionary
    model_dict = {}
    inl_dict = {}
    timings_list = []
    for i, pair in enumerate(pairs_per_th['0.0']):
        model_dict[pair] = result[i][0]
        inl_dict[pair] = result[i][1]
        timings_list.append(result[i][2])

    # Check model directory
    if not os.path.exists(get_geom_path(cfg)):
        os.makedirs(get_geom_path(cfg))

    # Finally save packed models
    save_h5(model_dict, get_geom_file(cfg))
    save_h5(inl_dict, get_geom_inl_file(cfg))

    # Save computational cost
    save_h5({'cost': np.mean(timings_list)}, get_geom_cost_file(cfg))
    print('Geometry cost (averaged over image pairs): {:0.2f} sec'.format(
        np.mean(timings_list)))
def main(cfg):
    '''Visualization of stereo keypoints and matches.

    Parameters
    ----------
    cfg: Namespace
        Configurations for running this part of the code.
    '''

    # Files should not be named to prevent (easy) abuse
    # Instead we use 0, ..., cfg.num_viz_stereo_pairs
    viz_folder_hq, viz_folder_lq = get_stereo_viz_folder(cfg)

    print(' -- Visualizations, stereo: "{}/{}"'.format(cfg.dataset, cfg.scene))
    t_start = time()

    # Load deprecated images list
    deprecated_images_all = load_json(cfg.json_deprecated_images)
    if cfg.dataset in deprecated_images_all and cfg.scene in deprecated_images_all[
            cfg.dataset]:
        deprecated_images = deprecated_images_all[cfg.dataset][cfg.scene]
    else:
        deprecated_images = []

    # Load keypoints, matches and errors
    keypoints_dict = load_h5_valid_image(get_kp_file(cfg), deprecated_images)
    matches_dict = load_h5_valid_image(get_match_file(cfg), deprecated_images)
    ransac_inl_dict = load_h5_valid_image(get_geom_inl_file(cfg),
                                          deprecated_images)

    # Hacky: We need to recompute the errors, loading only for the keys
    data_dir = get_data_path(cfg)
    pairs_all = get_pairs_per_threshold(data_dir)['0.1']
    pairs = []
    for pair in pairs_all:
        if all([key not in deprecated_images for key in pair.split('-')]):
            pairs += [pair]

    # Create results folder if it does not exist
    if not os.path.exists(viz_folder_hq):
        os.makedirs(viz_folder_hq)
    if not os.path.exists(viz_folder_lq):
        os.makedirs(viz_folder_lq)

    # Sort alphabetically and pick different images
    sorted_keys = sorted(pairs)
    picked = []
    pairs = []
    for pair in sorted_keys:
        fn1, fn2 = pair.split('-')
        if fn1 not in picked and fn2 not in picked:
            picked += [fn1, fn2]
            pairs += [pair]
        if len(pairs) == cfg.num_viz_stereo_pairs:
            break

    # Load depth maps
    depth = {}
    if cfg.dataset != 'googleurban':
        for pair in pairs:
            files = pair.split('-')
            for f in files:
                if f not in depth:
                    depth[f] = load_depth(
                        os.path.join(data_dir, 'depth_maps',
                                     '{}.h5'.format(f)))

    # Generate and save the images
    for i, pair in enumerate(pairs):
        # Load metadata
        fn1, fn2 = pair.split('-')
        calib_dict = load_calib([
            os.path.join(data_dir, 'calibration',
                         'calibration_{}.h5'.format(fn1)),
            os.path.join(data_dir, 'calibration',
                         'calibration_{}.h5'.format(fn2))
        ])
        calc1 = calib_dict[fn1]
        calc2 = calib_dict[fn2]
        inl = ransac_inl_dict[pair]

        # Get keypoints
        kp1 = keypoints_dict[fn1]
        kp2 = keypoints_dict[fn2]

        # Normalize keypoints
        kp1n = normalize_keypoints(kp1, calc1['K'])
        kp2n = normalize_keypoints(kp2, calc2['K'])

        # Get {R, t} from calibration information
        R_1, t_1 = calc1['R'], calc1['T'].reshape((3, 1))
        R_2, t_2 = calc2['R'], calc2['T'].reshape((3, 1))

        # Compute dR, dt
        dR = np.dot(R_2, R_1.T)
        dT = t_2 - np.dot(dR, t_1)

        if cfg.dataset == 'phototourism':
            # Get depth for keypoints (clip to valid image coordinates)
            kp1_int = np.round(kp1).astype(int)
            kp2_int = np.round(kp2).astype(int)
            kp1_int[:, 1] = np.clip(kp1_int[:, 1], 0, depth[fn1].shape[0] - 1)
            kp1_int[:, 0] = np.clip(kp1_int[:, 0], 0, depth[fn1].shape[1] - 1)
            kp2_int[:, 1] = np.clip(kp2_int[:, 1], 0, depth[fn2].shape[0] - 1)
            kp2_int[:, 0] = np.clip(kp2_int[:, 0], 0, depth[fn2].shape[1] - 1)
            d1 = np.expand_dims(depth[fn1][kp1_int[:, 1], kp1_int[:, 0]],
                                axis=-1)
            d2 = np.expand_dims(depth[fn2][kp2_int[:, 1], kp2_int[:, 0]],
                                axis=-1)

            # Project with depth
            kp1n_p, kp2n_p = get_projected_kp(kp1n, kp2n, d1, d2, dR, dT)
            kp1_p = unnormalize_keypoints(kp1n_p, calc2['K'])
            kp2_p = unnormalize_keypoints(kp2n_p, calc1['K'])

            # Re-index keypoints from matches
            kp1_inl = kp1[inl[0]]
            kp2_inl = kp2[inl[1]]
            kp1_p_inl = kp1_p[inl[0]]
            kp2_p_inl = kp2_p[inl[1]]
            kp1n_inl = kp1n[inl[0]]
            kp2n_inl = kp2n[inl[1]]
            kp1n_p_inl = kp1n_p[inl[0]]
            kp2n_p_inl = kp2n_p[inl[1]]
            d1_inl = d1[inl[0]]
            d2_inl = d2[inl[1]]

            # Filter out keypoints with invalid depth
            nonzero_index = np.nonzero(np.squeeze(d1_inl * d2_inl))
            zero_index = np.where(np.squeeze(d1_inl * d2_inl) == 0)[0]
            kp1_inl_nonzero = kp1_inl[nonzero_index]
            kp2_inl_nonzero = kp2_inl[nonzero_index]
            kp1_p_inl_nonzero = kp1_p_inl[nonzero_index]
            kp2_p_inl_nonzero = kp2_p_inl[nonzero_index]
            kp1n_inl_nonzero = kp1n_inl[nonzero_index]
            kp2n_inl_nonzero = kp2n_inl[nonzero_index]
            kp1n_p_inl_nonzero = kp1n_p_inl[nonzero_index]
            kp2n_p_inl_nonzero = kp2n_p_inl[nonzero_index]

            # Compute symmetric distance using the depth image
            d = get_truesym(kp1_inl_nonzero, kp2_inl_nonzero,
                            kp1_p_inl_nonzero, kp2_p_inl_nonzero)
        else:
            # All points are valid for computing the epipolar distance.
            zero_index = []

            # Compute symmetric epipolar distance for every match.
            kp1_inl_nonzero = kp1[inl[0]]
            kp2_inl_nonzero = kp2[inl[1]]
            kp1n_inl_nonzero = kp1n[inl[0]]
            kp2n_inl_nonzero = kp2n[inl[1]]
            # d = np.zeros(inl.shape[1])
            d = get_episym(kp1n_inl_nonzero, kp2n_inl_nonzero, dR, dT)

        # Build the composite canvas
        im, v_offset, h_offset = build_composite_image(
            os.path.join(
                data_dir, 'images',
                fn1 + ('.png' if cfg.dataset == 'googleurban' else '.jpg')),
            os.path.join(
                data_dir, 'images',
                fn2 + ('.png' if cfg.dataset == 'googleurban' else '.jpg')),
            margin=5,
            axis=1 if (not cfg.viz_composite_vert
                       or cfg.dataset == 'googleurban'
                       or cfg.dataset == 'pragueparks') else 0)

        plt.figure(figsize=(10, 10))
        plt.imshow(im)
        linewidth = 2

        # Plot matches on points without valid depth
        for idx in range(len(zero_index)):
            plt.plot((kp1_inl[zero_index[idx], 0] + h_offset[0],
                      kp2_inl[zero_index[idx], 0] + h_offset[1]),
                     (kp1_inl[zero_index[idx], 1] + v_offset[0],
                      kp2_inl[zero_index[idx], 1] + v_offset[1]),
                     color='b',
                     linewidth=linewidth)

        # Plot matches
        # Points are normalized by the focals, which are on average ~670.
        max_dist = 5
        if cfg.dataset == 'googleurban':
            max_dist = 2e-4
        if cfg.dataset == 'pragueparks':
            max_dist = 2e-4
        cmap = matplotlib.cm.get_cmap('summer')
        order = list(range(len(d)))
        random.shuffle(order)
        for idx in order:
            if d[idx] <= max_dist:
                min_val = 0
                max_val = 255 - min_val
                col = cmap(
                    int(max_val * (1 - (max_dist - d[idx]) / max_dist) +
                        min_val))
                # col = cmap(255 * (max_dist - d[idx]) / max_dist)
            else:
                col = 'r'
            plt.plot((kp1_inl_nonzero[idx, 0] + h_offset[0],
                      kp2_inl_nonzero[idx, 0] + h_offset[1]),
                     (kp1_inl_nonzero[idx, 1] + v_offset[0],
                      kp2_inl_nonzero[idx, 1] + v_offset[1]),
                     color=col,
                     linewidth=linewidth)

        plt.tight_layout()
        plt.axis('off')
        viz_file_hq = os.path.join(viz_folder_hq, '{:05d}.png'.format(i))
        viz_file_lq = os.path.join(viz_folder_lq, '{:05d}.jpg'.format(i))
        plt.savefig(viz_file_hq, bbox_inches='tight')

        # Convert with ImageMagick
        os.system('convert -quality 75 -resize \"500>\" {} {}'.format(
            viz_file_hq, viz_file_lq))

        plt.close()

    print('Done [{:.02f} s.]'.format(time() - t_start))
def main(cfg):
    '''Main function to evaluate stereo performance.

    Parameters
    ----------
    cfg: Namespace
        Configurations for running this part of the code.
    '''

    # Get data directory
    data_dir = get_data_path(cfg)

    # Load pre-computed pairs with the new visibility criteria
    pairs_per_th = get_pairs_per_threshold(data_dir)

    # Check if all files exist
    if is_stereo_complete(cfg):
        print(' -- already exists, skipping stereo eval')
        return

    # Load keypoints and matches
    keypoints_dict = load_h5(get_kp_file(cfg))
    matches_dict = load_h5(get_match_file(cfg))
    geom_dict = load_h5(get_geom_file(cfg))
    geom_inl_dict = load_h5(get_geom_inl_file(cfg))
    filter_matches_dict = load_h5(get_filter_match_file(cfg))

    # Load visibility and images
    images_list = get_fullpath_list(data_dir, 'images')
    vis_list = get_fullpath_list(data_dir, 'visibility')
    if cfg.dataset != 'googleurban':
        depth_maps_list = get_fullpath_list(data_dir, 'depth_maps')
    image_names = get_item_name_list(images_list)

    # Load camera information
    calib_list = get_fullpath_list(data_dir, 'calibration')
    calib_dict = load_calib(calib_list)

    # Generate all possible pairs
    print('Generating list of all possible pairs')
    pairs = compute_image_pairs(vis_list, len(image_names), cfg.vis_th)
    print('Old pairs with the point-based visibility threshold: {} '
          '(for compatibility)'.format(len(pairs)))
    for k, v in pairs_per_th.items():
        print('New pairs at visibility threshold {}: {}'.format(k, len(v)))

    # Evaluate each stereo pair in parallel
    # Compute it for all pairs (i.e. visibility threshold 0)
    print('Compute stereo metrics for all pairs')
    #num_cores = int(multiprocessing.cpu_count() * 0.9)
    num_cores = int(len(os.sched_getaffinity(0)) * 0.9)
    result = Parallel(n_jobs=num_cores)(delayed(compute_stereo_metrics_from_E)(
        images_list[image_names.index(pair.split('-')[0])],
        images_list[image_names.index(pair.split('-')[1])],
        depth_maps_list[image_names.index(pair.split('-')[0])]
        if cfg.dataset != 'googleurban' else None,
        depth_maps_list[image_names.index(pair.split('-')[1])]
        if cfg.dataset != 'googleurban' else None,
        np.asarray(keypoints_dict[pair.split('-')[0]]),
        np.asarray(keypoints_dict[pair.split('-')[1]]),
        calib_dict[pair.split('-')[0]], calib_dict[pair.split('-')[1]],
        geom_dict[pair], matches_dict[pair], filter_matches_dict[pair],
        geom_inl_dict[pair], cfg) for pair in tqdm(pairs_per_th['0.0']))

    # Convert previous visibility list to strings
    old_keys = []
    for pair in pairs:
        old_keys.append('{}-{}'.format(image_names[pair[0]],
                                       image_names[pair[1]]))

    # Extract scores, err_q, err_t from results
    all_keys = pairs_per_th['0.0']
    err_dict, rep_s_dict = {}, {}
    geo_s_dict_pre_match, geo_s_dict_refined_match, \
        geo_s_dict_final_match = {}, {}, {}
    true_s_dict_pre_match, true_s_dict_refined_match, \
        true_s_dict_final_match = {}, {}, {}
    for i in range(len(result)):
        if all_keys[i] in old_keys:
            if result[i][5]:
                geo_s_dict_pre_match[
                    all_keys[i]] = result[i][0][0] if result[i][0] else None
                geo_s_dict_refined_match[
                    all_keys[i]] = result[i][0][1] if result[i][0] else None
                geo_s_dict_final_match[
                    all_keys[i]] = result[i][0][2] if result[i][0] else None
                true_s_dict_pre_match[
                    all_keys[i]] = result[i][1][0] if result[i][1] else None
                true_s_dict_refined_match[
                    all_keys[i]] = result[i][1][1] if result[i][1] else None
                true_s_dict_final_match[
                    all_keys[i]] = result[i][1][2] if result[i][1] else None
                err_q = result[i][2]
                err_t = result[i][3]
                rep_s_dict[all_keys[i]] = result[i][4]
                err_dict[all_keys[i]] = [err_q, err_t]
    print('Aggregating results for the old visibility constraint: '
          '{}/{}'.format(len(geo_s_dict_pre_match), len(result)))

    # Repeat with the new visibility thresholds
    err_dict_th, rep_s_dict_th = {}, {}
    geo_s_dict_pre_match_th, geo_s_dict_refined_match_th, \
        geo_s_dict_final_match_th = {}, {}, {}
    true_s_dict_pre_match_th, true_s_dict_refined_match_th, \
        true_s_dict_final_match_th = {}, {}, {}
    for th, cur_pairs in pairs_per_th.items():
        _err_dict, _rep_s_dict = {}, {}
        _geo_s_dict_pre_match, _geo_s_dict_refined_match, \
            _geo_s_dict_final_match = {}, {}, {}
        _true_s_dict_pre_match, _true_s_dict_refined_match, \
            _true_s_dict_final_match = {}, {}, {}
        for i in range(len(all_keys)):
            if len(cur_pairs) > 0 and all_keys[i] in cur_pairs:
                if result[i][5]:
                    _geo_s_dict_pre_match[all_keys[
                        i]] = result[i][0][0] if result[i][0] else None
                    _geo_s_dict_refined_match[all_keys[
                        i]] = result[i][0][1] if result[i][0] else None
                    _geo_s_dict_final_match[all_keys[
                        i]] = result[i][0][2] if result[i][0] else None
                    _true_s_dict_pre_match[all_keys[
                        i]] = result[i][1][0] if result[i][1] else None
                    _true_s_dict_refined_match[all_keys[
                        i]] = result[i][1][1] if result[i][1] else None
                    _true_s_dict_final_match[all_keys[
                        i]] = result[i][1][2] if result[i][1] else None
                    err_q = result[i][2]
                    err_t = result[i][3]
                    _rep_s_dict[
                        all_keys[i]] = result[i][4] if result[i][4] else None
                    _err_dict[all_keys[i]] = [err_q, err_t]
        geo_s_dict_pre_match_th[th] = _geo_s_dict_pre_match
        geo_s_dict_refined_match_th[th] = _geo_s_dict_refined_match
        geo_s_dict_final_match_th[th] = _geo_s_dict_final_match
        true_s_dict_pre_match_th[th] = _true_s_dict_pre_match
        true_s_dict_refined_match_th[th] = _true_s_dict_refined_match
        true_s_dict_final_match_th[th] = _true_s_dict_final_match
        err_dict_th[th] = _err_dict
        rep_s_dict_th[th] = _rep_s_dict
        print('Aggregating results for threshold "{}": {}/{}'.format(
            th, len(geo_s_dict_pre_match_th[th]), len(result)))

    # Create results folder if it does not exist
    if not os.path.exists(get_stereo_path(cfg)):
        os.makedirs(get_stereo_path(cfg))

    # Finally, save packed scores and errors
    if cfg.dataset != 'googleurban':
        save_h5(geo_s_dict_pre_match, get_stereo_epipolar_pre_match_file(cfg))
        save_h5(geo_s_dict_refined_match,
                get_stereo_epipolar_refined_match_file(cfg))
        save_h5(geo_s_dict_final_match,
                get_stereo_epipolar_final_match_file(cfg))
        save_h5(true_s_dict_pre_match,
                get_stereo_depth_projection_pre_match_file(cfg))
        save_h5(true_s_dict_refined_match,
                get_stereo_depth_projection_refined_match_file(cfg))
        save_h5(true_s_dict_final_match,
                get_stereo_depth_projection_final_match_file(cfg))
        save_h5(rep_s_dict, get_repeatability_score_file(cfg))
    save_h5(err_dict, get_stereo_pose_file(cfg))

    for th in pairs_per_th:
        if cfg.dataset != 'googleurban':
            save_h5(geo_s_dict_pre_match_th[th],
                    get_stereo_epipolar_pre_match_file(cfg, th))
            save_h5(geo_s_dict_refined_match_th[th],
                    get_stereo_epipolar_refined_match_file(cfg, th))
            save_h5(geo_s_dict_final_match_th[th],
                    get_stereo_epipolar_final_match_file(cfg, th))
            save_h5(true_s_dict_pre_match_th[th],
                    get_stereo_depth_projection_pre_match_file(cfg, th))
            save_h5(true_s_dict_refined_match_th[th],
                    get_stereo_depth_projection_refined_match_file(cfg, th))
            save_h5(true_s_dict_final_match_th[th],
                    get_stereo_depth_projection_final_match_file(cfg, th))
            save_h5(rep_s_dict_th[th], get_repeatability_score_file(cfg, th))
        save_h5(err_dict_th[th], get_stereo_pose_file(cfg, th))
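# Readability sketch (editor's note, inferred from the aggregation loops above,
# not an upstream API): how each entry of `result` returned by
# compute_stereo_metrics_from_E appears to be laid out.
def _unpack_stereo_result(entry):
    geo_scores, true_scores, err_q, err_t, rep_score, success = entry
    # geo_scores:  per-stage epipolar errors (pre_match, refined_match, final_match), possibly empty
    # true_scores: per-stage depth-projection errors for the same three stages, possibly empty
    # err_q, err_t: rotation and translation errors (radians)
    # rep_score:   repeatability scores, one per pixel threshold
    # success:     True if the pair was evaluated successfully
    return geo_scores, true_scores, err_q, err_t, rep_score, success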