def get_best_colmap_index(cfg):
    '''Determines the colmap model with the most images if there is more
    than one.

    Parameters
    ----------
    cfg: Namespace
        Configuration used to locate the colmap output directory.

    Returns
    -------
    int
        Index (sub-directory name) of the reconstruction with the most
        registered images, or -1 if no valid reconstruction exists.
    '''

    colmap_output_path = get_colmap_output_path(cfg)

    # First find the colmap reconstruction with the most number of images.
    best_index, best_num_images = -1, 0

    # Check all valid sub reconstructions (each numbered sub-directory is
    # one candidate model).
    if os.path.exists(colmap_output_path):
        idx_list = [
            _d for _d in os.listdir(colmap_output_path)
            if os.path.isdir(os.path.join(colmap_output_path, _d))
        ]
    else:
        idx_list = []

    for cur_index in idx_list:
        colmap_img_file = os.path.join(colmap_output_path, cur_index,
                                       'images.bin')
        # Check validity BEFORE parsing, so a missing or corrupt
        # images.bin cannot crash the scan (the original read the file
        # first and only then checked validity).
        if not is_colmap_img_valid(colmap_img_file):
            continue
        images_bin = read_images_binary(colmap_img_file)
        # Find the reconstruction with most number of images
        if len(images_bin) > best_num_images:
            best_index = int(cur_index)
            best_num_images = len(images_bin)

    return best_index
def compute_pose_error(cfg):
    '''Computes the error using quaternions and translation vector for
    COLMAP.

    For every image pair in the bag, evaluates the rotation (err_q) and
    translation (err_t) error of the best COLMAP reconstruction against
    the calibration ground truth, and saves the packed errors to the
    colmap pose file. Pairs are assigned infinite error when no COLMAP
    reconstruction exists.

    Parameters
    ----------
    cfg: Namespace
        Configuration used to locate images, calibration and results.
    '''

    if os.path.exists(get_colmap_pose_file(cfg)):
        print(' -- already exists, skipping COLMAP eval')
        return

    # Load visiblity and images
    image_path_list = get_colmap_image_path_list(cfg)
    subset_index = get_colmap_image_subset_index(cfg, image_path_list)
    image_name_list = get_item_name_list(image_path_list)

    # Load camera information
    data_dir = get_data_path(cfg)
    calib_list = get_fullpath_list(data_dir, 'calibration')
    calib_dict = load_calib(calib_list, subset_index)

    # Generate all possible pairs from all images
    pair_list = [[ii, jj] for ii in range(len(image_path_list))
                 for jj in range(ii + 1, len(image_path_list))]

    # Check if colmap results exist. Otherwise, this whole bag is a fail.
    colmap_output_path = get_colmap_output_path(cfg)
    is_colmap_valid = os.path.exists(os.path.join(colmap_output_path, '0'))

    if is_colmap_valid:
        # Find the best colmap reconstruction
        best_index = get_best_colmap_index(cfg)

        print('Computing pose errors')
        #num_cores = int(multiprocessing.cpu_count() * 0.9)
        # Use ~90% of the cores available to this process, but never less
        # than one -- int(1 * 0.9) == 0 and Parallel(n_jobs=0) raises.
        num_cores = max(1, int(len(os.sched_getaffinity(0)) * 0.9))
        result = Parallel(n_jobs=num_cores)(
            delayed(compute_stereo_metrics_from_colmap)(
                image_path_list[pair[0]], image_path_list[pair[1]],
                calib_dict[image_name_list[pair[0]]],
                calib_dict[image_name_list[pair[1]]], best_index, cfg)
            for pair in tqdm(pair_list))

    # Collect err_q, err_t from results; pairs get infinite error when
    # colmap produced no reconstruction at all.
    err_dict = {}
    for _i in range(len(pair_list)):
        pair = pair_list[_i]
        if is_colmap_valid:
            err_q = result[_i][0]
            err_t = result[_i][1]
        else:
            err_q = np.inf
            err_t = np.inf
        err_dict[image_name_list[pair[0]] + '-' +
                 image_name_list[pair[1]]] = [err_q, err_t]

    # Finally, save packed errors
    save_h5(err_dict, get_colmap_pose_file(cfg))
def compute_stereo_metrics_from_colmap(img1, img2, calib1, calib2, best_index,
                                       cfg):
    '''Computes (pairwise) error metrics from Colmap results.

    Looks up the estimated poses of `img1` and `img2` in the best COLMAP
    reconstruction, forms the estimated relative pose, and compares it to
    the ground-truth relative pose from the calibration dictionaries.
    Returns (err_q, err_t); both are np.inf when either image is not
    registered in the reconstruction.
    '''

    # Read images.bin for the best reconstruction
    colmap_output_path = get_colmap_output_path(cfg)
    images_bin = read_images_binary(
        os.path.join(colmap_output_path, str(best_index), 'images.bin'))

    # Find the registered pose of each image by file name
    name1 = os.path.basename(img1)
    name2 = os.path.basename(img2)
    R_1_actual = t_1_actual = None
    R_2_actual = t_2_actual = None
    for image in images_bin.values():
        if image.name == name1:
            R_1_actual = qvec2rotmat(image.qvec)
            t_1_actual = image.tvec
        if image.name == name2:
            R_2_actual = qvec2rotmat(image.qvec)
            t_2_actual = image.tvec

    # Default to infinite error; only evaluate when both poses were found
    err_q, err_t = np.inf, np.inf
    found = (R_1_actual, t_1_actual, R_2_actual, t_2_actual)
    if all(item is not None for item in found):
        # Estimated relative pose (dR, dt)
        dR_act = np.dot(R_2_actual, R_1_actual.T)
        dt_act = t_2_actual - np.dot(dR_act, t_1_actual)

        # Ground-truth relative pose from calibration
        R_1, t_1 = calib1['R'], calib1['T'].reshape((3, 1))
        R_2, t_2 = calib2['R'], calib2['T'].reshape((3, 1))
        dR = np.dot(R_2, R_1.T)
        dt = t_2 - np.dot(dR, t_1)

        # Compare estimated vs ground-truth relative pose
        err_q, err_t = evaluate_R_t(dR, dt, dR_act, dt_act)

    return err_q, err_t
def run_colmap_for_bag(cfg):
    '''Runs colmap to retrieve poses for each bag.

    Exports keypoints and filtered matches for every image in the bag to
    COLMAP's text import format, runs the COLMAP feature/match importers
    and the mapper, and validates the resulting reconstruction(s). On any
    subprocess failure both the temp and output directories are removed
    and a RuntimeError is raised.

    Parameters
    ----------
    cfg: Namespace
        Configuration (scene, bag size/id, paths, colmap settings).
    '''

    # Colmap pose file already exists, skip the session
    if os.path.exists(get_colmap_pose_file(cfg)):
        print(' -- already exists, skipping COLMAP eval')
        return

    # Load keypoints and matches
    keypoints_dict = load_h5(get_kp_file(cfg))
    matches_dict = load_h5(get_filter_match_file(cfg))

    print('Running COLMAP on "{}", bagsize {} -- bag {}'.format(
        cfg.scene, cfg.bag_size, cfg.bag_id))

    # Additional sanity check to account for crash -- in this case colmap
    # temp directory can exist. This is an indication that you need to
    # remove results and rerun colmap.
    colmap_temp_path = get_colmap_temp_path(cfg)
    colmap_output_path = get_colmap_output_path(cfg)
    if os.path.exists(colmap_temp_path):
        print(' -- temp path exists - cleaning up from crash')
        rmtree(colmap_temp_path)
        if os.path.exists(colmap_output_path):
            rmtree(colmap_output_path)
        if os.path.exists(get_colmap_pose_file(cfg)):
            os.remove(get_colmap_pose_file(cfg))

    # Check existance of colmap result and terminate if already exists.
    if os.path.exists(colmap_output_path):
        print(' -- already exists, skipping COLMAP session')
        return

    # Create output directory
    os.makedirs(colmap_output_path)

    # Create colmap temporary directory and copy files over. Remove
    # anything that might have existed.
    if os.path.exists(colmap_temp_path):
        rmtree(colmap_temp_path)

    # Make sure old data is gone and create a new temp folder
    assert (not os.path.exists(colmap_temp_path))
    os.makedirs(colmap_temp_path)

    # Create colmap-friendy structures
    os.makedirs(os.path.join(colmap_temp_path, 'images'))
    os.makedirs(os.path.join(colmap_temp_path, 'features'))

    # Get list of all images in this bag
    image_subset_list = get_colmap_image_path_list(cfg)
    subset_index = get_colmap_image_subset_index(cfg, image_subset_list)

    # Copy images
    for _src in image_subset_list:
        _dst = os.path.join(colmap_temp_path, 'images',
                            os.path.basename(_src))
        copyfile(_src, _dst)

    # Write features to colmap friendly format. The descriptor content is
    # a constant all-zero 128-dim vector, so build it once instead of
    # re-joining it for every keypoint.
    zero_desc = ' '.join(str(0) for _ in range(128))
    for image_path in image_subset_list:
        # Retrieve image name, with and without extension
        image_name = os.path.basename(image_path)
        image_name_no_ext = os.path.splitext(image_name)[0]

        # Read keypoint
        keypoints = keypoints_dict[image_name_no_ext]

        # Keypoint file to write to
        kp_file = os.path.join(colmap_temp_path, 'features',
                               image_name + '.txt')

        # Open a file to write
        with open(kp_file, 'w') as f:
            # Retieve the number of keypoints
            len_keypoints = len(keypoints)
            f.write(str(len_keypoints) + ' ' + str(128) + '\n')
            for i in range(len_keypoints):
                # Only the first four keypoint values (x, y, scale,
                # orientation) are exported.
                kp = ' '.join(str(k) for k in keypoints[i][:4])
                f.write(kp + ' ' + zero_desc + '\n')

    # Write matches to colmap friendly format
    # Read visibilties
    data_dir = get_data_path(cfg)
    vis_list = get_fullpath_list(data_dir, 'visibility')

    # Load matches and store them to a text file
    # TODO: This seems to be done multiple times. Do we need to do this?
    print('Generate list of all possible pairs')
    pairs = compute_image_pairs(vis_list, len(image_subset_list), cfg.vis_th,
                                subset_index)
    print('{} pairs generated'.format(len(pairs)))

    # Write to match file (the with-block closes the file; the original's
    # extra f.close() was redundant and has been dropped)
    match_file = os.path.join(colmap_temp_path, 'matches.txt')
    with open(match_file, 'w') as f:
        for pair in pairs:
            image_1_name = os.path.basename(image_subset_list[pair[0]])
            image_2_name = os.path.basename(image_subset_list[pair[1]])
            image_1_name_no_ext = os.path.splitext(image_1_name)[0]
            image_2_name_no_ext = os.path.splitext(image_2_name)[0]

            # Load matches
            key = '-'.join([image_1_name_no_ext, image_2_name_no_ext])
            matches = np.squeeze(matches_dict[key])

            # only write when matches are given
            if matches.ndim == 2:
                f.write(image_1_name + ' ' + image_2_name + '\n')
                for _i in range(matches.shape[1]):
                    f.write(
                        str(matches[0, _i]) + ' ' + str(matches[1, _i]) +
                        '\n')
                f.write('\n')

    # All three COLMAP stages share the same database file.
    database_path = os.path.join(colmap_output_path, 'databases.db')

    # COLMAP runs -- wrapped in try except to throw errors if subprocess
    # fails and then clean up the colmap temp directory
    try:
        print('COLMAP Feature Import')
        cmd = ['colmap', 'feature_importer']
        cmd += ['--database_path', database_path]
        cmd += ['--image_path', os.path.join(colmap_temp_path, 'images')]
        cmd += ['--import_path', os.path.join(colmap_temp_path, 'features')]
        colmap_res = subprocess.run(cmd)
        if colmap_res.returncode != 0:
            raise RuntimeError(' -- COLMAP failed to import features!')

        print('COLMAP Match Import')
        cmd = ['colmap', 'matches_importer']
        cmd += ['--database_path', database_path]
        cmd += [
            '--match_list_path',
            os.path.join(colmap_temp_path, 'matches.txt')
        ]
        cmd += ['--match_type', 'raw']
        cmd += ['--SiftMatching.use_gpu', '0']
        colmap_res = subprocess.run(cmd)
        if colmap_res.returncode != 0:
            raise RuntimeError(' -- COLMAP failed to import matches!')

        print('COLMAP Mapper')
        cmd = ['colmap', 'mapper']
        cmd += ['--image_path', os.path.join(colmap_temp_path, 'images')]
        cmd += ['--database_path', database_path]
        cmd += ['--output_path', colmap_output_path]
        cmd += ['--Mapper.min_model_size', str(cfg.colmap_min_model_size)]
        colmap_res = subprocess.run(cmd)
        if colmap_res.returncode != 0:
            raise RuntimeError(' -- COLMAP failed to run mapper!')

        # Delete temp directory after working
        rmtree(colmap_temp_path)
    except Exception as err:
        # Remove colmap output path and temp path
        rmtree(colmap_temp_path)
        rmtree(colmap_output_path)

        # Re-throw error
        print(err)
        raise RuntimeError('Parts of colmap runs returns failed state!')

    print('Checking validity of the colmap run just in case')

    # Check validity of colmap reconstruction for all of them
    is_any_colmap_valid = False
    idx_list = [
        os.path.join(colmap_output_path, _d)
        for _d in os.listdir(colmap_output_path)
        if os.path.isdir(os.path.join(colmap_output_path, _d))
    ]
    for idx in idx_list:
        colmap_img_file = os.path.join(idx, 'images.bin')
        if is_colmap_img_valid(colmap_img_file):
            is_any_colmap_valid = True
            break
    if not is_any_colmap_valid:
        print('Error in reading colmap output -- '
              'removing colmap output directory')
        rmtree(colmap_output_path)
def main(cfg):
    '''Visualization of colmap points.

    For every bag size / bag id, renders each image with its keypoints
    (blue = keypoint registered to a 3D point in the best COLMAP model,
    red = unused) and dumps the best model's 3D points as an ASCII PCD
    file into the visualization folders.

    Parameters
    ----------
    cfg: Namespace
        Configurations for running this part of the code.
    '''

    bag_size_json = load_json(
        getattr(cfg, 'splits_{}_{}'.format(cfg.dataset, cfg.subset)))
    bag_size_list = [b['bag_size'] for b in bag_size_json]
    # NOTE(review): bag_size_num is computed but never used below.
    bag_size_num = [b['num_in_bag'] for b in bag_size_json]

    # # Do not re-run if files already exist -- off for now
    # skip = True
    # for _bag_size in bag_size_list:
    #     cfg_bag = deepcopy(cfg)
    #     cfg_bag.bag_size = _bag_size
    #     viz_folder_hq, viz_folder_lq = get_colmap_viz_folder(cfg_bag)
    #     for _bag_id in range(
    #             getattr(cfg_bag,
    #                     'num_viz_colmap_subsets_bagsize{}'.format(_bag_size))):
    #         if any([
    #                 not os.path.exists(
    #                     os.path.join(
    #                         viz_folder_lq,
    #                         'colmap-bagsize{:d}-bag{:02d}-image{:02d}.jpg'.
    #                         format(_bag_size, _bag_id, i)))
    #                 for i in range(_bag_size)
    #         ]):
    #             skip = False
    #             break
    #         if not os.path.exists(
    #                 os.path.join(
    #                     viz_folder_lq,
    #                     'colmap-bagsize{:d}-bag{:02d}.pcd'.format(
    #                         _bag_size, _bag_id))):
    #             skip = False
    #             break
    # if skip:
    #     print(' -- already exists, skipping colmap visualization')
    #     return

    print(' -- Visualizations, multiview: "{}/{}"'.format(
        cfg.dataset, cfg.scene))
    t_start = time()

    # Create results folder if it does not exist
    for _bag_size in bag_size_list:
        cfg_bag = deepcopy(cfg)
        cfg_bag.bag_size = _bag_size
        viz_folder_hq, viz_folder_lq = get_colmap_viz_folder(cfg_bag)
        if not os.path.exists(viz_folder_hq):
            os.makedirs(viz_folder_hq)
        if not os.path.exists(viz_folder_lq):
            os.makedirs(viz_folder_lq)

    # Load keypoints
    keypoints_dict = load_h5(get_kp_file(cfg))

    # Loop over bag sizes
    for _bag_size in bag_size_list:
        cfg_bag = deepcopy(cfg)
        cfg_bag.bag_size = _bag_size
        num_bags = getattr(
            cfg_bag, 'num_viz_colmap_subsets_bagsize{}'.format(_bag_size))
        for _bag_id in range(num_bags):
            print(
                ' -- Visualizations, multiview: "{}/{}", bag_size={}, bag {}/{}'
                .format(cfg.dataset, cfg.scene, _bag_size, _bag_id + 1,
                        num_bags))

            # Retrieve list of images
            cfg_bag.bag_id = _bag_id
            images_in_bag = get_colmap_image_path_list(cfg_bag)

            # Retrieve reconstruction; best_index == -1 means no valid
            # COLMAP model exists for this bag, and colmap_images stays
            # unbound (guarded by the best_index checks below).
            colmap_output_path = get_colmap_output_path(cfg_bag)
            # is_colmap_valid = os.path.exists(
            #     os.path.join(colmap_output_path, '0'))
            best_index = get_best_colmap_index(cfg_bag)
            if best_index != -1:
                colmap_images = read_images_binary(
                    os.path.join(colmap_output_path, str(best_index),
                                 'images.bin'))
            for i, image_path in enumerate(images_in_bag):
                # Limit to 10 or so, even for bag size 25
                if i >= cfg.max_num_images_viz_multiview:
                    break

                # Load image and keypoints
                im, _ = load_image(image_path,
                                   use_color_image=True,
                                   crop_center=False,
                                   force_rgb=True)
                used = None
                key = os.path.splitext(os.path.basename(image_path))[0]
                if best_index != -1:
                    # NOTE(review): substring match on the image name --
                    # presumably safe because bag images have unique
                    # basenames; verify if names can be prefixes of each
                    # other.
                    for j in colmap_images:
                        if key in colmap_images[j].name:
                            # Mask of keypoints assigned to a 3D point
                            used = colmap_images[j].point3D_ids != -1
                            break
                # No model (or image not registered): mark all keypoints
                # as unused so they are all drawn in red.
                if used is None:
                    used = [False] * keypoints_dict[key].shape[0]
                used = np.array(used)

                fig = plt.figure(figsize=(20, 20))
                plt.imshow(im)
                # Red: keypoints without a 3D point; blue: triangulated.
                plt.plot(keypoints_dict[key][~used, 0],
                         keypoints_dict[key][~used, 1],
                         'r.',
                         markersize=12)
                plt.plot(keypoints_dict[key][used, 0],
                         keypoints_dict[key][used, 1],
                         'b.',
                         markersize=12)
                plt.tight_layout()
                plt.axis('off')

                # TODO Ideally we would save to pdf
                # but it does not work on 16.04, so we do png instead
                # https://bugs.launchpad.net/ubuntu/+source/imagemagick/+bug/1796563
                viz_folder_hq, viz_folder_lq = get_colmap_viz_folder(cfg_bag)
                viz_file_hq = os.path.join(
                    viz_folder_hq,
                    'bagsize{:d}-bag{:02d}-image{:02d}.png'.format(
                        _bag_size, _bag_id, i))
                viz_file_lq = os.path.join(
                    viz_folder_lq,
                    'bagsize{:d}-bag{:02d}-image{:02d}.jpg'.format(
                        _bag_size, _bag_id, i))
                plt.savefig(viz_file_hq, bbox_inches='tight')

                # Convert with imagemagick to a low-quality jpg preview
                os.system('convert -quality 75 -resize \"400>\" {} {}'.format(
                    viz_file_hq, viz_file_lq))
                plt.close()

            if best_index != -1:
                # Dump the 3D points of the best model as an ASCII PCD
                # file, centered at the median and scaled to [-1, 1].
                colmap_points = read_points3d_binary(
                    os.path.join(colmap_output_path, str(best_index),
                                 'points3D.bin'))
                points3d = []
                for k in colmap_points:
                    points3d.append([
                        colmap_points[k].xyz[0], colmap_points[k].xyz[1],
                        colmap_points[k].xyz[2]
                    ])
                points3d = np.array(points3d)
                points3d -= np.median(points3d, axis=0)[None, ...]
                # Small epsilon avoids division by zero for degenerate
                # (all-identical) point sets.
                points3d /= np.abs(points3d).max() + 1e-6
                pcd = os.path.join(
                    get_colmap_viz_folder(cfg_bag)[0],
                    'colmap-bagsize{:d}-bag{:02d}.pcd'.format(
                        _bag_size, _bag_id))
                with open(pcd, 'w') as f:
                    f.write('# .PCD v.7 - Point Cloud Data file format\n')
                    f.write('VERSION .7\n')
                    f.write('FIELDS x y z\n')
                    f.write('SIZE 4 4 4\n')
                    f.write('TYPE F F F\n')
                    f.write('COUNT 1 1 1\n')
                    f.write('WIDTH {}\n'.format(len(colmap_points)))
                    f.write('HEIGHT 1\n')
                    f.write('VIEWPOINT 0 0 0 1 0 0 0\n')
                    f.write('POINTS {}\n'.format(len(colmap_points)))
                    f.write('DATA ascii\n')
                    for p in points3d:
                        f.write('{:.05f} {:.05f} {:.05f}\n'.format(
                            p[0], p[1], p[2]))
                # Mirror the PCD into the low-quality folder as well
                copyfile(
                    os.path.join(
                        get_colmap_viz_folder(cfg_bag)[0],
                        'colmap-bagsize{:d}-bag{:02d}.pcd'.format(
                            _bag_size, _bag_id)),
                    os.path.join(
                        get_colmap_viz_folder(cfg_bag)[1],
                        'colmap-bagsize{:d}-bag{:02d}.pcd'.format(
                            _bag_size, _bag_id)))

    print('done [{:.02f} s.]'.format(time() - t_start))