def run_dataset(data: DataSet, list_path, bundle_path, undistorted):
    """Export a reconstruction in Bundler format.

    Args:
        list_path: destination of the txt list of exported images
        bundle_path: destination of the bundle files
        undistorted: when true, export the undistorted reconstruction
    """
    udata = data.undistorted_dataset()

    # Both outputs default to <data_path>/bundler when not given.
    fallback_dir = os.path.join(data.data_path, "bundler")
    list_file_path = list_path or fallback_dir
    bundle_file_path = bundle_path or fallback_dir

    if undistorted:
        reconstructions = udata.load_undistorted_reconstruction()
        track_manager = udata.load_undistorted_tracks_manager()
        # Only shots of the first component exist in the undistorted data.
        images = reconstructions[0].shots.keys()
    else:
        reconstructions = data.load_reconstruction()
        track_manager = data.load_tracks_manager()
        images = data.images()

    io.export_bundler(
        images, reconstructions, track_manager, bundle_file_path, list_file_path
    )
def run_dataset(data: DataSet, reconstruction, reconstruction_index, tracks, output) -> None:
    """Undistort a reconstruction component and its images.

    Note: the previous docstring ("Export reconstruction to NVM_V3 format
    from VisualSfM") was copy-pasted from another command; this function
    performs undistortion, not NVM export.

    Args:
        reconstruction: reconstruction file to undistort (None for the default)
        reconstruction_index: index of the reconstruction component to undistort
        tracks: tracks graph of the reconstruction
        output: name of the output folder, relative to the dataset path
    """
    undistorted_data_path = os.path.join(data.data_path, output)
    udata = dataset.UndistortedDataSet(data, undistorted_data_path, io_handler=data.io_handler)
    reconstructions = data.load_reconstruction(reconstruction)
    # The tracks graph is optional: undistortion proceeds without it.
    if data.tracks_exists(tracks):
        tracks_manager = data.load_tracks_manager(tracks)
    else:
        tracks_manager = None
    if reconstructions:
        r = reconstructions[reconstruction_index]
        undistort.undistort_reconstruction_and_images(tracks_manager, r, data, udata)
def run_dataset(data: DataSet, diagram_max_points: int = -1) -> None:
    """Compute various statistics of a dataset and write them to the 'stats' folder.

    Args:
        data: dataset object
        diagram_max_points: maximum number of points kept for the heatmap and
            topview diagrams; non-positive values keep every point
    """
    reconstructions = data.load_reconstruction()
    tracks_manager = data.load_tracks_manager()

    output_path = os.path.join(data.data_path, "stats")
    data.io_handler.mkdir_p(output_path)

    stats_dict = stats.compute_all_statistics(data, tracks_manager, reconstructions)

    stats.save_residual_grids(
        data, tracks_manager, reconstructions, output_path, data.io_handler
    )
    stats.save_matchgraph(
        data, tracks_manager, reconstructions, output_path, data.io_handler
    )
    stats.save_residual_histogram(stats_dict, output_path, data.io_handler)

    # Decimation mutates `reconstructions`, so it happens after the residual
    # outputs above and only affects the heatmap and topview below.
    if diagram_max_points > 0:
        stats.decimate_points(reconstructions, diagram_max_points)

    stats.save_heatmap(
        data, tracks_manager, reconstructions, output_path, data.io_handler
    )
    stats.save_topview(
        data, tracks_manager, reconstructions, output_path, data.io_handler
    )

    with data.io_handler.open_wt(os.path.join(output_path, "stats.json")) as fout:
        io.json_dump(stats_dict, fout)
def run_dataset(
    data: DataSet,
    reconstruction: Optional[str] = None,
    reconstruction_index: int = 0,
    tracks: Optional[str] = None,
    output: str = "undistorted",
    skip_images: bool = False,
) -> None:
    """Undistort a reconstruction component and, optionally, its images.

    Note: the previous docstring ("Export reconstruction to NVM_V3 format
    from VisualSfM") was copy-pasted from another command; this function
    performs undistortion, not NVM export.

    Args:
        reconstruction: reconstruction file to undistort (None for the default)
        reconstruction_index: index of the reconstruction component to undistort
        tracks: tracks graph of the reconstruction
        output: name of the output folder, relative to the dataset path
        skip_images: do not undistort images
    """
    undistorted_data_path = os.path.join(data.data_path, output)
    udata = dataset.UndistortedDataSet(data, undistorted_data_path, io_handler=data.io_handler)
    reconstructions = data.load_reconstruction(reconstruction)
    # The tracks graph is optional: undistortion proceeds without it.
    if data.tracks_exists(tracks):
        tracks_manager = data.load_tracks_manager(tracks)
    else:
        tracks_manager = None
    if reconstructions:
        r = reconstructions[reconstruction_index]
        undistort.undistort_reconstruction_with_images(tracks_manager, r, data, udata, skip_images)
def SfM_reconstruct(src):
    """Run OpenSfM incremental reconstruction on the dataset at `src`.

    Bootstraps a reconstruction from each candidate image pair whose images
    are still unplaced, grows it with the remaining images, saves all
    components (largest first) and writes a per-pair report to
    '<src>/reports/reconstruction.json'.

    Args:
        src: path to an OpenSfM dataset directory.
    """
    # Local import: `json` was used below without a visible module-level import.
    import json

    from opensfm.dataset import DataSet
    from opensfm.reconstruction import (
        bootstrap_reconstruction,
        compute_image_pairs,
        grow_reconstruction,
        tracking,
    )

    data = DataSet(src)
    gcp = data.load_ground_control_points()
    tracks = data.load_tracks_manager()
    images = tracks.get_shot_ids()
    if not data.reference_lla_exists():
        data.invent_reference_lla(images)

    camera_priors = data.load_camera_models()
    common_tracks = tracking.all_common_tracks(tracks)
    pairs = compute_image_pairs(common_tracks, camera_priors, data)

    remaining = set(images)
    report = {'candidate_image_pairs': len(pairs)}
    reconstructions = []
    for im1, im2 in pairs:
        # A pair is usable only while both of its images are unplaced.
        if im1 not in remaining or im2 not in remaining:
            continue
        report[im1 + ' & ' + im2] = pair_log = {}
        _, p1, p2 = common_tracks[im1, im2]
        rec, pair_log['bootstrap'] = bootstrap_reconstruction(
            data, tracks, camera_priors, im1, im2, p1, p2
        )
        if rec:
            remaining.remove(im1)
            remaining.remove(im2)
            rec, pair_log['grow'] = grow_reconstruction(
                data, tracks, rec, remaining, camera_priors, gcp
            )
            reconstructions.append(rec)

    # Largest component first (stable for equal sizes, as before).
    reconstructions.sort(key=lambda r: len(r.shots), reverse=True)
    data.save_reconstruction(reconstructions)

    report['not_reconstructed_images'] = list(remaining)
    # Assumes '<src>/reports' already exists -- TODO confirm against callers.
    with open(f'{src}/reports/reconstruction.json', 'w') as f:
        json.dump(report, f, indent=4)
def run_dataset(data: DataSet, points, image_list, output, undistorted):
    """Export reconstruction to PMVS format.

    Args:
        points: export points
        image_list: export only the shots included in this file (path to .txt file)
        output: output pmvs directory
        undistorted: export the undistorted reconstruction
    """
    udata = data.undistorted_dataset()
    base_output_path = output if output else os.path.join(data.data_path, "pmvs")
    io.mkdir_p(base_output_path)
    logger.info(
        "Converting dataset [%s] to PMVS dir [%s]" % (data.data_path, base_output_path)
    )

    if undistorted:
        reconstructions = udata.load_undistorted_reconstruction()
    else:
        reconstructions = data.load_reconstruction()

    # Load tracks for vis.dat. Initialize both names upfront so a failed load
    # cannot leave `tracks_manager` unbound when it is passed to export()
    # below (the previous code only set it inside the try block).
    tracks_manager = None
    image_graph = None
    try:
        if undistorted:
            tracks_manager = udata.load_undistorted_tracks_manager()
        else:
            tracks_manager = data.load_tracks_manager()
        image_graph = tracking.as_weighted_graph(tracks_manager)
    except IOError:
        # Best-effort: exporting works without the visibility graph.
        image_graph = None

    export_only = None
    if image_list:
        export_only = {}
        with open(image_list, "r") as f:
            for image in f:
                export_only[image.strip()] = True

    for h, reconstruction in enumerate(reconstructions):
        export(
            reconstruction,
            h,
            image_graph,
            tracks_manager,
            base_output_path,
            data,
            undistorted,
            udata,
            points,
            export_only,
        )
def check_merge_partial_reconstructions(self):
    """Merge multiple partial OpenSfM reconstructions into a single one.

    When more than one reconstruction component exists, cameras, points and
    shots of every component sharing the first component's reference are
    copied into one merged reconstruction, which then replaces the saved
    reconstruction file. Duplicate points and shots missing from the tracks
    manager are skipped with a warning.
    """
    if self.reconstructed():
        data = DataSet(self.opensfm_project_path)
        reconstructions = data.load_reconstruction()
        tracks_manager = data.load_tracks_manager()

        if len(reconstructions) > 1:
            log.ODM_WARNING(
                "Multiple reconstructions detected (%s), this might be an indicator that some areas did not have sufficient overlap" % len(reconstructions))
            log.ODM_INFO("Attempting merge")

            merged = Reconstruction()
            merged.set_reference(reconstructions[0].reference)

            for ix_r, rec in enumerate(reconstructions):
                if merged.reference != rec.reference:
                    # Should never happen
                    continue

                log.ODM_INFO("Merging reconstruction %s" % ix_r)

                for camera in rec.cameras.values():
                    merged.add_camera(camera)

                for point in rec.points.values():
                    try:
                        new_point = merged.create_point(point.id, point.coordinates)
                        new_point.color = point.color
                    except RuntimeError as e:
                        # Bug fix: this previously logged `shot.id`, which is
                        # undefined in this loop (NameError) and mislabeled
                        # the failing entity as a shot.
                        log.ODM_WARNING("Cannot merge point id %s (%s)" % (point.id, str(e)))
                        continue

                for shot in rec.shots.values():
                    merged.add_shot(shot)
                    try:
                        obsdict = tracks_manager.get_shot_observations(shot.id)
                    except RuntimeError:
                        log.ODM_WARNING("Shot id %s missing from tracks_manager!" % shot.id)
                        continue
                    for track_id, obs in obsdict.items():
                        if track_id in merged.points:
                            merged.add_observation(shot.id, track_id, obs)

            data.save_reconstruction([merged])
def run_dataset(data: DataSet, no_cameras, no_points, depthmaps, point_num_views):
    """Export reconstruction to PLY format.

    Args:
        no_cameras: do not save camera positions
        no_points: do not save points
        depthmaps: export per-image depthmaps as pointclouds
        point_num_views: export the number of views associated with each point
    """
    reconstructions = data.load_reconstruction()
    tracks_manager = data.load_tracks_manager()
    # (removed redundant `x = x` self-assignments of the three flags)

    if reconstructions:
        data.save_ply(
            reconstructions[0],
            tracks_manager,
            None,
            no_cameras,
            no_points,
            point_num_views,
        )

    if depthmaps:
        udata = dataset.UndistortedDataSet(data)
        urec = udata.load_undistorted_reconstruction()[0]
        for shot in urec.shots.values():
            rgb = udata.load_undistorted_image(shot.id)
            for t in ("clean", "raw"):
                path_depth = udata.depthmap_file(shot.id, t + ".npz")
                if not os.path.exists(path_depth):
                    continue
                depth = np.load(path_depth)["depth"]
                # Bug fix: scale into a fresh variable. The original clobbered
                # `rgb`, so the "raw" pass scaled an image already shrunk for
                # "clean", which is wrong when the two depthmaps differ in size.
                scaled_rgb = scale_down_image(rgb, depth.shape[1], depth.shape[0])
                ply = depthmap_to_ply(shot, depth, scaled_rgb)
                with io.open_wt(udata.depthmap_file(shot.id, t + ".ply")) as fout:
                    fout.write(ply)