def incremental_reconstruction(data, graph):
    """Run the entire incremental reconstruction pipeline."""
    logger.info("Starting incremental reconstruction")
    report = {}
    chrono = Chronometer()
    tracks, images = tracking.tracks_and_images(graph)
    chrono.lap('load_tracks_graph')
    if not data.reference_lla_exists():
        data.invent_reference_lla(images)
    remaining_images = set(images)
    gcp = data.load_ground_control_points()
    common_tracks = tracking.all_common_tracks(graph, tracks)
    reconstructions = []
    pairs = compute_image_pairs(common_tracks, data)
    chrono.lap('compute_image_pairs')
    report['num_candidate_image_pairs'] = len(pairs)
    report['reconstructions'] = []
    for im1, im2 in pairs:
        if im1 in remaining_images and im2 in remaining_images:
            rec_report = {}
            report['reconstructions'].append(rec_report)
            tracks, p1, p2 = common_tracks[im1, im2]
            reconstruction, graph_inliers, rec_report['bootstrap'] = \
                bootstrap_reconstruction(data, graph, im1, im2, p1, p2)
            if reconstruction:
                remaining_images.remove(im1)
                remaining_images.remove(im2)
                reconstruction, rec_report['grow'] = grow_reconstruction(
                    data, graph, graph_inliers, reconstruction,
                    remaining_images, gcp)
                reconstructions.append(reconstruction)
                reconstructions = sorted(reconstructions,
                                         key=lambda x: -len(x.shots))
                rec_report['stats'] = compute_statistics(
                    reconstruction, graph_inliers)
                logger.info(rec_report['stats'])
    for k, r in enumerate(reconstructions):
        logger.info("Reconstruction {}: {} images, {} points".format(
            k, len(r.shots), len(r.points)))
    logger.info("{} partial reconstructions in total.".format(
        len(reconstructions)))
    chrono.lap('compute_reconstructions')
    report['wall_times'] = dict(chrono.lap_times())
    report['not_reconstructed_images'] = list(remaining_images)
    return report, reconstructions
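
# Usage sketch for the pipeline above (not part of the original module). It
# assumes an OpenSfM-style DataSet with load_tracks_graph() and
# save_reconstruction() methods; adjust the names to your own data layout.
def run_reconstruction_example(data_path):
    from opensfm import dataset
    data = dataset.DataSet(data_path)
    graph = data.load_tracks_graph()
    report, reconstructions = incremental_reconstruction(data, graph)
    data.save_reconstruction(reconstructions)
    return report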
def common_tracks_double_dict(graph):
    """List of track ids observed by each image pair.

    Return a dict, ``res``, such that ``res[im1][im2]`` is the list of
    common tracks between ``im1`` and ``im2``.
    """
    tracks, images = tracking.tracks_and_images(graph)
    common_tracks_per_pair = tracking.all_common_tracks(
        graph, tracks, include_features=False)
    res = {image: {} for image in images}
    for (im1, im2), v in iteritems(common_tracks_per_pair):
        res[im1][im2] = v
        res[im2][im1] = v
    return res
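
# Minimal usage sketch for common_tracks_double_dict (the helper name and the
# image arguments are illustrative, not from the original source): count how
# many tracks two images share.
def shared_track_count(graph, im1, im2):
    res = common_tracks_double_dict(graph)
    return len(res.get(im1, {}).get(im2, []))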
def incremental_reconstruction(data, graph):
    """Run the entire incremental reconstruction pipeline."""
    logger.info("Starting incremental reconstruction")
    report = {}
    chrono = Chronometer()
    tracks, images = tracking.tracks_and_images(graph)
    chrono.lap('load_tracks_graph')
    if not data.reference_lla_exists():
        data.invent_reference_lla(images)
    remaining_images = set(images)
    gcp = None
    if data.ground_control_points_exist():
        gcp = data.load_ground_control_points()
    common_tracks = tracking.all_common_tracks(graph, tracks)
    reconstructions = []
    pairs = compute_image_pairs(common_tracks, data)
    chrono.lap('compute_image_pairs')
    report['num_candidate_image_pairs'] = len(pairs)
    report['reconstructions'] = []
    for im1, im2 in pairs:
        if im1 in remaining_images and im2 in remaining_images:
            rec_report = {}
            report['reconstructions'].append(rec_report)
            tracks, p1, p2 = common_tracks[im1, im2]
            reconstruction, rec_report['bootstrap'] = bootstrap_reconstruction(
                data, graph, im1, im2, p1, p2)
            if reconstruction:
                remaining_images.remove(im1)
                remaining_images.remove(im2)
                reconstruction, rec_report['grow'] = grow_reconstruction(
                    data, graph, reconstruction, remaining_images, gcp)
                reconstructions.append(reconstruction)
                reconstructions = sorted(reconstructions,
                                         key=lambda x: -len(x.shots))
    for k, r in enumerate(reconstructions):
        logger.info("Reconstruction {}: {} images, {} points".format(
            k, len(r.shots), len(r.points)))
    logger.info("{} partial reconstructions in total.".format(
        len(reconstructions)))
    chrono.lap('compute_reconstructions')
    report['wall_times'] = dict(chrono.lap_times())
    report['not_reconstructed_images'] = list(remaining_images)
    return report, reconstructions
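
# Chronometer is used above but not defined in this excerpt. A minimal sketch
# that supports the two calls made here, lap(key) and lap_times(), could look
# like this:
from timeit import default_timer as timer


class Chronometer(object):
    def __init__(self):
        t = timer()
        # Each lap is (key, delta since previous lap, absolute time).
        self.laps = [('start', 0, t)]

    def lap(self, key):
        t = timer()
        dt = t - self.laps[-1][2]
        self.laps.append((key, dt, t))

    def lap_times(self):
        # (key, delta) pairs consumable by dict(), as in report['wall_times'].
        return [(key, dt) for key, dt, _ in self.laps[1:]]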
def compute_depthmaps(data, graph, reconstruction):
    """Compute and refine depthmaps for all shots."""
    logger.info('Computing neighbors')
    processes = data.config.get('processes', 1)
    num_neighbors = data.config['depthmap_num_neighbors']
    tracks, _ = tracking.tracks_and_images(graph)
    common_tracks = tracking.all_common_tracks(
        graph, tracks, include_features=False)

    # Pick the images to match each shot against, based on shared tracks.
    neighbors = {}
    for shot in reconstruction.shots.values():
        neighbors[shot.id] = find_neighboring_images(
            shot, common_tracks, reconstruction, num_neighbors)

    # Pass 1: compute raw depthmaps. Shots with at most one neighbor are
    # skipped, since they lack the extra views needed for multi-view stereo.
    arguments = []
    for shot in reconstruction.shots.values():
        if len(neighbors[shot.id]) <= 1:
            continue
        min_depth, max_depth = compute_depth_range(graph, reconstruction, shot)
        arguments.append(
            (data, neighbors[shot.id], min_depth, max_depth, shot))
    parallel_map(compute_depthmap_catched, arguments, processes)

    # Pass 2: clean the raw depthmaps.
    arguments = []
    for shot in reconstruction.shots.values():
        if len(neighbors[shot.id]) <= 1:
            continue
        arguments.append((data, neighbors[shot.id], shot))
    parallel_map(clean_depthmap_catched, arguments, processes)

    # Pass 3: prune redundant depth estimates.
    arguments = []
    for shot in reconstruction.shots.values():
        if len(neighbors[shot.id]) <= 1:
            continue
        arguments.append((data, neighbors[shot.id], shot))
    parallel_map(prune_depthmap_catched, arguments, processes)

    # Merge the per-shot depthmaps into a single point cloud.
    merge_depthmaps(data, reconstruction)
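
# The *_catched wrappers above are not defined in this excerpt. The naming
# suggests workers that catch per-shot exceptions so one failing depthmap does
# not kill the whole parallel_map run. A sketch of that pattern, assuming an
# underlying compute_depthmap() worker (not shown here):
def compute_depthmap_catched(arguments):
    try:
        compute_depthmap(arguments)
    except Exception:
        # Log the failure (with traceback) and let the other shots proceed.
        logger.exception('Depthmap computation failed for %s', arguments[-1].id)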
def write_report(self, data, graph, features_time, matches_time, tracks_time):
    """Save a report on the tracking step: wall times and the view graph."""
    tracks, images = tracking.tracks_and_images(graph)
    # Project the bipartite image-track graph onto the image nodes; edge
    # weights count the tracks shared by each image pair.
    image_graph = bipartite.weighted_projected_graph(graph, images)
    view_graph = []
    for im1 in data.images():
        for im2 in data.images():
            if im1 in image_graph and im2 in image_graph[im1]:
                weight = image_graph[im1][im2]['weight']
                view_graph.append((im1, im2, weight))

    report = {
        "wall_times": {
            "load_features": features_time,
            "load_matches": matches_time,
            "compute_tracks": tracks_time,
        },
        "wall_time": features_time + matches_time + tracks_time,
        "num_images": len(images),
        "num_tracks": len(tracks),
        "view_graph": view_graph,
    }
    data.save_report(io.json_dumps(report), 'tracks.json')
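
# Toy sketch of the bipartite projection used in write_report (networkx only;
# node names are illustrative). The tracks graph is bipartite between images
# and tracks, and the weighted projection onto the image nodes gives, for
# each image pair, the number of tracks they observe in common.
import networkx as nx
from networkx.algorithms import bipartite


def _projection_demo():
    g = nx.Graph()
    g.add_edges_from([('im1', 't1'), ('im2', 't1'),
                      ('im1', 't2'), ('im2', 't2'),
                      ('im3', 't2')])
    image_graph = bipartite.weighted_projected_graph(g, ['im1', 'im2', 'im3'])
    assert image_graph['im1']['im2']['weight'] == 2  # im1, im2 share t1 and t2
    assert image_graph['im2']['im3']['weight'] == 1  # im2, im3 share only t2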