def all_common_tracks(
    tracks_manager: pymap.TracksManager,
    include_features: bool = True,
    min_common: int = 50,
) -> t.Dict[t.Tuple[str, str], t.Union[TPairTracks, t.List[str]]]:
    """List the tracks observed by each image pair.

    Args:
        tracks_manager: tracks manager
        include_features: whether to include the features from the images
        min_common: the minimum number of tracks the two images need to have
            in common

    Returns:
        tuple: im1, im2 -> tuple: tracks, features from first image, features
        from second image
    """
    pair_tracks = {}
    for (im1, im2), n_common in tracks_manager.get_all_pairs_connectivity().items():
        # Skip weakly-connected pairs early.
        if n_common < min_common:
            continue
        observations = tracks_manager.get_all_common_observations(im1, im2)
        track_ids = [track_id for track_id, _, _ in observations]
        if include_features:
            pair_tracks[im1, im2] = (
                track_ids,
                np.array([obs1.point for _, obs1, _ in observations]),
                np.array([obs2.point for _, _, obs2 in observations]),
            )
        else:
            pair_tracks[im1, im2] = track_ids
    return pair_tracks
def as_weighted_graph(tracks_manager: pymap.TracksManager) -> nx.Graph:
    """Return the tracks manager as a weighted graph.

    Nodes are shot ids; each edge between two shots is weighted by the
    number of tracks the two shots have in common.
    """
    graph = nx.Graph()
    graph.add_nodes_from(tracks_manager.get_shot_ids())
    for (im1, im2), n_common in tracks_manager.get_all_pairs_connectivity().items():
        graph.add_edge(im1, im2, weight=n_common)
    return graph
def save_matchgraph(
    data: DataSetBase,
    tracks_manager: pymap.TracksManager,
    reconstructions: List[types.Reconstruction],
    output_path: str,
    io_handler: io.IoFilesystemBase,
) -> None:
    """Plot the match graph of the reconstructions and save it as a PNG.

    Draws one line per sufficiently-connected image pair (colored by the
    number of common tracks) and one marker per shot (colored by the
    reconstruction component it belongs to), then writes the figure to
    ``<output_path>/matchgraph.png`` through ``io_handler``.

    Args:
        data: dataset providing the ``resection_min_inliers`` config value
        tracks_manager: tracks manager used to compute pair connectivity
        reconstructions: reconstructed components (shots and points)
        output_path: directory where ``matchgraph.png`` is written
        io_handler: filesystem abstraction used to open the output file
    """
    all_shots = []
    all_points = []
    shot_component = {}
    for i, rec in enumerate(reconstructions):
        all_points += rec.points
        all_shots += rec.shots
        for shot in rec.shots:
            shot_component[shot] = i

    connectivity = tracks_manager.get_all_pairs_connectivity(all_shots, all_points)
    all_values = list(connectivity.values())
    if all_values:
        lowest = np.percentile(all_values, 5)
        highest = np.percentile(all_values, 95)
    else:
        # No connected pairs at all: nothing to scale colors against,
        # but we still draw the shot markers and save the figure.
        lowest, highest = 0.0, 1.0
    spread = highest - lowest

    plt.clf()
    cmap = cm.get_cmap("viridis")
    for (node1, node2), edge in sorted(connectivity.items(), key=lambda x: x[1]):
        if edge < 2 * data.config["resection_min_inliers"]:
            continue
        comp1 = shot_component[node1]
        comp2 = shot_component[node2]
        # Only draw edges within a single reconstruction component.
        if comp1 != comp2:
            continue
        o1 = reconstructions[comp1].shots[node1].pose.get_origin()
        o2 = reconstructions[comp2].shots[node2].pose.get_origin()
        if spread > 0:
            c = max(0, min(1.0, 1 - (edge - lowest) / spread))
        else:
            # Degenerate percentile range (all values equal): the original
            # expression would divide by zero; fall back to a fixed color.
            c = 0.0
        plt.plot([o1[0], o2[0]], [o1[1], o2[1]], linestyle="-", color=cmap(c))

    # One marker per shot, colored by its reconstruction component.
    for i, rec in enumerate(reconstructions):
        for shot in rec.shots.values():
            o = shot.pose.get_origin()
            c = i / len(reconstructions)
            plt.plot(o[0], o[1], linestyle="", marker="o", color=cmap(c))

    plt.xticks([])
    plt.yticks([])
    ax = plt.gca()
    for b in ["top", "bottom", "left", "right"]:
        ax.spines[b].set_visible(False)

    norm = colors.Normalize(vmin=lowest, vmax=highest)
    sm = cm.ScalarMappable(norm=norm, cmap=cmap.reversed())
    sm.set_array([])
    # Passing ax= explicitly: required by matplotlib >= 3.6 and harmless on
    # older versions.
    plt.colorbar(
        sm,
        ax=ax,
        orientation="horizontal",
        label="Number of matches between images",
        pad=0.0,
    )

    with io_handler.open(os.path.join(output_path, "matchgraph.png"), "wb") as fwb:
        plt.savefig(
            fwb,
            dpi=300,
            bbox_inches="tight",
        )