예제 #1
0
def compute_depthmaps(data, graph, reconstruction):
    """Compute and refine depthmaps for all shots."""
    logger.info('Computing neighbors')
    processes = data.config.get('processes', 1)
    num_neighbors = data.config['depthmap_num_neighbors']
    tracks, _ = matching.tracks_and_images(graph)
    common_tracks = matching.all_common_tracks(
        graph, tracks, include_features=False)

    # Pre-compute the neighboring views for every shot.
    neighbors = {
        shot.id: find_neighboring_images(shot, common_tracks,
                                         reconstruction, num_neighbors)
        for shot in reconstruction.shots.values()
    }

    # Stage 1: raw depthmap computation for shots with >= 2 neighbors.
    compute_args = []
    for shot in reconstruction.shots.values():
        if len(neighbors[shot.id]) > 1:
            min_depth, max_depth = compute_depth_range(
                graph, reconstruction, shot)
            compute_args.append(
                (data, reconstruction, neighbors, min_depth, max_depth, shot))
    parallel_run(compute_depthmap, compute_args, processes)

    # Stage 2: clean the depthmaps of the same shots.
    clean_args = []
    for shot in reconstruction.shots.values():
        if len(neighbors[shot.id]) > 1:
            clean_args.append((data, reconstruction, neighbors, shot))
    parallel_run(clean_depthmap, clean_args, processes)

    merge_depthmaps(data, graph, reconstruction, neighbors)
예제 #2
0
def incremental_reconstruction(data):
    """Run the entire incremental reconstruction pipeline."""
    logger.info("Starting incremental reconstruction")
    data.invent_reference_lla()
    graph = data.load_tracks_graph()
    tracks, images = matching.tracks_and_images(graph)
    remaining_images = set(images)
    gcp = None
    if data.ground_control_points_exist():
        gcp = data.load_ground_control_points()
    common_tracks = matching.all_common_tracks(graph, tracks)
    reconstructions = []
    for im1, im2 in compute_image_pairs(common_tracks, data.config):
        # Skip pairs where either image is already reconstructed.
        if im1 not in remaining_images or im2 not in remaining_images:
            continue
        tracks, p1, p2 = common_tracks[im1, im2]
        reconstruction = bootstrap_reconstruction(data, graph, im1, im2,
                                                  p1, p2)
        if not reconstruction:
            continue
        remaining_images.remove(im1)
        remaining_images.remove(im2)
        reconstruction = grow_reconstruction(
            data, graph, reconstruction, remaining_images, gcp)
        reconstructions.append(reconstruction)
        # Keep the largest reconstruction first when saving.
        reconstructions.sort(key=lambda r: len(r.shots), reverse=True)
        data.save_reconstruction(reconstructions)

    for k, r in enumerate(reconstructions):
        logger.info("Reconstruction {}: {} images, {} points".format(
            k, len(r.shots), len(r.points)))
    logger.info("{} partial reconstructions in total.".format(
        len(reconstructions)))
예제 #3
0
    def write_report(self, data, graph,
                     features_time, matches_time, tracks_time):
        """Save a 'tracks.json' report with timings and the viewing graph."""
        tracks, images = matching.tracks_and_images(graph)
        image_graph = bipartite.weighted_projected_graph(graph, images)

        def pair_weight(a, b):
            # Number of common tracks between two images (0 if unconnected).
            if a in image_graph and b in image_graph[a]:
                return image_graph[a][b]['weight']
            return 0

        matrix = []
        for im1 in data.images():
            matrix.append([pair_weight(im1, im2) for im2 in data.images()])

        report = {
            "wall_times": {
                "load_features": features_time,
                "load_matches": matches_time,
                "compute_tracks": tracks_time,
            },
            "wall_time": features_time + matches_time + tracks_time,
            "num_images": len(images),
            "num_tracks": len(tracks),
            "viewing_graph": matrix
        }
        data.save_report(io.json_dumps(report), 'tracks.json')
예제 #4
0
def retriangulate(graph, reconstruction, config):
    """Re-triangulate every track in the graph."""
    threshold = config.get('triangulation_threshold', 0.004)
    min_ray_angle = config.get('triangulation_min_ray_angle', 2.0)
    triangulator = TrackTriangulator(graph, reconstruction)
    all_tracks, _ = matching.tracks_and_images(graph)
    for track_id in all_tracks:
        triangulator.triangulate(track_id, threshold, min_ray_angle)
예제 #5
0
def incremental_reconstruction(data,
                               graph=None,
                               common_tracks=None,
                               my_init=False):
    """Run the entire incremental reconstruction pipeline.

    Args:
        data: dataset giving access to config, tracks and reconstruction I/O.
        graph: optional pre-loaded tracks graph; loaded from data when None.
        common_tracks: optional pre-computed common-tracks map; computed
            from the graph when None.
        my_init: when True, select seed pairs with compute_image_pairs_new.
    """
    logger.info("Starting incremental reconstruction")
    if not data.config['use_dummy_camera']:
        data.invent_reference_lla()
    if graph is None:  # PEP 8: compare to None with `is`, not `==`
        graph = data.load_tracks_graph()
    tracks, images = matching.tracks_and_images(graph)
    remaining_images = set(images)
    gcp = None
    if data.ground_control_points_exist():
        gcp = data.load_ground_control_points()
    if common_tracks is None:
        common_tracks = matching.all_common_tracks(graph, tracks)
    reconstructions = []

    # NOTE(review): this overrides any configured threshold — confirm intended.
    data.config['five_point_algo_threshold'] = 0.5

    if my_init:
        pairs = compute_image_pairs_new(common_tracks, data.config, step=5)
    else:
        pairs = compute_image_pairs(common_tracks, data.config)

    for im1, im2 in pairs:
        # Only bootstrap from pairs where both images are unreconstructed.
        if im1 in remaining_images and im2 in remaining_images:
            tracks, p1, p2 = common_tracks[im1, im2]
            reconstruction = bootstrap_reconstruction(data, graph, im1, im2,
                                                      p1, p2, my_init)
            if reconstruction:
                remaining_images.remove(im1)
                remaining_images.remove(im2)
                reconstruction = grow_reconstruction(data, graph,
                                                     reconstruction,
                                                     remaining_images, gcp)
                reconstructions.append(reconstruction)
                # Keep reconstructions sorted by decreasing size when saving.
                reconstructions = sorted(reconstructions,
                                         key=lambda x: -len(x.shots))
                data.save_reconstruction(reconstructions)

    for k, r in enumerate(reconstructions):
        logger.info("Reconstruction {}: {} images, {} points".format(
            k, len(r.shots), len(r.points)))
    logger.info("{} partial reconstructions in total.".format(
        len(reconstructions)))
예제 #6
0
def incremental_reconstruction(data):
    """Run the entire incremental reconstruction pipeline."""
    logger.info("Starting incremental reconstruction")

    # Load the exif information from the images and convert it to the
    # internal reference frame.
    data.invent_reference_lla()

    # The tracks graph is bipartite: image nodes and track nodes, with
    # feature observations stored on the edges.
    graph = data.load_tracks_graph()

    tracks, images = matching.tracks_and_images(graph)
    remaining_images = set(images)

    gcp = None
    if data.ground_control_points_exist():
        gcp = data.load_ground_control_points()

    # Maps (im1, im2) -> (track ids, im1 features, im2 features).
    common_tracks = matching.all_common_tracks(graph, tracks)
    reconstructions = []

    # Candidate seed pairs sorted by decreasing favorability.
    pairs = compute_image_pairs(common_tracks, data.config)
    if not pairs:  # idiomatic emptiness test instead of len(...) == 0
        print("no image pairs available, use all combinations instead")
        pairs = combinations(sorted(remaining_images), 2)
    for im1, im2 in pairs:
        # Only bootstrap from pairs where both images are still
        # unreconstructed; entering this branch more than once means
        # several partial reconstructions were produced.
        if im1 in remaining_images and im2 in remaining_images:
            tracks, p1, p2 = common_tracks[im1, im2]
            reconstruction = bootstrap_reconstruction(data, graph, im1, im2, p1, p2)
            if reconstruction:
                remaining_images.remove(im1)
                remaining_images.remove(im2)
                # Growing is greedy: it adds every image it can, not just one.
                reconstruction = grow_reconstruction(
                    data, graph, reconstruction, remaining_images, gcp)
                reconstructions.append(reconstruction)
                reconstructions = sorted(reconstructions,
                                         key=lambda x: -len(x.shots))
                data.save_reconstruction(reconstructions)
            else:
                print("reconstruction for image %s and %s failed" % (im1, im2))

    for k, r in enumerate(reconstructions):
        logger.info("Reconstruction {}: {} images, {} points".format(
            k, len(r.shots), len(r.points)))
    logger.info("{} partial reconstructions in total.".format(
        len(reconstructions)))
예제 #7
0
def calculate_reconstruction_results(data, graph, reconstruction, options,
                                     command_keys):
    """Summarize registration, reprojection-error and timing statistics.

    Returns a dict with registered-image counts, point counts, the average
    reprojection error, and the total wall time parsed from 'profile.log'.
    """
    registered = 0
    error = 0.0
    count = 0
    missing_errors = 0
    times = {}

    tracks, images = matching.tracks_and_images(graph)
    cameras = len(data.images())

    # A shot counts as registered when enough of its observed tracks were
    # triangulated.
    for s in reconstruction.shots:
        if s not in graph:
            continue
        pts_triangulated = set(reconstruction.points.keys()).intersection(
            set(graph[s].keys()))
        if len(pts_triangulated) >= options['min_triangulated']:
            registered += 1

    for pid in reconstruction.points:
        err = reconstruction.points[pid].reprojection_error
        # Compare against None so a legitimate 0.0 error is not mistaken
        # for a missing one (the old truthiness test dropped such points).
        if err is not None:
            error += err
            count += 1
        else:
            missing_errors += 1

    profile_fn = os.path.join(data.data_path, 'profile.log')
    if os.path.exists(profile_fn):
        with open(profile_fn, 'r') as f:
            # Iterate the file directly; no need to materialize readlines().
            for line in f:
                datums = line.split(':')
                if datums[0] in command_keys:
                    times[datums[0]] = float(datums[1])

    total_time = sum(times.values())

    # Guard the division without corrupting the reported point count
    # (the old code set count = 0.0001 and leaked it into the results).
    average_error = 1.0 * error / count if count else 0.0

    results = {
        'dataset': os.path.basename(os.path.normpath(data.data_path)),
        'registered images': registered,
        'total images in dataset': cameras,
        'points triangulated ': len(reconstruction.points.keys()),
        'average reprojection error': average_error,
        'points with reprojection error': count,
        'missing reprojection error': missing_errors,
        'time': round(total_time, 2)
    }
    return results
예제 #8
0
def scene_sampling_phase(data, tracks_graph):
    """Greedily grow an iconic image set maximizing coverage minus confusion.

    Repeatedly adds the image with the best marginal gain in
    completeness - alpha * distinctiveness, stopping when no candidate
    improves the objective.

    Returns:
        (iconic_images, remaining_images), both sorted lists.
    """
    alpha = 0.1  # weight of the distinctiveness (confusion) penalty
    C = {}  # images selected into the iconic set
    tracks, remaining_images = matching.tracks_and_images(tracks_graph)

    while True:
        # Guard against an empty candidate set (max() would raise).
        if not remaining_images:
            break

        R_i = {}
        delta = {}
        T_A_ = {}
        # Current objective value for the selected set C.
        T_A = iconic_set_points(data, tracks_graph, C)
        D = confusing_points(data, tracks_graph, C)
        completeness = len(T_A)
        distinctiveness = len(D)
        R = completeness - alpha * distinctiveness

        # Evaluate the marginal gain of adding each candidate image.
        for i in remaining_images:
            C_i = C.copy()
            C_i[i] = True
            T_A_[i] = iconic_set_points(data, tracks_graph, C_i)
            D_i = confusing_points(data, tracks_graph, C_i)
            completeness_i = len(T_A_[i])
            distinctiveness_i = len(D_i)
            R_i[i] = completeness_i - alpha * distinctiveness_i
            delta[i] = R_i[i] - R

        # items() instead of the Python-2-only iteritems().
        best_image = max(delta.items(), key=operator.itemgetter(1))[0]

        if delta[best_image] > 0:
            C[best_image] = True
            remaining_images.remove(best_image)
            if len(remaining_images) == 0:
                break
        else:
            # No candidate improves the objective; stop.
            break

    return sorted(C.keys()), sorted(remaining_images)
예제 #9
0
def retriangulate(graph, reconstruction, config):
    """Re-triangulate every track and report point counts and timing."""
    chrono = Chronometer()
    report = {'num_points_before': len(reconstruction.points)}
    threshold = config['triangulation_threshold']
    min_ray_angle = config['triangulation_min_ray_angle']
    triangulator = TrackTriangulator(graph, reconstruction)
    all_tracks, _ = matching.tracks_and_images(graph)
    for track_id in all_tracks:
        triangulator.triangulate(track_id, threshold, min_ray_angle)
    report['num_points_after'] = len(reconstruction.points)
    chrono.lap('retriangulate')
    report['wall_time'] = chrono.total_time()
    return report
예제 #10
0
def retriangulate(graph, reconstruction, config):
    """Run a full retriangulation pass over all tracks in the graph."""
    chrono = Chronometer()
    points_before = len(reconstruction.points)
    threshold = config['triangulation_threshold']
    min_ray_angle = config['triangulation_min_ray_angle']
    triangulator = TrackTriangulator(graph, reconstruction)
    tracks, _images = matching.tracks_and_images(graph)
    for t in tracks:
        triangulator.triangulate(t, threshold, min_ray_angle)
    points_after = len(reconstruction.points)
    chrono.lap('retriangulate')
    return {
        'num_points_before': points_before,
        'num_points_after': points_after,
        'wall_time': chrono.total_time(),
    }
예제 #11
0
def incremental_reconstruction(data):
    """Run the entire incremental reconstruction pipeline."""
    logger.info("Starting incremental reconstruction")
    report = {}
    chrono = Chronometer()
    if not data.reference_lla_exists():
        data.invent_reference_lla()

    graph = data.load_tracks_graph()
    tracks, images = matching.tracks_and_images(graph)
    chrono.lap('load_tracks_graph')
    remaining_images = set(images)
    gcp = None
    if data.ground_control_points_exist():
        gcp = data.load_ground_control_points()
    common_tracks = matching.all_common_tracks(graph, tracks)
    reconstructions = []
    pairs = compute_image_pairs(common_tracks, data)
    chrono.lap('compute_image_pairs')
    report['num_candidate_image_pairs'] = len(pairs)
    report['reconstructions'] = []
    for im1, im2 in pairs:
        # Only bootstrap from pairs where both images are unreconstructed.
        if im1 not in remaining_images or im2 not in remaining_images:
            continue
        rec_report = {}
        report['reconstructions'].append(rec_report)
        tracks, p1, p2 = common_tracks[im1, im2]
        reconstruction, rec_report['bootstrap'] = bootstrap_reconstruction(
            data, graph, im1, im2, p1, p2)
        if not reconstruction:
            continue
        remaining_images.remove(im1)
        remaining_images.remove(im2)
        reconstruction, rec_report['grow'] = grow_reconstruction(
            data, graph, reconstruction, remaining_images, gcp)
        reconstructions.append(reconstruction)
        # Largest reconstruction first when saving.
        reconstructions.sort(key=lambda r: len(r.shots), reverse=True)
        data.save_reconstruction(reconstructions)

    for k, r in enumerate(reconstructions):
        logger.info("Reconstruction {}: {} images, {} points".format(
            k, len(r.shots), len(r.points)))
    logger.info("{} partial reconstructions in total.".format(
        len(reconstructions)))
    chrono.lap('compute_reconstructions')
    report['wall_times'] = dict(chrono.lap_times())
    report['not_reconstructed_images'] = list(remaining_images)
    return report
예제 #12
0
def incremental_reconstruction(data):
    """Run the entire incremental reconstruction pipeline."""
    logger.info("Starting incremental reconstruction")
    report = {}
    chrono = Chronometer()
    if not data.reference_lla_exists():
        data.invent_reference_lla()

    graph = data.load_tracks_graph()
    tracks, images = matching.tracks_and_images(graph)
    chrono.lap('load_tracks_graph')
    remaining_images = set(images)
    gcp = None
    if data.ground_control_points_exist():
        gcp = data.load_ground_control_points()
    common_tracks = matching.all_common_tracks(graph, tracks)
    reconstructions = []
    pairs = compute_image_pairs(common_tracks, data)
    chrono.lap('compute_image_pairs')
    report['num_candidate_image_pairs'] = len(pairs)
    report['reconstructions'] = []
    for im1, im2 in pairs:
        both_available = im1 in remaining_images and im2 in remaining_images
        if not both_available:
            continue
        rec_report = {}
        report['reconstructions'].append(rec_report)
        tracks, p1, p2 = common_tracks[im1, im2]
        reconstruction, rec_report['bootstrap'] = bootstrap_reconstruction(
            data, graph, im1, im2, p1, p2)
        if not reconstruction:
            continue
        for seed in (im1, im2):
            remaining_images.remove(seed)
        reconstruction, rec_report['grow'] = grow_reconstruction(
            data, graph, reconstruction, remaining_images, gcp)
        reconstructions.append(reconstruction)
        reconstructions = sorted(reconstructions, key=lambda r: -len(r.shots))
        data.save_reconstruction(reconstructions)

    for idx, rec in enumerate(reconstructions):
        logger.info("Reconstruction {}: {} images, {} points".format(
            idx, len(rec.shots), len(rec.points)))
    logger.info("{} partial reconstructions in total.".format(
        len(reconstructions)))
    chrono.lap('compute_reconstructions')
    report['wall_times'] = dict(chrono.lap_times())
    report['not_reconstructed_images'] = list(remaining_images)
    return report
예제 #13
0
def write_report(data, graph, features_time, matches_time, tracks_time):
    """Save a 'yan.json' report listing per-pair common-track weights."""
    tracks, images = matching.tracks_and_images(graph)
    image_graph = bipartite.weighted_projected_graph(graph, images)
    view_graph = []
    for im1 in data.images():
        # Neighbors of im1 in the image graph (empty when disconnected).
        connected = image_graph[im1] if im1 in image_graph else {}
        for im2 in data.images():
            if im2 in connected:
                view_graph.append((im1, im2, connected[im2]['weight']))

    report = {
        "wall_times": {
            "load_features": features_time,
            "load_matches": matches_time,
            "compute_tracks": tracks_time,
        },
        "wall_time": features_time + matches_time + tracks_time,
        "num_images": len(images),
        "num_tracks": len(tracks),
        "view_graph": view_graph
    }
    data.save_report(io.json_dumps(report), 'yan.json')
예제 #14
0
    def write_report(self, data, graph,
                     features_time, matches_time, tracks_time):
        """Save a 'tracks.json' report with timings and the view graph."""
        tracks, images = matching.tracks_and_images(graph)
        image_graph = bipartite.weighted_projected_graph(graph, images)
        # Edge list (im1, im2, common-track weight) over all image pairs
        # connected in the projected image graph.
        view_graph = [
            (im1, im2, image_graph[im1][im2]['weight'])
            for im1 in data.images()
            for im2 in data.images()
            if im1 in image_graph and im2 in image_graph[im1]
        ]

        report = {
            "wall_times": {
                "load_features": features_time,
                "load_matches": matches_time,
                "compute_tracks": tracks_time,
            },
            "wall_time": features_time + matches_time + tracks_time,
            "num_images": len(images),
            "num_tracks": len(tracks),
            "view_graph": view_graph
        }
        data.save_report(io.json_dumps(report), 'tracks.json')
예제 #15
0
def compute_depthmaps(data, graph, reconstruction):
    """Compute, clean and prune depthmaps for all shots, then merge them.

    Each stage runs in parallel over the shots that have at least two
    neighboring views.
    """
    logger.info('Computing neighbors')
    processes = data.config.get('processes', 1)
    num_neighbors = data.config['depthmap_num_neighbors']
    tracks, _ = matching.tracks_and_images(graph)
    common_tracks = matching.all_common_tracks(graph, tracks,
                                               include_features=False)

    neighbors = {}
    for shot in reconstruction.shots.values():
        neighbors[shot.id] = find_neighboring_images(
            shot, common_tracks, reconstruction, num_neighbors)

    # Compute the eligibility filter once instead of repeating it in each
    # of the three stages below.
    eligible = [shot for shot in reconstruction.shots.values()
                if len(neighbors[shot.id]) > 1]

    # Stage 1: raw depthmap computation (needs the depth search range).
    arguments = []
    for shot in eligible:
        min_depth, max_depth = compute_depth_range(graph, reconstruction, shot)
        arguments.append((data, neighbors[shot.id], min_depth, max_depth, shot))
    parallel_map(compute_depthmap_catched, arguments, processes)

    # Stage 2: clean the computed depthmaps.
    arguments = [(data, neighbors[shot.id], shot) for shot in eligible]
    parallel_map(clean_depthmap_catched, arguments, processes)

    # Stage 3: prune the cleaned depthmaps.
    arguments = [(data, neighbors[shot.id], shot) for shot in eligible]
    parallel_map(prune_depthmap_catched, arguments, processes)

    merge_depthmaps(data, graph, reconstruction, neighbors)
예제 #16
0
def track_regeneration(data, G, V, iconic_images, non_iconic_images):
    """Rebuild a tracks graph by walking image pairs of G and re-numbering tracks.

    For every edge (i, j) of the image graph G, each track shared by i and j
    in the original tracks graph V is assigned a new integer id and copied
    (with its per-image feature data) into a fresh bipartite graph V_prime.
    An old track may map to several new ids, so both directions of the
    mapping are recorded.  Returns the regenerated graph V_prime.

    NOTE(review): iconic_images and non_iconic_images are currently unused —
    confirm whether they were meant to restrict the traversal.
    """
    # Next unused new-track id; incremented each time a new track is created.
    current_track_counter = 1
    # image -> {old track id -> new track id} for tracks already copied.
    image_tracks = {}
    # old track id -> list of new track ids (one old track can split).
    old_to_new_track_mapping = {}
    # new track id -> old track id it came from.
    new_to_old_track_mapping = {}

    # tracks, images = matching.tracks_and_images(V)
    tracks, images = matching.tracks_and_images(V)
    # images = list(images)
    # tracks = list(tracks)

    # Start the regenerated graph with only the image nodes; track nodes
    # are added lazily as new tracks are created below.
    V_prime = nx.Graph()
    V_prime.add_nodes_from(images, bipartite=0)
    # V_prime.add_nodes_from(tracks, bipartite=1)

    # print '{} / {}'.format(len(images), len(tracks))
    # tracks_, images_ = matching.tracks_and_images(V)
    # print ('{}_ / {}_'.format(len(images_), len(tracks_)))

    # tracks_prime_, images_prime_ = matching.tracks_and_images(V_prime)
    # print '#'*100
    # print ('{}_ / {}_'.format(len(images_prime_), len(tracks_prime_)))

    # for i in V_prime.nodes(data=True):
    # print i
    # import sys; sys.exit(1)

    # Running totals of shared-track counts, used only for the summary
    # printed at the end (original graph vs regenerated graph).
    cum_common_tracks_prime = 0
    cum_common_tracks = 0
    # source = sorted(G.nodes())[0]
    # for i,j in nx.bfs_edges(G, source):
    # for count in range(0,3):

    # Visit image pairs in a deterministic order (sorted nodes/neighbors).
    for i in sorted(G.nodes()):
        # if i != 'P1010141.jpg' and i != 'P1010159.jpg':
        #     continue
        for j in sorted(G.neighbors(i)):
            # if j != 'P1010145.jpg':
            #     continue

            # print ('{} - {}'.format(i,j))
            # Tracks observed by both i and j in the original graph.
            common_tracks = len(
                set(V[i].keys()).intersection(set(V[j].keys())))
            cum_common_tracks += common_tracks
            # print ('\tCommon tracks: {}'.format(common_tracks))

            if i not in image_tracks:
                image_tracks[i] = {}
            if j not in image_tracks:
                image_tracks[j] = {}
            for t in V[i]:
                if t not in V[j]:
                    continue
                # print 'hello'
                # print '{} / {}'.format(j,t)
                # print V[j][t]
                # import sys; sys.exit(1)

                if t not in old_to_new_track_mapping:
                    old_to_new_track_mapping[t] = [
                    ]  # old track could map to multiple new tracks

                # if len(set(old_to_new_track_mapping[t]).intersection(V_prime[i].keys())) == 0:
                if t not in image_tracks[i]:
                    # Image i has not seen track t yet: reuse j's new id if
                    # it exists, otherwise allocate a fresh one.
                    if t in image_tracks[j]:
                        current_track_id = image_tracks[j][t]
                    else:
                        current_track_id = current_track_counter
                        current_track_counter += 1
                    # Create new track and add it to image i
                    old_to_new_track_mapping[t].append(current_track_id)
                    new_to_old_track_mapping[current_track_id] = t

                    image_tracks[i][t] = current_track_id

                    # Copy i's feature observation of t onto the new edge.
                    V_prime.add_nodes_from([current_track_id], bipartite=1)
                    V_prime.add_edge(i, current_track_id)
                    V_prime[i][current_track_id] = {
                        'feature_id': V[i][t]['feature_id'],
                        'feature_color': V[i][t]['feature_color'],
                        'feature': V[i][t]['feature']
                    }

                if t not in image_tracks[j]:
                    # Image j joins the new track already created for i.
                    new_track_id = image_tracks[i][t]
                    # Add track to image j
                    V_prime.add_edge(j, new_track_id)
                    V_prime[j][new_track_id] = {
                        'feature_id': V[j][t]['feature_id'],
                        'feature_color': V[j][t]['feature_color'],
                        'feature': V[j][t]['feature']
                    }
                    image_tracks[j][t] = new_track_id

            # Same pair count, now measured on the regenerated graph.
            common_tracks_prime = len(
                set(V_prime[i].keys()).intersection(set(V_prime[j].keys())))
            cum_common_tracks_prime += common_tracks_prime
            # print ('\tCommon tracks: {}'.format(common_tracks_prime))

        # import sys; sys.exit(1)
    print(
        'Original tracks graph: {} edges  --  Regenerated tracks graph: {} edges'
        .format(len(V.edges()), len(V_prime.edges())))
    print('Common Tracks: {} / {}'.format(cum_common_tracks,
                                          cum_common_tracks_prime))

    tracks_, images_ = matching.tracks_and_images(V)
    print('{}_ / {}_'.format(len(images_), len(tracks_)))
    tracks_, images_ = matching.tracks_and_images(V_prime)
    print('{}_ / {}_'.format(len(images_), len(tracks_)))
    # tracks_, images_ = nx.bipartite.sets(V)
    # print ('{}_ / {}_'.format(len(images_), len(tracks_)))
    # tracks_, images_ = nx.bipartite.sets(V_prime)
    # print ('{}_ / {}_'.format(len(images_), len(tracks_)))

    # for o in old_to_new_track_mapping:
    #     if len(old_to_new_track_mapping[o]) > 1:

    #         print '{} : {}'.format(o, len(old_to_new_track_mapping[o]))
    return V_prime