Code example #1
def incremental_reconstruction(data):
    """Run the entire incremental reconstruction pipeline."""
    logger.info("Starting incremental reconstruction")
    data.invent_reference_lla()
    graph = data.load_tracks_graph()
    tracks, images = matching.tracks_and_images(graph)
    remaining_images = set(images)
    gcp = None
    if data.ground_control_points_exist():
        gcp = data.load_ground_control_points()
    common_tracks = matching.all_common_tracks(graph, tracks)
    reconstructions = []
    pairs = compute_image_pairs(common_tracks, data.config)
    for im1, im2 in pairs:
        if im1 in remaining_images and im2 in remaining_images:
            tracks, p1, p2 = common_tracks[im1, im2]
            reconstruction = bootstrap_reconstruction(data, graph, im1, im2,
                                                      p1, p2)
            if reconstruction:
                remaining_images.remove(im1)
                remaining_images.remove(im2)
                reconstruction = grow_reconstruction(data, graph,
                                                     reconstruction,
                                                     remaining_images, gcp)
                reconstructions.append(reconstruction)
                reconstructions = sorted(reconstructions,
                                         key=lambda x: -len(x.shots))
                data.save_reconstruction(reconstructions)

    for k, r in enumerate(reconstructions):
        logger.info("Reconstruction {}: {} images, {} points".format(
            k, len(r.shots), len(r.points)))
    logger.info("{} partial reconstructions in total.".format(
        len(reconstructions)))
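
For context, a minimal driver for this function might look like the sketch below. It assumes an OpenSfM-style project directory and the `opensfm.dataset.DataSet` data-access class; the project path is a placeholder.

# Hypothetical driver sketch; the project path is a placeholder.
from opensfm import dataset

data = dataset.DataSet('path/to/project')
incremental_reconstruction(data)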
Code example #2
def compute_depthmaps(data, graph, reconstruction):
    """Compute and refine depthmaps for all shots."""
    logger.info('Computing neighbors')
    processes = data.config.get('processes', 1)
    num_neighbors = data.config['depthmap_num_neighbors']
    tracks, _ = matching.tracks_and_images(graph)
    common_tracks = matching.all_common_tracks(graph,
                                               tracks,
                                               include_features=False)

    neighbors = {}
    for shot in reconstruction.shots.values():
        neighbors[shot.id] = find_neighboring_images(shot, common_tracks,
                                                     reconstruction,
                                                     num_neighbors)

    arguments = []
    for shot in reconstruction.shots.values():
        if len(neighbors[shot.id]) <= 1:
            continue
        min_depth, max_depth = compute_depth_range(graph, reconstruction, shot)
        arguments.append(
            (data, reconstruction, neighbors, min_depth, max_depth, shot))
    parallel_run(compute_depthmap, arguments, processes)

    arguments = []
    for shot in reconstruction.shots.values():
        if len(neighbors[shot.id]) <= 1:
            continue
        arguments.append((data, reconstruction, neighbors, shot))
    parallel_run(clean_depthmap, arguments, processes)

    merge_depthmaps(data, graph, reconstruction, neighbors)
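
`parallel_run` itself is not shown in this snippet. A minimal sketch of such a helper, assuming each tuple in `arguments` is passed whole to the worker function, could be:

# Sketch of a parallel_run-style helper (not the actual OpenSfM code).
# Runs serially when a single process is requested, otherwise uses a pool.
from multiprocessing import Pool

def parallel_run(function, arguments, num_processes):
    if num_processes == 1:
        return [function(arg) for arg in arguments]
    with Pool(num_processes) as pool:
        return pool.map(function, arguments)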
Code example #3
def incremental_reconstruction(data,
                               graph=None,
                               common_tracks=None,
                               my_init=False):
    """Run the entire incremental reconstruction pipeline."""
    logger.info("Starting incremental reconstruction")
    if not data.config['use_dummy_camera']:
        data.invent_reference_lla()
    if graph is None:
        graph = data.load_tracks_graph()
    tracks, images = matching.tracks_and_images(graph)
    remaining_images = set(images)
    gcp = None
    if data.ground_control_points_exist():
        gcp = data.load_ground_control_points()
    if common_tracks is None:
        common_tracks = matching.all_common_tracks(graph, tracks)
    reconstructions = []

    data.config['five_point_algo_threshold'] = 0.5

    if (not my_init):
        pairs = compute_image_pairs(common_tracks, data.config)
    else:
        pairs = compute_image_pairs_new(common_tracks, data.config, step=5)

    for im1, im2 in pairs:

        if im1 in remaining_images and im2 in remaining_images:

            tracks, p1, p2 = common_tracks[im1, im2]

            reconstruction = bootstrap_reconstruction(data, graph, im1, im2,
                                                      p1, p2, my_init)
            """ Add a visualization to check the result """
            # my_canvas = vispy_util_persp.MyCanvas(reconstruction, data.image_files, has_color=0)

            if reconstruction:
                remaining_images.remove(im1)
                remaining_images.remove(im2)
                # reconstruction = grow_reconstruction(
                #     data, graph, reconstruction, remaining_images, gcp, my_canvas)
                reconstruction = grow_reconstruction(data, graph,
                                                     reconstruction,
                                                     remaining_images, gcp)

                # my_canvas.update_data(reconstruction)

                reconstructions.append(reconstruction)
                reconstructions = sorted(reconstructions,
                                         key=lambda x: -len(x.shots))
                data.save_reconstruction(reconstructions)

    for k, r in enumerate(reconstructions):
        logger.info("Reconstruction {}: {} images, {} points".format(
            k, len(r.shots), len(r.points)))
    logger.info("{} partial reconstructions in total.".format(
        len(reconstructions)))
Code example #4
File: reconstruction.py  Project: gy20073/OpenSfM
def incremental_reconstruction(data):
    """Run the entire incremental reconstruction pipeline."""
    logger.info("Starting incremental reconstruction")

    # Compute a reference lat/lon/alt (the internal coordinate origin) from the images' EXIF data
    data.invent_reference_lla()

    # Returns a networkx graph with two kinds of nodes, images and tracks; features are keypoint locations
    graph = data.load_tracks_graph()

    # All tracks and images, stored in two lists
    tracks, images = matching.tracks_and_images(graph)
    remaining_images = set(images)
    gcp = None

    # Unless a ground control point file was explicitly written, no such file exists
    if data.ground_control_points_exist():
        gcp = data.load_ground_control_points()

    # Returns a mapping (im1, im2) -> (tracks, im1_features, im2_features)
    common_tracks = matching.all_common_tracks(graph, tracks)
    reconstructions = []

    # Returns a list of image pairs, sorted by decreasing favorability
    pairs = compute_image_pairs(common_tracks, data.config)
    if len(pairs) == 0:
        # Fallback; assumes `from itertools import combinations` is in scope
        print("no image pairs available, use all combinations instead")
        pairs = combinations(sorted(remaining_images), 2)
    for im1, im2 in pairs:
        # Pick a pair only if both images are still unreconstructed;
        # once bootstrapped, they are removed from the set.
        # If this branch is entered more than once, multiple partial
        # reconstructions were produced, which is undesirable.
        if im1 in remaining_images and im2 in remaining_images:
            tracks, p1, p2 = common_tracks[im1, im2]
            # TODO: we have to carefully select which image pairs to use
            # This is only called once
            reconstruction = bootstrap_reconstruction(data, graph, im1, im2, p1, p2)
            if reconstruction:
                remaining_images.remove(im1)
                remaining_images.remove(im2)
                # The main growing step: it adds all remaining images it can, not just one
                reconstruction = grow_reconstruction(
                    data, graph, reconstruction, remaining_images, gcp)
                reconstructions.append(reconstruction)
                reconstructions = sorted(reconstructions,
                                         key=lambda x: -len(x.shots))
                data.save_reconstruction(reconstructions)
            else:
                print("reconstruction for image %s and %s failed" % (im1, im2))

    for k, r in enumerate(reconstructions):
        logger.info("Reconstruction {}: {} images, {} points".format(
            k, len(r.shots), len(r.points)))
    logger.info("{} partial reconstructions in total.".format(
        len(reconstructions)))
Code example #5
File: reconstruction.py  Project: whlook/OpenSfM
def incremental_reconstruction(data):
    """Run the entire incremental reconstruction pipeline."""
    logger.info("Starting incremental reconstruction")
    report = {}
    chrono = Chronometer()
    if not data.reference_lla_exists():
        data.invent_reference_lla()

    graph = data.load_tracks_graph()
    tracks, images = matching.tracks_and_images(graph)
    chrono.lap('load_tracks_graph')
    remaining_images = set(images)
    gcp = None
    if data.ground_control_points_exist():
        gcp = data.load_ground_control_points()
    common_tracks = matching.all_common_tracks(graph, tracks)
    reconstructions = []
    pairs = compute_image_pairs(common_tracks, data)
    chrono.lap('compute_image_pairs')
    report['num_candidate_image_pairs'] = len(pairs)
    report['reconstructions'] = []
    for im1, im2 in pairs:
        if im1 in remaining_images and im2 in remaining_images:
            rec_report = {}
            report['reconstructions'].append(rec_report)
            tracks, p1, p2 = common_tracks[im1, im2]
            reconstruction, rec_report['bootstrap'] = bootstrap_reconstruction(
                data, graph, im1, im2, p1, p2)

            if reconstruction:
                remaining_images.remove(im1)
                remaining_images.remove(im2)
                reconstruction, rec_report['grow'] = grow_reconstruction(
                    data, graph, reconstruction, remaining_images, gcp)
                reconstructions.append(reconstruction)
                reconstructions = sorted(reconstructions,
                                         key=lambda x: -len(x.shots))
                data.save_reconstruction(reconstructions)

    for k, r in enumerate(reconstructions):
        logger.info("Reconstruction {}: {} images, {} points".format(
            k, len(r.shots), len(r.points)))
    logger.info("{} partial reconstructions in total.".format(
        len(reconstructions)))
    chrono.lap('compute_reconstructions')
    report['wall_times'] = dict(chrono.lap_times())
    report['not_reconstructed_images'] = list(remaining_images)
    return report
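
The `Chronometer` used here is not defined in the snippet; judging from the `lap()`/`lap_times()` calls above, a minimal sketch with the same interface might be:

# Minimal Chronometer sketch inferred from its usage above;
# the real OpenSfM class may differ.
import time

class Chronometer(object):
    def __init__(self):
        self.start_time = time.time()
        self.laps = []

    def lap(self, name):
        # Record the wall-clock time since the previous lap under `name`.
        now = time.time()
        self.laps.append((name, now - self.start_time))
        self.start_time = now

    def lap_times(self):
        # Returns (name, seconds) pairs, so dict(chrono.lap_times()) works.
        return list(self.laps)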
Code example #6
File: dense.py  Project: dakotabenjamin/OpenSfM
def compute_depthmaps(data, graph, reconstruction):
    """Compute and refine depthmaps for all shots."""
    logger.info('Computing neighbors')
    processes = data.config.get('processes', 1)
    num_neighbors = data.config['depthmap_num_neighbors']
    tracks, _ = matching.tracks_and_images(graph)
    common_tracks = matching.all_common_tracks(graph, tracks, include_features=False)

    neighbors = {}
    for shot in reconstruction.shots.values():
        neighbors[shot.id] = find_neighboring_images(
            shot, common_tracks, reconstruction, num_neighbors)

    arguments = []
    for shot in reconstruction.shots.values():
        if len(neighbors[shot.id]) <= 1:
            continue
        min_depth, max_depth = compute_depth_range(graph, reconstruction, shot)
        arguments.append((data, neighbors[shot.id], min_depth, max_depth, shot))
    parallel_map(compute_depthmap_catched, arguments, processes)

    arguments = []
    for shot in reconstruction.shots.values():
        if len(neighbors[shot.id]) <= 1:
            continue
        arguments.append((data, neighbors[shot.id], shot))
    parallel_map(clean_depthmap_catched, arguments, processes)

    arguments = []
    for shot in reconstruction.shots.values():
        if len(neighbors[shot.id]) <= 1:
            continue
        arguments.append((data, neighbors[shot.id], shot))
    parallel_map(prune_depthmap_catched, arguments, processes)

    merge_depthmaps(data, graph, reconstruction, neighbors)
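
The `*_catched` worker names suggest wrappers that trap exceptions so one failing shot does not abort the whole pool. The actual wrappers are not shown here; a minimal sketch of that pattern could be:

# Sketch of the exception-trapping wrapper pattern implied by the
# *_catched names; the real OpenSfM wrappers may differ.
def compute_depthmap_catched(arguments):
    try:
        compute_depthmap(arguments)
    except Exception:
        # Log the full traceback but let the other workers continue.
        logger.exception('Exception while computing depthmap')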