Example #1
def SfM_reconstruct(src): # incremental_reconstruction
    import json
    import os

    from opensfm.dataset import DataSet
    from opensfm.reconstruction import (tracking, compute_image_pairs,
        bootstrap_reconstruction, grow_reconstruction)

    data = DataSet(src)
    result = []
    gcp = data.load_ground_control_points()
    tracks = data.load_tracks_manager()
    imgs = tracks.get_shot_ids()
    if not data.reference_lla_exists():
        data.invent_reference_lla(imgs)

    camera_priors = data.load_camera_models()
    common_tracks = tracking.all_common_tracks(tracks)
    pairs = compute_image_pairs(common_tracks, camera_priors, data)
    imgs = set(imgs)
    report = {'candidate_image_pairs': len(pairs)}
    for im1, im2 in pairs:
        if im1 in imgs and im2 in imgs:
            report[im1+' & '+im2] = log = {}
            v, p1, p2 = common_tracks[im1, im2]
            rec, log['bootstrap'] = bootstrap_reconstruction(
                data, tracks, camera_priors, im1, im2, p1, p2)
            if rec:
                imgs.remove(im1)
                imgs.remove(im2)
                rec, log['grow'] = grow_reconstruction(
                    data, tracks, rec, imgs, camera_priors, gcp)
                result.append(rec)
    result = sorted(result, key=lambda x: -len(x.shots))
    data.save_reconstruction(result)
    report['not_reconstructed_images'] = list(imgs)
    os.makedirs(f'{src}/reports', exist_ok=True)
    with open(f'{src}/reports/reconstruction.json', 'w') as f:
        json.dump(report, f, indent=4)
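
A minimal usage sketch (not part of the original example): it assumes a hypothetical dataset directory that has already been through the earlier OpenSfM stages (feature detection, matching and track creation), so the tracks manager and camera models loaded above exist on disk.

# Hypothetical invocation; "path/to/opensfm_dataset" is a placeholder path.
SfM_reconstruct("path/to/opensfm_dataset")
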
Example #2
def run_dataset(
    data: DataSet,
    proj: str,
    transformation: bool,
    image_positions: bool,
    reconstruction: bool,
    dense: bool,
    output: str,
) -> None:
    """Export reconstructions in geographic coordinates

    Args:
        proj: PROJ.4 projection string
        transformation : print coordinate transformation matrix
        image_positions : export image positions
        reconstruction : export reconstruction.json
        dense : export dense point cloud (depthmaps/merged.ply)
        output : path of the output file relative to the dataset

    """

    if not (transformation or image_positions or reconstruction or dense):
        logger.info("Nothing to do. At least on of the options: ")
        logger.info(" --transformation, --image-positions, --reconstruction, --dense")

    reference = data.load_reference()

    projection = pyproj.Proj(proj)
    t = _get_transformation(reference, projection)

    if transformation:
        output = output or "geocoords_transformation.txt"
        output_path = os.path.join(data.data_path, output)
        _write_transformation(t, output_path)

    if image_positions:
        reconstructions = data.load_reconstruction()
        output = output or "image_geocoords.tsv"
        output_path = os.path.join(data.data_path, output)
        _transform_image_positions(reconstructions, t, output_path)

    if reconstruction:
        reconstructions = data.load_reconstruction()
        for r in reconstructions:
            _transform_reconstruction(r, t)
        output = output or "reconstruction.geocoords.json"
        data.save_reconstruction(reconstructions, output)

    if dense:
        output = output or "undistorted/depthmaps/merged.geocoords.ply"
        output_path = os.path.join(data.data_path, output)
        udata = data.undistorted_dataset()
        _transform_dense_point_cloud(udata, t, output_path)
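
A hedged usage sketch for this exporter: the dataset path, PROJ.4 string and output filename below are illustrative assumptions, not values from the original. Only the image-positions flag is enabled, so the call would write camera positions as a TSV inside the dataset directory.

from opensfm.dataset import DataSet

# Hypothetical call: export camera positions into UTM zone 31N coordinates.
data = DataSet("path/to/dataset")
run_dataset(
    data,
    proj="+proj=utm +zone=31 +datum=WGS84 +units=m +no_defs",
    transformation=False,
    image_positions=True,
    reconstruction=False,
    dense=False,
    output="image_geocoords.tsv",
)
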
Example #3
    def check_merge_partial_reconstructions(self):
        if self.reconstructed():
            data = DataSet(self.opensfm_project_path)
            reconstructions = data.load_reconstruction()
            tracks_manager = data.load_tracks_manager()

            if len(reconstructions) > 1:
                log.ODM_WARNING(
                    "Multiple reconstructions detected (%s), this might be an indicator that some areas did not have sufficient overlap"
                    % len(reconstructions))
                log.ODM_INFO("Attempting merge")

                merged = Reconstruction()
                merged.set_reference(reconstructions[0].reference)

                for ix_r, rec in enumerate(reconstructions):
                    if merged.reference != rec.reference:
                        # Should never happen
                        continue

                    log.ODM_INFO("Merging reconstruction %s" % ix_r)

                    for camera in rec.cameras.values():
                        merged.add_camera(camera)

                    for point in rec.points.values():
                        try:
                            new_point = merged.create_point(
                                point.id, point.coordinates)
                            new_point.color = point.color
                        except RuntimeError as e:
                            log.ODM_WARNING("Cannot merge point id %s (%s)" %
                                            (point.id, str(e)))
                            continue

                    for shot in rec.shots.values():
                        merged.add_shot(shot)
                        try:
                            obsdict = tracks_manager.get_shot_observations(
                                shot.id)
                        except RuntimeError:
                            log.ODM_WARNING(
                                "Shot id %s missing from tracks_manager!" %
                                shot.id)
                            continue
                        for track_id, obs in obsdict.items():
                            if track_id in merged.points:
                                merged.add_observation(shot.id, track_id, obs)

                data.save_reconstruction([merged])
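
As a standalone sketch, the same merge idea can be expressed as a free function over an OpenSfM dataset directory. It reuses only the calls shown in the method above (plus the Reconstruction import); the dataset path is a placeholder, so treat this as an illustration under those assumptions rather than the ODM implementation.

from opensfm.dataset import DataSet
from opensfm.types import Reconstruction

def merge_partial_reconstructions(dataset_path):
    # Load every partial reconstruction plus the shared tracks manager.
    data = DataSet(dataset_path)
    reconstructions = data.load_reconstruction()
    tracks_manager = data.load_tracks_manager()
    if len(reconstructions) <= 1:
        return reconstructions

    merged = Reconstruction()
    merged.set_reference(reconstructions[0].reference)
    for rec in reconstructions:
        for camera in rec.cameras.values():
            merged.add_camera(camera)
        for point in rec.points.values():
            try:
                merged.create_point(point.id, point.coordinates).color = point.color
            except RuntimeError:
                # Skip points that cannot be added (e.g. already merged).
                continue
        for shot in rec.shots.values():
            merged.add_shot(shot)
            try:
                obsdict = tracks_manager.get_shot_observations(shot.id)
            except RuntimeError:
                continue  # shot missing from the tracks manager
            # Re-attach each shot's 2D observations to the merged points.
            for track_id, obs in obsdict.items():
                if track_id in merged.points:
                    merged.add_observation(shot.id, track_id, obs)
    data.save_reconstruction([merged])
    return [merged]
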
Example #4
def run_dataset(data: DataSet, method, definition, output_debug):
    """Given a dataset that contains rigs, construct rig data files.

    Args:
        data: dataset object
        method : `auto` will run `reconstruct` process and try to detect rig pattern (TODO)
                 `camera` will create instances based on the camera model name
                 `pattern` will create instances based on a REGEX pattern (see below)
        definition : JSON dict (one for each RigCamera) with values as:
                    - (.*) for the `pattern` method, where the part outside
                        of the parentheses defines a RigCamera instance
                    - a camera model ID for the `camera` method
        output_debug : output a debug JSON reconstruction `rig_instances.json` with rig instances
    """
    rig.create_rigs_with_pattern(data, definition)
    if output_debug:
        reconstructions = _reconstruction_from_rigs_and_assignments(data)
        data.save_reconstruction(reconstructions, "rig_instances.json")
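
A hypothetical invocation of this rig builder, following the docstring above: the dataset path, the RigCamera keys and the regex patterns are illustrative assumptions, where images sharing the same captured group would be grouped into one rig instance.

from opensfm.dataset import DataSet

# Placeholder dataset path and pattern definition for the `pattern` method.
data = DataSet("path/to/dataset")
run_dataset(
    data,
    method="pattern",
    definition={"left_camera": "(.*)_left", "right_camera": "(.*)_right"},
    output_debug=True,
)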