Example #1
def SfM_reconstruct(src): # incremental_reconstruction
    import json  # used for the report dump at the end
    from opensfm.dataset import DataSet
    from opensfm.reconstruction import (tracking, compute_image_pairs,
        bootstrap_reconstruction, grow_reconstruction)

    data = DataSet(src); result = []
    gcp = data.load_ground_control_points()
    tracks = data.load_tracks_manager()
    imgs = tracks.get_shot_ids()
    if not data.reference_lla_exists():
        data.invent_reference_lla(imgs)

    camera_priors = data.load_camera_models()
    common_tracks = tracking.all_common_tracks(tracks)
    pairs = compute_image_pairs(common_tracks, camera_priors, data)
    imgs = set(imgs); report = {'candidate_image_pairs': len(pairs)}
    for im1, im2 in pairs:
        if im1 in imgs and im2 in imgs:
            report[im1+' & '+im2] = log = {}
            v, p1, p2 = common_tracks[im1, im2]
            rec, log['bootstrap'] = bootstrap_reconstruction(
                data, tracks, camera_priors, im1, im2, p1, p2)
            if rec:
                imgs.remove(im1); imgs.remove(im2)
                rec, log['grow'] = grow_reconstruction(
                    data, tracks, rec, imgs, camera_priors, gcp)
                result.append(rec)
    result = sorted(result, key=lambda x: -len(x.shots))
    data.save_reconstruction(result)
    report['not_reconstructed_images'] = list(imgs)
    with open(f'{src}/reports/reconstruction.json','w') as f:
        json.dump(report, f, indent=4)
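For comparison, the stock pipeline exposes the same incremental reconstruction as a built-in action. A minimal sketch (the dataset path is a placeholder, and the exact run_dataset signature varies across OpenSfM versions):

from opensfm.dataset import DataSet
from opensfm.actions import reconstruct

data = DataSet('/path/to/dataset')  # hypothetical dataset directory
reconstruct.run_dataset(data)       # writes reconstruction.json plus a report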
Example #2
def run_dataset(data: DataSet, list_path, bundle_path, undistorted):
    """Export reconstruction to bundler format.

    Args:
        list_path: txt list of images to export
        bundle_path : output path
        undistorted : export undistorted reconstruction

    """

    udata = data.undistorted_dataset()

    default_path = os.path.join(data.data_path, "bundler")
    list_file_path = list_path if list_path else default_path
    bundle_file_path = bundle_path if bundle_path else default_path

    if undistorted:
        reconstructions = udata.load_undistorted_reconstruction()
        track_manager = udata.load_undistorted_tracks_manager()
        images = reconstructions[0].shots.keys()
    else:
        reconstructions = data.load_reconstruction()
        track_manager = data.load_tracks_manager()
        images = data.images()

    io.export_bundler(images, reconstructions, track_manager, bundle_file_path,
                      list_file_path)
Example #3
def SfM_export_pos(src, dop=0.1, tsv='image_geocoords.tsv'):
    import json; import numpy as np  # deps used below
    # rename_rec, SfM_cmd, load_sort_save and LLA come from the surrounding module
    rename_rec(src)
    dif = []
    gjs = {}  # for sfm
    SfM_cmd(src, f'export_geocoords --image-positions --proj="{LLA}"')
    rename_rec(src)
    geo = load_sort_save(f'{src}/{tsv}', a=1)  # sort
    with open(src + '/geo.txt', 'w') as f:
        f.writelines([LLA + '\n'] + geo[1:])

    from opensfm.dataset import DataSet
    data = DataSet(src)
    ref = data.load_reference()
    for v in geo[1:]:  # skip the header row
        im, *v = v.split()
        v = np.float64(v)[[1, 0, 2]]
        o = [*data.load_exif(im)['gps'].values()][:3]  # lat,lon,alt
        ov = ref.to_topocentric(*v) - np.array(ref.to_topocentric(*o))
        gjs[im] = {
            'gps': dict(latitude=v[0], longitude=v[1], altitude=v[2], dop=dop)
        }
        dif += [f'{im} lla={v} exif={o}\tdif={ov.tolist()}\n']
    with open(f'{src}/{tsv[:-4]}.dif.txt', 'w') as f:
        f.writelines(dif)
    with open(src + '/geo.json', 'w') as f:
        json.dump(gjs, f, indent=4)
Example #4
    def run(self, args):
        start = timer()
        data = DataSet(args.dataset)
        self.run_impl(data, args)
        end = timer()
        with open(data.profile_log(), 'a') as fout:
            fout.write(type(self).name + ': {0}\n'.format(end - start))
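The same wall-clock profiling pattern works outside the command framework. A minimal self-contained sketch (class, step and file names are hypothetical, not OpenSfM API):

from timeit import default_timer as timer

class ProfiledStep:
    name = 'my_step'  # hypothetical step name

    def run_impl(self):
        sum(range(10**6))  # stand-in for real work

    def run(self, log_path='profile.log'):  # hypothetical log file
        start = timer()
        self.run_impl()
        end = timer()
        with open(log_path, 'a') as fout:
            fout.write(type(self).name + ': {0}\n'.format(end - start))

ProfiledStep().run()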
Example #5
def run_dataset(
    data: DataSet,
    reconstruction: Optional[str] = None,
    reconstruction_index: int = 0,
    tracks: Optional[str] = None,
    output: str = "undistorted",
    skip_images: bool = False,
) -> None:
    """Export reconstruction to NVM_V3 format from VisualSfM

    Args:
        reconstruction: reconstruction to undistort
        reconstruction_index: index of the reconstruction component to undistort
        tracks: tracks graph of the reconstruction
        output: undistorted
        skip_images: do not undistort images
    """
    undistorted_data_path = os.path.join(data.data_path, output)
    udata = dataset.UndistortedDataSet(data,
                                       undistorted_data_path,
                                       io_handler=data.io_handler)
    reconstructions = data.load_reconstruction(reconstruction)
    if data.tracks_exists(tracks):
        tracks_manager = data.load_tracks_manager(tracks)
    else:
        tracks_manager = None

    if reconstructions:
        r = reconstructions[reconstruction_index]
        undistort.undistort_reconstruction_with_images(tracks_manager, r, data,
                                                       udata, skip_images)
Example #6
    def ground_control_points(self, proj4):
        """
        Load ground control point information.
        """
        gcp_stats_file = self.path("stats", "ground_control_points.json")

        if not io.file_exists(gcp_stats_file):
            return []

        gcps_stats = {}
        try:
            with open(gcp_stats_file) as f:
                gcps_stats = json.load(f)
        except Exception:
            log.ODM_INFO("Cannot parse %s" % gcp_stats_file)

        if not gcps_stats:
            return []

        ds = DataSet(self.opensfm_project_path)
        reference = ds.load_reference()
        projection = pyproj.Proj(proj4)

        result = []
        for gcp in gcps_stats:
            geocoords = _transform(gcp['coordinates'], reference, projection)
            result.append({
                'id': gcp['id'],
                'observations': gcp['observations'],
                'coordinates': geocoords,
                'error': gcp['error']
            })

        return result
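The _transform helper is not shown above. A plausible sketch, under the assumption that the stored GCP coordinates are topocentric x, y, z relative to the dataset reference (this is a hypothetical reimplementation, not the original helper):

def _transform(coordinates, reference, projection):
    x, y, z = coordinates
    lat, lon, alt = reference.to_lla(x, y, z)  # topocentric -> WGS84
    easting, northing = projection(lon, lat)   # WGS84 -> proj4 target CRS
    return [easting, northing, alt]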
Example #7
def create_rigs_with_pattern(data: DataSet, patterns):
    """Create rig data (`rig_models.json` and `rig_assignments.json`) by performing
    pattern matching to group images belonging to the same instances, followed
    by a bit of ad-hoc SfM to find some initial relative poses.
    """

    # Construct instances assignments for each rig
    instances_per_rig = create_instances_with_patterns(data.images(), patterns)
    for rig_id, instances in instances_per_rig.items():
        logger.info(
            f"Found {len(instances)} rig instances for rig {rig_id} using pattern matching."
        )

    # Create some subset DataSet with enough images from each rig
    subset_data = create_subset_dataset_from_instances(data, instances_per_rig,
                                                       "rig_calibration")

    # Run a bit of SfM without any rig
    logger.info(
        f"Running SfM on a subset of {len(subset_data.images())} images.")
    actions.extract_metadata.run_dataset(subset_data)
    actions.detect_features.run_dataset(subset_data)
    actions.match_features.run_dataset(subset_data)
    actions.create_tracks.run_dataset(subset_data)
    actions.reconstruct.run_dataset(subset_data)

    # Compute some relative poses
    rig_models = create_rig_models_from_reconstruction(
        subset_data.load_reconstruction()[0], instances_per_rig)

    data.save_rig_models(rig_models)
    data.save_rig_assignments(instances_per_rig)
Example #8
def create_subset_dataset_from_instances(data: DataSet, instances_per_rig,
                                         name):
    """Given a list of images grouped by rigs instances, pick a subset of images
        and create a dataset subset with the provided name from them.

    Returns :
        A DataSet containing a subset of images containing enough rig instances
    """
    subset_images = []
    for instances in instances_per_rig.values():
        instances_sorted = sorted(
            instances, key=lambda x: data.load_exif(x[0][0])["capture_time"])

        subset_size = data.config["rig_calibration_subset_size"]
        middle = len(instances_sorted) // 2  # integer index (a float would break slicing)
        half = int(subset_size / 2)
        begin = max(0, middle - half)
        end = min(middle + half, len(instances_sorted) - 1)
        instances_calibrate = instances_sorted[begin:end]

        for instance in instances_calibrate:
            subset_images += [x[0] for x in instance]

    return data.subset(name, subset_images)
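A quick standalone illustration of the centered-window selection above (hypothetical values standing in for rig instances sorted by capture time):

instances_sorted = list(range(10))   # stand-in for 10 time-sorted instances
subset_size = 4
middle = len(instances_sorted) // 2  # 5
half = int(subset_size / 2)          # 2
window = instances_sorted[max(0, middle - half):min(middle + half, len(instances_sorted) - 1)]
print(window)  # [3, 4, 5, 6]: the subset_size instances around the middle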
Example #9
def run_dataset(data: DataSet, reconstruction, reconstruction_index, tracks,
                output):
    """Export reconstruction to NVM_V3 format from VisualSfM

    Args:
        reconstruction: reconstruction to undistort
        reconstruction_index: index of the reconstruction component to undistort
        tracks: tracks graph of the reconstruction
        output: undistorted

    """
    undistorted_data_path = os.path.join(data.data_path, output)
    udata = dataset.UndistortedDataSet(data,
                                       undistorted_data_path,
                                       io_handler=data.io_handler)
    reconstructions = data.load_reconstruction(reconstruction)
    if data.tracks_exists(tracks):
        tracks_manager = data.load_tracks_manager(tracks)
    else:
        tracks_manager = None

    if reconstructions:
        r = reconstructions[reconstruction_index]
        undistort.undistort_reconstruction_and_images(tracks_manager, r, data,
                                                      udata)
Example #10
def run_dataset(data: DataSet, diagram_max_points=-1):
    """Compute various staistics of a datasets and write them to 'stats' folder

    Args:
        data: dataset object

    """
    reconstructions = data.load_reconstruction()
    tracks_manager = data.load_tracks_manager()

    output_path = os.path.join(data.data_path, "stats")
    data.io_handler.mkdir_p(output_path)

    stats_dict = stats.compute_all_statistics(data, tracks_manager, reconstructions)

    stats.save_residual_grids(
        data, tracks_manager, reconstructions, output_path, data.io_handler
    )
    stats.save_matchgraph(
        data, tracks_manager, reconstructions, output_path, data.io_handler
    )
    stats.save_residual_histogram(stats_dict, output_path, data.io_handler)

    if diagram_max_points > 0:
        stats.decimate_points(reconstructions, diagram_max_points)

    stats.save_heatmap(
        data, tracks_manager, reconstructions, output_path, data.io_handler
    )
    stats.save_topview(
        data, tracks_manager, reconstructions, output_path, data.io_handler
    )

    with data.io_handler.open_wt(os.path.join(output_path, "stats.json")) as fout:
        io.json_dump(stats_dict, fout)
Example #11
def check_prior(data: dataset.DataSet, output_rec_path: str):
    reconstruction = data.load_reconstruction()  # load old reconstruction
    prior_rec = data.load_reconstruction(output_rec_path)
    for shot_id, shot in reconstruction[0].shots.items():
        utils.assert_shots_equal(shot, prior_rec[0].shots[shot_id])

    assert len(prior_rec[0].points) > 1000
Example #12
def create_default_dataset_context(
    dataset_path: str, dataset_type: str = ""
) -> Generator[DataSet, None, None]:
    dataset = DataSet(dataset_path)
    try:
        yield dataset
    finally:
        dataset.clean_up()
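The try/finally-around-yield shape is exactly what contextlib.contextmanager expects, and in the original module the function is presumably decorated accordingly. A minimal sketch of the same pattern (the path is a placeholder):

from contextlib import contextmanager
from typing import Generator

from opensfm.dataset import DataSet

@contextmanager
def default_dataset_context(dataset_path: str) -> Generator[DataSet, None, None]:
    dataset = DataSet(dataset_path)
    try:
        yield dataset       # hand the dataset to the with-block
    finally:
        dataset.clean_up()  # always runs, even on errors

# with default_dataset_context('/path/to/dataset') as data:
#     print(len(data.images()))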
Example #13
def run_dataset(data: DataSet, points, image_list, output, undistorted):
    """Export reconstruction to PLY format

    Args:
        points: export points
        image_list: export only the shots included in this file (path to .txt file)
        output: output pmvs directory
        undistorted: export the undistorted reconstruction

    """

    udata = data.undistorted_dataset()

    base_output_path = output if output else os.path.join(
        data.data_path, "pmvs")
    io.mkdir_p(base_output_path)
    logger.info("Converting dataset [%s] to PMVS dir [%s]" %
                (data.data_path, base_output_path))

    if undistorted:
        reconstructions = udata.load_undistorted_reconstruction()
    else:
        reconstructions = data.load_reconstruction()

    # load tracks for vis.dat
    tracks_manager = None  # stays None if the tracks file is missing
    try:
        if undistorted:
            tracks_manager = udata.load_undistorted_tracks_manager()
        else:
            tracks_manager = data.load_tracks_manager()
        image_graph = tracking.as_weighted_graph(tracks_manager)
    except IOError:
        image_graph = None

    export_only = None
    if image_list:
        export_only = {}
        with open(image_list, "r") as f:
            for image in f:
                export_only[image.strip()] = True

    for h, reconstruction in enumerate(reconstructions):
        export(
            reconstruction,
            h,
            image_graph,
            tracks_manager,
            base_output_path,
            data,
            undistorted,
            udata,
            points,
            export_only,
        )
Example #14
def filter_reconstruct(src, thd=0.3):
    import json; import os
    import numpy as np
    # Camera, calc_axis_cam, load_feature, INFO come from the surrounding module
    src = os.path.abspath(src)
    res = {}
    if os.path.isdir(src + '/opensfm'):  # for odm
        cam = src + '/cameras.json'
        src += '/opensfm'
        rec = src + '/reconstruction.topocentric.json'
    elif os.path.isfile(src + '/../cameras.json'):
        cam = src + '/../cameras.json'  # for odm
        rec = src + '/reconstruction.topocentric.json'
    elif os.path.isfile(src + '/camera_models.json'):
        cam = src + '/camera_models.json'  # for sfm
        rec = src + '/reconstruction.json'
    else:
        raise FileNotFoundError(f'no camera/reconstruction files found under {src}')
    cam = Camera(cam)
    bak = rec[:-4] + 'bak'
    if os.path.isfile(bak):
        if os.path.isfile(rec): os.remove(rec)
        os.rename(bak, rec)  # for win
    with open(rec) as f:
        data = json.load(f)[0]
    os.rename(rec, bak)
    INFO(f'Filter: {rec}')

    from opensfm.dataset import DataSet
    T = DataSet(src).load_tracks_manager()
    for im in T.get_shot_ids():
        v = data['shots'][im]
        rotation = np.array(v['rotation'])
        translation = np.array(v['translation'])
        O, X, Y, Z = calc_axis_cam(translation, rotation)
        feat = load_feature(f'{src}/features/{im}', cam, 1)
        for tid, x in T.get_shot_observations(im).items():
            if tid not in data['points']: continue
            dp = data['points'][tid]['coordinates'] - O
            ddp = np.linalg.norm(dp)
            u, v = feat[x.id][:2]  # fid
            qt = u * X + v * Y + Z
            qt /= np.linalg.norm(qt)
            delta = np.dot(dp, qt)
            dis = np.sqrt(ddp**2 - delta**2)
            if tid not in res: res[tid] = dis
            elif dis > res[tid]: res[tid] = dis  # meters
            #print(f'{im} %6s %6s %.3f'%(tid,x.id,dis))
    dis = [*res.values()]
    md = np.mean(dis)
    thd = min(thd, md)
    out = {k: v for k, v in res.items() if v > thd}  # tracks beyond the threshold
    #print(out)
    #plt.hist(dis, [0.01,0.05,0.1,0.5,1,2]); plt.show()
    for tid in out:
        data['points'].pop(tid)
    with open(rec, 'w') as f:
        json.dump([data], f, indent=4)
    INFO('Out=%d/%d, Thd=%.3f, Max=%.3f' % (len(out), len(res), thd, max(dis)))
Example #15
def create_rigs_with_pattern(data: DataSet, patterns):
    """Create rig data (`rig_models.json` and `rig_assignments.json`) by performing
    pattern matching to group images belonging to the same instances, followed
    by a bit of ad-hoc SfM to find some initial relative poses.
    """

    # Construct instances assignments for each rig
    instances_per_rig = create_instances_with_patterns(data.images(), patterns)
    for rig_id, instances in instances_per_rig.items():
        logger.info(
            f"Found {len(instances)} rig instances for rig {rig_id} using pattern matching."
        )

    # Create some subset DataSet with enough images from each rig
    subset_data = create_subset_dataset_from_instances(data, instances_per_rig,
                                                       "rig_calibration")

    # Run a bit of SfM without any rig
    logger.info(
        f"Running SfM on a subset of {len(subset_data.images())} images.")
    actions.extract_metadata.run_dataset(subset_data)
    actions.detect_features.run_dataset(subset_data)
    actions.match_features.run_dataset(subset_data)
    actions.create_tracks.run_dataset(subset_data)
    actions.reconstruct.run_dataset(subset_data)

    # Compute some relative poses
    rig_models_poses = create_rig_model_from_reconstruction(
        subset_data.load_reconstruction()[0], instances_per_rig)

    # Ad-hoc construction of output model data
    # Will be replaced by `io` counterpart
    models = {}
    for rig_id in patterns:
        rig_pattern = patterns[rig_id]
        model = rig_models_poses[rig_id]

        rig_model = {}
        for rig_camera_id in model:
            pose, camera_id = model[rig_camera_id]
            rig_model[rig_camera_id] = {
                "translation": list(pose.translation),
                "rotation": list(pose.rotation),
                "camera": camera_id,
            }

        models[rig_id] = {
            "rig_relative_type": "shared",
            "rig_cameras": rig_model,
        }

    data.save_rig_models(models)
    data.save_rig_assignments(instances_per_rig)
Example #16
def SfM_parse_track(src):
    import numpy as np
    from opensfm.dataset import DataSet
    #from opensfm.pymap import Observation
    TM = DataSet(src).load_tracks_manager()
    T = {}
    for im in TM.get_shot_ids():
        T.setdefault(im, [[], [], [], []])
        for tid, v in TM.get_shot_observations(im).items():
            T[im][0] += [tid]  # track id (str)
            T[im][1] += [v.id]  # feature id
            T[im][2] += [np.hstack([v.point, v.scale])]  # x,y (normalized) + scale
            T[im][3] += [v.color]  # RGB
    return T  # {im: [tid,fid,xys,RGB]}
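Hypothetical usage of the returned mapping, assuming a dataset directory that already has a tracks file:

T = SfM_parse_track('/path/to/dataset')  # hypothetical path
for im, (tids, fids, xys, rgbs) in T.items():
    print(im, len(tids), 'observations')  # one entry per track observed in im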
Example #17
def _create_image_list(data: DataSet, meta_data):
    ills = []
    for image in data.images():
        exif = data.load_exif(image)
        if ("gps" not in exif or "latitude" not in exif["gps"]
                or "longitude" not in exif["gps"]):
            logger.warning("Skipping {} because of missing GPS".format(image))
            continue

        lat = exif["gps"]["latitude"]
        lon = exif["gps"]["longitude"]
        ills.append((image, lat, lon))

    meta_data.create_image_list(ills)
Example #18
def run_dataset(data: DataSet, binary):
    """ Export reconstruction to COLMAP format."""

    export_folder = os.path.join(data.data_path, "colmap_export")
    io.mkdir_p(export_folder)

    database_path = os.path.join(export_folder, "colmap_database.db")
    images_path = os.path.join(data.data_path, "images")

    if os.path.exists(database_path):
        os.remove(database_path)
    db = COLMAPDatabase.connect(database_path)
    db.create_tables()

    images_map, camera_map = export_cameras(data, db)
    features_map = export_features(data, db, images_map)
    export_matches(data, db, features_map, images_map)

    if data.reconstruction_exists():
        export_ini_file(export_folder, database_path, images_path)
        export_cameras_reconstruction(data, export_folder, camera_map, binary)
        points_map = export_points_reconstruction(data, export_folder,
                                                  images_map, binary)
        export_images_reconstruction(
            data,
            export_folder,
            camera_map,
            images_map,
            features_map,
            points_map,
            binary,
        )
    db.commit()
    db.close()
Example #19
    def create_submodels(self, clusters):
        data = DataSet(self.data_path)
        for i, cluster in enumerate(clusters):
            # create sub model dirs
            submodel_path = self._submodel_path(i)
            submodel_images_path = self._submodel_images_path(i)
            io.mkdir_p(submodel_path)
            io.mkdir_p(submodel_images_path)

            # create image list file
            image_list_path = os.path.join(submodel_path, 'image_list.txt')
            with io.open_wt(image_list_path) as txtfile:
                for image in cluster:
                    src = data.image_files[image]
                    dst = os.path.join(submodel_images_path, image)
                    src_relpath = os.path.relpath(src, submodel_images_path)
                    if not os.path.isfile(dst):
                        os.symlink(src_relpath, dst)
                    dst_relpath = os.path.relpath(dst, submodel_path)
                    txtfile.write(dst_relpath + "\n")

            # copy config.yaml if exists
            config_file_path = os.path.join(self.data_path, 'config.yaml')
            if os.path.exists(config_file_path):
                shutil.copyfile(config_file_path,
                                os.path.join(submodel_path, 'config.yaml'))

            # create symlinks to additional files
            filenames = [
                'camera_models.json', 'reference_lla.json', 'exif', 'features',
                'matches', 'masks', 'mask_list.txt', 'segmentations'
            ]
            for filename in filenames:
                self._create_symlink(submodel_path, filename)
Example #20
    def convert_and_undistort(self, rerun=False, imageFilter=None, image_list=None, runId="nominal"):
        log.ODM_INFO("Undistorting %s ..." % self.opensfm_project_path)
        done_flag_file = self.path("undistorted", "%s_done.txt" % runId)

        if not io.file_exists(done_flag_file) or rerun:
            ds = DataSet(self.opensfm_project_path)

            if image_list is not None:
                ds._set_image_list(image_list)

            undistort.run_dataset(ds, "reconstruction.json", 
                                  0, None, "undistorted", imageFilter)
            
            self.touch(done_flag_file)
        else:
            log.ODM_WARNING("Already undistorted (%s)" % runId)
Example #21
def run_dataset(data: DataSet, proj, transformation, image_positions,
                reconstruction, dense, output):
    """Export reconstructions in geographic coordinates

    Args:
        proj: PROJ.4 projection string
        transformation: print coordinate transformation matrix
        image_positions: export image positions
        reconstruction: export reconstruction.json
        dense: export dense point cloud (depthmaps/merged.ply)
        output: path of the output file relative to the dataset

    """

    if not (transformation or image_positions or reconstruction or dense):
        logger.info("Nothing to do. Set at least one of the options:")
        logger.info(
            " --transformation, --image-positions, --reconstruction, --dense")

    reference = data.load_reference()

    projection = pyproj.Proj(proj)
    t = _get_transformation(reference, projection)

    if transformation:
        output = output or "geocoords_transformation.txt"
        output_path = os.path.join(data.data_path, output)
        _write_transformation(t, output_path)

    if image_positions:
        reconstructions = data.load_reconstruction()
        output = output or "image_geocoords.tsv"
        output_path = os.path.join(data.data_path, output)
        _transform_image_positions(reconstructions, t, output_path)

    if reconstruction:
        reconstructions = data.load_reconstruction()
        for r in reconstructions:
            _transform_reconstruction(r, t)
        output = output or "reconstruction.geocoords.json"
        data.save_reconstruction(reconstructions, output)

    if dense:
        output = output or "undistorted/depthmaps/merged.geocoords.ply"
        output_path = os.path.join(data.data_path, output)
        udata = dataset.UndistortedDataSet(data)
        _transform_dense_point_cloud(udata, t, output_path)
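A hedged invocation sketch, mirroring `bin/opensfm export_geocoords --image-positions` (the dataset path and the PROJ.4 string are placeholders):

from opensfm.dataset import DataSet

run_dataset(
    DataSet('/path/to/dataset'),           # hypothetical dataset directory
    proj='+proj=utm +zone=31 +ellps=WGS84',
    transformation=False,
    image_positions=True,                  # writes image_geocoords.tsv by default
    reconstruction=False,
    dense=False,
    output=None,
)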
Example #22
def processing_statistics(
        data: DataSet,
        reconstructions: List[types.Reconstruction]) -> Dict[str, Any]:
    steps = {
        "Feature Extraction": "features.json",
        "Features Matching": "matches.json",
        "Tracks Merging": "tracks.json",
        "Reconstruction": "reconstruction.json",
    }

    steps_times = {}
    for step_name, report_file in steps.items():
        file_path = os.path.join(data.data_path, "reports", report_file)
        if os.path.exists(file_path):
            with io.open_rt(file_path) as fin:
                obj = io.json_load(fin)
        else:
            obj = {}
        if "wall_time" in obj:
            steps_times[step_name] = obj["wall_time"]
        elif "wall_times" in obj:
            steps_times[step_name] = sum(obj["wall_times"].values())
        else:
            steps_times[step_name] = -1

    stats = {}
    stats["steps_times"] = steps_times
    stats["steps_times"]["Total Time"] = sum(
        filter(lambda x: x >= 0, steps_times.values()))

    try:
        stats["date"] = datetime.datetime.fromtimestamp(
            data.io_handler.timestamp(data._reconstruction_file(
                None))).strftime("%d/%m/%Y at %H:%M:%S")
    except FileNotFoundError:
        stats["date"] = "unknown"

    start_ct, end_ct = start_end_capture_time(reconstructions)
    if start_ct is not None and end_ct is not None:
        stats["start_date"] = datetime.datetime.fromtimestamp(
            start_ct).strftime("%d/%m/%Y at %H:%M:%S")
        stats["end_date"] = datetime.datetime.fromtimestamp(end_ct).strftime(
            "%d/%m/%Y at %H:%M:%S")
    else:
        stats["start_date"] = "unknown"
        stats["end_date"] = "unknown"

    default_max = 1e30
    min_x, min_y, max_x, max_y = default_max, default_max, 0, 0
    for rec in reconstructions:
        for shot in rec.shots.values():
            o = shot.pose.get_origin()
            min_x = min(min_x, o[0])
            min_y = min(min_y, o[1])
            max_x = max(max_x, o[0])
            max_y = max(max_y, o[1])
    stats["area"] = (max_x - min_x) * (max_y -
                                       min_y) if min_x != default_max else -1
    return stats
Example #23
def SfM_gcp_gz(GPS, RTK='', thd=1, dst=0):
    import gzip, json, os, pickle  # deps used below
    import cv2; import numpy as np
    from opensfm.dataset import DataSet
    # SfM_parse_track, SfM_feat_uv, filter_match, IDX, GCF, LLA, INFO,
    # dedup_gcp, filter_gcp come from the surrounding module
    if not os.path.isdir(RTK): RTK = GPS
    R = RTK + '/reconstruction.topocentric.json'
    if not os.path.isfile(R): R = R[:-16] + 'json'
    with open(R) as f:
        R = json.load(f)[0]['points']
    T = SfM_parse_track(RTK)  # {im2:[tid2,fid2,xys2,rgb2]}
    ref = DataSet(RTK).load_reference()
    gcp = [LLA + '\n']

    for gz in os.scandir(GPS + '/matches_gcp'):
        im1 = gz.name[:-15]  # parse *_matches.pkl.gz
        with gzip.open(gz.path, 'rb') as f:
            gz = pickle.load(f)
        for im2, fid in gz.items():  # (im1,im2):[fid1,fid2]
            if len(fid) < 7:
                INFO(f'skip: {im1} {im2}')
                continue  # filter
            _, uv1 = SfM_feat_uv(im1, src=GPS, idx=fid[:, 0])  # norm->pixel
            _, uv2 = SfM_feat_uv(im2, src=RTK, idx=fid[:, 1])  # norm->pixel
            _, idx = filter_match(uv1, uv2, thd=0.5)
            fid = fid[idx]

            idx = IDX(T[im2][1], fid[:, 1])  # filter: track+fid2->fid2
            tid2, fid2, xys2, rgb2 = [np.array(k)[idx] for k in T[im2]]
            idx = IDX(tid2, list(R))  # filter: reconstruct+tid2->tid2
            INFO(f'gz_gcp: {im1} {im2} {len(uv1)}->{len(fid)}->{len(idx)}')
            if len(idx) < 1: continue  # skip ref.to_lla() when idx=[]
            tid2, fid2, xys2, rgb2 = tid2[idx], fid2[idx], xys2[idx], rgb2[idx]
            xyz2 = np.array([R[k]['coordinates'] for k in tid2])  # local xyz
            lla2 = np.array(ref.to_lla(*xyz2.T)).T  # xyz->lat,lon,alt

            idx = IDX(fid[:, 1], fid2)
            fid = fid[idx]
            _, uv1 = SfM_feat_uv(im1, src=GPS, idx=fid[:, 0])  # fid1
            _, uv2 = SfM_feat_uv(im2, src=RTK, pt=xys2)  # norm->pixel
            for pt, uv in zip(lla2, uv1):
                gcp += [GCF([*pt[[1, 0, 2]], *uv, im1])]
    with open(GPS + '/gcp_list.txt', 'w') as f:
        f.writelines(gcp)
    gcp = dedup_gcp(GPS)
    gcp = filter_gcp(GPS, RTK, thd=thd)
    INFO(f'Created {len(gcp)-1} GCPs: {GPS}/gcp_list.txt\n')
    cv2.destroyAllWindows()
    return gcp  # list
Example #24
    def convert_and_undistort(self, rerun=False, imageFilter=None):
        log.ODM_INFO("Undistorting %s ..." % self.opensfm_project_path)
        undistorted_images_path = self.path("undistorted", "images")

        if not io.dir_exists(undistorted_images_path) or rerun:
            undistort.run_dataset(DataSet(self.opensfm_project_path), "reconstruction.json", 
                                  0, None, "undistorted", imageFilter)
        else:
            log.ODM_WARNING("Found an undistorted directory in %s" % undistorted_images_path)
Example #25
def run_dataset(data: DataSet, method, definition, output_debug):
    """Given a dataset that contains rigs, construct rig data files.

    Args:
        data: dataset object
        method : `auto` will run `reconstruct` process and try to detect rig pattern (TODO)
                 `camera` will create instances based on the camera model name
                 `pattern` will create instances based on a REGEX pattern (see below)
        definition : JSON dict (one for each RigCamera) with values as :
                    - (.*) for `pattern` method where the part outside
                        of parenthesis defines a RigCamera instance
                    - a camera model ID for the `camera` method
        output_debug : output a debug JSON reconstruction `rig_instances.json` with rig instances
    """
    rig.create_rigs_with_pattern(data, definition)
    if output_debug:
        reconstructions = _reconstruction_from_rigs_and_assignments(data)
        data.save_reconstruction(reconstructions, "rig_instances.json")
Example #26
def run_dataset(data: DataSet):
    """ Split the dataset into smaller submodels. """

    meta_data = MetaDataSet(data.data_path)

    meta_data.remove_submodels()
    data.invent_reference_lla()
    _create_image_list(data, meta_data)

    if meta_data.image_groups_exists():
        _read_image_groups(meta_data)
    else:
        _cluster_images(meta_data, data.config["submodel_size"])

    _add_cluster_neighbors(meta_data, data.config["submodel_overlap"])
    _save_clusters_geojson(meta_data)
    _save_cluster_neighbors_geojson(meta_data)

    meta_data.create_submodels(meta_data.load_clusters_with_neighbors())
Example #27
def validate_image_names(data: DataSet, udata: UndistortedDataSet):
    """Check that image files do not have spaces."""
    for image in data.images():
        filename = image_path(image, udata)
        if " " in filename:
            logger.error(
                'Image name "{}" contains spaces.  '
                "This is not supported by the NVM format.  "
                "Please, rename it before running OpenSfM.".format(filename))
            sys.exit(1)
Example #28
def check_gcp(gcp, cam, org=0, n=0):
    import os; import cv2; import numpy as np  # deps used below
    res = {}
    K = Camera(cam).K()  # Camera comes from the surrounding module
    if os.path.isdir(org):
        from opensfm.dataset import DataSet
        ref = DataSet(org).load_reference()
    with open(gcp) as f:
        data = f.readlines()[n:]
    for v in data:  # skip first n-rows
        v = v.split()
        im = v[-1]
        v = v[:5] + [np.inf] * 2
        if os.path.isdir(org):  # lon,lat,alt -> local xyz
            lon, lat, alt = [float(i) for i in v[:3]]
            v[:3] = ref.to_topocentric(lat, lon, alt)
        if im not in res: res[im] = [v]
        else: res[im].append(v)

    for k, v in res.items():
        v = res[k] = np.float64(v)
        if len(v) < 5: continue  # skip
        pt, uv = v[:, :3].copy(), v[:, 3:5]  # copy()->new mem-block
        _, Rvec, Tvec, Ins = cv2.solvePnPRansac(pt, uv, K, None)
        xy, Jacob = cv2.projectPoints(pt, Rvec, Tvec, K, None)
        err = v[:, 5] = np.linalg.norm(xy.squeeze() - uv, axis=1)

        his = np.histogram(err, bins=[*range(11), np.inf])[0]
        for c in range(len(his) - 1, 0, -1):  # largest pixel threshold c such that
            if sum(his[c:]) >= len(v) * 0.2: break  # >=20% of the points exceed it
        idx = np.where(err <= c)[0]
        #print(c, his)
        if len(idx) < 7: continue  # skip
        _, Rvec, Tvec = cv2.solvePnP(pt[idx], uv[idx], K, None)
        xy, Jacob = cv2.projectPoints(pt, Rvec, Tvec, K, None)
        v[:, -1] = np.linalg.norm(xy.squeeze() - uv, axis=1)  # err2

    out = os.path.abspath(gcp + '.err')
    print(out)
    with open(out, 'w') as f:
        for k, v in zip(data, np.vstack([*res.values()])):
            f.write(k[:-1] + '%11.3f%11.3f\n' % (*v[-2:], ))
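A standalone illustration of the histogram-based threshold pick above, with synthetic reprojection errors:

import numpy as np

err = np.array([0.3, 0.5, 0.8, 1.2, 1.5, 2.5, 3.1, 4.0, 6.5, 30.0])
his = np.histogram(err, bins=[*range(11), np.inf])[0]
for c in range(len(his) - 1, 0, -1):
    if his[c:].sum() >= len(err) * 0.2:
        break
print(c, np.where(err <= c)[0])  # c=6: keeps the 8 errors at or below 6 pixels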
Example #29
def SfM_gcp2xyz(GPS, RTK):
    import json; import os
    import cv2; import numpy as np
    from opensfm.dataset import DataSet
    # SfM_cmd and INFO come from the surrounding module
    RRF = DataSet(RTK).load_reference()
    a = 3 if os.path.isdir(GPS + '/matches') else 2
    if not os.path.isfile(GPS + '/reconstruction.json'):
        SfM_cmd(GPS, range(a, 5))
    GTM = DataSet(GPS).load_tracks_manager()
    res = []
    GRC = GPS + '/reconstruction.topocentric.json'
    if not os.path.isfile(GRC): GRC = GRC[:-16] + 'json'
    with open(GRC) as f:
        GRC = json.load(f)[0]['points']
    GIM = GPS if os.path.isdir(GPS + '/images') else GPS + '/..'
    with open(GPS + '/gcp_list.txt') as f:
        gcp = f.readlines()
    #dtp = [(k,float) for k in ('lon','lat','alt','x','y')]
    for v in gcp[1:]:  # skip the header row
        *v, im = v.split()
        v = np.float64(v)
        x = []
        h, w, c = cv2.imread(f'{GIM}/images/{im}').shape
        denm = lambda x: (x * max(w, h) * 2 + (w, h) - 1) / 2
        for tid, ob in GTM.get_shot_observations(im).items():
            d = np.linalg.norm(denm(ob.point) - v[3:5])
            if d < 1: x.append([d, tid])  # pixel
        d, tid = min(x) if len(x) else [np.inf, '']
        if tid not in GRC: INFO(f'skip {tid}: {[*v,im]}')
        else: res += [(*GRC[tid]['coordinates'], *v[[1, 0, 2]])]
    v = np.array(res).T
    v[3:] = RRF.to_topocentric(*v[3:])
    return v.T
Example #30
def SfM_match(src, pre, mix=0):  # match_features
    import os
    from opensfm.actions.match_features import timer, matching, write_report
    from opensfm.dataset import DataSet
    # merge_dir, merge_json, INFO, SfM_DIR come from the surrounding module
    data = DataSet(src)
    t = timer()
    INFO(f'{SfM_DIR}/bin/opensfm match_features: {src}')
    GPS, RTK = [], []
    if os.path.isdir(pre):
        merge_dir(pre + '/exif', src + '/exif')
        merge_dir(pre + '/features', src + '/features')
        merge_json(pre, src, 'camera_models.json')
        #merge_json(pre, src, 'reports/features.json')
        #merge_dir(pre+'/reports/features', src+'/reports/features')
        GPS, RTK = data.images(), DataSet(pre).images()
    else:  # split data->(GPS,RTK)
        for i in data.images():
            (RTK if i.startswith(pre) else GPS).append(i)
    if mix in (1, 3): GPS += RTK  # 1: match (GPS+RTK, RTK)
    if mix in (2, 3): RTK += GPS  # 2: match (GPS, RTK+GPS)
    pairs, preport = matching.match_images(data, {}, GPS, RTK)
    matching.save_matches(data, GPS, pairs)
    write_report(data, preport, list(pairs.keys()), timer() - t)
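Hypothetical invocations covering both modes of `pre` (merge an existing dataset vs. split one dataset by filename prefix); paths and prefixes are placeholders:

# SfM_match('/data/gps', '/data/rtk')    # pre is a directory: merge its exif/features, match GPS vs RTK
# SfM_match('/data/all', 'RTK_')         # pre is a prefix: images starting with 'RTK_' form the RTK side
# SfM_match('/data/all', 'RTK_', mix=1)  # mix=1 also matches the RTK images among themselves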