Example #1
File: odm_sfm.py Project: CosmosHua/GLD
def SfM_export_pos(src, dop=0.1, tsv='image_geocoords.tsv'):
    rename_rec(src)
    dif = []  # per-image difference log lines
    gjs = {}  # geo.json entries for OpenSfM
    SfM_cmd(src, f'export_geocoords --image-positions --proj="{LLA}"')  # export per-image geocoords
    rename_rec(src)
    geo = load_sort_save(f'{src}/{tsv}', a=1)  # sort
    with open(src + '/geo.txt', 'w') as f:
        f.writelines([LLA + '\n'] + geo[1:])

    from opensfm.dataset import DataSet
    data = DataSet(src)
    ref = data.load_reference()
    for v in geo[1:]:  # skip 1st-row
        im, *v = v.split()
        v = np.float64(v)[[1, 0, 2]]  # reorder lon,lat,alt -> lat,lon,alt
        o = [*data.load_exif(im)['gps'].values()][:3]  # lat,lon,alt
        ov = ref.to_topocentric(*v) - np.array(ref.to_topocentric(*o))  # topocentric offset vs EXIF
        gjs[im] = {
            'gps': dict(latitude=v[0], longitude=v[1], altitude=v[2], dop=dop)
        }
        dif += [f'{im} lla={v} exif={o}\tdif={ov.tolist()}\n']
    with open(f'{src}/{tsv[:-4]}.dif.txt', 'w') as f:
        f.writelines(dif)
    with open(src + '/geo.json', 'w') as f:
        json.dump(gjs, f, indent=4)
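
A minimal usage sketch, assuming odm_sfm.py's module-level imports (json, numpy as np) and its helpers (rename_rec, SfM_cmd, load_sort_save, LLA) are importable; the dataset path below is hypothetical:

from odm_sfm import SfM_export_pos

# Writes geo.txt, geo.json and image_geocoords.dif.txt under the project dir.
SfM_export_pos('datasets/project/opensfm', dop=0.1)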
Example #2
    def ground_control_points(self, proj4):
        """
        Load ground control point information.
        """
        gcp_stats_file = self.path("stats", "ground_control_points.json")

        if not io.file_exists(gcp_stats_file):
            return []

        gcps_stats = {}
        try:
            with open(gcp_stats_file) as f:
                gcps_stats = json.load(f)
        except Exception:
            log.ODM_INFO("Cannot parse %s" % gcp_stats_file)

        if not gcps_stats:
            return []

        ds = DataSet(self.opensfm_project_path)
        reference = ds.load_reference()
        projection = pyproj.Proj(proj4)

        result = []
        for gcp in gcps_stats:
            geocoords = _transform(gcp['coordinates'], reference, projection)
            result.append({
                'id': gcp['id'],
                'observations': gcp['observations'],
                'coordinates': geocoords,
                'error': gcp['error']
            })

        return result
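
The enclosing class is outside this excerpt; a hedged usage sketch, with `osfm` standing in for whatever ODM context object defines the method and the PROJ.4 string chosen arbitrarily:

# Hypothetical: `osfm` is an instance of the (unshown) class defining this method.
proj4 = "+proj=utm +zone=32 +datum=WGS84 +units=m +no_defs"
for gcp in osfm.ground_control_points(proj4):
    print(gcp["id"], gcp["coordinates"], gcp["error"])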
Example #3
def run_dataset(
    data: DataSet,
    proj: str,
    transformation: bool,
    image_positions: bool,
    reconstruction: bool,
    dense: bool,
    output: str,
) -> None:
    """Export reconstructions in geographic coordinates

    Args:
        proj: PROJ.4 projection string
        transformation : print cooordinate transformation matrix'
        image_positions : export image positions
        reconstruction : export reconstruction.json
        dense : export dense point cloud (depthmaps/merged.ply)
        output : path of the output file relative to the dataset

    """

    if not (transformation or image_positions or reconstruction or dense):
        logger.info("Nothing to do. At least one of the options: ")
        logger.info(" --transformation, --image-positions, --reconstruction, --dense")
        return

    reference = data.load_reference()

    projection = pyproj.Proj(proj)
    t = _get_transformation(reference, projection)  # topocentric -> projected transform

    if transformation:
        output = output or "geocoords_transformation.txt"
        output_path = os.path.join(data.data_path, output)
        _write_transformation(t, output_path)

    if image_positions:
        reconstructions = data.load_reconstruction()
        output = output or "image_geocoords.tsv"
        output_path = os.path.join(data.data_path, output)
        _transform_image_positions(reconstructions, t, output_path)

    if reconstruction:
        reconstructions = data.load_reconstruction()
        for r in reconstructions:
            _transform_reconstruction(r, t)
        output = output or "reconstruction.geocoords.json"
        data.save_reconstruction(reconstructions, output)

    if dense:
        output = output or "undistorted/depthmaps/merged.geocoords.ply"
        output_path = os.path.join(data.data_path, output)
        udata = data.undistorted_dataset()
        _transform_dense_point_cloud(udata, t, output_path)
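
A usage sketch, assuming this is OpenSfM's export_geocoords command and using a hypothetical dataset path; with only image_positions set, the positions land in image_geocoords.tsv inside the dataset:

from opensfm.dataset import DataSet

data = DataSet("datasets/project")
run_dataset(
    data,
    proj="+proj=utm +zone=32 +datum=WGS84 +units=m +no_defs",
    transformation=False,
    image_positions=True,
    reconstruction=False,
    dense=False,
    output="",  # falls back to the per-option default filename
)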
Example #4
File: rig.py Project: whuaegeanse/OpenSfM
def propose_subset_dataset_from_instances(
    data: DataSet, rig_instances: Dict[str, TRigInstance], name: str
) -> Iterable[Tuple[DataSet, List[List[Tuple[str, str]]]]]:
    """Given a list of images grouped by rigs instances, infitely propose random
        subset of images and create a dataset subset with the provided name from them.

    Returns :
        Yield infinitely DataSet containing a subset of images containing enough rig instances
    """
    per_rig_camera_group = group_instances(rig_instances)

    if not data.reference_lla_exists():
        data.invent_reference_lla()
    reference = data.load_reference()

    instances_to_pick = {}
    for key, instances in per_rig_camera_group.items():
        # build GPS look-up tree
        gpses = []
        for i, instance in enumerate(instances):
            all_gps = []
            for image, _ in instance:
                gps = data.load_exif(image)["gps"]
                all_gps.append(
                    reference.to_topocentric(gps["latitude"], gps["longitude"], 0)
                )
            gpses.append((i, np.average(np.array(all_gps), axis=0)))
        tree = spatial.cKDTree([x[1] for x in gpses])

        # build NN-graph and split by connected components
        nn = 6
        instances_graph = nx.Graph()
        for i, gps in gpses:
            distances, neighbors = tree.query(gps, k=nn)
            for d, n in zip(distances, neighbors):
                if i == n:
                    continue
                instances_graph.add_edge(i, n, weight=d)
        all_components = sorted(
            nx.algorithms.components.connected_components(instances_graph),
            key=len,
            reverse=True,
        )
        logger.info(f"Found {len(all_components)} connected components")

        # keep the biggest one
        biggest_component = all_components[0]
        logger.info(f"Best component has {len(biggest_component)} instances")
        instances_to_pick[key] = biggest_component

    random.seed(42)
    while True:
        total_instances = []
        subset_images = []
        for key, instances in instances_to_pick.items():
            all_instances = per_rig_camera_group[key]

            instances_sorted = sorted(
                [all_instances[i] for i in instances],
                key=lambda x: data.load_exif(x[0][0])["capture_time"],
            )

            subset_size = data.config["rig_calibration_subset_size"]
            # pick a contiguous window of ~subset_size instances around a random index
            random_index = random.randint(0, len(instances_sorted) - 1)
            instances_calibrate = instances_sorted[
                max([0, random_index - int(subset_size / 2)]) : min(
                    [random_index + int(subset_size / 2), len(instances_sorted) - 1]
                )
            ]

            for instance in instances_calibrate:
                subset_images += [x[0] for x in instance]
            total_instances += instances_calibrate

        data.io_handler.rm_if_exist(os.path.join(data.data_path, name))
        yield data.subset(name, subset_images), total_instances
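
Because the generator yields proposals indefinitely, callers bound it themselves; a sketch, assuming `data` and `rig_instances` were prepared with OpenSfM's rig utilities:

import itertools

# Take three proposed subsets, then stop; the generator never terminates on its own.
for subset, instances in itertools.islice(
    propose_subset_dataset_from_instances(data, rig_instances, "rig_subset"),
    3,
):
    print(f"proposed subset with {len(instances)} rig instances")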