Code Example #1
File: main.py Project: whuaegeanse/OpenSfM
def load_sequence_database_from_file(root,
                                     fname="sequence_database.json",
                                     skip_missing=False):
    """
    Simply loads a sequence file and returns it.
    This doesn't require an existing SfM reconstruction
    """
    root = Path(root)
    p_json = root / fname
    if not p_json.exists():
        return None
    seq_dict = OrderedDict(io.json_load(open(p_json, "r")))

    available_images = file_sanity_check(root, seq_dict, fname)

    for skey in seq_dict:
        available_image_keys = []
        for k in seq_dict[skey]:
            if k in available_images:
                available_image_keys.append(k)
            elif not skip_missing:
                raise FileNotFoundError(f"{k} not found")
        seq_dict[skey] = available_image_keys

    empty_seqs = [skey for skey in seq_dict if not seq_dict[skey]]
    for skey in empty_seqs:
        del seq_dict[skey]

    return seq_dict
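
A minimal usage sketch of the loader above; the dataset root path is a made-up placeholder, not part of the original example.

seqs = load_sequence_database_from_file("/data/my_dataset", skip_missing=True)
if seqs is not None:
    for sequence_key, image_keys in seqs.items():
        # Each sequence now only lists the images that actually exist on disk.
        print(sequence_key, len(image_keys))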
Code Example #2
 def load_reconstruction(
         self,
         filename: Optional[str] = None) -> List[types.Reconstruction]:
     with self.io_handler.open_rt(
             self._reconstruction_file(filename)) as fin:
         reconstructions = io.reconstructions_from_json(io.json_load(fin))
     return reconstructions
Code Example #3
    def _(self):
        reconstruction_path = str(get_undistorted_path() /
                                  "reconstruction.json")
        neighbors_path = str(get_undistorted_path() / "neighbors.json")

        with io.open_rt(reconstruction_path) as fin:
            reconstruction = io.reconstructions_from_json(io.json_load(fin))[0]
        self.shot_id = "1579044395.47_img_00088.jpg_perspective_view_front"
        self.lonely_shot_id = "1579044395.47_img_00088.jpg_perspective_view_back"

        self.shot = reconstruction.shots[self.shot_id]
        self.lonely_shot = reconstruction.shots[self.lonely_shot_id]
        self.shots_dict = reconstruction.shots

        self.expected_shot_ids = [
            "1579044395.47_img_00089.jpg_perspective_view_back",
            "1579044395.47_img_00081.jpg_perspective_view_bottom",
            "1579044395.47_img_00079.jpg_perspective_view_front"
        ]
        self.neighbors_dict = {
            self.shot_id: self.expected_shot_ids,
            self.lonely_shot_id: []
        }

        with io.open_wt(neighbors_path) as fp:
            io.json_dump(self.neighbors_dict, fp)

        self.data = some_data()
        self.reconstruction = reconstruction
Code Example #4
File: dataset.py Project: originlake/OpenSfM
    def load_reference(self) -> geo.TopocentricConverter:
        """Load reference as a topocentric converter."""
        with self.io_handler.open_rt(self._reference_lla_path()) as fin:
            lla = io.json_load(fin)

        return geo.TopocentricConverter(lla["latitude"], lla["longitude"],
                                        lla["altitude"])
Code Example #5
File: stats.py Project: CosmosHua/GLD
def processing_statistics(
        data: DataSet,
        reconstructions: List[types.Reconstruction]) -> Dict[str, Any]:
    steps = {
        "Feature Extraction": "features.json",
        "Features Matching": "matches.json",
        "Tracks Merging": "tracks.json",
        "Reconstruction": "reconstruction.json",
    }

    steps_times = {}
    for step_name, report_file in steps.items():
        file_path = os.path.join(data.data_path, "reports", report_file)
        if os.path.exists(file_path):
            with io.open_rt(file_path) as fin:
                obj = io.json_load(fin)
        else:
            obj = {}
        if "wall_time" in obj:
            steps_times[step_name] = obj["wall_time"]
        elif "wall_times" in obj:
            steps_times[step_name] = sum(obj["wall_times"].values())
        else:
            steps_times[step_name] = -1

    stats = {}
    stats["steps_times"] = steps_times
    stats["steps_times"]["Total Time"] = sum(
        filter(lambda x: x >= 0, steps_times.values()))

    try:
        stats["date"] = datetime.datetime.fromtimestamp(
            data.io_handler.timestamp(data._reconstruction_file(
                None))).strftime("%d/%m/%Y at %H:%M:%S")
    except FileNotFoundError:
        stats["date"] = "unknown"

    start_ct, end_ct = start_end_capture_time(reconstructions)
    if start_ct is not None and end_ct is not None:
        stats["start_date"] = datetime.datetime.fromtimestamp(
            start_ct).strftime("%d/%m/%Y at %H:%M:%S")
        stats["end_date"] = datetime.datetime.fromtimestamp(end_ct).strftime(
            "%d/%m/%Y at %H:%M:%S")
    else:
        stats["start_date"] = "unknown"
        stats["end_date"] = "unknown"

    default_max = 1e30
    min_x, min_y, max_x, max_y = default_max, default_max, 0, 0
    for rec in reconstructions:
        for shot in rec.shots.values():
            o = shot.pose.get_origin()
            min_x = min(min_x, o[0])
            min_y = min(min_y, o[1])
            max_x = max(max_x, o[0])
            max_y = max(max_y, o[1])
    stats["area"] = (max_x - min_x) * (max_y -
                                       min_y) if min_x != default_max else -1
    return stats
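
A hedged call sketch for processing_statistics; the DataSet path is hypothetical and the surrounding OpenSfM imports (dataset, io, types, datetime, os) are assumed to be in scope.

data = DataSet("/data/my_dataset")
reconstructions = data.load_reconstruction()
stats = processing_statistics(data, reconstructions)
# steps_times holds per-step wall times plus the aggregated "Total Time";
# "area" is the bounding-box area of all camera origins (or -1 if none).
print(stats["steps_times"]["Total Time"], stats["area"])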
Code Example #6
 def _(self):
     adjacency_list_path = get_adjacency_list_path()
     with io.open_rt(adjacency_list_path) as fin:
         adjacency_list = io.json_load(fin)
     self.data = some_data()
     self.images = sorted(adjacency_list.keys())
     self.expected_pairs = [('0000.jpg', '0001.jpg'),
                            ('0001.jpg', '0002.jpg'),
                            ('0002.jpg', '0003.jpg')]
     return
Code Example #7
File: debug_plot.py Project: onsiteiq/OpenSfM
def _load_topocentric_gps_points():
    topocentric_gps_points_dict = {}

    with open("gps_list.txt") as fin:
        gps_points_dict = io.read_gps_points_list(fin)

    with io.open_rt("reference_lla.json") as fin:
        reflla = io.json_load(fin)

    for key, value in gps_points_dict.items():
        x, y, z = geo.topocentric_from_lla(
            value[0], value[1], value[2],
            reflla['latitude'], reflla['longitude'], reflla['altitude'])
        topocentric_gps_points_dict[key] = (x, y, z)

    return topocentric_gps_points_dict
Code Example #8
    def run(self, args):
        data = dataset.DataSet(args.dataset)
        udata = dataset.UndistortedDataSet(data, args.subfolder)

        data.config['interactive'] = args.interactive
        graph, neighbors_dict = None, None
        reconstructions = udata.load_undistorted_reconstruction()
        neighbors_path: Path = Path(data.data_path) / "neighbors.json"
        if neighbors_path.exists():
            with io.open_rt(neighbors_path) as fp:
                neighbors_dict = io.json_load(fp)
        else:
            graph = udata.load_undistorted_tracks_graph()

        dense.compute_depthmaps(udata, graph, reconstructions[0],
                                neighbors_dict)
Code Example #9
def processing_statistics(data, reconstructions):
    steps = {
        "Feature Extraction": "features.json",
        "Features Matching": "matches.json",
        "Tracks Merging": "tracks.json",
        "Reconstruction": "reconstruction.json",
    }

    steps_times = {}
    for step_name, report_file in steps.items():
        file_path = os.path.join(data.data_path, "reports", report_file)
        if os.path.exists(file_path):
            with io.open_rt(file_path) as fin:
                obj = io.json_load(fin)
        else:
            obj = {}
        if "wall_time" in obj:
            steps_times[step_name] = obj["wall_time"]
        elif "wall_times" in obj:
            steps_times[step_name] = sum(obj["wall_times"].values())
        else:
            steps_times[step_name] = -1

    stats = {}
    stats["steps_times"] = steps_times
    stats["steps_times"]["Total Time"] = sum(
        filter(lambda x: x >= 0, steps_times.values())
    )

    stats["date"] = datetime.datetime.fromtimestamp(
        os.path.getmtime(data._reconstruction_file(None))
    ).strftime("%d/%m/%Y at %H:%M:%S")

    min_x, min_y, max_x, max_y = 1e30, 1e30, 0, 0
    for rec in reconstructions:
        for shot in rec.shots.values():
            o = shot.pose.get_origin()
            min_x = min(min_x, o[0])
            min_y = min(min_y, o[1])
            max_x = max(max_x, o[0])
            max_y = max(max_y, o[1])
    stats["area"] = (max_x - min_x) * (max_y - min_y)
    return stats
Code Example #10
File: dataset.py Project: mapillary/OpenSfM
 def load_reconstruction(self, filename=None):
     with io.open_rt(self._reconstruction_file(filename)) as fin:
         reconstructions = io.reconstructions_from_json(io.json_load(fin))
     return reconstructions
Code Example #11
File: dataset.py Project: yanfeilong/OpenSfM
 def load_reference_lla(self):
     with io.open_rt(self._reference_lla_path()) as fin:
         return io.json_load(fin)
Code Example #12
File: dataset.py Project: yanfeilong/OpenSfM
 def load_reconstruction(self, filename=None):
     with io.open_rt(self._reconstruction_file(filename)) as fin:
         reconstructions = io.reconstructions_from_json(io.json_load(fin))
     return reconstructions
Code Example #13
 def _read_stats_file(self, filename):
     file_path = os.path.join(self.output_path, filename)
     with io.open_rt(file_path) as fin:
         return io.json_load(fin)
Code Example #14
 def _read_stats_file(self, filename) -> Dict[str, Any]:
     file_path = os.path.join(self.output_path, filename)
     with self.io_handler.open_rt(file_path) as fin:
         return io.json_load(fin)
Code Example #15
    def _read_gcp_stats_file(self, filename):
        file_path = os.path.join(self.output_path,
                                 "ground_control_points.json")

        with self.io_handler.open_rt(file_path) as fin:
            return io.json_load(fin)
Code Example #16
def load_reference_lla(file_path):
    with io.open_rt(_reference_lla_path(file_path)) as fin:
        return io.json_load(fin)
Code Example #17
File: sensors.py Project: arjunkurup/scanner-3d
import json

from opensfm import context
from opensfm import io

with io.open_rt(context.SENSOR) as f:
    sensor_data = io.json_load(f)

# Convert model types to lower cases for easier query
keys = [k.lower() for k in sensor_data.keys()]
values = sensor_data.values()
sensor_data = dict(zip(keys, values))
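
A small lookup sketch following the module above; the camera model string is purely illustrative.

# Model names were lower-cased above, so queries should be lower-cased as well.
model = "Canon PowerShot S100"
specs = sensor_data.get(model.lower())
print(specs)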
Code Example #18
 def load_undistorted_reconstruction(self) -> List[types.Reconstruction]:
     filename = os.path.join(self.data_path, "reconstruction.json")
     with self.io_handler.open_rt(filename) as fin:
         return io.reconstructions_from_json(io.json_load(fin))
Code Example #19
 def load_undistorted_shot_ids(self):
     filename = os.path.join(self.data_path, "undistorted_shot_ids.json")
     with io.open_rt(filename) as fin:
         return io.json_load(fin)
Code Example #20
 def load_undistorted_reconstruction(self):
     filename = os.path.join(self.data_path, "reconstruction.json")
     with io.open_rt(filename) as fin:
         return io.reconstructions_from_json(io.json_load(fin))
Code Example #21
File: sensors.py Project: originlake/OpenSfM
def sensor_data():
    with io.open_rt(context.SENSOR) as f:
        data = io.json_load(f)

    # Convert model types to lower cases for easier query
    return {k.lower(): v for k, v in data.items()}
Code Example #22
File: dataset.py Project: mapillary/OpenSfM
 def load_reference_lla(self):
     with io.open_rt(self._reference_lla_path()) as fin:
         return io.json_load(fin)
Code Example #23
 def load_undistorted_shot_ids(self) -> Dict[str, List[str]]:
     filename = os.path.join(self.data_path, "undistorted_shot_ids.json")
     with self.io_handler.open_rt(filename) as fin:
         return io.json_load(fin)
Code Example #24
 def load_reference_lla(self) -> Dict[str, float]:
     with self.io_handler.open_rt(self._reference_lla_path()) as fin:
         return io.json_load(fin)