Code example #1
def add_point_constraints(ra, reconstruction_shots):
    connections = connected_reconstructions(reconstruction_shots)
    for connection in connections:
        d1 = dataset.DataSet(connection[0].submodel_path)
        d2 = dataset.DataSet(connection[1].submodel_path)

        r1 = d1.load_reconstruction()[connection[0].index]
        r2 = d2.load_reconstruction()[connection[1].index]

        common_ims = set(r1.shots.keys()).intersection(r2.shots.keys())

        g1 = d1.load_tracks_graph()
        g2 = d2.load_tracks_graph()

        rec_name1 = encode_reconstruction_name(connection[0])
        rec_name2 = encode_reconstruction_name(connection[1])

        common_tracks = set()
        for im in common_ims:
            for t1, t2 in corresponding_tracks(g1[im], g2[im]):
                if t1 in r1.points and t2 in r2.points:
                    common_tracks.add((t1, t2))

        for t1, t2 in common_tracks:
            c1 = r1.points[t1].coordinates
            c2 = r2.points[t2].coordinates

            ra.add_common_point_constraint(rec_name1, c1[0], c1[1], c1[2],
                                           rec_name2, c2[0], c2[1], c2[2],
                                           1e-1)
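The function above only assumes that each connection entry exposes a `submodel_path` and an `index`. A minimal sketch of such a record (the type name and paths here are hypothetical, inferred from the attribute accesses above):

from collections import namedtuple

# Hypothetical record mirroring the attribute accesses in add_point_constraints.
SubmodelRef = namedtuple("SubmodelRef", ["submodel_path", "index"])

# A connection pairs two partial reconstructions that share images.
connection = (
    SubmodelRef("data/project/submodels/submodel_0000", 0),
    SubmodelRef("data/project/submodels/submodel_0001", 0),
)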
Code example #2
File: reconstruct.py Project: sunbirddy/MarkerSfM
    def run(self, args):
        start = time.time()
        data = dataset.DataSet(args.dataset)

        # experiments directory tree given
        if args.experiments:

            # check that directory exists
            if not os.path.isdir(args.experiments):
                print('--experiments option given but directory does not exist.')
                return

            # find yaml files in experiments directory
            yamls = glob.glob(os.path.join(args.experiments, '*.yaml'))
            if not yamls:
                print('No yaml files found in', args.experiments)
                return
            for yaml in yamls:

                # setup
                data = dataset.DataSet(args.dataset)
                config_path = os.path.join(args.experiments, yaml)
                self.override_config(data, config_path)
                data.config['experiments_path'] = args.experiments
                start = time.time()

                # run recon
                if data.config.get('tag_tracks', False) or data.config.get(
                        'resection_with_tags', False):
                    reconstruction.incremental_reconstruction_with_tags(data)
                else:
                    reconstruction.incremental_reconstruction(data)

                # shutdown
                end = time.time()
                reconstruction_name = data.reconstruction_name_from_settings()
                log_path = os.path.join(args.experiments,
                                        reconstruction_name + '.log')
                with open(log_path, 'w') as fout:
                    fout.write('reconstruct: {0}\n'.format(end - start))

        # normal run
        else:
            # reconstruction type
            if data.config.get('tag_tracks', False) or data.config.get(
                    'resection_with_tags', False):
                reconstruction.incremental_reconstruction_with_tags(data)
            else:
                reconstruction.incremental_reconstruction(data)

            # profile
            end = time.time()
            with open(data.profile_log(), 'a') as fout:
                fout.write('reconstruct: {0}\n'.format(end - start))
Code example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("dataset", help="dataset to process")
    parser.add_argument("--homography_ransac_threshold",
                        help="the threshold used to match homography",
                        default=0.004)
    parser.add_argument("--homography_inlier_ratio",
                        help="the lower bound of homography inlier ratio to be considered as the same frame",
                        default=0.90)
    parser.add_argument("--matching_mod",
                        help="could either be good or fast",
                        default="good")

    print("removing stopping frames")

    args = parser.parse_args()
    data = dataset.DataSet(args.dataset)

    is_good = (args.matching_mod == "good")
    if is_good:
        retained, indexes = remove_stopping_frames_good(args)
    else:
        retained, indexes = remove_stopping_frames_not_good(args)

    # Overwrite the image list if it exists
    image_list = os.path.join(data.data_path, "image_list.txt")
    with open(image_list, "w") as f:
        for im in retained:
            f.write("images/" + im + "\n")

    print("exit\n")
Code example #4
    def run(self, args):
        data = dataset.DataSet(args.dataset)
        reconstructions = data.load_reconstruction()
        graph = data.load_tracks_graph()

        if reconstructions:
            self.export(reconstructions[0], graph, data)
Code example #5
    def run(self, args):
        data = dataset.DataSet(args.dataset)
        reconstructions = data.load_reconstruction()
        graph = data.load_tracks_graph()

        if reconstructions:
            self.undistort_reconstruction(graph, reconstructions[0], data)
Code example #6
def get_camera2d(points, opensfm_data_path, size):
    opensfm_reconstruction_path = opensfm_data_path + "/reconstruction.json"
    with open(opensfm_reconstruction_path) as f:
        data = json.load(f)[0]

    dset = dataset.DataSet(opensfm_data_path)
    reference = dset.load_reference()

    # print(data["shots"])
    shots = data["shots"]
    plane = np.array([0, 0, 1, 0])

    pt3d = np.empty((0, 3))
    pt2d = np.empty((0, 2))
    gpsarr = np.empty((0, 3))
    for id in shots:
        shot = shots[id]
        point3d = np.asarray(shot['translation'])
        # print("point3d = ", point3d)
        gps = np.asarray(shot['gps_position'])

        point2d = flatten_coords_by_plane_proj(pt3d, points, plane, size)

        pt3d = np.vstack((pt3d, point3d))
        pt2d = np.vstack((pt2d, point2d))
        gps = reference.to_lla(gps[0], gps[1], gps[2])
        gpsarr = np.vstack((gpsarr, gps))

    return pt3d, pt2d, gpsarr
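A minimal driver for get_camera2d; the reconstruction points and raster size below are placeholders, since their exact meaning depends on flatten_coords_by_plane_proj:

import numpy as np

# Placeholder inputs for illustration only.
points = np.zeros((0, 3))
pt3d, pt2d, gpsarr = get_camera2d(points, "data/project", size=(1024, 1024))
# Each returned array has one row per shot in the reconstruction.
print(pt3d.shape, pt2d.shape, gpsarr.shape)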
Code example #7
    def run(self, args):
        start = time.time()
        data = dataset.DataSet(args.dataset)
        images = data.images()

        # Read matches and extract match counts. We store the data
        # with some redundancy for convenience.
        matches = {}
        for im1 in images:
            matches[im1] = {}

            if args.not_redundant:

                # Alternative where we don't store redundant information
                try:
                    im1_matches = data.load_matches(im1)
                except IOError:
                    continue
                for im2 in im1_matches:
                    matches1to2 = data.find_matches(im1, im2)
                    matches[im1][im2] = len(matches1to2)
            else:

                for im2 in images:
                    matches1to2 = data.find_matches(im1, im2)

                    if len(matches1to2) > 0:
                        matches[im1][im2] = len(matches1to2)

        data.save_match_counts(matches)

        end = time.time()
        with open(data.profile_log(), 'a') as fout:
            fout.write('create_tracks: {0}\n'.format(end - start))
Code example #8
File: main.py Project: whuaegeanse/OpenSfM
def load_shots_from_reconstructions(path, min_ims):
    data = dataset.DataSet(path)
    reconstructions = data.load_reconstruction()

    # Keep the first two reconstructions, and any others with at least min_ims shots

    n_recs = len(reconstructions)
    if len(reconstructions) > 2:
        reconstructions = [
            r for ix_r, r in enumerate(reconstructions)
            if len(r.shots) >= min_ims or ix_r < 2
        ]
    if len(reconstructions) < n_recs:
        print("Kept {}/{} reconstructions (min images: {})".format(
            len(reconstructions),
            n_recs,
            min_ims,
        ))

    output = []
    for rec in reconstructions:
        shots = sorted(rec.shots.values(),
                       key=lambda x: (x.metadata.capture_time.value, x.id))
        output.append([shot.id for shot in shots])
    return output
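A short usage sketch for load_shots_from_reconstructions; the dataset path is a placeholder:

# The function always keeps the first two reconstructions and drops any
# further ones with fewer than min_ims shots; each returned entry lists
# shot ids sorted by capture time.
sequences = load_shots_from_reconstructions("data/project", min_ims=20)
for seq in sequences:
    print(len(seq), seq[:3])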
Code example #9
File: match_features.py Project: gy20073/OpenSfM
    def run(self, args):
        data = dataset.DataSet(args.dataset)
        # Even if a matches folder already exists, we still have to go
        # through matching to make sure every new match is added to it.
        '''
        if os.path.exists(os.path.join(data.data_path, 'matches')):
            print("found matches folder, skipping")
            return
        '''

        images = data.images()
        exifs = {im: data.load_exif(im) for im in images}
        pairs = match_candidates_from_metadata(images, exifs, data)

        num_pairs = sum(len(c) for c in pairs.values())
        logger.info('Matching {} image pairs'.format(num_pairs))

        ctx = Context()
        ctx.data = data
        ctx.cameras = ctx.data.load_camera_models()
        ctx.exifs = exifs
        ctx.p_pre, ctx.f_pre = load_preemptive_features(data)
        args = match_arguments(pairs, ctx)

        start = time.time()
        processes = ctx.data.config.get('processes', 1)
        if processes == 1:
            for arg in args:
                match(arg)
        else:
            p = Pool(processes)
            p.map(match, args)
        end = time.time()
        with open(ctx.data.profile_log(), 'a') as fout:
            fout.write('match_features: {0}\n'.format(end - start))
Code example #10
    def run(self, args):

        # setup
        data = dataset.DataSet(args.dataset)
        images = data.images()
        tags = data.load_tag_detection()
        print(tags)
Code example #11
    def run(self, args):
        if not (args.transformation or args.reconstruction or args.dense):
            print('Nothing to do. At least one of the options:')
            print(' --transformation, --reconstruction, --dense')

        data = dataset.DataSet(args.dataset)
        reference = data.load_reference_lla()

        projection = pyproj.Proj(args.proj)
        transformation = self._get_transformation(reference, projection)

        if args.transformation:
            output = args.output or 'geocoords_transformation.txt'
            output_path = os.path.join(data.data_path, output)
            self._write_transformation(transformation, output_path)

        if args.reconstruction:
            reconstructions = data.load_reconstruction()
            for r in reconstructions:
                self._transform_reconstruction(r, transformation)
            output = args.output or 'reconstruction.geocoords.json'
            data.save_reconstruction(reconstructions, output)

        if args.dense:
            output = args.output or 'depthmaps/merged.geocoords.ply'
            self._transform_dense_point_cloud(data, transformation, output)
Code example #12
File: export_geocoords.py Project: leowini/OpenSfM
    def run(self, args):
        if not (args.transformation or args.reconstruction or args.dense):
            print('Nothing to do. At least one of the options:')
            print(' --transformation, --reconstruction, --dense')

        data = dataset.DataSet(args.dataset)
        reference = data.load_reference_lla()

        projection = pyproj.Proj(args.proj)
        transformation = self._get_transformation(reference, projection)

        if args.transformation:
            for row in transformation:
                print(' '.join(map(str, row)))
            print('')

        if args.reconstruction:
            reconstructions = data.load_reconstruction()
            for r in reconstructions:
                self._transform_reconstruction(r, transformation)
            data.save_reconstruction(reconstructions,
                                     'reconstruction.geocoords.json')

        if args.dense:
            self._transform_dense_point_cloud(data, reference, transformation)
Code example #13
File: detect_features.py Project: Jekyll1021/OpenSfM
    def run(self, args):
        print "detecting features"

        data = dataset.DataSet(args.dataset)
        images = data.images()
        arguments = [(image, data) for image in images]

        start = time.time()

        processes = data.config.get('processes', 1)
        if processes == 1:
            for arg in arguments:
                t0 = time.time()
                detect(arg)
                print("total detect time in %f second" % (time.time() - t0))
        else:
            print "starting pool of %i processes to detect features" % processes
            p = Pool(processes)
            p.map(detect, arguments)

        end = time.time()

        print "done"
        with open(data.profile_log(), 'a') as fout:
            fout.write('detect_features: {0}\n'.format(end - start))
        print "exit\n"
Code example #14
File: reconstruct.py Project: BobDeng1974/compv-1
    def run(self, args):
        start = time.time()
        data = dataset.DataSet(args.dataset)
        reconstruction.incremental_reconstruction(data)
        end = time.time()
        with open(data.profile_log(), 'a') as fout:
            fout.write('reconstruct: {0}\n'.format(end - start))
Code example #15
    def run(self, args):
        start = time.time()
        data = dataset.DataSet(args.dataset)

        try:
            graph = data.load_tracks_graph()
            reconstructions = data.load_reconstruction()
        except IOError:
            print('mesh.py: Failed to load reconstructions or tracks graph. May be missing...')
            return

        for i, r in enumerate(reconstructions):
            for shot in r.shots.values():
                if shot.id in graph:
                    vertices, faces = mesh.triangle_mesh(
                        shot.id, r, graph, data)
                    shot.mesh = types.ShotMesh()
                    shot.mesh.vertices = vertices
                    shot.mesh.faces = faces

        data.save_reconstruction(reconstructions,
                                 filename='reconstruction.meshed.json',
                                 minify=True)

        end = time.time()
        with open(data.profile_log(), 'a') as fout:
            fout.write('mesh: {0}\n'.format(end - start))
Code example #16
def init_dataset(src, dst, camera_models):
    """Init a dataset using data from a previous iteration."""
    filenames = [
        'config.yaml',
        'image_list.txt',
        'images',
        'masks',
        'segmentations',
        'features',
        'exif',
    ]

    io.mkdir_p(dst)

    if camera_models:
        data = dataset.DataSet(dst)
        data.save_camera_models_overrides(camera_models)
    else:
        filenames.append('camera_models_overrides.json')

    for filename in filenames:
        src_path = pj(src, filename)
        if os.path.isfile(src_path) or os.path.isdir(src_path):
            dst_path = pj(dst, filename)

            if os.path.islink(dst_path):
                os.unlink(dst_path)

            os.symlink(os.path.relpath(src_path, dst), dst_path)
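A hedged usage sketch for init_dataset; the iteration directories below are placeholders:

# Symlink config, images, features, etc. from a previous iteration into a
# fresh dataset directory. Passing camera models would instead pin them via
# save_camera_models_overrides.
init_dataset("runs/iter_00", "runs/iter_01", camera_models=None)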
Code example #17
File: create_tracks.py Project: weblucas/OpenSfM
    def run(self, args):
        data = dataset.DataSet(args.dataset)
        images = data.images()

        # Read local features
        logging.info('reading features')
        features = {}
        colors = {}
        for im in images:
            p, f, c = data.load_features(im)
            features[im] = p[:, :2]
            colors[im] = c

        # Read matches
        matches = {}
        for im1 in images:
            try:
                im1_matches = data.load_matches(im1)
            except IOError:
                continue
            for im2 in im1_matches:
                matches[im1, im2] = im1_matches[im2]

        tracks_graph = matching.create_tracks_graph(features, colors, matches,
                                                    data.config)
        data.save_tracks_graph(tracks_graph)
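For reference, a toy illustration of the containers the loop above builds and create_tracks_graph consumes (the image names and values are made up; the shapes follow the code above):

import numpy as np

features = {
    "im1.jpg": np.array([[0.10, 0.20], [0.30, 0.40]]),  # (n, 2) coordinates
    "im2.jpg": np.array([[0.11, 0.21], [0.31, 0.41]]),
}
colors = {
    "im1.jpg": np.array([[255, 0, 0], [0, 255, 0]]),  # (n, 3) RGB per feature
    "im2.jpg": np.array([[255, 0, 0], [0, 255, 0]]),
}
# (m, 2) index pairs into the two images' feature arrays:
matches = {("im1.jpg", "im2.jpg"): np.array([[0, 0], [1, 1]])}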
Code example #18
    def run(self, args):
        data = dataset.DataSet(args.dataset)
        imageNode = data.load_tracks_geocorrect()
        reconstructions = data.load_reconstruction()
        reconstruction = reconstructions[args.reconstruction_index]
        geocorrect.geo_correct_proc(reconstruction, imageNode, data.images(),
                                    data)
Code example #19
    def run(self, args):
        data = dataset.DataSet(args.dataset)
        if args.localize:
            localizeDir = os.path.join(args.dataset, "localize")
            print("Localization - creating tracks: ")
            queryImages = os.listdir(localizeDir)
            for x in queryImages:
                print(os.path.join(localizeDir, x))
        else:
            queryImages = data.images()

        start = timer()
        # TODO: only load those images for which matches have been computed.
        features, colors = tracking.load_features(data, data.images())
        features_end = timer()
        matches = tracking.load_matches(data, data.images())
        matches_end = timer()
        tracks_manager = tracking.create_tracks_manager(
            features, colors, matches, data.config)
        tracks_end = timer()
        data.save_tracks_manager(tracks_manager)
        end = timer()

        with open(data.profile_log(), 'a') as fout:
            fout.write('create_tracks: {0}\n'.format(end - start))

        self.write_report(data, tracks_manager, features_end - start,
                          matches_end - features_end, tracks_end - matches_end)
Code example #20
File: merge_reconstruct.py Project: coderzbx/OpenSfM
    def run(self, args):
        data = None
        start = time.time()
        # The dataset argument is a comma-separated list of dataset paths.
        data_path_list = args.dataset.split(",")
        reconstructions = []
        config = None
        for data_path in data_path_list:
            data = dataset.DataSet(data_path)
            config = data.config
            single_track_reconstructions = data.load_reconstruction()
            for single_reconstruction in single_track_reconstructions:
                reconstructions.append(single_reconstruction)

        reconstructions = reconstruction.merge_reconstructions(
            reconstructions, config)
        for k, r in enumerate(reconstructions):
            logger.info("Reconstruction {}: {} images, {} points".format(
                k, len(r.shots), len(r.points)))
        logger.info("{} partial reconstructions in total.".format(
            len(reconstructions)))

        end = time.time()

        with open(data.profile_log(), 'a') as fout:
            fout.write('reconstruct: {0}\n'.format(end - start))

        data.save_reconstruction(reconstructions)
Code example #21
File: create_tracks.py Project: BobDeng1974/compv-1
    def run(self, args):
        start = time.time()
        data = dataset.DataSet(args.dataset)
        images = data.images()

        # Read local features
        logging.info('reading features')
        features = {}
        colors = {}
        for im in images:
            p, f, c = data.load_features(im)
            features[im] = p[:, :2]
            colors[im] = c

        # Read matches
        matches = {}
        for im1 in images:
            try:
                im1_matches = data.load_matches(im1)
            except IOError:
                continue
            for im2 in im1_matches:
                matches[im1, im2] = im1_matches[im2]

        tracks_graph = matching.create_tracks_graph(features, colors, matches,
                                                    data.config)
        data.save_tracks_graph(tracks_graph)

        end = time.time()
        with open(data.profile_log(), 'a') as fout:
            fout.write('create_tracks: {0}\n'.format(end - start))
Code example #22
    def run(self, args):
        data = dataset.DataSet(args.dataset)
        data.config['interactive'] = args.interactive
        reconstructions = data.load_reconstruction()
        graph = data.load_tracks_graph()

        dense.compute_depthmaps(data, graph, reconstructions[0])
Code example #23
File: export_ply.py Project: ragarciafran/OpenSfM
    def run(self, args):
        data = dataset.DataSet(args.dataset)
        reconstructions = data.load_reconstruction()
        no_cameras = args.no_cameras
        no_points = args.no_points
        _all = args.all
        path_all = args.dataset + "/reconstruction_files/"

        if reconstructions and not _all:
            data.save_ply(reconstructions[0], None, no_cameras, no_points)

        elif reconstructions and _all:
            if not os.path.isdir(path_all):
                os.mkdir(path_all)
            for r, rec in enumerate(reconstructions):
                data.save_ply(
                    rec,
                    "reconstruction_files/reconstruction_" + str(r) + ".ply",
                    no_cameras, no_points)

        if args.depthmaps and reconstructions:
            udata = dataset.UndistortedDataSet(data, 'undistorted')
            for shot_id, shot in reconstructions[0].shots.items():
                rgb = udata.load_undistorted_image(shot_id)
                for t in ('clean', 'raw'):
                    path_depth = udata._depthmap_file(shot_id, t + '.npz')
                    if not os.path.exists(path_depth):
                        continue
                    depth = np.load(path_depth)['depth']
                    rgb = scale_down_image(rgb, depth.shape[1], depth.shape[0])
                    ply = depthmap_to_ply(shot, depth, rgb)
                    ply_path = udata._depthmap_file(shot_id, t + '.ply')
                    with io.open_wt(ply_path) as fout:
                        fout.write(ply)
Code example #24
    def run(self, args):
        data = dataset.DataSet(args.dataset)
        images = data.images()
        exifs = {im: data.load_exif(im) for im in images}
        if data.config.get('image_matcher_type', False) == 'VOCAB_TREE':
            pairs, preport = match_candidates_from_vocab_tree(images, exifs, data)
        elif data.config.get('image_matcher_type', False) == 'BRUTEFORCE':
            pairs, preport = match_candidates_bruteforce(images, exifs, data)
        else:
            pairs, preport = match_candidates_from_metadata(images, exifs, data)

        num_pairs = sum(len(c) for c in pairs.values())
        logger.info('Matching {} image pairs'.format(num_pairs))

        ctx = Context()
        ctx.data = data
        ctx.cameras = ctx.data.load_camera_models()
        ctx.exifs = exifs
        ctx.p_pre, ctx.f_pre = load_preemptive_features(data)
        args = list(match_arguments(pairs, ctx))

        start = timer()
        processes = ctx.data.config['processes']
        parallel_map(match, args, processes)
        end = timer()
        with open(ctx.data.profile_log(), 'a') as fout:
            fout.write('match_features: {0}\n'.format(end - start))
        self.write_report(data, preport, pairs, end - start)
Code example #25
def getSparsePointCloud(opensfm_data_dir):
    data = dataset.DataSet(opensfm_data_dir)
    reconstruction = data.load_reconstruction()[0]

    tracks_manager = data.load_tracks_manager()
    images = tracks_manager.get_shot_ids()
    pcl = np.empty((0, 3))
    campose = np.empty((0, 3))
    for im in images:
        if not os.path.exists(data._exif_file(im)):
            continue
        if data.load_exif(im)['camera'] not in reconstruction.cameras:
            continue
        if im not in reconstruction.shots:
            print(im, "not in shots!")
            continue
        camera = reconstruction.cameras[data.load_exif(im)['camera']]
        shot = reconstruction.shots[im]
        o = shot.pose.get_origin()
        R = shot.pose.get_rotation_matrix()
        # Sample 10 points along each camera axis to visualize the pose.
        for axis in range(3):
            for depth in np.linspace(0, 2, 10):
                p = o + depth * R[axis]
                campose = np.vstack((campose, p))
        pts2d, pts3d = get_shot_observations(tracks_manager, reconstruction,
                                             camera, im)
        pcl = np.vstack((pcl, pts3d))

    return pcl, campose
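A minimal sketch of consuming getSparsePointCloud; the data directory is a placeholder:

pcl, campose = getSparsePointCloud("data/project")
print("sparse points:", pcl.shape[0])
# 3 axes x 10 samples = 30 pose points per reconstructed shot.
print("camera-axis samples:", campose.shape[0])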
Code example #26
def test_match_candidates_from_metadata_bow(lund_path):
    config = create_match_candidates_config(
        matching_bow_neighbors=NEIGHBORS,
        matcher_type='WORDS')
    data_generation.save_config(config, lund_path)
    data = dataset.DataSet(lund_path)
    match_candidates_from_metadata(data, assert_count=5)
Code example #27
File: export_colmap.py Project: yummy123866/OpenSfM
    def run(self, args):
        data = dataset.DataSet(args.dataset)

        export_folder = os.path.join(data.data_path, 'colmap_export')
        io.mkdir_p(export_folder)

        database_path = os.path.join(export_folder, 'colmap_database.db')
        images_path = os.path.join(data.data_path, 'images')

        if os.path.exists(database_path):
            os.remove(database_path)
        db = COLMAPDatabase.connect(database_path)
        db.create_tables()

        images_map, camera_map = export_cameras(data, db)
        features_map = export_features(data, db, images_map)
        export_matches(data, db, features_map, images_map)

        if data.reconstruction_exists():
            export_ini_file(export_folder, database_path, images_path)
            export_cameras_reconstruction(data, export_folder, camera_map,
                                          args.binary)
            points_map = export_points_reconstruction(data, export_folder,
                                                      images_map, args.binary)
            export_images_reconstruction(data, export_folder, camera_map,
                                         images_map, features_map, points_map,
                                         args.binary)
        db.commit()
        db.close()
Code example #28
    def run(self, args):
        data = dataset.DataSet(args.dataset)
        if args.localize:
            localizeDir = os.path.join(args.dataset, "localize")
            print("Localization - creating matches: ")
            queryImages = [
                x for x in os.listdir(localizeDir) if x.endswith(".jpg")
            ]
            for x in queryImages:
                print(os.path.join(localizeDir, x))
        else:
            queryImages = data.images()

        start = timer()
        # The last argument, ref_images, is the set matched against the queries.
        pairs_matches, preport = matching.match_images(data, queryImages,
                                                       data.images())
        matching.save_matches(data, queryImages, pairs_matches)
        end = timer()

        with open(data.profile_log(), 'a') as fout:
            fout.write('match_features: {0}\n'.format(end - start))
        self.write_report(data, preport, list(pairs_matches.keys()),
                          end - start)
Code example #29
File: match_features.py Project: shooshx/OpenSfM
    def run(self, args):
        data = dataset.DataSet(args.dataset)
        images = data.images()
        exifs = {im: data.load_exif(im) for im in images}
        pairs = match_candidates_from_metadata(images, exifs, data)

        num_pairs = sum(len(c) for c in pairs.values())
        logger.info('Matching {} image pairs'.format(num_pairs))

        ctx = Context()
        ctx.data = data
        ctx.cameras = ctx.data.load_camera_models()
        ctx.exifs = exifs
        ctx.p_pre, ctx.f_pre = load_preemptive_features(data)
        args = match_arguments(pairs, ctx)

        start = time.time()
        processes = ctx.data.config.get('processes', 1)
        if processes == 1:
            for arg in args:
                match(arg)
        else:
            p = Pool(processes)
            p.map(match, args)
        end = time.time()
        with open(ctx.data.profile_log(), 'a') as fout:
            fout.write('match_features: {0}\n'.format(end - start))
Code example #30
File: reconstruct.py Project: ixonos/OpenSfM
    def run(self, args):
        start = time.time()
        data = dataset.DataSet(args.dataset)
        tracks_manager = data.load_tracks_manager()

        # report, reconstructions = reconstruction.\
        #     incremental_reconstruction(data, tracks_manager)

        report, reconstructions, zero_pairs = reconstruction.\
            incremental_reconstruction_fastrack(data, tracks_manager, float(args.focal_prior))

        if zero_pairs:
            print(
                "calibration failed, no pairs to initialize bundle adjustment")

        # Assumes a single camera model: the loops keep the last width and
        # focal value seen.
        camera_priors = data.load_camera_models()
        for i in camera_priors.keys():
            img_width = camera_priors[i].width

        rec1 = reconstructions.pop()
        for i in rec1.cameras.keys():
            est_focal = rec1.cameras[i].focal

        end = time.time()

        with open(data.est_focal_log(), 'a') as fout:
            fout.write(str(est_focal * img_width))