Example #1
    def setup(self,
              args,
              images_path,
              reconstruction,
              append_config=None,
              rerun=False):
        """
        Setup a OpenSfM project.

        Creates the OpenSfM project directory and writes into it the image
        list, optional image groups (split-merge), camera/EXIF overrides,
        mask list and the generated OpenSfM config file.

        :param args: parsed pipeline arguments (argparse-like namespace)
        :param images_path: directory containing the source images
        :param reconstruction: reconstruction object (photos, bands, GCP)
        :param append_config: optional list of extra "key: value" lines
            appended verbatim to the generated OpenSfM config
        :param rerun: when True, wipe any existing project and redo setup
        """
        # Fix for the shared mutable-default pitfall: the previous default
        # of [] was one list object reused across every call to setup().
        if append_config is None:
            append_config = []

        if rerun and io.dir_exists(self.opensfm_project_path):
            shutil.rmtree(self.opensfm_project_path)

        if not io.dir_exists(self.opensfm_project_path):
            system.mkdir_p(self.opensfm_project_path)

        list_path = os.path.join(self.opensfm_project_path, 'image_list.txt')
        if not io.file_exists(list_path) or rerun:

            if reconstruction.multi_camera:
                # Multi-band datasets reconstruct using a single band only.
                photos = get_photos_by_band(reconstruction.multi_camera,
                                            args.primary_band)
                if len(photos) < 1:
                    raise Exception("Not enough images in selected band %s" %
                                    args.primary_band.lower())
                log.ODM_INFO("Reconstruction will use %s images from %s band" %
                             (len(photos), args.primary_band.lower()))
            else:
                photos = reconstruction.photos

            # create file list; while writing it, detect whether every photo
            # has an altitude tag and whether any photo has GPS coordinates
            has_alt = True
            has_gps = False
            with open(list_path, 'w') as fout:
                for photo in photos:
                    if not photo.altitude:
                        has_alt = False
                    if photo.latitude is not None and photo.longitude is not None:
                        has_gps = True

                    fout.write('%s\n' %
                               os.path.join(images_path, photo.filename))

            # check for image_groups.txt (split-merge)
            image_groups_file = os.path.join(args.project_path,
                                             "image_groups.txt")
            if 'split_image_groups_is_set' in args:
                image_groups_file = os.path.abspath(args.split_image_groups)

            if io.file_exists(image_groups_file):
                dst_groups_file = os.path.join(self.opensfm_project_path,
                                               "image_groups.txt")
                io.copy(image_groups_file, dst_groups_file)
                log.ODM_INFO("Copied %s to %s" %
                             (image_groups_file, dst_groups_file))

            # check for cameras (user-supplied camera model overrides)
            if args.cameras:
                try:
                    camera_overrides = camera.get_opensfm_camera_models(
                        args.cameras)
                    with open(
                            os.path.join(self.opensfm_project_path,
                                         "camera_models_overrides.json"),
                            'w') as f:
                        f.write(json.dumps(camera_overrides))
                    log.ODM_INFO(
                        "Wrote camera_models_overrides.json to OpenSfM directory"
                    )
                except Exception as e:
                    # Best-effort: a bad overrides file should not stop setup
                    log.ODM_WARNING(
                        "Cannot set camera_models_overrides.json: %s" % str(e))

            use_bow = args.matcher_type == "bow"

            # GPSDOP override if we have GPS accuracy information (such as RTK)
            if 'gps_accuracy_is_set' in args:
                log.ODM_INFO("Forcing GPS DOP to %s for all images" %
                             args.gps_accuracy)

            log.ODM_INFO("Writing exif overrides")

            exif_overrides = {}
            for p in photos:
                # DOP priority: forced --gps-accuracy, then per-photo DOP,
                # then the default --gps-accuracy value
                if 'gps_accuracy_is_set' in args:
                    dop = args.gps_accuracy
                elif p.get_gps_dop() is not None:
                    dop = p.get_gps_dop()
                else:
                    dop = args.gps_accuracy  # default value

                if p.latitude is not None and p.longitude is not None:
                    exif_overrides[p.filename] = {
                        'gps': {
                            'latitude': p.latitude,
                            'longitude': p.longitude,
                            'altitude':
                            p.altitude if p.altitude is not None else 0,
                            'dop': dop,
                        }
                    }

            with open(
                    os.path.join(self.opensfm_project_path,
                                 "exif_overrides.json"), 'w') as f:
                f.write(json.dumps(exif_overrides))

            # Check image masks
            masks = []
            for p in photos:
                if p.mask is not None:
                    masks.append(
                        (p.filename, os.path.join(images_path, p.mask)))

            if masks:
                log.ODM_INFO("Found %s image masks" % len(masks))
                with open(
                        os.path.join(self.opensfm_project_path,
                                     "mask_list.txt"), 'w') as f:
                    for fname, mask in masks:
                        f.write("{} {}\n".format(fname, mask))

            # Compute feature_process_size
            feature_process_size = 2048  # default

            if 'resize_to_is_set' in args:
                # Legacy
                log.ODM_WARNING(
                    "Legacy option --resize-to (this might be removed in a future version). Use --feature-quality instead."
                )
                feature_process_size = int(args.resize_to)
            else:
                # Scale factors applied to the largest photo dimension.
                # NOTE(review): 'lowest' is 0.0675, not 0.0625 (1/16) as the
                # halving pattern suggests — kept as-is to match upstream.
                feature_quality_scale = {
                    'ultra': 1,
                    'high': 0.5,
                    'medium': 0.25,
                    'low': 0.125,
                    'lowest': 0.0675,
                }

                max_dim = find_largest_photo_dim(photos)

                if max_dim > 0:
                    log.ODM_INFO("Maximum photo dimensions: %spx" %
                                 str(max_dim))
                    feature_process_size = int(
                        max_dim * feature_quality_scale[args.feature_quality])
                else:
                    log.ODM_WARNING(
                        "Cannot compute max image dimensions, going with defaults"
                    )

            depthmap_resolution = get_depthmap_resolution(args, photos)

            # create config file for OpenSfM
            config = [
                "use_exif_size: no",
                "flann_algorithm: KDTREE",  # more stable, faster than KMEANS
                "feature_process_size: %s" % feature_process_size,
                "feature_min_frames: %s" % args.min_num_features,
                "processes: %s" % args.max_concurrency,
                "matching_gps_neighbors: %s" % args.matcher_neighbors,
                "matching_gps_distance: %s" % args.matcher_distance,
                "optimize_camera_parameters: %s" %
                ('no'
                 if args.use_fixed_camera_params or args.cameras else 'yes'),
                "undistorted_image_format: tif",
                "bundle_outlier_filtering_type: AUTO",
                "align_orientation_prior: vertical",
                "triangulation_type: ROBUST",
                "retriangulation_ratio: 2",
            ]

            if args.camera_lens != 'auto':
                config.append("camera_projection_type: %s" %
                              args.camera_lens.upper())

            if not has_gps:
                log.ODM_INFO("No GPS information, using BOW matching")
                use_bow = True

            feature_type = args.feature_type.upper()

            if use_bow:
                config.append("matcher_type: WORDS")

                # Cannot use SIFT with BOW
                if feature_type == "SIFT":
                    log.ODM_WARNING(
                        "Using BOW matching, will use HAHOG feature type, not SIFT"
                    )
                    feature_type = "HAHOG"

            # GPU acceleration?
            if has_gpus() and feature_type == "SIFT":
                log.ODM_INFO("Using GPU for extracting SIFT features")
                log.ODM_INFO("--min-num-features will be ignored")
                feature_type = "SIFT_GPU"

            config.append("feature_type: %s" % feature_type)

            if has_alt:
                log.ODM_INFO(
                    "Altitude data detected, enabling it for GPS alignment")
                config.append("use_altitude_tag: yes")

            gcp_path = reconstruction.gcp.gcp_path
            if has_alt or gcp_path:
                config.append("align_method: auto")
            else:
                config.append("align_method: orientation_prior")

            if args.use_hybrid_bundle_adjustment:
                log.ODM_INFO("Enabling hybrid bundle adjustment")
                config.append(
                    "bundle_interval: 100"
                )  # Bundle after adding 'bundle_interval' cameras
                config.append(
                    "bundle_new_points_ratio: 1.2"
                )  # Bundle when (new points) / (bundled points) > bundle_new_points_ratio
                config.append(
                    "local_bundle_radius: 1"
                )  # Max image graph distance for images to be included in local bundle adjustment
            else:
                config.append("local_bundle_radius: 0")

            if gcp_path:
                config.append("bundle_use_gcp: yes")
                if not args.force_gps:
                    config.append("bundle_use_gps: no")
                io.copy(gcp_path, self.path("gcp_list.txt"))

            config = config + append_config

            # write config file
            log.ODM_INFO(config)
            config_filename = self.get_config_file_path()
            with open(config_filename, 'w') as fout:
                fout.write("\n".join(config))
        else:
            log.ODM_WARNING("%s already exists, not rerunning OpenSfM setup" %
                            list_path)
Example #2
    def process(self, args, outputs):
        """
        Run the OpenSfM stage: metadata extraction, feature matching,
        reconstruction, georeferencing, stats export and undistortion
        (including multi-band handling for multi-camera datasets).

        :param args: parsed pipeline arguments
        :param outputs: shared pipeline state; reads 'tree' and
            'reconstruction', writes 'undist_image_max_size'
        :raises system.ExitException: if there are no photos to process
        """
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']
        photos = reconstruction.photos

        if not photos:
            raise system.ExitException(
                'Not enough photos in photos array to start OpenSfM')

        octx = OSFMContext(tree.opensfm)
        octx.setup(args,
                   tree.dataset_raw,
                   reconstruction=reconstruction,
                   rerun=self.rerun())
        octx.photos_to_metadata(photos, self.rerun())
        self.update_progress(20)
        octx.feature_matching(self.rerun())
        self.update_progress(30)
        octx.reconstruct(self.rerun())
        octx.extract_cameras(tree.path("cameras.json"), self.rerun())
        self.update_progress(70)

        # Remove intermediate OpenSfM folders when disk space optimization
        # is requested; symlinks are unlinked, real folders removed.
        def cleanup_disk_space():
            if args.optimize_disk_space:
                for folder in ["features", "matches", "reports"]:
                    folder_path = octx.path(folder)
                    if os.path.exists(folder_path):
                        if os.path.islink(folder_path):
                            os.unlink(folder_path)
                        else:
                            shutil.rmtree(folder_path)

        # If we find a special flag file for split/merge we stop right here
        if os.path.exists(octx.path("split_merge_stop_at_reconstruction.txt")):
            log.ODM_INFO("Stopping OpenSfM early because we found: %s" %
                         octx.path("split_merge_stop_at_reconstruction.txt"))
            self.next_stage = None
            cleanup_disk_space()
            return

        # Stats are computed in the local CRS (before geoprojection)
        if not args.skip_report:

            # TODO: this will fail to compute proper statistics if the
            # pipeline is run with --skip-report and is subsequently rerun
            # without --skip-report plus a --rerun-* parameter, because the
            # reconstruction.json file is replaced below. It's an isolated
            # use case.

            octx.export_stats(self.rerun())

        self.update_progress(75)

        # We now switch to a geographic CRS: export geocoords, keep the
        # topocentric reconstruction as a backup, and promote the geocoded
        # reconstruction to the canonical reconstruction.json path.
        if reconstruction.is_georeferenced() and (not io.file_exists(
                tree.opensfm_topocentric_reconstruction) or self.rerun()):
            octx.run(
                'export_geocoords --reconstruction --proj "%s" --offset-x %s --offset-y %s'
                % (reconstruction.georef.proj4(),
                   reconstruction.georef.utm_east_offset,
                   reconstruction.georef.utm_north_offset))
            shutil.move(tree.opensfm_reconstruction,
                        tree.opensfm_topocentric_reconstruction)
            shutil.move(tree.opensfm_geocoords_reconstruction,
                        tree.opensfm_reconstruction)
        else:
            log.ODM_WARNING("Will skip exporting %s" %
                            tree.opensfm_geocoords_reconstruction)

        self.update_progress(80)

        updated_config_flag_file = octx.path('updated_config.txt')

        # Make sure it's capped by the depthmap-resolution arg,
        # since the undistorted images are used for MVS
        outputs['undist_image_max_size'] = max(
            gsd.image_max_size(photos,
                               args.orthophoto_resolution,
                               tree.opensfm_reconstruction,
                               ignore_gsd=args.ignore_gsd,
                               has_gcp=reconstruction.has_gcp()),
            get_depthmap_resolution(args, photos))

        if not io.file_exists(updated_config_flag_file) or self.rerun():
            octx.update_config({
                'undistorted_image_max_size':
                outputs['undist_image_max_size']
            })
            octx.touch(updated_config_flag_file)

        # Undistorted images will be used for texturing / MVS

        alignment_info = None
        primary_band_name = None
        largest_photo = None
        undistort_pipeline = []

        # Applies each registered transform in order to every undistorted image
        def undistort_callback(shot_id, image):
            for func in undistort_pipeline:
                image = func(shot_id, image)
            return image

        # Thermal bands are upsampled to match the largest (visual) photo
        def resize_thermal_images(shot_id, image):
            photo = reconstruction.get_photo(shot_id)
            if photo.is_thermal():
                return thermal.resize_to_match(image, largest_photo)
            else:
                return image

        # Converts raw digital numbers to temperature (thermal) or
        # reflectance (multispectral) values
        def radiometric_calibrate(shot_id, image):
            photo = reconstruction.get_photo(shot_id)
            if photo.is_thermal():
                return thermal.dn_to_temperature(photo, image,
                                                 tree.dataset_raw)
            else:
                return multispectral.dn_to_reflectance(
                    photo,
                    image,
                    use_sun_sensor=args.radiometric_calibration ==
                    "camera+sun")

        # Warps secondary-band images onto the primary band using the
        # alignment matrices computed below
        def align_to_primary_band(shot_id, image):
            photo = reconstruction.get_photo(shot_id)

            # No need to align if requested by user
            if args.skip_band_alignment:
                return image

            # No need to align primary
            if photo.band_name == primary_band_name:
                return image

            ainfo = alignment_info.get(photo.band_name)
            if ainfo is not None:
                return multispectral.align_image(image, ainfo['warp_matrix'],
                                                 ainfo['dimension'])
            else:
                log.ODM_WARNING(
                    "Cannot align %s, no alignment matrix could be computed. Band alignment quality might be affected."
                    % (shot_id))
                return image

        if reconstruction.multi_camera:
            largest_photo = find_largest_photo(photos)
            undistort_pipeline.append(resize_thermal_images)

        if args.radiometric_calibration != "none":
            undistort_pipeline.append(radiometric_calibrate)

        image_list_override = None

        if reconstruction.multi_camera:

            # Undistort only secondary bands
            image_list_override = [
                os.path.join(tree.dataset_raw, p.filename) for p in photos
            ]  # if p.band_name.lower() != primary_band_name.lower()

            # We backup the original reconstruction.json, tracks.csv
            # then we augment them by duplicating the primary band
            # camera shots with each band, so that exports, undistortion,
            # etc. include all bands
            # We finally restore the original files later

            added_shots_file = octx.path('added_shots_done.txt')
            # s2p/p2s: secondary->primary and primary->secondary band maps.
            # NOTE(review): these stay None when added_shots_file exists and
            # this is not a rerun; the NVM export below recomputes them on
            # demand (see "if p2s is None").
            s2p, p2s = None, None

            if not io.file_exists(added_shots_file) or self.rerun():
                primary_band_name = multispectral.get_primary_band_name(
                    reconstruction.multi_camera, args.primary_band)
                s2p, p2s = multispectral.compute_band_maps(
                    reconstruction.multi_camera, primary_band_name)

                if not args.skip_band_alignment:
                    alignment_info = multispectral.compute_alignment_matrices(
                        reconstruction.multi_camera,
                        primary_band_name,
                        tree.dataset_raw,
                        s2p,
                        p2s,
                        max_concurrency=args.max_concurrency)
                else:
                    log.ODM_WARNING("Skipping band alignment")
                    alignment_info = {}

                log.ODM_INFO("Adding shots to reconstruction")

                octx.backup_reconstruction()
                octx.add_shots_to_reconstruction(p2s)
                octx.touch(added_shots_file)

            undistort_pipeline.append(align_to_primary_band)

        octx.convert_and_undistort(self.rerun(), undistort_callback,
                                   image_list_override)

        self.update_progress(95)

        if reconstruction.multi_camera:
            octx.restore_reconstruction_backup()

            # Undistort primary band and write undistorted
            # reconstruction.json, tracks.csv
            octx.convert_and_undistort(self.rerun(),
                                       undistort_callback,
                                       runId='primary')

        if not io.file_exists(tree.opensfm_reconstruction_nvm) or self.rerun():
            octx.run('export_visualsfm --points')
        else:
            log.ODM_WARNING(
                'Found a valid OpenSfM NVM reconstruction file in: %s' %
                tree.opensfm_reconstruction_nvm)

        if reconstruction.multi_camera:
            log.ODM_INFO("Multiple bands found")

            # Write NVM files for the various bands
            for band in reconstruction.multi_camera:
                nvm_file = octx.path(
                    "undistorted",
                    "reconstruction_%s.nvm" % band['name'].lower())

                if not io.file_exists(nvm_file) or self.rerun():
                    # img_map: primary-band filename -> this band's filename
                    img_map = {}

                    # Recompute band metadata if the earlier block skipped it
                    if primary_band_name is None:
                        primary_band_name = multispectral.get_primary_band_name(
                            reconstruction.multi_camera, args.primary_band)
                    if p2s is None:
                        s2p, p2s = multispectral.compute_band_maps(
                            reconstruction.multi_camera, primary_band_name)

                    for fname in p2s:

                        # Primary band maps to itself
                        if band['name'] == primary_band_name:
                            img_map[add_image_format_extension(
                                fname, 'tif')] = add_image_format_extension(
                                    fname, 'tif')
                        else:
                            band_filename = next(
                                (p.filename for p in p2s[fname]
                                 if p.band_name == band['name']), None)

                            if band_filename is not None:
                                img_map[add_image_format_extension(
                                    fname,
                                    'tif')] = add_image_format_extension(
                                        band_filename, 'tif')
                            else:
                                log.ODM_WARNING(
                                    "Cannot find %s band equivalent for %s" %
                                    (band, fname))

                    nvm.replace_nvm_images(tree.opensfm_reconstruction_nvm,
                                           img_map, nvm_file)
                else:
                    log.ODM_WARNING("Found existing NVM file %s" % nvm_file)

        # Skip dense reconstruction if necessary and export
        # sparse reconstruction instead
        if args.fast_orthophoto:
            output_file = octx.path('reconstruction.ply')

            if not io.file_exists(output_file) or self.rerun():
                octx.run('export_ply --no-cameras --point-num-views')
            else:
                log.ODM_WARNING("Found a valid PLY reconstruction in %s" %
                                output_file)

        cleanup_disk_space()

        if args.optimize_disk_space:
            os.remove(octx.path("tracks.csv"))
            if io.file_exists(octx.recon_backup_file()):
                os.remove(octx.recon_backup_file())

            if io.dir_exists(octx.path("undistorted", "depthmaps")):
                files = glob.glob(
                    octx.path("undistorted", "depthmaps", "*.npz"))
                for f in files:
                    os.remove(f)

            # Keep these if using OpenMVS
            if args.fast_orthophoto:
                files = [
                    octx.path("undistorted", "tracks.csv"),
                    octx.path("undistorted", "reconstruction.json")
                ]
                for f in files:
                    if os.path.exists(f):
                        os.remove(f)
Example #3
File: openmvs.py — Project: sccmvv/ODM
    def process(self, args, outputs):
        """
        Run the OpenMVS densification stage: export the OpenSfM
        reconstruction to an OpenMVS scene, run dense reconstruction,
        then filter the resulting point cloud.

        :param args: parsed pipeline arguments
        :param outputs: shared pipeline state; reads 'tree',
            'reconstruction' and 'undist_image_max_size'
        """
        # get inputs
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']
        photos = reconstruction.photos

        if not photos:
            log.ODM_ERROR('Not enough photos in photos array to start OpenMVS')
            exit(1)

        # check if reconstruction was done before
        if not io.file_exists(tree.openmvs_model) or self.rerun():
            if io.dir_exists(tree.openmvs):
                shutil.rmtree(tree.openmvs)

            # export reconstruction from opensfm
            octx = OSFMContext(tree.opensfm)
            cmd = 'export_openmvs'
            if reconstruction.multi_camera:
                # Export only the primary band
                primary = reconstruction.multi_camera[0]
                image_list = os.path.join(tree.opensfm, "image_list_%s.txt" % primary['name'].lower())
                cmd += ' --image_list "%s"' % image_list
            octx.run(cmd)

            self.update_progress(10)

            depthmaps_dir = os.path.join(tree.openmvs, "depthmaps")
            if not io.dir_exists(depthmaps_dir):
                os.mkdir(depthmaps_dir)

            depthmap_resolution = get_depthmap_resolution(args, photos)

            # resolution_level is the number of times DensifyPointCloud
            # halves the image (power-of-two downscale) to reach the
            # target depthmap resolution
            if outputs["undist_image_max_size"] <= depthmap_resolution:
                resolution_level = 0
            else:
                resolution_level = math.floor(math.log(outputs['undist_image_max_size'] / float(depthmap_resolution)) / math.log(2))

            # Command-line flags for OpenMVS DensifyPointCloud; joined with
            # spaces below, so the stray leading space is harmless
            config = [
                " --resolution-level %s" % int(resolution_level),
                "--min-resolution %s" % depthmap_resolution,
                "--max-resolution %s" % int(outputs['undist_image_max_size']),
                "--max-threads %s" % args.max_concurrency,
                '-w "%s"' % depthmaps_dir,
                "-v 0",
            ]

            log.ODM_INFO("Running dense reconstruction. This might take a while.")

            system.run('%s "%s" %s' % (context.omvs_densify_path,
                                       os.path.join(tree.openmvs, 'scene.mvs'),
                                      ' '.join(config)))

            self.update_progress(85)

            # Filter points
            scene_dense = os.path.join(tree.openmvs, 'scene_dense.mvs')
            if os.path.exists(scene_dense):
                config = [
                    "--filter-point-cloud -1",
                    '-i "%s"' % scene_dense,
                    "-v 0"
                ]
                system.run('%s %s' % (context.omvs_densify_path, ' '.join(config)))
            else:
                log.ODM_WARNING("Cannot find scene_dense.mvs, dense reconstruction probably failed. Exiting...")
                exit(1)

            self.update_progress(95)

            if args.optimize_disk_space:
                # Remove large intermediate artifacts once the model exists
                files = [scene_dense,
                         os.path.join(tree.openmvs, 'scene_dense.ply'),
                         os.path.join(tree.openmvs, 'scene_dense_dense_filtered.mvs'),
                         octx.path("undistorted", "tracks.csv"),
                         octx.path("undistorted", "reconstruction.json")
                        ]
                for f in files:
                    if os.path.exists(f):
                        os.remove(f)
                shutil.rmtree(depthmaps_dir)
        else:
            log.ODM_WARNING('Found a valid OpenMVS reconstruction file in: %s' %
                            tree.openmvs_model)
Example #4
File: openmvs.py — Project: terraframe/ODM
    def process(self, args, outputs):
        """
        Run the OpenMVS densification stage, with optional tiling
        (--pc-tile): export the OpenSfM reconstruction to an OpenMVS
        scene, estimate depthmaps, optionally split into sub-scenes,
        fuse/filter each, and merge the resulting PLY point clouds.

        :param args: parsed pipeline arguments
        :param outputs: shared pipeline state; reads 'tree',
            'reconstruction' and 'undist_image_max_size'
        """
        # get inputs
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']
        photos = reconstruction.photos

        if not photos:
            log.ODM_ERROR('Not enough photos in photos array to start OpenMVS')
            exit(1)

        # check if reconstruction was done before
        if not io.file_exists(tree.openmvs_model) or self.rerun():
            if self.rerun():
                if io.dir_exists(tree.openmvs):
                    shutil.rmtree(tree.openmvs)

            # export reconstruction from opensfm
            openmvs_scene_file = os.path.join(tree.openmvs, "scene.mvs")
            if not io.file_exists(openmvs_scene_file) or self.rerun():
                # NOTE(review): octx is only bound in this branch, but it is
                # used again below under --optimize-disk-space; if the scene
                # file already existed that later use would raise NameError.
                # Flagged for upstream confirmation.
                octx = OSFMContext(tree.opensfm)
                cmd = 'export_openmvs'
                octx.run(cmd)
            else:
                log.ODM_WARNING("Found existing %s" % openmvs_scene_file)

            self.update_progress(10)

            depthmaps_dir = os.path.join(tree.openmvs, "depthmaps")

            if io.dir_exists(depthmaps_dir) and self.rerun():
                shutil.rmtree(depthmaps_dir)

            if not io.dir_exists(depthmaps_dir):
                os.mkdir(depthmaps_dir)

            # resolution_level is the number of power-of-two downscales
            # needed to bring the undistorted images near the target
            # depthmap resolution
            depthmap_resolution = get_depthmap_resolution(args, photos)
            if outputs["undist_image_max_size"] <= depthmap_resolution:
                resolution_level = 0
            else:
                resolution_level = int(
                    round(
                        math.log(outputs['undist_image_max_size'] /
                                 float(depthmap_resolution)) / math.log(2)))

            log.ODM_INFO(
                "Running dense reconstruction. This might take a while.")

            log.ODM_INFO("Estimating depthmaps")

            # Config used later by per-subscene fusion runs
            densify_ini_file = os.path.join(tree.openmvs, 'config.ini')
            with open(densify_ini_file, 'w+') as f:
                f.write("Optimize = 0\n")  # Disable depth-maps re-filtering

            config = [
                " --resolution-level %s" % int(resolution_level),
                "--min-resolution %s" % depthmap_resolution,
                "--max-resolution %s" % int(outputs['undist_image_max_size']),
                "--max-threads %s" % args.max_concurrency,
                "--number-views-fuse 2",
                '-w "%s"' % depthmaps_dir, "-v 0"
            ]

            if args.pc_tile:
                # Fusion mode 1: estimate depthmaps only, fuse per sub-scene
                config.append("--fusion-mode 1")

            system.run('%s "%s" %s' % (context.omvs_densify_path,
                                       openmvs_scene_file, ' '.join(config)))

            self.update_progress(85)
            files_to_remove = []
            scene_dense = os.path.join(tree.openmvs, 'scene_dense.mvs')

            if args.pc_tile:
                log.ODM_INFO("Computing sub-scenes")
                config = [
                    "--sub-scene-area 660000",
                    "--max-threads %s" % args.max_concurrency,
                    '-w "%s"' % depthmaps_dir,
                    "-v 0",
                ]
                system.run('%s "%s" %s' %
                           (context.omvs_densify_path, openmvs_scene_file,
                            ' '.join(config)))

                scene_files = glob.glob(
                    os.path.join(tree.openmvs,
                                 "scene_[0-9][0-9][0-9][0-9].mvs"))
                if len(scene_files) == 0:
                    log.ODM_ERROR(
                        "No OpenMVS scenes found. This could be a bug, or the reconstruction could not be processed."
                    )
                    exit(1)

                log.ODM_INFO("Fusing depthmaps for %s scenes" %
                             len(scene_files))

                scene_ply_files = []

                for sf in scene_files:
                    p, _ = os.path.splitext(sf)
                    scene_ply = p + "_dense_dense_filtered.ply"
                    scene_dense_mvs = p + "_dense.mvs"

                    files_to_remove += [scene_ply, sf, scene_dense_mvs]
                    scene_ply_files.append(scene_ply)

                    if not io.file_exists(scene_ply) or self.rerun():
                        # Fuse
                        config = [
                            '--resolution-level %s' % int(resolution_level),
                            '--min-resolution %s' % depthmap_resolution,
                            '--max-resolution %s' %
                            int(outputs['undist_image_max_size']),
                            '--dense-config-file "%s"' % densify_ini_file,
                            '--number-views-fuse 2',
                            '--max-threads %s' % args.max_concurrency,
                            '-w "%s"' % depthmaps_dir,
                            '-v 0',
                        ]

                        try:
                            system.run('%s "%s" %s' %
                                       (context.omvs_densify_path, sf,
                                        ' '.join(config)))

                            # Filter
                            system.run(
                                '%s "%s" --filter-point-cloud -1 -v 0' %
                                (context.omvs_densify_path, scene_dense_mvs))
                        except:
                            # Best-effort: a failed sub-scene is skipped, the
                            # remaining tiles are still merged below
                            log.ODM_WARNING(
                                "Sub-scene %s could not be reconstructed, skipping..."
                                % sf)

                        if not io.file_exists(scene_ply):
                            # Drop the PLY we optimistically appended above
                            scene_ply_files.pop()
                            log.ODM_WARNING(
                                "Could not compute PLY for subscene %s" % sf)
                    else:
                        log.ODM_WARNING("Found existing dense scene file %s" %
                                        scene_ply)

                # Merge
                log.ODM_INFO("Merging %s scene files" % len(scene_ply_files))
                if len(scene_ply_files) == 0:
                    log.ODM_ERROR(
                        "Could not compute dense point cloud (no PLY files available)."
                    )
                if len(scene_ply_files) == 1:
                    # Simply rename
                    os.rename(scene_ply_files[0], tree.openmvs_model)
                    log.ODM_INFO("%s --> %s" %
                                 (scene_ply_files[0], tree.openmvs_model))
                else:
                    # Merge
                    fast_merge_ply(scene_ply_files, tree.openmvs_model)
            else:
                # Filter all at once
                if os.path.exists(scene_dense):
                    config = [
                        "--filter-point-cloud -1",
                        '-i "%s"' % scene_dense, "-v 0"
                    ]
                    system.run('%s %s' %
                               (context.omvs_densify_path, ' '.join(config)))
                else:
                    log.ODM_WARNING(
                        "Cannot find scene_dense.mvs, dense reconstruction probably failed. Exiting..."
                    )
                    exit(1)

            # TODO: add support for image masks

            self.update_progress(95)

            if args.optimize_disk_space:
                # Remove large intermediate artifacts once the model exists
                files = [
                    scene_dense,
                    os.path.join(tree.openmvs, 'scene_dense.ply'),
                    os.path.join(tree.openmvs,
                                 'scene_dense_dense_filtered.mvs'),
                    octx.path("undistorted", "tracks.csv"),
                    octx.path("undistorted", "reconstruction.json")
                ] + files_to_remove
                for f in files:
                    if os.path.exists(f):
                        os.remove(f)
                shutil.rmtree(depthmaps_dir)
        else:
            log.ODM_WARNING(
                'Found a valid OpenMVS reconstruction file in: %s' %
                tree.openmvs_model)
Example #5 (score: 0)
File: openmvs.py — Project: originlake/ODM
    def process(self, args, outputs):
        """
        Run the OpenMVS dense reconstruction stage.

        Exports the OpenSfM reconstruction to an OpenMVS scene file,
        estimates depthmaps (with an automatic CPU fallback when the GPU
        run fails), then fuses and optionally filters the dense point
        cloud into tree.openmvs_model. With --pc-tile the scene is split
        into sub-scenes that are fused independently and merged at the
        end. Honors self.rerun() and --optimize-disk-space.

        :param args: parsed ODM command line arguments
        :param outputs: pipeline state dict; reads 'tree', 'reconstruction'
                        and 'undist_image_max_size'
        """
        # get inputs
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']
        photos = reconstruction.photos
        octx = OSFMContext(tree.opensfm)

        if not photos:
            raise system.ExitException('Not enough photos in photos array to start OpenMVS')

        # check if reconstruction was done before
        if not io.file_exists(tree.openmvs_model) or self.rerun():
            if self.rerun():
                if io.dir_exists(tree.openmvs):
                    shutil.rmtree(tree.openmvs)

            # export reconstruction from opensfm
            openmvs_scene_file = os.path.join(tree.openmvs, "scene.mvs")
            if not io.file_exists(openmvs_scene_file) or self.rerun():
                cmd = 'export_openmvs'
                octx.run(cmd)
            else:
                log.ODM_WARNING("Found existing %s" % openmvs_scene_file)

            self.update_progress(10)

            depthmaps_dir = os.path.join(tree.openmvs, "depthmaps")

            if io.dir_exists(depthmaps_dir) and self.rerun():
                shutil.rmtree(depthmaps_dir)

            if not io.dir_exists(depthmaps_dir):
                os.mkdir(depthmaps_dir)

            depthmap_resolution = get_depthmap_resolution(args, photos)
            log.ODM_INFO("Depthmap resolution set to: %spx" % depthmap_resolution)

            # resolution_level = number of times the undistorted images are
            # halved before depthmap estimation (0 = full size)
            if outputs["undist_image_max_size"] <= depthmap_resolution:
                resolution_level = 0
            else:
                resolution_level = int(round(math.log(outputs['undist_image_max_size'] / float(depthmap_resolution)) / math.log(2)))

            log.ODM_INFO("Running dense reconstruction. This might take a while.")

            log.ODM_INFO("Estimating depthmaps")
            number_views_fuse = 2
            densify_ini_file = os.path.join(tree.openmvs, 'Densify.ini')
            subres_levels = 2 # The number of lower resolutions to process before estimating output resolution depthmap.

            config = [
                " --resolution-level %s" % int(resolution_level),
                '--dense-config-file "%s"' % densify_ini_file,
                "--min-resolution %s" % depthmap_resolution,
                "--max-resolution %s" % int(outputs['undist_image_max_size']),
                "--max-threads %s" % args.max_concurrency,
                "--number-views-fuse %s" % number_views_fuse,
                "--sub-resolution-levels %s" % subres_levels,
                '-w "%s"' % depthmaps_dir,
                "-v 0"
            ]

            gpu_config = []

            # Force CPU-only densification when no usable GPU is present
            if not has_gpu(args):
                gpu_config.append("--cuda-device -2")

            if args.pc_tile:
                # Fusion is deferred to the per-sub-scene step below
                config.append("--fusion-mode 1")

            extra_config = []

            if not args.pc_geometric:
                extra_config.append("--geometric-iters 0")

            masks_dir = os.path.join(tree.opensfm, "undistorted", "masks")
            masks = os.path.exists(masks_dir) and len(os.listdir(masks_dir)) > 0
            if masks:
                extra_config.append("--ignore-mask-label 0")

            # Optimize = 7 requests sharper (geometric) refinement; 3 is the softer default
            sharp = args.pc_geometric
            with open(densify_ini_file, 'w+') as f:
                f.write("Optimize = %s\n" % (7 if sharp else 3))

            def run_densify():
                # One-shot wrapper so the GPU-failure fallback below can retry
                system.run('"%s" "%s" %s' % (context.omvs_densify_path,
                                        openmvs_scene_file,
                                        ' '.join(config + gpu_config + extra_config)))

            try:
                run_densify()
            except system.SubprocessException as e:
                # If the GPU was enabled and the program failed,
                # try to run it again without GPU
                if e.errorCode == 1 and len(gpu_config) == 0:
                    log.ODM_WARNING("OpenMVS failed with GPU, is your graphics card driver up to date? Falling back to CPU.")
                    gpu_config.append("--cuda-device -2")
                    run_densify()
                else:
                    raise e

            self.update_progress(85)
            files_to_remove = []
            scene_dense = os.path.join(tree.openmvs, 'scene_dense.mvs')

            if args.pc_tile:
                log.ODM_INFO("Computing sub-scenes")

                subscene_densify_ini_file = os.path.join(tree.openmvs, 'subscene-config.ini')
                with open(subscene_densify_ini_file, 'w+') as f:
                    f.write("Optimize = 0\n")

                config = [
                    "--sub-scene-area 660000",
                    "--max-threads %s" % args.max_concurrency,
                    '-w "%s"' % depthmaps_dir,
                    "-v 0",
                ]
                system.run('"%s" "%s" %s' % (context.omvs_densify_path,
                                        openmvs_scene_file,
                                        ' '.join(config + gpu_config)))

                scene_files = glob.glob(os.path.join(tree.openmvs, "scene_[0-9][0-9][0-9][0-9].mvs"))
                if len(scene_files) == 0:
                    raise system.ExitException("No OpenMVS scenes found. This could be a bug, or the reconstruction could not be processed.")

                log.ODM_INFO("Fusing depthmaps for %s scenes" % len(scene_files))

                scene_ply_files = []

                for sf in scene_files:
                    p, _ = os.path.splitext(sf)
                    scene_ply_unfiltered = p + "_dense.ply"
                    scene_ply = p + "_dense_dense_filtered.ply"
                    scene_dense_mvs = p + "_dense.mvs"

                    files_to_remove += [scene_ply, sf, scene_dense_mvs, scene_ply_unfiltered]
                    scene_ply_files.append(scene_ply)

                    if not io.file_exists(scene_ply) or self.rerun():
                        # Fuse
                        config = [
                            '--resolution-level %s' % int(resolution_level),
                            '--min-resolution %s' % depthmap_resolution,
                            '--max-resolution %s' % int(outputs['undist_image_max_size']),
                            '--dense-config-file "%s"' % subscene_densify_ini_file,
                            '--number-views-fuse %s' % number_views_fuse,
                            '--max-threads %s' % args.max_concurrency,
                            '-w "%s"' % depthmaps_dir,
                            '-v 0',
                        ]

                        try:
                            system.run('"%s" "%s" %s' % (context.omvs_densify_path, sf, ' '.join(config + gpu_config + extra_config)))

                            # Filter
                            if args.pc_filter > 0:
                                system.run('"%s" "%s" --filter-point-cloud -1 -v 0 %s' % (context.omvs_densify_path, scene_dense_mvs, ' '.join(gpu_config)))
                            else:
                                # Just rename
                                log.ODM_INFO("Skipped filtering, %s --> %s" % (scene_ply_unfiltered, scene_ply))
                                os.rename(scene_ply_unfiltered, scene_ply)
                        except Exception:
                            # Was a bare "except:"; narrowed so KeyboardInterrupt /
                            # SystemExit still propagate. A failed sub-scene is
                            # skipped (popped below), not fatal.
                            log.ODM_WARNING("Sub-scene %s could not be reconstructed, skipping..." % sf)

                        if not io.file_exists(scene_ply):
                            scene_ply_files.pop()
                            log.ODM_WARNING("Could not compute PLY for subscene %s" % sf)
                    else:
                        log.ODM_WARNING("Found existing dense scene file %s" % scene_ply)

                # Merge
                log.ODM_INFO("Merging %s scene files" % len(scene_ply_files))
                if len(scene_ply_files) == 0:
                    # Previously this only logged an error and fell through to
                    # os.replace / fast_merge_ply on an empty list, which would
                    # crash with an unrelated error. Stop cleanly instead.
                    raise system.ExitException("Could not compute dense point cloud (no PLY files available).")
                if len(scene_ply_files) == 1:
                    # Simply rename
                    os.replace(scene_ply_files[0], tree.openmvs_model)
                    log.ODM_INFO("%s --> %s" % (scene_ply_files[0], tree.openmvs_model))
                else:
                    # Merge
                    fast_merge_ply(scene_ply_files, tree.openmvs_model)
            else:
                # Filter all at once
                if args.pc_filter > 0:
                    if os.path.exists(scene_dense):
                        config = [
                            "--filter-point-cloud -1",
                            '-i "%s"' % scene_dense,
                            "-v 0"
                        ]
                        system.run('"%s" %s' % (context.omvs_densify_path, ' '.join(config + gpu_config + extra_config)))
                    else:
                        raise system.ExitException("Cannot find scene_dense.mvs, dense reconstruction probably failed. Exiting...")
                else:
                    # Just rename
                    scene_dense_ply = os.path.join(tree.openmvs, 'scene_dense.ply')
                    log.ODM_INFO("Skipped filtering, %s --> %s" % (scene_dense_ply, tree.openmvs_model))
                    os.rename(scene_dense_ply, tree.openmvs_model)

            self.update_progress(95)

            if args.optimize_disk_space:
                files = [scene_dense,
                         os.path.join(tree.openmvs, 'scene_dense.ply'),
                         os.path.join(tree.openmvs, 'scene_dense_dense_filtered.mvs'),
                         octx.path("undistorted", "tracks.csv"),
                         octx.path("undistorted", "reconstruction.json")
                        ] + files_to_remove
                for f in files:
                    if os.path.exists(f):
                        os.remove(f)
                shutil.rmtree(depthmaps_dir)
        else:
            log.ODM_WARNING('Found a valid OpenMVS reconstruction file in: %s' %
                            tree.openmvs_model)
Example #6 (score: 0)
    def process(self, args, outputs):
        """
        Run the OpenSfM structure-from-motion stage.

        Sets up the OpenSfM project, extracts metadata, matches features
        and reconstructs the sparse scene; then undistorts the images
        (with optional radiometric calibration), exports NVM / PLY /
        geocoords artifacts, and records 'undist_image_max_size' in
        outputs for the downstream MVS stage. Stops early when a
        split-merge stop flag file is found.

        :param args: parsed ODM command line arguments
        :param outputs: pipeline state dict; reads 'tree' and
                        'reconstruction', writes 'undist_image_max_size'
        """
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']
        photos = reconstruction.photos

        if not photos:
            log.ODM_ERROR('Not enough photos in photos array to start OpenSfM')
            exit(1)

        octx = OSFMContext(tree.opensfm)
        octx.setup(args,
                   tree.dataset_raw,
                   photos,
                   reconstruction=reconstruction,
                   rerun=self.rerun())
        octx.extract_metadata(self.rerun())
        self.update_progress(20)
        octx.feature_matching(self.rerun())
        self.update_progress(30)
        octx.reconstruct(self.rerun())
        octx.extract_cameras(tree.path("cameras.json"), self.rerun())
        self.update_progress(70)

        if args.optimize_disk_space:
            for folder in ["features", "matches", "exif", "reports"]:
                folder_path = octx.path(folder)
                if os.path.islink(folder_path):
                    os.unlink(folder_path)
                elif os.path.exists(folder_path):
                    # Guard: the folder may already be gone on a rerun;
                    # an unguarded rmtree would raise FileNotFoundError.
                    shutil.rmtree(folder_path)

        # If we find a special flag file for split/merge we stop right here
        if os.path.exists(octx.path("split_merge_stop_at_reconstruction.txt")):
            log.ODM_INFO("Stopping OpenSfM early because we found: %s" %
                         octx.path("split_merge_stop_at_reconstruction.txt"))
            self.next_stage = None
            return

        if args.fast_orthophoto:
            output_file = octx.path('reconstruction.ply')
        elif args.use_opensfm_dense:
            output_file = tree.opensfm_model
        else:
            output_file = tree.opensfm_reconstruction

        updated_config_flag_file = octx.path('updated_config.txt')

        # Make sure it's capped by the depthmap-resolution arg,
        # since the undistorted images are used for MVS
        outputs['undist_image_max_size'] = max(
            gsd.image_max_size(photos,
                               args.orthophoto_resolution,
                               tree.opensfm_reconstruction,
                               ignore_gsd=args.ignore_gsd,
                               has_gcp=reconstruction.has_gcp()),
            get_depthmap_resolution(args, photos))

        if not io.file_exists(updated_config_flag_file) or self.rerun():
            octx.update_config({
                'undistorted_image_max_size':
                outputs['undist_image_max_size']
            })
            octx.touch(updated_config_flag_file)

        # These will be used for texturing / MVS
        if args.radiometric_calibration == "none":
            octx.convert_and_undistort(self.rerun())
        else:

            def radiometric_calibrate(shot_id, image):
                # Per-shot callback: convert digital numbers to reflectance
                photo = reconstruction.get_photo(shot_id)
                return multispectral.dn_to_reflectance(
                    photo,
                    image,
                    use_sun_sensor=args.radiometric_calibration ==
                    "camera+sun")

            octx.convert_and_undistort(self.rerun(), radiometric_calibrate)

        self.update_progress(80)

        if reconstruction.multi_camera:
            # Dump band image lists
            log.ODM_INFO("Multiple bands found")
            for band in reconstruction.multi_camera:
                log.ODM_INFO("Exporting %s band" % band['name'])
                image_list_file = octx.path("image_list_%s.txt" %
                                            band['name'].lower())

                if not io.file_exists(image_list_file) or self.rerun():
                    with open(image_list_file, "w") as f:
                        f.write("\n".join([p.filename
                                           for p in band['photos']]))
                        log.ODM_INFO("Wrote %s" % image_list_file)
                else:
                    log.ODM_WARNING(
                        "Found a valid image list in %s for %s band" %
                        (image_list_file, band['name']))

                nvm_file = octx.path(
                    "undistorted",
                    "reconstruction_%s.nvm" % band['name'].lower())
                if not io.file_exists(nvm_file) or self.rerun():
                    octx.run('export_visualsfm --points --image_list "%s"' %
                             image_list_file)
                    os.rename(tree.opensfm_reconstruction_nvm, nvm_file)
                else:
                    log.ODM_WARNING(
                        "Found a valid NVM file in %s for %s band" %
                        (nvm_file, band['name']))

        if not io.file_exists(tree.opensfm_reconstruction_nvm) or self.rerun():
            octx.run('export_visualsfm --points')
        else:
            log.ODM_WARNING(
                'Found a valid OpenSfM NVM reconstruction file in: %s' %
                tree.opensfm_reconstruction_nvm)

        self.update_progress(85)

        # Skip dense reconstruction if necessary and export
        # sparse reconstruction instead
        if args.fast_orthophoto:
            if not io.file_exists(output_file) or self.rerun():
                octx.run('export_ply --no-cameras')
            else:
                log.ODM_WARNING("Found a valid PLY reconstruction in %s" %
                                output_file)

        elif args.use_opensfm_dense:
            if not io.file_exists(output_file) or self.rerun():
                octx.run('compute_depthmaps')
            else:
                log.ODM_WARNING("Found a valid dense reconstruction in %s" %
                                output_file)

        self.update_progress(90)

        if reconstruction.is_georeferenced() and (not io.file_exists(
                tree.opensfm_transformation) or self.rerun()):
            octx.run('export_geocoords --transformation --proj \'%s\'' %
                     reconstruction.georef.proj4())
        else:
            log.ODM_WARNING("Will skip exporting %s" %
                            tree.opensfm_transformation)

        if args.optimize_disk_space:
            # Guard each removal: on a rerun after a previous cleanup these
            # files are already gone and os.remove would raise.
            for path in [octx.path("tracks.csv"),
                         octx.path("undistorted", "tracks.csv"),
                         octx.path("undistorted", "reconstruction.json")]:
                if os.path.exists(path):
                    os.remove(path)
            if io.dir_exists(octx.path("undistorted", "depthmaps")):
                files = glob.glob(
                    octx.path("undistorted", "depthmaps", "*.npz"))
                for f in files:
                    os.remove(f)
Example #7 (score: 0)
    def setup(self,
              args,
              images_path,
              reconstruction,
              append_config=None,
              rerun=False):
        """
        Setup a OpenSfM project

        Creates the project directory and writes everything the
        reconstruction needs: the image list, optional image groups
        (split-merge), camera model overrides, image mask list, GCP list
        and the OpenSfM configuration file.

        :param args: parsed ODM command line arguments
        :param images_path: directory containing the input photos
        :param reconstruction: ODM reconstruction object (photos, georef, GCPs)
        :param append_config: optional list of extra config lines to append
        :param rerun: when True the project directory is wiped and rebuilt
        """
        # Fix: avoid a mutable default argument ([]); None is the sentinel.
        if append_config is None:
            append_config = []

        if rerun and io.dir_exists(self.opensfm_project_path):
            shutil.rmtree(self.opensfm_project_path)

        if not io.dir_exists(self.opensfm_project_path):
            system.mkdir_p(self.opensfm_project_path)

        list_path = os.path.join(self.opensfm_project_path, 'image_list.txt')
        if not io.file_exists(list_path) or rerun:

            if reconstruction.multi_camera:
                # Multispectral datasets reconstruct a single (primary) band
                photos = get_photos_by_band(reconstruction.multi_camera,
                                            args.primary_band)
                if len(photos) < 1:
                    raise Exception("Not enough images in selected band %s" %
                                    args.primary_band.lower())
                log.ODM_INFO("Reconstruction will use %s images from %s band" %
                             (len(photos), args.primary_band.lower()))
            else:
                photos = reconstruction.photos

            # create file list
            has_alt = True   # True only if every photo has an altitude tag
            has_gps = False  # True if at least one photo has lat/lon
            with open(list_path, 'w') as fout:
                for photo in photos:
                    if not photo.altitude:
                        has_alt = False
                    if photo.latitude is not None and photo.longitude is not None:
                        has_gps = True

                    fout.write('%s\n' %
                               os.path.join(images_path, photo.filename))

            # check for image_groups.txt (split-merge)
            image_groups_file = os.path.join(args.project_path,
                                             "image_groups.txt")
            if 'split_image_groups_is_set' in args:
                image_groups_file = os.path.abspath(args.split_image_groups)

            if io.file_exists(image_groups_file):
                dst_groups_file = os.path.join(self.opensfm_project_path,
                                               "image_groups.txt")
                io.copy(image_groups_file, dst_groups_file)
                log.ODM_INFO("Copied %s to %s" %
                             (image_groups_file, dst_groups_file))

            # check for cameras
            if args.cameras:
                try:
                    camera_overrides = camera.get_opensfm_camera_models(
                        args.cameras)
                    with open(
                            os.path.join(self.opensfm_project_path,
                                         "camera_models_overrides.json"),
                            'w') as f:
                        f.write(json.dumps(camera_overrides))
                    log.ODM_INFO(
                        "Wrote camera_models_overrides.json to OpenSfM directory"
                    )
                except Exception as e:
                    log.ODM_WARNING(
                        "Cannot set camera_models_overrides.json: %s" % str(e))

            # Check image masks
            masks = []
            for p in photos:
                if p.mask is not None:
                    masks.append(
                        (p.filename, os.path.join(images_path, p.mask)))

            if masks:
                log.ODM_INFO("Found %s image masks" % len(masks))
                with open(
                        os.path.join(self.opensfm_project_path,
                                     "mask_list.txt"), 'w') as f:
                    for fname, mask in masks:
                        f.write("{} {}\n".format(fname, mask))

            # Compute feature_process_size
            feature_process_size = 2048  # default

            if ('resize_to_is_set' in args) and args.resize_to > 0:
                # Legacy
                log.ODM_WARNING(
                    "Legacy option --resize-to (this might be removed in a future version). Use --feature-quality instead."
                )
                feature_process_size = int(args.resize_to)
            else:
                # Scale factor applied to the largest photo dimension
                feature_quality_scale = {
                    'ultra': 1,
                    'high': 0.5,
                    'medium': 0.25,
                    'low': 0.125,
                    # NOTE(review): 0.0675 breaks the halving pattern
                    # (0.0625 = 1/16) -- confirm whether this is intended
                    'lowest': 0.0675,
                }

                max_dim = find_largest_photo_dim(photos)

                if max_dim > 0:
                    log.ODM_INFO("Maximum photo dimensions: %spx" %
                                 str(max_dim))
                    feature_process_size = int(
                        max_dim * feature_quality_scale[args.feature_quality])
                    log.ODM_INFO(
                        "Photo dimensions for feature extraction: %ipx" %
                        feature_process_size)
                else:
                    log.ODM_WARNING(
                        "Cannot compute max image dimensions, going with defaults"
                    )

            # NOTE(review): return value is unused here; kept because the
            # helper may emit warnings as a side effect -- confirm
            depthmap_resolution = get_depthmap_resolution(args, photos)

            # create config file for OpenSfM
            config = [
                "use_exif_size: no",
                "flann_algorithm: KDTREE",  # more stable, faster than KMEANS
                "feature_process_size: %s" % feature_process_size,
                "feature_min_frames: %s" % args.min_num_features,
                "processes: %s" % args.max_concurrency,
                "matching_gps_neighbors: %s" % args.matcher_neighbors,
                "matching_gps_distance: 0",
                "matching_graph_rounds: 50",
                "optimize_camera_parameters: %s" %
                ('no'
                 if args.use_fixed_camera_params or args.cameras else 'yes'),
                "reconstruction_algorithm: %s" % (args.sfm_algorithm),
                "undistorted_image_format: tif",
                "bundle_outlier_filtering_type: AUTO",
                "sift_peak_threshold: 0.066",
                "align_orientation_prior: vertical",
                "triangulation_type: ROBUST",
                "retriangulation_ratio: 2",
            ]

            if args.camera_lens != 'auto':
                config.append("camera_projection_type: %s" %
                              args.camera_lens.upper())

            matcher_type = args.matcher_type
            feature_type = args.feature_type.upper()

            # Map ODM matcher names to OpenSfM matcher_type values
            osfm_matchers = {
                "bow": "WORDS",
                "flann": "FLANN",
                "bruteforce": "BRUTEFORCE"
            }

            if not has_gps and 'matcher_type_is_set' not in args:
                log.ODM_INFO(
                    "No GPS information, using BOW matching by default (you can override this by setting --matcher-type explicitly)"
                )
                matcher_type = "bow"

            if matcher_type == "bow":
                # Cannot use anything other than HAHOG with BOW
                if feature_type != "HAHOG":
                    log.ODM_WARNING(
                        "Using BOW matching, will use HAHOG feature type, not SIFT"
                    )
                    feature_type = "HAHOG"

            config.append("matcher_type: %s" % osfm_matchers[matcher_type])

            # GPU acceleration?
            if has_gpu():
                max_photo = find_largest_photo(photos)
                w, h = max_photo.width, max_photo.height
                # Scale the largest photo down to the feature extraction size
                # to check it against the GPU texture size limit
                if w > h:
                    h = int((h / w) * feature_process_size)
                    w = int(feature_process_size)
                else:
                    w = int((w / h) * feature_process_size)
                    h = int(feature_process_size)

                if has_popsift_and_can_handle_texsize(
                        w, h) and feature_type == "SIFT":
                    log.ODM_INFO("Using GPU for extracting SIFT features")
                    feature_type = "SIFT_GPU"

            config.append("feature_type: %s" % feature_type)

            if has_alt:
                log.ODM_INFO(
                    "Altitude data detected, enabling it for GPS alignment")
                config.append("use_altitude_tag: yes")

            gcp_path = reconstruction.gcp.gcp_path
            if has_alt or gcp_path:
                config.append("align_method: auto")
            else:
                config.append("align_method: orientation_prior")

            if args.use_hybrid_bundle_adjustment:
                log.ODM_INFO("Enabling hybrid bundle adjustment")
                config.append(
                    "bundle_interval: 100"
                )  # Bundle after adding 'bundle_interval' cameras
                config.append(
                    "bundle_new_points_ratio: 1.2"
                )  # Bundle when (new points) / (bundled points) > bundle_new_points_ratio
                config.append(
                    "local_bundle_radius: 1"
                )  # Max image graph distance for images to be included in local bundle adjustment
            else:
                config.append("local_bundle_radius: 0")

            if gcp_path:
                config.append("bundle_use_gcp: yes")
                if not args.force_gps:
                    config.append("bundle_use_gps: no")
                io.copy(gcp_path, self.path("gcp_list.txt"))

            config = config + append_config

            # write config file
            log.ODM_INFO(config)
            config_filename = self.get_config_file_path()
            with open(config_filename, 'w') as fout:
                fout.write("\n".join(config))

            # We impose our own reference_lla
            if reconstruction.is_georeferenced():
                self.write_reference_lla(
                    reconstruction.georef.utm_east_offset,
                    reconstruction.georef.utm_north_offset,
                    reconstruction.georef.proj4())
        else:
            log.ODM_WARNING("%s already exists, not rerunning OpenSfM setup" %
                            list_path)