Code example #1
File: osfm.py Project: MobileScientists/ODM
    def reconstruct(self, rerun=False):
        tracks_file = os.path.join(self.opensfm_project_path, 'tracks.csv')
        reconstruction_file = os.path.join(self.opensfm_project_path,
                                           'reconstruction.json')

        if not io.file_exists(tracks_file) or rerun:
            self.run('create_tracks')
        else:
            log.ODM_WARNING('Found a valid OpenSfM tracks file in: %s' %
                            tracks_file)

        if not io.file_exists(reconstruction_file) or rerun:
            self.run('reconstruct')
        else:
            log.ODM_WARNING(
                'Found a valid OpenSfM reconstruction file in: %s' %
                reconstruction_file)

        # Check that a reconstruction file has been created
        if not self.reconstructed():
            raise system.ExitException(
                "The program could not process this dataset using the current settings. "
                "Check that the images have enough overlap, "
                "that there are enough recognizable features "
                "and that the images are in focus. "
                "You could also try to increase the --min-num-features parameter."
                "The program will now exit.")
Code example #2
    def __init__(self, nodeUrl, rolling_shutter=False, rerun=False):
        self.node = Node.from_url(nodeUrl)
        self.params = {
            'tasks': [],
            'threads': [],
            'rolling_shutter': rolling_shutter,
            'rerun': rerun
        }
        self.node_online = True

        log.ODM_INFO("LRE: Initializing using cluster node %s:%s" %
                     (self.node.host, self.node.port))
        try:
            info = self.node.info()
            log.ODM_INFO("LRE: Node is online and running %s version %s" %
                         (info.engine, info.engine_version))
        except exceptions.NodeConnectionError:
            log.ODM_WARNING(
                "LRE: The node seems to be offline! We'll still process the dataset, but it's going to run entirely locally."
            )
            self.node_online = False
        except Exception as e:
            raise system.ExitException(
                "LRE: An unexpected problem happened while opening the node connection: %s"
                % str(e))
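
The constructor above probes a NodeODM node through pyodm. A minimal sketch of the same check outside ODM, assuming a node at a hypothetical URL (http://localhost:3000):

    from pyodm import Node, exceptions

    node = Node.from_url("http://localhost:3000")
    try:
        info = node.info()
        print("Node online, running %s version %s" % (info.engine, info.engine_version))
    except exceptions.NodeConnectionError:
        print("Node offline; processing would run locally")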
Code example #3
    def reconstruct(self, rolling_shutter_correct=False, rerun=False):
        reconstruction_file = os.path.join(self.opensfm_project_path,
                                           'reconstruction.json')
        if not io.file_exists(reconstruction_file) or rerun:
            self.run('reconstruct')
            self.check_merge_partial_reconstructions()
        else:
            log.ODM_WARNING(
                'Found a valid OpenSfM reconstruction file in: %s' %
                reconstruction_file)

        # Check that a reconstruction file has been created
        if not self.reconstructed():
            raise system.ExitException(
                "The program could not process this dataset using the current settings. "
                "Check that the images have enough overlap, "
                "that there are enough recognizable features "
                "and that the images are in focus. "
                "You could also try to increase the --min-num-features parameter."
                "The program will now exit.")

        if rolling_shutter_correct:
            rs_file = self.path('rs_done.txt')

            if not io.file_exists(rs_file) or rerun:
                self.run('rs_correct')

                log.ODM_INFO("Re-running the reconstruction pipeline")

                self.match_features(True)
                self.create_tracks(True)
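                # rolling_shutter_correct=False prevents the correction step
                # from recursing a second time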
                self.reconstruct(rolling_shutter_correct=False, rerun=True)

                self.touch(rs_file)
            else:
                log.ODM_WARNING("Rolling shutter correction already applied")
Code example #4
    def process(self, args, outputs):
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']
        photos = reconstruction.photos

        if not photos:
            raise system.ExitException(
                'Not enough photos in photos array to start OpenSfM')

        octx = OSFMContext(tree.opensfm)
        octx.setup(args,
                   tree.dataset_raw,
                   reconstruction=reconstruction,
                   rerun=self.rerun())
        octx.photos_to_metadata(photos, self.rerun())
        self.update_progress(20)
        octx.feature_matching(self.rerun())
        self.update_progress(30)
        octx.reconstruct(self.rerun())
        octx.extract_cameras(tree.path("cameras.json"), self.rerun())
        self.update_progress(70)

        def cleanup_disk_space():
            if args.optimize_disk_space:
                for folder in ["features", "matches", "reports"]:
                    folder_path = octx.path(folder)
                    if os.path.exists(folder_path):
                        if os.path.islink(folder_path):
                            os.unlink(folder_path)
                        else:
                            shutil.rmtree(folder_path)

        # If we find a special flag file for split/merge we stop right here
        if os.path.exists(octx.path("split_merge_stop_at_reconstruction.txt")):
            log.ODM_INFO("Stopping OpenSfM early because we found: %s" %
                         octx.path("split_merge_stop_at_reconstruction.txt"))
            self.next_stage = None
            cleanup_disk_space()
            return

        # Stats are computed in the local CRS (before geoprojection)
        if not args.skip_report:

            # TODO: this will fail to compute proper statistics if
            # the pipeline is run with --skip-report and is subsequently
            # rerun without --skip-report using a --rerun-* parameter
            # (due to the reconstruction.json file being replaced below).
            # It's an isolated use case.

            octx.export_stats(self.rerun())

        self.update_progress(75)

        # We now switch to a geographic CRS
        if reconstruction.is_georeferenced() and (not io.file_exists(
                tree.opensfm_topocentric_reconstruction) or self.rerun()):
            octx.run(
                'export_geocoords --reconstruction --proj "%s" --offset-x %s --offset-y %s'
                % (reconstruction.georef.proj4(),
                   reconstruction.georef.utm_east_offset,
                   reconstruction.georef.utm_north_offset))
            shutil.move(tree.opensfm_reconstruction,
                        tree.opensfm_topocentric_reconstruction)
            shutil.move(tree.opensfm_geocoords_reconstruction,
                        tree.opensfm_reconstruction)
        else:
            log.ODM_WARNING("Will skip exporting %s" %
                            tree.opensfm_geocoords_reconstruction)

        self.update_progress(80)

        updated_config_flag_file = octx.path('updated_config.txt')

        # Make sure it's capped by the depthmap-resolution arg,
        # since the undistorted images are used for MVS
        outputs['undist_image_max_size'] = max(
            gsd.image_max_size(photos,
                               args.orthophoto_resolution,
                               tree.opensfm_reconstruction,
                               ignore_gsd=args.ignore_gsd,
                               has_gcp=reconstruction.has_gcp()),
            get_depthmap_resolution(args, photos))

        if not io.file_exists(updated_config_flag_file) or self.rerun():
            octx.update_config({
                'undistorted_image_max_size':
                outputs['undist_image_max_size']
            })
            octx.touch(updated_config_flag_file)

        # Undistorted images will be used for texturing / MVS

        alignment_info = None
        primary_band_name = None
        largest_photo = None
        undistort_pipeline = []
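
        # Each function appended to undistort_pipeline takes (shot_id, image)
        # and returns the image, possibly transformed; undistort_callback
        # applies them in registration order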

        def undistort_callback(shot_id, image):
            for func in undistort_pipeline:
                image = func(shot_id, image)
            return image

        def resize_thermal_images(shot_id, image):
            photo = reconstruction.get_photo(shot_id)
            if photo.is_thermal():
                return thermal.resize_to_match(image, largest_photo)
            else:
                return image

        def radiometric_calibrate(shot_id, image):
            photo = reconstruction.get_photo(shot_id)
            if photo.is_thermal():
                return thermal.dn_to_temperature(photo, image,
                                                 tree.dataset_raw)
            else:
                return multispectral.dn_to_reflectance(
                    photo,
                    image,
                    use_sun_sensor=args.radiometric_calibration ==
                    "camera+sun")

        def align_to_primary_band(shot_id, image):
            photo = reconstruction.get_photo(shot_id)

            # Skip alignment entirely if requested by the user
            if args.skip_band_alignment:
                return image

            # No need to align primary
            if photo.band_name == primary_band_name:
                return image

            ainfo = alignment_info.get(photo.band_name)
            if ainfo is not None:
                return multispectral.align_image(image, ainfo['warp_matrix'],
                                                 ainfo['dimension'])
            else:
                log.ODM_WARNING(
                    "Cannot align %s, no alignment matrix could be computed. Band alignment quality might be affected."
                    % (shot_id))
                return image

        if reconstruction.multi_camera:
            largest_photo = find_largest_photo(photos)
            undistort_pipeline.append(resize_thermal_images)

        if args.radiometric_calibration != "none":
            undistort_pipeline.append(radiometric_calibrate)

        image_list_override = None

        if reconstruction.multi_camera:

            # Undistort all bands (the filter that would restrict this to
            # secondary bands is left commented out below)
            image_list_override = [
                os.path.join(tree.dataset_raw, p.filename) for p in photos
            ]  # if p.band_name.lower() != primary_band_name.lower()

            # We backup the original reconstruction.json, tracks.csv
            # then we augment them by duplicating the primary band
            # camera shots with each band, so that exports, undistortion,
            # etc. include all bands
            # We finally restore the original files later

            added_shots_file = octx.path('added_shots_done.txt')
            s2p, p2s = None, None
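            # s2p maps secondary-band shots to their primary-band counterpart;
            # p2s maps each primary-band shot to its list of secondary-band photos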

            if not io.file_exists(added_shots_file) or self.rerun():
                primary_band_name = multispectral.get_primary_band_name(
                    reconstruction.multi_camera, args.primary_band)
                s2p, p2s = multispectral.compute_band_maps(
                    reconstruction.multi_camera, primary_band_name)

                if not args.skip_band_alignment:
                    alignment_info = multispectral.compute_alignment_matrices(
                        reconstruction.multi_camera,
                        primary_band_name,
                        tree.dataset_raw,
                        s2p,
                        p2s,
                        max_concurrency=args.max_concurrency)
                else:
                    log.ODM_WARNING("Skipping band alignment")
                    alignment_info = {}

                log.ODM_INFO("Adding shots to reconstruction")

                octx.backup_reconstruction()
                octx.add_shots_to_reconstruction(p2s)
                octx.touch(added_shots_file)

            undistort_pipeline.append(align_to_primary_band)

        octx.convert_and_undistort(self.rerun(), undistort_callback,
                                   image_list_override)

        self.update_progress(95)

        if reconstruction.multi_camera:
            octx.restore_reconstruction_backup()

            # Undistort primary band and write undistorted
            # reconstruction.json, tracks.csv
            octx.convert_and_undistort(self.rerun(),
                                       undistort_callback,
                                       runId='primary')

        if not io.file_exists(tree.opensfm_reconstruction_nvm) or self.rerun():
            octx.run('export_visualsfm --points')
        else:
            log.ODM_WARNING(
                'Found a valid OpenSfM NVM reconstruction file in: %s' %
                tree.opensfm_reconstruction_nvm)

        if reconstruction.multi_camera:
            log.ODM_INFO("Multiple bands found")

            # Write NVM files for the various bands
            for band in reconstruction.multi_camera:
                nvm_file = octx.path(
                    "undistorted",
                    "reconstruction_%s.nvm" % band['name'].lower())

                if not io.file_exists(nvm_file) or self.rerun():
                    img_map = {}

                    if primary_band_name is None:
                        primary_band_name = multispectral.get_primary_band_name(
                            reconstruction.multi_camera, args.primary_band)
                    if p2s is None:
                        s2p, p2s = multispectral.compute_band_maps(
                            reconstruction.multi_camera, primary_band_name)

                    for fname in p2s:

                        # Primary band maps to itself
                        if band['name'] == primary_band_name:
                            img_map[add_image_format_extension(
                                fname, 'tif')] = add_image_format_extension(
                                    fname, 'tif')
                        else:
                            band_filename = next(
                                (p.filename for p in p2s[fname]
                                 if p.band_name == band['name']), None)

                            if band_filename is not None:
                                img_map[add_image_format_extension(
                                    fname,
                                    'tif')] = add_image_format_extension(
                                        band_filename, 'tif')
                            else:
                                log.ODM_WARNING(
                                    "Cannot find %s band equivalent for %s" %
                                    (band['name'], fname))

                    nvm.replace_nvm_images(tree.opensfm_reconstruction_nvm,
                                           img_map, nvm_file)
                else:
                    log.ODM_WARNING("Found existing NVM file %s" % nvm_file)

        # Skip dense reconstruction if necessary and export
        # sparse reconstruction instead
        if args.fast_orthophoto:
            output_file = octx.path('reconstruction.ply')

            if not io.file_exists(output_file) or self.rerun():
                octx.run('export_ply --no-cameras --point-num-views')
            else:
                log.ODM_WARNING("Found a valid PLY reconstruction in %s" %
                                output_file)

        cleanup_disk_space()

        if args.optimize_disk_space:
            os.remove(octx.path("tracks.csv"))
            if io.file_exists(octx.recon_backup_file()):
                os.remove(octx.recon_backup_file())

            if io.dir_exists(octx.path("undistorted", "depthmaps")):
                files = glob.glob(
                    octx.path("undistorted", "depthmaps", "*.npz"))
                for f in files:
                    os.remove(f)

            # These files are needed by OpenMVS, so remove them only when
            # --fast-orthophoto skips the dense reconstruction step
            if args.fast_orthophoto:
                files = [
                    octx.path("undistorted", "tracks.csv"),
                    octx.path("undistorted", "reconstruction.json")
                ]
                for f in files:
                    if os.path.exists(f):
                        os.remove(f)
Code example #5
File: dataset.py Project: MobileScientists/ODM
    def process(self, args, outputs):
        outputs['start_time'] = system.now_raw()
        tree = types.ODM_Tree(args.project_path, args.gcp, args.geo)
        outputs['tree'] = tree

        if args.time and io.file_exists(tree.benchmarking):
            # Delete the previously made file
            os.remove(tree.benchmarking)
            with open(tree.benchmarking, 'a') as b:
                b.write('ODM Benchmarking file created %s\nNumber of Cores: %s\n\n' % (system.now(), context.num_cores))
    
        # check if the image filename is supported
        def valid_image_filename(filename):
            (pathfn, ext) = os.path.splitext(filename)
            return ext.lower() in context.supported_extensions and pathfn[-5:] != "_mask"

        # Get supported images from dir
        def get_images(in_dir):
            log.ODM_DEBUG(in_dir)
            entries = os.listdir(in_dir)
            valid, rejects = [], []
            for f in entries:
                if valid_image_filename(f):
                    valid.append(f)
                else:
                    rejects.append(f)
            return valid, rejects

        def find_mask(photo_path, masks):
            (pathfn, ext) = os.path.splitext(os.path.basename(photo_path))
            k = "{}_mask".format(pathfn)
            
            mask = masks.get(k)
            if mask:
                # Spaces are not supported due to OpenSfM's mask_list.txt format reqs
                if not " " in mask:
                    return mask
                else:
                    log.ODM_WARNING("Image mask {} has a space. Spaces are currently not supported for image masks.".format(mask))

        # get images directory
        images_dir = tree.dataset_raw

        # define paths and create working directories
        system.mkdir_p(tree.odm_georeferencing)

        log.ODM_INFO('Loading dataset from: %s' % images_dir)

        # check if we rerun cell or not
        images_database_file = os.path.join(tree.root_path, 'images.json')
        if not io.file_exists(images_database_file) or self.rerun():
            if not os.path.exists(images_dir):
                raise system.ExitException("There are no images in %s! Make sure that your project path and dataset name is correct. The current is set to: %s" % (images_dir, args.project_path))

            files, rejects = get_images(images_dir)
            if files:
                # create ODMPhoto list
                path_files = [os.path.join(images_dir, f) for f in files]

                # Lookup table for masks
                masks = {}
                for r in rejects:
                    (p, ext) = os.path.splitext(r)
                    if p[-5:] == "_mask" and ext.lower() in context.supported_extensions:
                        masks[p] = r

                photos = []
                with open(tree.dataset_list, 'w') as dataset_list:
                    log.ODM_INFO("Loading %s images" % len(path_files))
                    for f in path_files:
                        try:
                            p = types.ODM_Photo(f)
                            p.set_mask(find_mask(f, masks))
                            photos.append(p)
                            dataset_list.write(photos[-1].filename + '\n')
                        except PhotoCorruptedException:
                            log.ODM_WARNING("%s seems corrupted and will not be used" % os.path.basename(f))

                # Check if a geo file is available
                if tree.odm_geo_file is not None and os.path.isfile(tree.odm_geo_file):
                    log.ODM_INFO("Found image geolocation file")
                    gf = GeoFile(tree.odm_geo_file)
                    updated = 0
                    for p in photos:
                        entry = gf.get_entry(p.filename)
                        if entry:
                            p.update_with_geo_entry(entry)
                            p.compute_opk()
                            updated += 1
                    log.ODM_INFO("Updated %s image positions" % updated)

                # GPSDOP override if we have GPS accuracy information (such as RTK)
                if 'gps_accuracy_is_set' in args:
                    log.ODM_INFO("Forcing GPS DOP to %s for all images" % args.gps_accuracy)

                    for p in photos:
                        p.override_gps_dop(args.gps_accuracy)
                
                # Override projection type
                if args.camera_lens != "auto":
                    log.ODM_INFO("Setting camera lens to %s for all images" % args.camera_lens)

                    for p in photos:
                        p.override_camera_projection(args.camera_lens)

                # Automatic sky removal
                if args.sky_removal:
                    # For each image that:
                    #  - Doesn't already have a mask, AND
                    #  - Is not nadir (or orientation info is missing), AND
                    #  - Has no spaces in its filename (OpenSfM requirement)
                    # automatically generate a sky mask
                    
                    # Generate list of sky images
                    sky_images = []
                    for p in photos:
                        if p.mask is None and (p.pitch is None or (abs(p.pitch) > 20)) and (not " " in p.filename):
                            sky_images.append({'file': os.path.join(images_dir, p.filename), 'p': p})

                    if len(sky_images) > 0:
                        log.ODM_INFO("Automatically generating sky masks for %s images" % len(sky_images))
                        model = ai.get_model("skyremoval", "https://github.com/OpenDroneMap/SkyRemoval/releases/download/v1.0.5/model.zip", "v1.0.5")
                        if model is not None:
                            sf = SkyFilter(model=model)

                            def parallel_sky_filter(item):
                                try:
                                    mask_file = sf.run_img(item['file'], images_dir)

                                    # Check and set
                                    if mask_file is not None and os.path.isfile(mask_file):
                                        item['p'].set_mask(os.path.basename(mask_file))
                                        log.ODM_INFO("Wrote %s" % os.path.basename(mask_file))
                                    else:
                                        log.ODM_WARNING("Cannot generate mask for %s" % img)
                                except Exception as e:
                                    log.ODM_WARNING("Cannot generate mask for %s: %s" % (img, str(e)))

                            parallel_map(parallel_sky_filter, sky_images, max_workers=args.max_concurrency)

                            log.ODM_INFO("Sky masks generation completed!")
                        else:
                            log.ODM_WARNING("Cannot load AI model (you might need to be connected to the internet?)")
                    else:
                        log.ODM_INFO("No sky masks will be generated (masks already provided, or images are nadir)")

                # End sky removal

                # Save image database for faster restart
                save_images_database(photos, images_database_file)
            else:
                raise system.ExitException('Not enough supported images in %s' % images_dir)
        else:
            # We have an images database, just load it
            photos = load_images_database(images_database_file)

        log.ODM_INFO('Found %s usable images' % len(photos))
        log.logger.log_json_images(len(photos))

        # Create reconstruction object
        reconstruction = types.ODM_Reconstruction(photos)
        
        if tree.odm_georeferencing_gcp and not args.use_exif:
            reconstruction.georeference_with_gcp(tree.odm_georeferencing_gcp,
                                                 tree.odm_georeferencing_coords,
                                                 tree.odm_georeferencing_gcp_utm,
                                                 tree.odm_georeferencing_model_txt_geo,
                                                 rerun=self.rerun())
        else:
            reconstruction.georeference_with_gps(tree.dataset_raw, 
                                                 tree.odm_georeferencing_coords, 
                                                 tree.odm_georeferencing_model_txt_geo,
                                                 rerun=self.rerun())
        
        reconstruction.save_proj_srs(os.path.join(tree.odm_georeferencing, tree.odm_georeferencing_proj))
        outputs['reconstruction'] = reconstruction

        # Try to load boundaries
        if args.boundary:
            if reconstruction.is_georeferenced():
                outputs['boundary'] = boundary.load_boundary(args.boundary, reconstruction.get_proj_srs())
            else:
                args.boundary = None
                log.ODM_WARNING("Reconstruction is not georeferenced, but boundary file provided (will ignore boundary file)")

        # If sfm-algorithm is triangulation, check if photos have OPK
        if args.sfm_algorithm == 'triangulation':
            for p in photos:
                if not p.has_opk():
                    log.ODM_WARNING("No omega/phi/kappa angles found in input photos (%s), switching sfm-algorithm to incremental" % p.filename)
                    args.sfm_algorithm = 'incremental'
                    break
Code example #6
    def process(self, args, outputs):
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']

        if outputs['large']:
            if not os.path.exists(tree.submodels_path):
                raise system.ExitException(
                    "We reached the merge stage, but %s folder does not exist. Something must have gone wrong at an earlier stage. Check the log and fix possible problem before restarting?"
                    % tree.submodels_path)

            # Merge point clouds
            if args.merge in ['all', 'pointcloud']:
                if not io.file_exists(
                        tree.odm_georeferencing_model_laz) or self.rerun():
                    all_point_clouds = get_submodel_paths(
                        tree.submodels_path, "odm_georeferencing",
                        "odm_georeferenced_model.laz")

                    try:
                        point_cloud.merge(all_point_clouds,
                                          tree.odm_georeferencing_model_laz,
                                          rerun=self.rerun())
                        point_cloud.post_point_cloud_steps(
                            args, tree, self.rerun())
                    except Exception as e:
                        log.ODM_WARNING(
                            "Could not merge point cloud: %s (skipping)" %
                            str(e))
                else:
                    log.ODM_WARNING("Found merged point cloud in %s" %
                                    tree.odm_georeferencing_model_laz)

            self.update_progress(25)

            # Merge crop bounds
            merged_bounds_file = os.path.join(
                tree.odm_georeferencing, 'odm_georeferenced_model.bounds.gpkg')
            if not io.file_exists(merged_bounds_file) or self.rerun():
                all_bounds = get_submodel_paths(
                    tree.submodels_path, 'odm_georeferencing',
                    'odm_georeferenced_model.bounds.gpkg')
                log.ODM_INFO("Merging all crop bounds: %s" % all_bounds)
                if len(all_bounds) > 0:
                    # Calculate a new crop area
                    # based on the convex hull of all crop areas of all submodels
                    # (without a buffer, otherwise we are double-cropping)
                    Cropper.merge_bounds(all_bounds, merged_bounds_file, 0)
                else:
                    log.ODM_WARNING("No bounds found for any submodel.")

            # Merge orthophotos
            if args.merge in ['all', 'orthophoto']:
                if not io.dir_exists(tree.odm_orthophoto):
                    system.mkdir_p(tree.odm_orthophoto)

                if not io.file_exists(tree.odm_orthophoto_tif) or self.rerun():
                    all_orthos_and_ortho_cuts = get_all_submodel_paths(
                        tree.submodels_path,
                        os.path.join("odm_orthophoto",
                                     "odm_orthophoto_feathered.tif"),
                        os.path.join("odm_orthophoto",
                                     "odm_orthophoto_cut.tif"),
                    )

                    if len(all_orthos_and_ortho_cuts) > 1:
                        log.ODM_INFO(
                            "Found %s submodels with valid orthophotos and cutlines"
                            % len(all_orthos_and_ortho_cuts))

                        # TODO: histogram matching via rasterio
                        # currently parts have different color tones

                        if io.file_exists(tree.odm_orthophoto_tif):
                            os.remove(tree.odm_orthophoto_tif)

                        orthophoto_vars = orthophoto.get_orthophoto_vars(args)
                        orthophoto.merge(all_orthos_and_ortho_cuts,
                                         tree.odm_orthophoto_tif,
                                         orthophoto_vars)
                        orthophoto.post_orthophoto_steps(
                            args, merged_bounds_file, tree.odm_orthophoto_tif,
                            tree.orthophoto_tiles)
                    elif len(all_orthos_and_ortho_cuts) == 1:
                        # Simply copy
                        log.ODM_WARNING(
                            "A single orthophoto/cutline pair was found between all submodels."
                        )
                        shutil.copyfile(all_orthos_and_ortho_cuts[0][0],
                                        tree.odm_orthophoto_tif)
                    else:
                        log.ODM_WARNING(
                            "No orthophoto/cutline pairs were found in any of the submodels. No orthophoto will be generated."
                        )
                else:
                    log.ODM_WARNING("Found merged orthophoto in %s" %
                                    tree.odm_orthophoto_tif)

            self.update_progress(75)

            # Merge DEMs
            def merge_dems(dem_filename, human_name):
                if not io.dir_exists(tree.path('odm_dem')):
                    system.mkdir_p(tree.path('odm_dem'))

                dem_file = tree.path("odm_dem", dem_filename)
                if not io.file_exists(dem_file) or self.rerun():
                    all_dems = get_submodel_paths(tree.submodels_path,
                                                  "odm_dem", dem_filename)
                    log.ODM_INFO("Merging %ss" % human_name)

                    # Merge
                    dem_vars = utils.get_dem_vars(args)
                    eu_map_source = None  # Default

                    # Use DSM's euclidean map for DTMs
                    # (requires the DSM to be computed)
                    if human_name == "DTM":
                        eu_map_source = "dsm"

                    euclidean_merge_dems(all_dems,
                                         dem_file,
                                         dem_vars,
                                         euclidean_map_source=eu_map_source)

                    if io.file_exists(dem_file):
                        # Crop
                        if args.crop > 0 or args.boundary:
                            Cropper.crop(
                                merged_bounds_file,
                                dem_file,
                                dem_vars,
                                keep_original=not args.optimize_disk_space)
                        log.ODM_INFO("Created %s" % dem_file)

                        if args.tiles:
                            generate_dem_tiles(
                                dem_file,
                                tree.path("%s_tiles" % human_name.lower()),
                                args.max_concurrency)

                        if args.cog:
                            convert_to_cogeo(dem_file,
                                             max_workers=args.max_concurrency)
                    else:
                        log.ODM_WARNING("Cannot merge %s, %s was not created" %
                                        (human_name, dem_file))

                else:
                    log.ODM_WARNING("Found merged %s in %s" %
                                    (human_name, dem_filename))

            if args.merge in ['all', 'dem'] and args.dsm:
                merge_dems("dsm.tif", "DSM")

            if args.merge in ['all', 'dem'] and args.dtm:
                merge_dems("dtm.tif", "DTM")

            self.update_progress(95)

            # Merge reports
            if not io.dir_exists(tree.odm_report):
                system.mkdir_p(tree.odm_report)

            geojson_shots = tree.path(tree.odm_report, "shots.geojson")
            if not io.file_exists(geojson_shots) or self.rerun():
                geojson_shots_files = get_submodel_paths(
                    tree.submodels_path, "odm_report", "shots.geojson")
                log.ODM_INFO("Merging %s shots.geojson files" %
                             len(geojson_shots_files))
                merge_geojson_shots(geojson_shots_files, geojson_shots)
            else:
                log.ODM_WARNING("Found merged shots.geojson in %s" %
                                tree.odm_report)

            # Stop the pipeline short by skipping to the postprocess stage.
            # Afterwards, we're done.
            self.next_stage = self.last_stage()
        else:
            log.ODM_INFO("Normal dataset, nothing to merge.")
            self.progress = 0.0
Code example #7
File: openmvs.py Project: originlake/ODM
    def process(self, args, outputs):
        # get inputs
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']
        photos = reconstruction.photos
        octx = OSFMContext(tree.opensfm)

        if not photos:
            raise system.ExitException('Not enough photos in photos array to start OpenMVS')

        # check if reconstruction was done before
        if not io.file_exists(tree.openmvs_model) or self.rerun():
            if self.rerun():
                if io.dir_exists(tree.openmvs):
                    shutil.rmtree(tree.openmvs)

            # export reconstruction from opensfm
            openmvs_scene_file = os.path.join(tree.openmvs, "scene.mvs")
            if not io.file_exists(openmvs_scene_file) or self.rerun():
                cmd = 'export_openmvs'
                octx.run(cmd)
            else:
                log.ODM_WARNING("Found existing %s" % openmvs_scene_file)
            
            self.update_progress(10)

            depthmaps_dir = os.path.join(tree.openmvs, "depthmaps")

            if io.dir_exists(depthmaps_dir) and self.rerun():
                shutil.rmtree(depthmaps_dir)

            if not io.dir_exists(depthmaps_dir):
                os.mkdir(depthmaps_dir)

            depthmap_resolution = get_depthmap_resolution(args, photos)
            log.ODM_INFO("Depthmap resolution set to: %spx" % depthmap_resolution)

            if outputs["undist_image_max_size"] <= depthmap_resolution:
                resolution_level = 0
            else:
                resolution_level = int(round(math.log(outputs['undist_image_max_size'] / float(depthmap_resolution)) / math.log(2)))
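            # e.g. undist_image_max_size=4000 with depthmap_resolution=1000 gives
            # resolution_level = round(log2(4000/1000)) = 2 (each level halves the image)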

            log.ODM_INFO("Running dense reconstruction. This might take a while.")
            
            log.ODM_INFO("Estimating depthmaps")
            number_views_fuse = 2
            densify_ini_file = os.path.join(tree.openmvs, 'Densify.ini')
            subres_levels = 2 # Number of lower resolutions to process before estimating the output-resolution depthmap

            config = [
                " --resolution-level %s" % int(resolution_level),
                '--dense-config-file "%s"' % densify_ini_file,
                "--min-resolution %s" % depthmap_resolution,
                "--max-resolution %s" % int(outputs['undist_image_max_size']),
                "--max-threads %s" % args.max_concurrency,
                "--number-views-fuse %s" % number_views_fuse,
                "--sub-resolution-levels %s" % subres_levels,
                '-w "%s"' % depthmaps_dir, 
                "-v 0"
            ]

            gpu_config = []
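
            # "--cuda-device -2" makes OpenMVS skip CUDA and run on the CPU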

            if not has_gpu(args):
                gpu_config.append("--cuda-device -2")

            if args.pc_tile:
                config.append("--fusion-mode 1")

            extra_config = []
            
            if not args.pc_geometric:
                extra_config.append("--geometric-iters 0")
            
            masks_dir = os.path.join(tree.opensfm, "undistorted", "masks")
            masks = os.path.exists(masks_dir) and len(os.listdir(masks_dir)) > 0
            if masks:
                extra_config.append("--ignore-mask-label 0")

            sharp = args.pc_geometric
            with open(densify_ini_file, 'w+') as f:
                f.write("Optimize = %s\n" % (7 if sharp else 3))

            def run_densify():
                system.run('"%s" "%s" %s' % (context.omvs_densify_path, 
                                        openmvs_scene_file,
                                        ' '.join(config + gpu_config + extra_config)))

            try:
                run_densify()
            except system.SubprocessException as e:
                # If the GPU was enabled and the program failed,
                # try to run it again without GPU
                if e.errorCode == 1 and len(gpu_config) == 0:
                    log.ODM_WARNING("OpenMVS failed with GPU, is your graphics card driver up to date? Falling back to CPU.")
                    gpu_config.append("--cuda-device -2")
                    run_densify()
                else:
                    raise e

            self.update_progress(85)
            files_to_remove = []
            scene_dense = os.path.join(tree.openmvs, 'scene_dense.mvs')

            if args.pc_tile:
                log.ODM_INFO("Computing sub-scenes")

                subscene_densify_ini_file = os.path.join(tree.openmvs, 'subscene-config.ini')
                with open(subscene_densify_ini_file, 'w+') as f:
                    f.write("Optimize = 0\n")

                config = [
                    "--sub-scene-area 660000",
                    "--max-threads %s" % args.max_concurrency,
                    '-w "%s"' % depthmaps_dir, 
                    "-v 0",
                ]
                system.run('"%s" "%s" %s' % (context.omvs_densify_path, 
                                        openmvs_scene_file,
                                        ' '.join(config + gpu_config)))
                
                scene_files = glob.glob(os.path.join(tree.openmvs, "scene_[0-9][0-9][0-9][0-9].mvs"))
                if len(scene_files) == 0:
                    raise system.ExitException("No OpenMVS scenes found. This could be a bug, or the reconstruction could not be processed.")

                log.ODM_INFO("Fusing depthmaps for %s scenes" % len(scene_files))
                
                scene_ply_files = []

                for sf in scene_files:
                    p, _ = os.path.splitext(sf)
                    scene_ply_unfiltered = p + "_dense.ply"
                    scene_ply = p + "_dense_dense_filtered.ply"
                    scene_dense_mvs = p + "_dense.mvs"

                    files_to_remove += [scene_ply, sf, scene_dense_mvs, scene_ply_unfiltered]
                    scene_ply_files.append(scene_ply)

                    if not io.file_exists(scene_ply) or self.rerun():
                        # Fuse
                        config = [
                            '--resolution-level %s' % int(resolution_level),
                            '--min-resolution %s' % depthmap_resolution,
                            '--max-resolution %s' % int(outputs['undist_image_max_size']),
                            '--dense-config-file "%s"' % subscene_densify_ini_file,
                            '--number-views-fuse %s' % number_views_fuse,
                            '--max-threads %s' % args.max_concurrency,
                            '-w "%s"' % depthmaps_dir,
                            '-v 0',
                        ]

                        try:
                            system.run('"%s" "%s" %s' % (context.omvs_densify_path, sf, ' '.join(config + gpu_config + extra_config)))

                            # Filter
                            if args.pc_filter > 0:
                                system.run('"%s" "%s" --filter-point-cloud -1 -v 0 %s' % (context.omvs_densify_path, scene_dense_mvs, ' '.join(gpu_config)))
                            else:
                                # Just rename
                                log.ODM_INFO("Skipped filtering, %s --> %s" % (scene_ply_unfiltered, scene_ply))
                                os.rename(scene_ply_unfiltered, scene_ply)
                        except Exception:
                            log.ODM_WARNING("Sub-scene %s could not be reconstructed, skipping..." % sf)

                        if not io.file_exists(scene_ply):
                            scene_ply_files.pop()
                            log.ODM_WARNING("Could not compute PLY for subscene %s" % sf)
                    else:
                        log.ODM_WARNING("Found existing dense scene file %s" % scene_ply)

                # Merge
                log.ODM_INFO("Merging %s scene files" % len(scene_ply_files))
                if len(scene_ply_files) == 0:
                    log.ODM_ERROR("Could not compute dense point cloud (no PLY files available).")
                elif len(scene_ply_files) == 1:
                    # Simply rename
                    os.replace(scene_ply_files[0], tree.openmvs_model)
                    log.ODM_INFO("%s --> %s"% (scene_ply_files[0], tree.openmvs_model))
                else:
                    # Merge
                    fast_merge_ply(scene_ply_files, tree.openmvs_model)
            else:
                # Filter all at once
                if args.pc_filter > 0:
                    if os.path.exists(scene_dense):
                        config = [
                            "--filter-point-cloud -1",
                            '-i "%s"' % scene_dense,
                            "-v 0"
                        ]
                        system.run('"%s" %s' % (context.omvs_densify_path, ' '.join(config + gpu_config + extra_config)))
                    else:
                        raise system.ExitException("Cannot find scene_dense.mvs, dense reconstruction probably failed. Exiting...")
                else:
                    # Just rename
                    scene_dense_ply = os.path.join(tree.openmvs, 'scene_dense.ply')
                    log.ODM_INFO("Skipped filtering, %s --> %s" % (scene_dense_ply, tree.openmvs_model))
                    os.rename(scene_dense_ply, tree.openmvs_model)

            self.update_progress(95)

            if args.optimize_disk_space:
                files = [scene_dense,
                         os.path.join(tree.openmvs, 'scene_dense.ply'),
                         os.path.join(tree.openmvs, 'scene_dense_dense_filtered.mvs'),
                         octx.path("undistorted", "tracks.csv"),
                         octx.path("undistorted", "reconstruction.json")
                        ] + files_to_remove
                for f in files:
                    if os.path.exists(f):
                        os.remove(f)
                shutil.rmtree(depthmaps_dir)
        else:
            log.ODM_WARNING('Found a valid OpenMVS reconstruction file in: %s' %
                            tree.openmvs_model)
Code example #8
File: dataset.py Project: originlake/ODM
    def process(self, args, outputs):
        outputs['start_time'] = system.now_raw()
        tree = types.ODM_Tree(args.project_path, args.gcp, args.geo)
        outputs['tree'] = tree

        if args.time and io.file_exists(tree.benchmarking):
            # Delete the previously made file
            os.remove(tree.benchmarking)
            with open(tree.benchmarking, 'a') as b:
                b.write(
                    'ODM Benchmarking file created %s\nNumber of Cores: %s\n\n'
                    % (system.now(), context.num_cores))

        # check if the image filename is supported
        def valid_image_filename(filename):
            (pathfn, ext) = os.path.splitext(filename)
            return ext.lower() in context.supported_extensions and pathfn[-5:] != "_mask"

        # Get supported images from dir
        def get_images(in_dir):
            log.ODM_DEBUG(in_dir)
            entries = os.listdir(in_dir)
            valid, rejects = [], []
            for f in entries:
                if valid_image_filename(f):
                    valid.append(f)
                else:
                    rejects.append(f)
            return valid, rejects

        def find_mask(photo_path, masks):
            (pathfn, ext) = os.path.splitext(os.path.basename(photo_path))
            k = "{}_mask".format(pathfn)

            mask = masks.get(k)
            if mask:
                # Spaces are not supported due to OpenSfM's mask_list.txt format reqs
                if not " " in mask:
                    return mask
                else:
                    log.ODM_WARNING(
                        "Image mask {} has a space. Spaces are currently not supported for image masks."
                        .format(mask))

        # get images directory
        images_dir = tree.dataset_raw

        # define paths and create working directories
        system.mkdir_p(tree.odm_georeferencing)

        log.ODM_INFO('Loading dataset from: %s' % images_dir)

        # check if we rerun cell or not
        images_database_file = os.path.join(tree.root_path, 'images.json')
        if not io.file_exists(images_database_file) or self.rerun():
            if not os.path.exists(images_dir):
                raise system.ExitException(
                    "There are no images in %s! Make sure that your project path and dataset name is correct. The current is set to: %s"
                    % (images_dir, args.project_path))

            files, rejects = get_images(images_dir)
            if files:
                # create ODMPhoto list
                path_files = [os.path.join(images_dir, f) for f in files]

                # Lookup table for masks
                masks = {}
                for r in rejects:
                    (p, ext) = os.path.splitext(r)
                    if p[-5:] == "_mask" and ext.lower(
                    ) in context.supported_extensions:
                        masks[p] = r

                photos = []
                with open(tree.dataset_list, 'w') as dataset_list:
                    log.ODM_INFO("Loading %s images" % len(path_files))
                    for f in path_files:
                        try:
                            p = types.ODM_Photo(f)
                            p.set_mask(find_mask(f, masks))
                            photos += [p]
                            dataset_list.write(photos[-1].filename + '\n')
                        except PhotoCorruptedException:
                            log.ODM_WARNING(
                                "%s seems corrupted and will not be used" %
                                os.path.basename(f))

                # Check if a geo file is available
                if tree.odm_geo_file is not None and os.path.isfile(
                        tree.odm_geo_file):
                    log.ODM_INFO("Found image geolocation file")
                    gf = GeoFile(tree.odm_geo_file)
                    updated = 0
                    for p in photos:
                        entry = gf.get_entry(p.filename)
                        if entry:
                            p.update_with_geo_entry(entry)
                            p.compute_opk()
                            updated += 1
                    log.ODM_INFO("Updated %s image positions" % updated)

                # GPSDOP override if we have GPS accuracy information (such as RTK)
                if 'gps_accuracy_is_set' in args:
                    log.ODM_INFO("Forcing GPS DOP to %s for all images" %
                                 args.gps_accuracy)

                    for p in photos:
                        p.override_gps_dop(args.gps_accuracy)

                # Override projection type
                if args.camera_lens != "auto":
                    log.ODM_INFO("Setting camera lens to %s for all images" %
                                 args.camera_lens)

                    for p in photos:
                        p.override_camera_projection(args.camera_lens)

                # Save image database for faster restart
                save_images_database(photos, images_database_file)
            else:
                raise system.ExitException(
                    'Not enough supported images in %s' % images_dir)
        else:
            # We have an images database, just load it
            photos = load_images_database(images_database_file)

        log.ODM_INFO('Found %s usable images' % len(photos))
        log.logger.log_json_images(len(photos))

        # Create reconstruction object
        reconstruction = types.ODM_Reconstruction(photos)

        if tree.odm_georeferencing_gcp and not args.use_exif:
            reconstruction.georeference_with_gcp(
                tree.odm_georeferencing_gcp,
                tree.odm_georeferencing_coords,
                tree.odm_georeferencing_gcp_utm,
                tree.odm_georeferencing_model_txt_geo,
                rerun=self.rerun())
        else:
            reconstruction.georeference_with_gps(
                tree.dataset_raw,
                tree.odm_georeferencing_coords,
                tree.odm_georeferencing_model_txt_geo,
                rerun=self.rerun())

        reconstruction.save_proj_srs(
            os.path.join(tree.odm_georeferencing,
                         tree.odm_georeferencing_proj))
        outputs['reconstruction'] = reconstruction

        # Try to load boundaries
        if args.boundary:
            if reconstruction.is_georeferenced():
                outputs['boundary'] = boundary.load_boundary(
                    args.boundary, reconstruction.get_proj_srs())
            else:
                args.boundary = None
                log.ODM_WARNING(
                    "Reconstruction is not georeferenced, but boundary file provided (will ignore boundary file)"
                )

        # If sfm-algorithm is triangulation, check if photos have OPK
        if args.sfm_algorithm == 'triangulation':
            for p in photos:
                if not p.has_opk():
                    log.ODM_WARNING(
                        "No omega/phi/kappa angles found in input photos (%s), switching sfm-algorithm to incremental"
                        % p.filename)
                    args.sfm_algorithm = 'incremental'
                    break
Code example #9
    def process(self, args, outputs):
        outputs['start_time'] = system.now_raw()
        tree = types.ODM_Tree(args.project_path, args.gcp, args.geo)
        outputs['tree'] = tree

        if args.time and io.file_exists(tree.benchmarking):
            # Delete the previously made file
            os.remove(tree.benchmarking)
            with open(tree.benchmarking, 'a') as b:
                b.write(
                    'ODM Benchmarking file created %s\nNumber of Cores: %s\n\n'
                    % (system.now(), context.num_cores))

        # check if the image filename is supported
        def valid_image_filename(filename):
            (pathfn, ext) = os.path.splitext(filename)
            return ext.lower() in context.supported_extensions and pathfn[-5:] != "_mask"

        # Get supported images from dir
        def get_images(in_dir):
            log.ODM_DEBUG(in_dir)
            entries = os.listdir(in_dir)
            valid, rejects = [], []
            for f in entries:
                if valid_image_filename(f):
                    valid.append(f)
                else:
                    rejects.append(f)
            return valid, rejects

        def find_mask(photo_path, masks):
            (pathfn, ext) = os.path.splitext(os.path.basename(photo_path))
            k = "{}_mask".format(pathfn)

            mask = masks.get(k)
            if mask:
                # Spaces are not supported due to OpenSfM's mask_list.txt format reqs
                if not " " in mask:
                    return mask
                else:
                    log.ODM_WARNING(
                        "Image mask {} has a space. Spaces are currently not supported for image masks."
                        .format(mask))

        # get images directory
        images_dir = tree.dataset_raw

        # define paths and create working directories
        system.mkdir_p(tree.odm_georeferencing)

        log.ODM_INFO('Loading dataset from: %s' % images_dir)

        # check if we rerun cell or not
        images_database_file = os.path.join(tree.root_path, 'images.json')
        if not io.file_exists(images_database_file) or self.rerun():
            if not os.path.exists(images_dir):
                raise system.ExitException(
                    "There are no images in %s! Make sure that your project path and dataset name is correct. The current is set to: %s"
                    % (images_dir, args.project_path))

            files, rejects = get_images(images_dir)
            if files:
                # create ODMPhoto list
                path_files = [os.path.join(images_dir, f) for f in files]

                # Lookup table for masks
                masks = {}
                for r in rejects:
                    (p, ext) = os.path.splitext(r)
                    if p[-5:] == "_mask" and ext.lower(
                    ) in context.supported_extensions:
                        masks[p] = r

                photos = []
                with open(tree.dataset_list, 'w') as dataset_list:
                    log.ODM_INFO("Loading %s images" % len(path_files))
                    for f in path_files:
                        p = types.ODM_Photo(f)
                        p.set_mask(find_mask(f, masks))
                        photos += [p]
                        dataset_list.write(photos[-1].filename + '\n')

                # Check if a geo file is available
                if tree.odm_geo_file is not None and os.path.exists(
                        tree.odm_geo_file):
                    log.ODM_INFO("Found image geolocation file")
                    gf = GeoFile(tree.odm_geo_file)
                    updated = 0
                    for p in photos:
                        entry = gf.get_entry(p.filename)
                        if entry:
                            p.update_with_geo_entry(entry)
                            updated += 1
                    log.ODM_INFO("Updated %s image positions" % updated)

                # Save image database for faster restart
                save_images_database(photos, images_database_file)
            else:
                raise system.ExitException(
                    'Not enough supported images in %s' % images_dir)
        else:
            # We have an images database, just load it
            photos = load_images_database(images_database_file)

        log.ODM_INFO('Found %s usable images' % len(photos))
        log.logger.log_json_images(len(photos))

        # Create reconstruction object
        reconstruction = types.ODM_Reconstruction(photos)

        if tree.odm_georeferencing_gcp and not args.use_exif:
            reconstruction.georeference_with_gcp(
                tree.odm_georeferencing_gcp,
                tree.odm_georeferencing_coords,
                tree.odm_georeferencing_gcp_utm,
                tree.odm_georeferencing_model_txt_geo,
                rerun=self.rerun())
        else:
            reconstruction.georeference_with_gps(
                tree.dataset_raw,
                tree.odm_georeferencing_coords,
                tree.odm_georeferencing_model_txt_geo,
                rerun=self.rerun())

        reconstruction.save_proj_srs(
            os.path.join(tree.odm_georeferencing,
                         tree.odm_georeferencing_proj))
        outputs['reconstruction'] = reconstruction
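This stage also shows the cache-or-recompute pattern ODM uses throughout: expensive results (here, the parsed photo metadata) are written to disk and reloaded on restarts unless a rerun invalidates them. A minimal standalone sketch of that pattern (illustrative only; load_or_compute is not an ODM function):

import json
import os

def load_or_compute(cache_path, compute, rerun=False):
    # Recompute (and refresh the cache) when forced or when no cache exists
    if rerun or not os.path.exists(cache_path):
        data = compute()
        with open(cache_path, 'w') as f:
            json.dump(data, f)
        return data
    # Otherwise reuse the cached result
    with open(cache_path) as f:
        return json.load(f)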
Code example #10
File: odm_filterpoints.py Project: originlake/ODM
    def process(self, args, outputs):
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']

        if not os.path.exists(tree.odm_filterpoints):
            system.mkdir_p(tree.odm_filterpoints)

        inputPointCloud = ""

        # check if reconstruction was done before
        if not io.file_exists(tree.filtered_point_cloud) or self.rerun():
            if args.fast_orthophoto:
                inputPointCloud = os.path.join(tree.opensfm,
                                               'reconstruction.ply')
            else:
                inputPointCloud = tree.openmvs_model

            # Check if we need to compute boundary
            if args.auto_boundary:
                if reconstruction.is_georeferenced():
                    if 'boundary' not in outputs:
                        avg_gsd = gsd.opensfm_reconstruction_average_gsd(
                            tree.opensfm_reconstruction)
                        if avg_gsd is not None:
                            outputs['boundary'] = compute_boundary_from_shots(
                                tree.opensfm_reconstruction, avg_gsd * 20,
                                reconstruction.get_proj_offset()
                            )  # 20 is an arbitrary buffer multiplier
                            if outputs['boundary'] is None:
                                log.ODM_WARNING(
                                    "Cannot compute boundary from camera shots"
                                )
                        else:
                            log.ODM_WARNING(
                                "Cannot compute boundary (GSD cannot be estimated)"
                            )
                    else:
                        log.ODM_WARNING(
                            "--auto-boundary set but so is --boundary, will use --boundary"
                        )
                else:
                    log.ODM_WARNING(
                        "Not a georeferenced reconstruction, will ignore --auto-boundary"
                    )

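            # boundary_offset translates the boundary polygon by the
            # reconstruction's projection offset so it matches the point
            # cloud's local (offset) coordinates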
            point_cloud.filter(inputPointCloud,
                               tree.filtered_point_cloud,
                               standard_deviation=args.pc_filter,
                               sample_radius=args.pc_sample,
                               boundary=boundary_offset(
                                   outputs.get('boundary'),
                                   reconstruction.get_proj_offset()),
                               verbose=args.verbose,
                               max_concurrency=args.max_concurrency)

            # Quick check
            info = point_cloud.ply_info(tree.filtered_point_cloud)
            if info["vertex_count"] == 0:
                extra_msg = ''
                if 'boundary' in outputs:
                    extra_msg = '. Also, since you used a boundary setting, make sure that the boundary polygon you specified covers the reconstruction area correctly.'
                raise system.ExitException(
                    "Uh oh! We ended up with an empty point cloud. This means that the reconstruction did not succeed. Have you followed best practices for data acquisition? See https://docs.opendronemap.org/flying/%s"
                    % extra_msg)
        else:
            log.ODM_WARNING('Found a valid point cloud file in: %s' %
                            tree.filtered_point_cloud)

        if args.optimize_disk_space and inputPointCloud:
            if os.path.isfile(inputPointCloud):
                os.remove(inputPointCloud)
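The filtering itself happens inside point_cloud.filter, with --pc-filter setting how many standard deviations from the local mean a point may deviate before removal (per the ODM docs). A rough, self-contained sketch of that statistical outlier removal idea, assuming NumPy/SciPy; sor_filter is illustrative, not ODM's implementation:

import numpy as np
from scipy.spatial import cKDTree

def sor_filter(points, k=8, std_multiplier=2.5):
    # points: (N, 3) array; compute each point's mean distance to its
    # k nearest neighbors (column 0 of the query is the point itself)
    tree = cKDTree(points)
    d, _ = tree.query(points, k=k + 1)
    mean_d = d[:, 1:].mean(axis=1)
    # Drop points whose mean neighbor distance is a global outlier
    threshold = mean_d.mean() + std_multiplier * mean_d.std()
    return points[mean_d < threshold]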
Code example #11
    def process(self, args, outputs):
        # get inputs
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']
        photos = reconstruction.photos
        octx = OSFMContext(tree.opensfm)

        if not photos:
            raise system.ExitException('Not enough photos to start OpenMVS')

        # check if reconstruction was done before
        if not io.file_exists(tree.openmvs_model) or self.rerun():
            if self.rerun():
                if io.dir_exists(tree.openmvs):
                    shutil.rmtree(tree.openmvs)

            # export reconstruction from opensfm
            openmvs_scene_file = os.path.join(tree.openmvs, "scene.mvs")
            if not io.file_exists(openmvs_scene_file) or self.rerun():
                cmd = 'export_openmvs'
                octx.run(cmd)
            else:
                log.ODM_WARNING("Found existing %s" % openmvs_scene_file)
            
            self.update_progress(10)

            depthmaps_dir = os.path.join(tree.openmvs, "depthmaps")

            if io.dir_exists(depthmaps_dir) and self.rerun():
                shutil.rmtree(depthmaps_dir)

            if not io.dir_exists(depthmaps_dir):
                os.mkdir(depthmaps_dir)
            
            depthmap_resolution = get_depthmap_resolution(args, photos)
            if outputs['undist_image_max_size'] <= depthmap_resolution:
                resolution_level = 0
            else:
                resolution_level = int(round(math.log(outputs['undist_image_max_size'] / float(depthmap_resolution)) / math.log(2)))
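            # resolution_level is the number of halvings applied to the
            # undistorted images: log2(max_size / depthmap_resolution). E.g.
            # 4096 px images with a 1024 px depthmap target give level 2
            # (depthmaps estimated at quarter resolution).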

            log.ODM_INFO("Running dense reconstruction. This might take a while.")
            
            log.ODM_INFO("Estimating depthmaps")

            densify_ini_file = os.path.join(tree.openmvs, 'config.ini')
            with open(densify_ini_file, 'w+') as f:
                f.write("Optimize = 0\n") # Disable depth-maps re-filtering
            
            config = [
                "--resolution-level %s" % int(resolution_level),
                "--min-resolution %s" % depthmap_resolution,
                "--max-resolution %s" % int(outputs['undist_image_max_size']),
                "--max-threads %s" % args.max_concurrency,
                "--number-views-fuse 2",
                '-w "%s"' % depthmaps_dir,
                "-v 0"
            ]

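            # With --pc-tile, switch DensifyPointCloud's fusion mode so fusion
            # is deferred; sub-scenes are split out and fused one by one below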
            if args.pc_tile:
                config.append("--fusion-mode 1")
            
            if not args.pc_geometric:
                config.append("--geometric-iters 0")

            system.run('%s "%s" %s' % (context.omvs_densify_path,
                                       openmvs_scene_file,
                                       ' '.join(config)))

            self.update_progress(85)
            files_to_remove = []
            scene_dense = os.path.join(tree.openmvs, 'scene_dense.mvs')

            if args.pc_tile:
                log.ODM_INFO("Computing sub-scenes")
                config = [
                    "--sub-scene-area 660000",
                    "--max-threads %s" % args.max_concurrency,
                    '-w "%s"' % depthmaps_dir, 
                    "-v 0",
                ]
                system.run('%s "%s" %s' % (context.omvs_densify_path,
                                           openmvs_scene_file,
                                           ' '.join(config)))
                
                scene_files = glob.glob(os.path.join(tree.openmvs, "scene_[0-9][0-9][0-9][0-9].mvs"))
                if len(scene_files) == 0:
                    raise system.ExitException("No OpenMVS scenes found. This could be a bug, or the reconstruction could not be processed.")

                log.ODM_INFO("Fusing depthmaps for %s scenes" % len(scene_files))
                
                scene_ply_files = []

                for sf in scene_files:
                    p, _ = os.path.splitext(sf)
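                    # Names DensifyPointCloud is expected to produce for this
                    # sub-scene: <scene>_dense.mvs, then after filtering,
                    # <scene>_dense_dense_filtered.ply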
                    scene_ply = p + "_dense_dense_filtered.ply"
                    scene_dense_mvs = p + "_dense.mvs"

                    files_to_remove += [scene_ply, sf, scene_dense_mvs]
                    scene_ply_files.append(scene_ply)

                    if not io.file_exists(scene_ply) or self.rerun():
                        # Fuse
                        config = [
                            '--resolution-level %s' % int(resolution_level),
                            '--min-resolution %s' % depthmap_resolution,
                            '--max-resolution %s' % int(outputs['undist_image_max_size']),
                            '--dense-config-file "%s"' % densify_ini_file,
                            '--number-views-fuse 2',
                            '--max-threads %s' % args.max_concurrency,
                            '-w "%s"' % depthmaps_dir,
                            '-v 0',
                        ]

                        try:
                            system.run('%s "%s" %s' % (context.omvs_densify_path, sf, ' '.join(config)))

                            # Filter
                            system.run('%s "%s" --filter-point-cloud -1 -v 0' % (context.omvs_densify_path, scene_dense_mvs))
                        except Exception:
                            log.ODM_WARNING("Sub-scene %s could not be reconstructed, skipping..." % sf)

                        if not io.file_exists(scene_ply):
                            scene_ply_files.pop()
                            log.ODM_WARNING("Could not compute PLY for subscene %s" % sf)
                    else:
                        log.ODM_WARNING("Found existing dense scene file %s" % scene_ply)

                # Merge
                log.ODM_INFO("Merging %s scene files" % len(scene_ply_files))
                if len(scene_ply_files) == 0:
                    log.ODM_ERROR("Could not compute dense point cloud (no PLY files available).")
                elif len(scene_ply_files) == 1:
                    # Simply rename
                    os.replace(scene_ply_files[0], tree.openmvs_model)
                    log.ODM_INFO("%s --> %s" % (scene_ply_files[0], tree.openmvs_model))
                else:
                    # Merge
                    fast_merge_ply(scene_ply_files, tree.openmvs_model)
            else:
                # Filter all at once
                if os.path.exists(scene_dense):
                    config = [
                        "--filter-point-cloud -1",
                        '-i "%s"' % scene_dense,
                        "-v 0"
                    ]
                    system.run('%s %s' % (context.omvs_densify_path, ' '.join(config)))
                else:
                    raise system.ExitException("Cannot find scene_dense.mvs, dense reconstruction probably failed. Exiting...")

            # TODO: add support for image masks

            self.update_progress(95)

            if args.optimize_disk_space:
                files = [scene_dense,
                         os.path.join(tree.openmvs, 'scene_dense.ply'),
                         os.path.join(tree.openmvs, 'scene_dense_dense_filtered.mvs'),
                         octx.path("undistorted", "tracks.csv"),
                         octx.path("undistorted", "reconstruction.json")
                        ] + files_to_remove
                for f in files:
                    if os.path.exists(f):
                        os.remove(f)
                shutil.rmtree(depthmaps_dir)
        else:
            log.ODM_WARNING('Found a valid OpenMVS reconstruction file in: %s' %
                            tree.openmvs_model)