Example #1
    def process(self, inputs, outputs):
        # check if the extension is supported
        def supported_extension(file_name):
            (pathfn, ext) = os.path.splitext(file_name)
            return ext.lower() in context.supported_extensions

        # Get supported images from dir
        def get_images(in_dir):
            # filter images by extension type
            log.ODM_DEBUG(in_dir)
            return [f for f in io.get_files_list(in_dir) if supported_extension(f)]

        log.ODM_INFO('Running ODM Load Dataset Cell')

        # get inputs
        tree = self.inputs.tree

        # get images directory
        input_dir = tree.input_images
        images_dir = tree.dataset_raw
        resize_dir = tree.dataset_resize

        # Check first whether a project already exists; testing for the resize dir is a crude but sufficient heuristic
        if io.dir_exists(resize_dir):
            log.ODM_DEBUG("resize dir: %s" % resize_dir)
            images_dir = resize_dir
        # if first time running, create project directory and copy images over to project/images
        else:
            if not io.dir_exists(images_dir):
                log.ODM_INFO("Project directory %s doesn't exist. Creating it now. " % images_dir)
                system.mkdir_p(images_dir)
                copied = [copyfile(io.join_paths(input_dir, f), io.join_paths(images_dir, f)) for f in get_images(input_dir)]

        log.ODM_DEBUG('Loading dataset from: %s' % images_dir)

        files = get_images(images_dir)

        if files:
            # create ODMPhoto list
            path_files = [io.join_paths(images_dir, f) for f in files]
            photos = Pool().map(
                partial(make_odm_photo, self.params.force_focal, self.params.force_ccd),
                path_files
            )
            
            log.ODM_INFO('Found %s usable images' % len(photos))            
        else:
            log.ODM_ERROR('Not enough supported images in %s' % images_dir)
            return ecto.QUIT

        # append photos to cell output
        outputs.photos = photos

        log.ODM_INFO('Running ODM Load Dataset Cell - Finished')
        return ecto.OK
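A note on the pattern above: Pool().map(partial(make_odm_photo, ...), path_files) binds the fixed parameters first, so only the path varies across workers; the snippet never shuts the pool down explicitly, which can leave worker processes around. A minimal self-contained sketch of the same pattern with cleanup (Python 3; load_photo and its argument values are hypothetical stand-ins for make_odm_photo):

    from functools import partial
    from multiprocessing import Pool

    def load_photo(force_focal, force_ccd, path):
        # Stand-in for make_odm_photo: the fixed parameters come first so
        # that partial() can bind them, leaving only the path to map over.
        return {'path': path, 'focal': force_focal, 'ccd': force_ccd}

    if __name__ == '__main__':
        paths = ['img_001.jpg', 'img_002.jpg', 'img_003.jpg']
        with Pool() as pool:  # the with-block shuts the workers down on exit
            photos = pool.map(partial(load_photo, 8.0, 6.16), paths)
        print('Loaded %s photos' % len(photos))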
Example #2
    def process(self, inputs, outputs):
        # check if the extension is supported
        def supported_extension(file_name):
            (pathfn, ext) = os.path.splitext(file_name)
            return ext.lower() in context.supported_extensions

        log.ODM_INFO('Running ODM Load Dataset Cell')

        # get inputs
        tree = self.inputs.tree

        # set images directory
        images_dir = tree.dataset_resize

        if not io.dir_exists(images_dir):
            images_dir = tree.dataset_raw
            if not io.dir_exists(images_dir):
                log.ODM_ERROR("You must put your pictures into an <images> directory")
                return ecto.QUIT

        log.ODM_DEBUG('Loading dataset from: %s' % images_dir)

        # find files in the given directory
        files = io.get_files_list(images_dir)

        # filter images by extension type
        files = [f for f in files if supported_extension(f)]

        if files:
            # create ODMPhoto list
            photos = []
            for f in files:
                path_file = io.join_paths(images_dir, f)
                photo = types.ODM_Photo(path_file,
                                        self.params.force_focal,
                                        self.params.force_ccd)
                photos.append(photo)
            
            log.ODM_INFO('Found %s usable images' % len(photos))            
        else:
            log.ODM_ERROR('Not enough supported images in %s' % images_dir)
            return ecto.QUIT

        # append photos to cell output
        outputs.photos = photos

        log.ODM_INFO('Running ODM Load Dataset Cell - Finished')
        return ecto.OK
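Both loaders above hinge on the supported_extension helper. A standalone sketch of the same check (the extension set here is illustrative; the real list lives in context.supported_extensions):

    import os

    SUPPORTED_EXTENSIONS = {'.jpg', '.jpeg', '.png', '.tif', '.tiff'}  # illustrative

    def supported_extension(file_name):
        # Case-insensitive comparison of the file extension, as above.
        _, ext = os.path.splitext(file_name)
        return ext.lower() in SUPPORTED_EXTENSIONS

    assert supported_extension('IMG_0001.JPG')
    assert not supported_extension('notes.txt')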
Example #3
    def process(self, inputs, outputs):
        # Benchmarking
        start_time = system.now_raw()

        log.ODM_INFO('Running MVE Cell')

        # get inputs
        tree = inputs.tree
        args = inputs.args
        reconstruction = inputs.reconstruction
        photos = reconstruction.photos

        if not photos:
            log.ODM_ERROR('Not enough photos in photos array to start MVE')
            return ecto.QUIT

        # check if we rerun cell or not
        rerun_cell = (args.rerun is not None and
                      args.rerun == 'mve') or \
                     (args.rerun_all) or \
                     (args.rerun_from is not None and
                      'mve' in args.rerun_from)

        # check if reconstruction was done before
        if not io.file_exists(tree.mve_model) or rerun_cell:
            # cleanup if a rerun
            if io.dir_exists(tree.mve_path) and rerun_cell:
                shutil.rmtree(tree.mve_path)

            # make bundle directory
            if not io.file_exists(tree.mve_bundle):
                system.mkdir_p(tree.mve_path)
                system.mkdir_p(io.join_paths(tree.mve_path, 'bundle'))
                io.copy(tree.opensfm_image_list, tree.mve_image_list)
                io.copy(tree.opensfm_bundle, tree.mve_bundle)

            # mve makescene expects the output directory not to exist
            # before it runs (otherwise it prompts the user for
            # confirmation)
            if io.dir_exists(tree.mve):
                shutil.rmtree(tree.mve)

            # run mve makescene
            if not io.dir_exists(tree.mve_views):
                system.run('%s %s %s' %
                           (context.makescene_path, tree.mve_path, tree.mve),
                           env_vars={'OMP_NUM_THREADS': args.max_concurrency})

            # Compute mve output scale based on depthmap_resolution
            max_width = 0
            max_height = 0
            for photo in photos:
                max_width = max(photo.width, max_width)
                max_height = max(photo.height, max_height)

            max_pixels = args.depthmap_resolution * args.depthmap_resolution
            if max_width * max_height <= max_pixels:
                mve_output_scale = 0
            else:
                ratio = float(max_width * max_height) / float(max_pixels)
                mve_output_scale = int(
                    math.ceil(math.log(ratio) / math.log(4.0)))

            dmrecon_config = [
                "-s%s" % mve_output_scale,
                "--progress=silent",
                "--local-neighbors=2",
                "--force",
            ]

            # Run MVE's dmrecon
            log.ODM_INFO('                                                                               ')
            log.ODM_INFO('                                    ,*/**                                      ')
            log.ODM_INFO('                                  ,*@%*/@%*                                    ')
            log.ODM_INFO('                                ,/@%******@&*.                                 ')
            log.ODM_INFO('                              ,*@&*********/@&*                                ')
            log.ODM_INFO('                            ,*@&**************@&*                              ')
            log.ODM_INFO('                          ,/@&******************@&*.                           ')
            log.ODM_INFO('                        ,*@&*********************/@&*                          ')
            log.ODM_INFO('                      ,*@&**************************@&*.                       ')
            log.ODM_INFO('                    ,/@&******************************&&*,                     ')
            log.ODM_INFO('                  ,*&&**********************************@&*.                   ')
            log.ODM_INFO('                ,*@&**************************************@&*.                 ')
            log.ODM_INFO('              ,*@&***************#@@@@@@@@@%****************&&*,               ')
            log.ODM_INFO('            .*&&***************&@@@@@@@@@@@@@@****************@@*.             ')
            log.ODM_INFO('          .*@&***************&@@@@@@@@@@@@@@@@@%****(@@%********@@*.           ')
            log.ODM_INFO('        .*@@***************%@@@@@@@@@@@@@@@@@@@@@#****&@@@@%******&@*,         ')
            log.ODM_INFO('      .*&@****************@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@/*****@@*.       ')
            log.ODM_INFO('    .*@@****************@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@%*************@@*.     ')
            log.ODM_INFO('  .*@@****/***********@@@@@&**(@@@@@@@@@@@@@@@@@@@@@@@#*****************%@*,   ')
            log.ODM_INFO(' */@*******@*******#@@@@%*******/@@@@@@@@@@@@@@@@@@@@********************/@(,  ')
            log.ODM_INFO(' ,*@(********&@@@@@@#**************/@@@@@@@#**(@@&/**********************@&*   ')
            log.ODM_INFO('   *#@/*******************************@@@@@***&@&**********************&@*,    ')
            log.ODM_INFO('     *#@#******************************&@@@***@#*********************&@*,      ')
            log.ODM_INFO('       */@#*****************************@@@************************@@*.        ')
            log.ODM_INFO('         *#@/***************************/@@/*********************%@*,          ')
            log.ODM_INFO('           *#@#**************************#@@%******************%@*,            ')
            log.ODM_INFO('             */@#*************************(@@@@@@@&%/********&@*.              ')
            log.ODM_INFO('               *(@(*********************************/%@@%**%@*,                ')
            log.ODM_INFO('                 *(@%************************************%@**                  ')
            log.ODM_INFO('                   **@%********************************&@*,                    ')
            log.ODM_INFO('                     *(@(****************************%@/*                      ')
            log.ODM_INFO('                       ,(@%************************#@/*                        ')
            log.ODM_INFO('                         ,*@%********************&@/,                          ')
            log.ODM_INFO('                           */@#****************#@/*                            ')
            log.ODM_INFO('                             ,/@&************#@/*                              ')
            log.ODM_INFO('                               ,*@&********%@/,                                ')
            log.ODM_INFO('                                 */@#****(@/*                                  ')
            log.ODM_INFO('                                   ,/@@@@(*                                    ')
            log.ODM_INFO('                                     .**,                                      ')
            log.ODM_INFO('')
            log.ODM_INFO("Running dense reconstruction. This might take a while. "
                         "Please be patient, the process is not dead or hung.")
            log.ODM_INFO("                              Process is running")
            system.run(
                '%s %s %s' %
                (context.dmrecon_path, ' '.join(dmrecon_config), tree.mve),
                env_vars={'OMP_NUM_THREADS': args.max_concurrency})

            scene2pset_config = ["-F%s" % mve_output_scale]

            # run scene2pset
            system.run('%s %s "%s" "%s"' %
                       (context.scene2pset_path, ' '.join(scene2pset_config),
                        tree.mve, tree.mve_model),
                       env_vars={'OMP_NUM_THREADS': args.max_concurrency})
        else:
            log.ODM_WARNING('Found a valid MVE reconstruction file in: %s' %
                            tree.mve_model)

        outputs.reconstruction = reconstruction

        if args.time:
            system.benchmark(start_time, tree.benchmarking, 'MVE')

        log.ODM_INFO('Running ODM MVE Cell - Finished')
        return ecto.OK if args.end_with != 'mve' else ecto.QUIT
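The mve_output_scale computation above is worth spelling out: each scale step halves the image along both axes, quartering the pixel count, so the code looks for the smallest integer s with pixels / 4**s <= depthmap_resolution**2. A standalone check of that arithmetic (image sizes illustrative):

    import math

    def mve_scale(max_width, max_height, depthmap_resolution):
        # Each +1 in scale quarters the pixel count (half per axis),
        # hence the logarithm base 4.
        max_pixels = depthmap_resolution * depthmap_resolution
        if max_width * max_height <= max_pixels:
            return 0
        ratio = float(max_width * max_height) / float(max_pixels)
        return int(math.ceil(math.log(ratio) / math.log(4.0)))

    # 24 MP images against a 640px depthmap budget:
    # ratio = 24000000 / 409600 ~ 58.6, log4(58.6) ~ 2.94 -> scale 3
    assert mve_scale(6000, 4000, 640) == 3
    assert mve_scale(600, 400, 640) == 0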
Example #4
    def process(self, args, outputs):
        # get inputs
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']
        photos = reconstruction.photos

        if not photos:
            log.ODM_ERROR('Not enough photos in photos array to start OpenMVS')
            exit(1)

        # check if reconstruction was done before
        if not io.file_exists(tree.openmvs_model) or self.rerun():
            if io.dir_exists(tree.openmvs):
                shutil.rmtree(tree.openmvs)

            # export reconstruction from opensfm
            octx = OSFMContext(tree.opensfm)
            cmd = 'export_openmvs'
            if reconstruction.multi_camera:
                # Export only the primary band
                primary = reconstruction.multi_camera[0]
                image_list = os.path.join(
                    tree.opensfm,
                    "image_list_%s.txt" % primary['name'].lower())
                cmd += ' --image_list "%s"' % image_list
            octx.run(cmd)

            self.update_progress(10)

            depthmaps_dir = os.path.join(tree.openmvs, "depthmaps")
            if not io.dir_exists(depthmaps_dir):
                os.mkdir(depthmaps_dir)

            if outputs["undist_image_max_size"] <= args.depthmap_resolution:
                resolution_level = 0
            else:
                resolution_level = math.floor(
                    math.log(outputs['undist_image_max_size'] /
                             float(args.depthmap_resolution)) / math.log(2))

            config = [
                " --resolution-level %s" % int(resolution_level),
                "--min-resolution %s" % int(args.depthmap_resolution),
                "--max-resolution %s" % int(outputs['undist_image_max_size']),
                "--max-threads %s" % args.max_concurrency,
                '-w "%s"' % depthmaps_dir,
                "-v 0",
            ]

            log.ODM_INFO(
                "Running dense reconstruction. This might take a while.")

            system.run(
                '%s "%s" %s' %
                (context.omvs_densify_path,
                 os.path.join(tree.openmvs, 'scene.mvs'), ' '.join(config)))

            self.update_progress(85)

            # Filter points
            scene_dense = os.path.join(tree.openmvs, 'scene_dense.mvs')
            if os.path.exists(scene_dense):
                config = [
                    "--filter-point-cloud -1",
                    '-i "%s"' % scene_dense, "-v 0"
                ]
                system.run('%s %s' %
                           (context.omvs_densify_path, ' '.join(config)))
            else:
                log.ODM_WARNING(
                    "Cannot find scene_dense.mvs, dense reconstruction probably failed. Exiting..."
                )
                exit(1)

            self.update_progress(95)

            if args.optimize_disk_space:
                files = [
                    scene_dense,
                    os.path.join(tree.openmvs, 'scene_dense.ply'),
                    os.path.join(tree.openmvs,
                                 'scene_dense_dense_filtered.mvs')
                ]
                for f in files:
                    if os.path.exists(f):
                        os.remove(f)
                shutil.rmtree(depthmaps_dir)
        else:
            log.ODM_WARNING(
                'Found a valid OpenMVS reconstruction file in: %s' %
                tree.openmvs_model)
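OpenMVS expresses the same idea differently: --resolution-level counts halvings of the longest image edge rather than of the pixel count, hence the logarithm base 2 and the floor. A standalone check (sizes illustrative):

    import math

    def openmvs_resolution_level(undist_image_max_size, depthmap_resolution):
        # Each level halves the largest image dimension, so the level is
        # floor(log2(max_size / target_resolution)).
        if undist_image_max_size <= depthmap_resolution:
            return 0
        return int(math.floor(
            math.log(undist_image_max_size / float(depthmap_resolution)) /
            math.log(2)))

    # 4000px undistorted images against a 640px budget:
    # 4000 / 640 = 6.25, log2(6.25) ~ 2.64 -> level 2 (4000 -> 2000 -> 1000)
    assert openmvs_resolution_level(4000, 640) == 2

Examples 9 and 12 below compute the same quantity with round() instead of math.floor(), which can pick one level higher when the ratio sits just past a half step.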
Example #5
    def setup(self, args, images_path, reconstruction, append_config=[], rerun=False):
        """
        Set up an OpenSfM project.
        """
        if rerun and io.dir_exists(self.opensfm_project_path):
            shutil.rmtree(self.opensfm_project_path)

        if not io.dir_exists(self.opensfm_project_path):
            system.mkdir_p(self.opensfm_project_path)

        list_path = os.path.join(self.opensfm_project_path, 'image_list.txt')
        if not io.file_exists(list_path) or rerun:

            if reconstruction.multi_camera:
                photos = get_photos_by_band(reconstruction.multi_camera, args.primary_band)
                if len(photos) < 1:
                    raise Exception("Not enough images in selected band %s" % args.primary_band.lower())
                log.ODM_INFO("Reconstruction will use %s images from %s band" % (len(photos), args.primary_band.lower()))
            else:
                photos = reconstruction.photos

            # create file list
            has_alt = True
            has_gps = False
            with open(list_path, 'w') as fout:
                for photo in photos:
                    if not photo.altitude:
                        has_alt = False
                    if photo.latitude is not None and photo.longitude is not None:
                        has_gps = True

                    fout.write('%s\n' % os.path.join(images_path, photo.filename))
            
            # check for image_groups.txt (split-merge)
            image_groups_file = os.path.join(args.project_path, "image_groups.txt")
            if 'split_image_groups_is_set' in args:
                image_groups_file = os.path.abspath(args.split_image_groups)

            if io.file_exists(image_groups_file):
                dst_groups_file = os.path.join(self.opensfm_project_path, "image_groups.txt")
                io.copy(image_groups_file, dst_groups_file)
                log.ODM_INFO("Copied %s to %s" % (image_groups_file, dst_groups_file))
        
            # check for cameras
            if args.cameras:
                try:
                    camera_overrides = camera.get_opensfm_camera_models(args.cameras)
                    with open(os.path.join(self.opensfm_project_path, "camera_models_overrides.json"), 'w') as f:
                        f.write(json.dumps(camera_overrides))
                    log.ODM_INFO("Wrote camera_models_overrides.json to OpenSfM directory")
                except Exception as e:
                    log.ODM_WARNING("Cannot set camera_models_overrides.json: %s" % str(e))

            use_bow = args.matcher_type == "bow"
            feature_type = "SIFT"

            # GPSDOP override if we have GPS accuracy information (such as RTK)
            if 'gps_accuracy_is_set' in args:
                log.ODM_INFO("Forcing GPS DOP to %s for all images" % args.gps_accuracy)
            
            log.ODM_INFO("Writing exif overrides")

            exif_overrides = {}
            for p in photos:
                if 'gps_accuracy_is_set' in args:
                    dop = args.gps_accuracy
                elif p.get_gps_dop() is not None:
                    dop = p.get_gps_dop()
                else:
                    dop = args.gps_accuracy # default value

                if p.latitude is not None and p.longitude is not None:
                    exif_overrides[p.filename] = {
                        'gps': {
                            'latitude': p.latitude,
                            'longitude': p.longitude,
                            'altitude': p.altitude if p.altitude is not None else 0,
                            'dop': dop,
                        }
                    }

            with open(os.path.join(self.opensfm_project_path, "exif_overrides.json"), 'w') as f:
                f.write(json.dumps(exif_overrides))

            # Check image masks
            masks = []
            for p in photos:
                if p.mask is not None:
                    masks.append((p.filename, os.path.join(images_path, p.mask)))
            
            if masks:
                log.ODM_INFO("Found %s image masks" % len(masks))
                with open(os.path.join(self.opensfm_project_path, "mask_list.txt"), 'w') as f:
                    for fname, mask in masks:
                        f.write("{} {}\n".format(fname, mask))
            
            # Compute feature_process_size
            feature_process_size = 2048 # default

            if ('resize_to_is_set' in args) and args.resize_to > 0:
                # Legacy
                log.ODM_WARNING("Legacy option --resize-to (this might be removed in a future version). Use --feature-quality instead.")
                feature_process_size = int(args.resize_to)
            else:
                feature_quality_scale = {
                    'ultra': 1,
                    'high': 0.5,
                    'medium': 0.25,
                    'low': 0.125,
                    'lowest': 0.0675,
                }

                max_dim = find_largest_photo_dim(photos)

                if max_dim > 0:
                    log.ODM_INFO("Maximum photo dimensions: %spx" % str(max_dim))
                    feature_process_size = int(max_dim * feature_quality_scale[args.feature_quality])
                    log.ODM_INFO("Photo dimensions for feature extraction: %ipx" % feature_process_size)
                else:
                    log.ODM_WARNING("Cannot compute max image dimensions, going with defaults")

            depthmap_resolution = get_depthmap_resolution(args, photos)

            # create config file for OpenSfM
            config = [
                "use_exif_size: no",
                "flann_algorithm: KDTREE", # more stable, faster than KMEANS
                "feature_process_size: %s" % feature_process_size,
                "feature_min_frames: %s" % args.min_num_features,
                "processes: %s" % args.max_concurrency,
                "matching_gps_neighbors: %s" % args.matcher_neighbors,
                "matching_gps_distance: %s" % args.matcher_distance,
                "optimize_camera_parameters: %s" % ('no' if args.use_fixed_camera_params or args.cameras else 'yes'),
                "undistorted_image_format: tif",
                "bundle_outlier_filtering_type: AUTO",
                "align_orientation_prior: vertical",
                "triangulation_type: ROBUST",
                "retriangulation_ratio: 2",
            ]

            if args.camera_lens != 'auto':
                config.append("camera_projection_type: %s" % args.camera_lens.upper())

            if not has_gps:
                log.ODM_INFO("No GPS information, using BOW matching")
                use_bow = True

            feature_type = args.feature_type.upper()

            if use_bow:
                config.append("matcher_type: WORDS")

                # Cannot use SIFT with BOW
                if feature_type == "SIFT":
                    log.ODM_WARNING("Using BOW matching, will use HAHOG feature type, not SIFT")
                    feature_type = "HAHOG"
            
            # GPU acceleration?
            if has_gpus() and feature_type == "SIFT":
                log.ODM_INFO("Using GPU for extracting SIFT features")
                log.ODM_INFO("--min-num-features will be ignored")
                feature_type = "SIFT_GPU"
            
            config.append("feature_type: %s" % feature_type)

            if has_alt:
                log.ODM_INFO("Altitude data detected, enabling it for GPS alignment")
                config.append("use_altitude_tag: yes")

            gcp_path = reconstruction.gcp.gcp_path
            if has_alt or gcp_path:
                config.append("align_method: auto")
            else:
                config.append("align_method: orientation_prior")
            
            if args.use_hybrid_bundle_adjustment:
                log.ODM_INFO("Enabling hybrid bundle adjustment")
                config.append("bundle_interval: 100")          # Bundle after adding 'bundle_interval' cameras
                config.append("bundle_new_points_ratio: 1.2")  # Bundle when (new points) / (bundled points) > bundle_new_points_ratio
                config.append("local_bundle_radius: 1")        # Max image graph distance for images to be included in local bundle adjustment
            else:
                config.append("local_bundle_radius: 0")
                
            if gcp_path:
                config.append("bundle_use_gcp: yes")
                if not args.force_gps:
                    config.append("bundle_use_gps: no")
                io.copy(gcp_path, self.path("gcp_list.txt"))
            
            config = config + append_config

            # write config file
            log.ODM_INFO(config)
            config_filename = self.get_config_file_path()
            with open(config_filename, 'w') as fout:
                fout.write("\n".join(config))
        else:
            log.ODM_WARNING("%s already exists, not rerunning OpenSfM setup" % list_path)
Example #6
File: osfm.py Project: tub-sgg/ODM
    def is_feature_matching_done(self):
        features_dir = self.path("features")
        matches_dir = self.path("matches")

        return io.dir_exists(features_dir) and io.dir_exists(matches_dir)
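A sketch of how a caller might use this check to skip redundant work; the os-based test below reproduces the same condition without the io/self.path helpers (paths illustrative):

    import os

    def features_and_matches_done(opensfm_project_path):
        # Same test as is_feature_matching_done above: both OpenSfM
        # output directories must already exist.
        return (os.path.isdir(os.path.join(opensfm_project_path, 'features')) and
                os.path.isdir(os.path.join(opensfm_project_path, 'matches')))

    if features_and_matches_done('/tmp/project/opensfm'):
        print('features and matches found, skipping detect/match')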
Example #7
    def process(self, args, outputs):
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']

        if outputs['large']:
            if not os.path.exists(tree.submodels_path):
                log.ODM_ERROR(
                    "We reached the merge stage, but the %s folder does not exist. "
                    "Something must have gone wrong at an earlier stage. Check the "
                    "log and fix the problem before restarting."
                    % tree.submodels_path)
                exit(1)

            # Merge point clouds
            if args.merge in ['all', 'pointcloud']:
                if not io.dir_exists(tree.entwine_pointcloud) or self.rerun():
                    all_point_clouds = get_submodel_paths(
                        tree.submodels_path, "odm_georeferencing",
                        "odm_georeferenced_model.laz")

                    try:
                        entwine.build(all_point_clouds,
                                      tree.entwine_pointcloud,
                                      max_concurrency=args.max_concurrency,
                                      rerun=self.rerun())
                    except Exception as e:
                        log.ODM_WARNING(
                            "Could not merge EPT point cloud: %s (skipping)" %
                            str(e))
                else:
                    log.ODM_WARNING("Found merged EPT point cloud in %s" %
                                    tree.entwine_pointcloud)

                if not io.file_exists(
                        tree.odm_georeferencing_model_laz) or self.rerun():
                    if io.dir_exists(tree.entwine_pointcloud):
                        try:
                            system.run('pdal translate "ept://{}" "{}"'.format(
                                tree.entwine_pointcloud,
                                tree.odm_georeferencing_model_laz))
                        except Exception as e:
                            log.ODM_WARNING(
                                "Cannot export EPT dataset to LAZ: %s" %
                                str(e))
                    else:
                        log.ODM_WARNING(
                            "No EPT point cloud found (%s), skipping LAZ conversion"
                            % tree.entwine_pointcloud)
                else:
                    log.ODM_WARNING("Found merged point cloud in %s" %
                                    tree.odm_georeferencing_model_laz)

            self.update_progress(25)

            # Merge crop bounds
            merged_bounds_file = os.path.join(
                tree.odm_georeferencing, 'odm_georeferenced_model.bounds.gpkg')
            if not io.file_exists(merged_bounds_file) or self.rerun():
                all_bounds = get_submodel_paths(
                    tree.submodels_path, 'odm_georeferencing',
                    'odm_georeferenced_model.bounds.gpkg')
                log.ODM_INFO("Merging all crop bounds: %s" % all_bounds)
                if len(all_bounds) > 0:
                    # Calculate a new crop area
                    # based on the convex hull of all crop areas of all submodels
                    # (without a buffer, otherwise we are double-cropping)
                    Cropper.merge_bounds(all_bounds, merged_bounds_file, 0)
                else:
                    log.ODM_WARNING("No bounds found for any submodel.")

            # Merge orthophotos
            if args.merge in ['all', 'orthophoto']:
                if not io.dir_exists(tree.odm_orthophoto):
                    system.mkdir_p(tree.odm_orthophoto)

                if not io.file_exists(tree.odm_orthophoto_tif) or self.rerun():
                    all_orthos_and_cutlines = get_all_submodel_paths(
                        tree.submodels_path,
                        os.path.join("odm_orthophoto", "odm_orthophoto.tif"),
                        os.path.join("odm_orthophoto", "cutline.gpkg"),
                    )

                    if len(all_orthos_and_cutlines) > 1:
                        log.ODM_INFO(
                            "Found %s submodels with valid orthophotos and cutlines"
                            % len(all_orthos_and_cutlines))

                        # TODO: histogram matching via rasterio
                        # currently parts have different color tones

                        merged_geotiff = os.path.join(
                            tree.odm_orthophoto, "odm_orthophoto.merged.tif")

                        kwargs = {
                            'orthophoto_merged': merged_geotiff,
                            'input_files': ' '.join(
                                quote(i[0]) for i in all_orthos_and_cutlines),
                            'max_memory': get_max_memory(),
                            'threads': args.max_concurrency,
                        }

                        # use bounds as cutlines (blending)
                        if io.file_exists(merged_geotiff):
                            os.remove(merged_geotiff)

                        system.run('gdal_merge.py -o {orthophoto_merged} '
                                   #'-createonly '
                                   '-co "BIGTIFF=YES" '
                                   '-co "BLOCKXSIZE=512" '
                                   '-co "BLOCKYSIZE=512" '
                                   '--config GDAL_CACHEMAX {max_memory}% '
                                   '{input_files} '.format(**kwargs))

                        for ortho_cutline in all_orthos_and_cutlines:
                            kwargs['input_file'], kwargs['cutline'] = ortho_cutline

                            # Note: cblend has a high performance penalty
                            system.run(
                                'gdalwarp -cutline {cutline} '
                                '-cblend 20 '
                                '-r bilinear -multi '
                                '-wo NUM_THREADS={threads} '
                                '--config GDAL_CACHEMAX {max_memory}% '
                                '{input_file} {orthophoto_merged}'.format(
                                    **kwargs))

                        # Apply orthophoto settings (compression, tiling, etc.)
                        orthophoto_vars = orthophoto.get_orthophoto_vars(args)

                        if io.file_exists(tree.odm_orthophoto_tif):
                            os.remove(tree.odm_orthophoto_tif)

                        kwargs = {
                            'vars': ' '.join(['-co %s=%s' % (k, orthophoto_vars[k])
                                              for k in orthophoto_vars]),
                            'max_memory': get_max_memory(),
                            'merged': merged_geotiff,
                            'log': tree.odm_orthophoto_tif_log,
                            'orthophoto': tree.odm_orthophoto_tif,
                        }

                        system.run(
                            'gdal_translate '
                            '{vars} '
                            '--config GDAL_CACHEMAX {max_memory}% '
                            '{merged} {orthophoto} > {log}'.format(**kwargs))

                        os.remove(merged_geotiff)

                        # Crop
                        if args.crop > 0:
                            Cropper.crop(merged_bounds_file,
                                         tree.odm_orthophoto_tif,
                                         orthophoto_vars)

                        # Overviews
                        if args.build_overviews:
                            orthophoto.build_overviews(tree.odm_orthophoto_tif)

                    elif len(all_orthos_and_cutlines) == 1:
                        # Simply copy
                        log.ODM_WARNING(
                            "A single orthophoto/cutline pair was found among the submodels."
                        )
                        shutil.copyfile(all_orthos_and_cutlines[0][0],
                                        tree.odm_orthophoto_tif)
                    else:
                        log.ODM_WARNING(
                            "No orthophoto/cutline pairs were found in any of the submodels. No orthophoto will be generated."
                        )
                else:
                    log.ODM_WARNING("Found merged orthophoto in %s" %
                                    tree.odm_orthophoto_tif)

            self.update_progress(75)

            # Merge DEMs
            def merge_dems(dem_filename, human_name):
                if not io.dir_exists(tree.path('odm_dem')):
                    system.mkdir_p(tree.path('odm_dem'))

                dem_file = tree.path("odm_dem", dem_filename)
                if not io.file_exists(dem_file) or self.rerun():
                    all_dems = get_submodel_paths(tree.submodels_path,
                                                  "odm_dem", dem_filename)
                    log.ODM_INFO("Merging %ss" % human_name)

                    # Merge
                    dem_vars = utils.get_dem_vars(args)
                    eu_map_source = None  # Default

                    # Use DSM's euclidean map for DTMs
                    # (requires the DSM to be computed)
                    if human_name == "DTM":
                        eu_map_source = "dsm"

                    euclidean_merge_dems(all_dems,
                                         dem_file,
                                         dem_vars,
                                         euclidean_map_source=eu_map_source)

                    if io.file_exists(dem_file):
                        # Crop
                        if args.crop > 0:
                            Cropper.crop(merged_bounds_file, dem_file,
                                         dem_vars)
                        log.ODM_INFO("Created %s" % dem_file)
                    else:
                        log.ODM_WARNING("Cannot merge %s, %s was not created" %
                                        (human_name, dem_file))
                else:
                    log.ODM_WARNING("Found merged %s in %s" %
                                    (human_name, dem_filename))

            if args.merge in ['all', 'dem'] and args.dsm:
                merge_dems("dsm.tif", "DSM")

            if args.merge in ['all', 'dem'] and args.dtm:
                merge_dems("dtm.tif", "DTM")

            # Stop the pipeline short! We're done.
            self.next_stage = None
        else:
            log.ODM_INFO("Normal dataset, nothing to merge.")
            self.progress = 0.0
Example #8
    def process(self, inputs, outputs):
        # check if the extension is supported
        def supported_extension(file_name):
            (pathfn, ext) = os.path.splitext(file_name)
            return ext.lower() in context.supported_extensions

        # Get supported images from dir
        def get_images(in_dir):
            # filter images by extension type
            log.ODM_DEBUG(in_dir)
            return [f for f in io.get_files_list(in_dir) if supported_extension(f)]

        log.ODM_INFO('Running ODM Load Dataset Cell')

        # get inputs
        tree = self.inputs.tree
        args = self.inputs.args

        # get images directory
        input_dir = tree.input_images
        images_dir = tree.dataset_raw

        if not io.dir_exists(images_dir):
            log.ODM_INFO("Project directory %s doesn't exist. Creating it now. " % images_dir)
            system.mkdir_p(images_dir)
            copied = [copyfile(io.join_paths(input_dir, f), io.join_paths(images_dir, f)) for f in get_images(input_dir)]

        # define paths and create working directories
        system.mkdir_p(tree.odm_georeferencing)
        if args.use_25dmesh: system.mkdir_p(tree.odm_25dgeoreferencing)

        log.ODM_DEBUG('Loading dataset from: %s' % images_dir)

        files = get_images(images_dir)

        if files:
            # create ODMPhoto list
            path_files = [io.join_paths(images_dir, f) for f in files]

            photos = []
            with open(tree.dataset_list, 'w') as dataset_list:
                for path_file in path_files:
                    photos += [make_odm_photo(self.params.force_focal, self.params.force_ccd, path_file)]
                    dataset_list.write(photos[-1].filename + '\n')

            log.ODM_INFO('Found %s usable images' % len(photos))
        else:
            log.ODM_ERROR('Not enough supported images in %s' % images_dir)
            return ecto.QUIT

        # append photos to cell output
        if not self.params.proj:
            if tree.odm_georeferencing_gcp:
                outputs.reconstruction = types.ODM_Reconstruction(photos, coords_file=tree.odm_georeferencing_gcp)
            else:
                verbose = '-verbose' if self.params.verbose else ''
                # Generate UTM from images
                # odm_georeference definitions
                kwargs = {
                    'bin': context.odm_modules_path,
                    'imgs': tree.dataset_raw,
                    'imgs_list': tree.dataset_list,
                    'coords': tree.odm_georeferencing_coords,
                    'log': tree.odm_georeferencing_utm_log,
                    'verbose': verbose
                }

                # run UTM extraction binary
                extract_utm = system.run_and_return('{bin}/odm_extract_utm -imagesPath {imgs}/ '
                                                    '-imageListFile {imgs_list} -outputCoordFile {coords} {verbose} '
                                                    '-logFile {log}'.format(**kwargs))

                if extract_utm != '':
                    log.ODM_WARNING('Could not generate coordinates file. '
                                    'Ignore if there is a GCP file. Error: %s'
                                    % extract_utm)

                outputs.reconstruction = types.ODM_Reconstruction(photos, coords_file=tree.odm_georeferencing_coords)
        else:
            outputs.reconstruction = types.ODM_Reconstruction(photos, projstring=self.params.proj)

        # Save proj to file for future use
        with open(io.join_paths(tree.odm_georeferencing, tree.odm_georeferencing_proj), 'w') as f:
            f.write(outputs.reconstruction.projection.srs)

        log.ODM_INFO('Running ODM Load Dataset Cell - Finished')
        return ecto.OK if args.end_with != 'dataset' else ecto.QUIT
Example #9
    def process(self, args, outputs):
        # get inputs
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']
        photos = reconstruction.photos

        if not photos:
            log.ODM_ERROR('Not enough photos in photos array to start OpenMVS')
            exit(1)

        # check if reconstruction was done before
        if not io.file_exists(tree.openmvs_model) or self.rerun():
            if self.rerun():
                if io.dir_exists(tree.openmvs):
                    shutil.rmtree(tree.openmvs)

            # export reconstruction from opensfm
            # (create octx unconditionally: it is also needed further down
            # when --optimize-disk-space removes OpenSfM temporary files)
            octx = OSFMContext(tree.opensfm)
            openmvs_scene_file = os.path.join(tree.openmvs, "scene.mvs")
            if not io.file_exists(openmvs_scene_file) or self.rerun():
                octx.run('export_openmvs')
            else:
                log.ODM_WARNING("Found existing %s" % openmvs_scene_file)

            self.update_progress(10)

            depthmaps_dir = os.path.join(tree.openmvs, "depthmaps")

            if io.dir_exists(depthmaps_dir) and self.rerun():
                shutil.rmtree(depthmaps_dir)

            if not io.dir_exists(depthmaps_dir):
                os.mkdir(depthmaps_dir)

            depthmap_resolution = get_depthmap_resolution(args, photos)
            if outputs["undist_image_max_size"] <= depthmap_resolution:
                resolution_level = 0
            else:
                resolution_level = int(
                    round(math.log(outputs['undist_image_max_size'] / float(depthmap_resolution)) / math.log(2)))

            log.ODM_INFO("Running dense reconstruction. This might take a while.")

            log.ODM_INFO("Estimating depthmaps")

            densify_ini_file = os.path.join(tree.openmvs, 'config.ini')
            with open(densify_ini_file, 'w+') as f:
                f.write("Optimize = 0\n")  # Disable depth-maps re-filtering

            config = [
                " --resolution-level %s" % int(resolution_level),
                "--min-resolution %s" % depthmap_resolution,
                "--max-resolution %s" % int(outputs['undist_image_max_size']),
                "--max-threads %s" % args.max_concurrency,
                "--number-views-fuse 2",
                '-w "%s"' % depthmaps_dir,
                "-v 0"
            ]

            if args.pc_tile:
                config.append("--fusion-mode 1")

            system.run('%s "%s" %s' % (context.omvs_densify_path,
                                       openmvs_scene_file,
                                       ' '.join(config)))

            self.update_progress(85)
            files_to_remove = []
            scene_dense = os.path.join(tree.openmvs, 'scene_dense.mvs')

            if args.pc_tile:
                log.ODM_INFO("Computing sub-scenes")
                config = [
                    "--sub-scene-area 660000",
                    "--max-threads %s" % args.max_concurrency,
                    '-w "%s"' % depthmaps_dir,
                    "-v 0",
                ]
                system.run('%s "%s" %s' % (context.omvs_densify_path,
                                           openmvs_scene_file,
                                           ' '.join(config)))

                scene_files = glob.glob(os.path.join(tree.openmvs, "scene_[0-9][0-9][0-9][0-9].mvs"))
                if len(scene_files) == 0:
                    log.ODM_ERROR(
                        "No OpenMVS scenes found. This could be a bug, or the reconstruction could not be processed.")
                    exit(1)

                log.ODM_INFO("Fusing depthmaps for %s scenes" % len(scene_files))

                scene_ply_files = []

                for sf in scene_files:
                    p, _ = os.path.splitext(sf)
                    scene_ply = p + "_dense_dense_filtered.ply"
                    scene_dense_mvs = p + "_dense.mvs"

                    files_to_remove += [scene_ply, sf, scene_dense_mvs]
                    scene_ply_files.append(scene_ply)

                    if not io.file_exists(scene_ply) or self.rerun():
                        # Fuse
                        config = [
                            '--resolution-level %s' % int(resolution_level),
                            '--min-resolution %s' % depthmap_resolution,
                            '--max-resolution %s' % int(outputs['undist_image_max_size']),
                            '--dense-config-file "%s"' % densify_ini_file,
                            '--number-views-fuse 2',
                            '--max-threads %s' % args.max_concurrency,
                            '-w "%s"' % depthmaps_dir,
                            '-v 0',
                        ]

                        try:
                            system.run('%s "%s" %s' % (context.omvs_densify_path, sf, ' '.join(config)))

                            # Filter
                            system.run(
                                '%s "%s" --filter-point-cloud -1 -v 0' % (context.omvs_densify_path, scene_dense_mvs))
                        except Exception:
                            log.ODM_WARNING("Sub-scene %s could not be reconstructed, skipping..." % sf)

                        if not io.file_exists(scene_ply):
                            scene_ply_files.pop()
                            log.ODM_WARNING("Could not compute PLY for subscene %s" % sf)
                    else:
                        log.ODM_WARNING("Found existing dense scene file %s" % scene_ply)

                # Merge
                log.ODM_INFO("Merging %s scene files" % len(scene_ply_files))
                if len(scene_ply_files) == 0:
                    log.ODM_ERROR("Could not compute dense point cloud (no PLY files available).")
                if len(scene_ply_files) == 1:
                    # Simply rename
                    os.rename(scene_ply_files[0], tree.openmvs_model)
                    log.ODM_INFO("%s --> %s" % (scene_ply_files[0], tree.openmvs_model))
                else:
                    # Merge
                    fast_merge_ply(scene_ply_files, tree.openmvs_model)
            else:
                # Filter all at once
                if os.path.exists(scene_dense):
                    config = [
                        "--filter-point-cloud -1",
                        '-i "%s"' % scene_dense,
                        "-v 0"
                    ]
                    system.run('%s %s' % (context.omvs_densify_path, ' '.join(config)))
                else:
                    log.ODM_WARNING("Cannot find scene_dense.mvs, dense reconstruction probably failed. Exiting...")
                    exit(1)

            # TODO: add support for image masks

            self.update_progress(95)

            if args.optimize_disk_space:
                files = [scene_dense,
                         os.path.join(tree.openmvs, 'scene_dense.ply'),
                         os.path.join(tree.openmvs, 'scene_dense_dense_filtered.mvs'),
                         octx.path("undistorted", "tracks.csv"),
                         octx.path("undistorted", "reconstruction.json")
                         ] + files_to_remove
                for f in files:
                    if os.path.exists(f):
                        os.remove(f)
                shutil.rmtree(depthmaps_dir)
        else:
            log.ODM_WARNING('Found a valid OpenMVS reconstruction file in: %s' % tree.openmvs_model)
Example #10
from opendm import config
from opendm import io
from opendm import log
from opendm import system

import ecto
import os

from scripts.odm_app import ODMApp

if __name__ == '__main__':

    args = config.config()

    log.ODM_INFO('Initializing OpenDroneMap app - %s' % system.now())

    # Add project dir if it doesn't exist
    args.project_path = io.join_paths(args.project_path, args.name)
    if not io.dir_exists(args.project_path):
        log.ODM_WARNING('Directory %s does not exist. Creating it now.' %
                        args.name)
        system.mkdir_p(os.path.abspath(args.project_path))

    # If user asks to rerun everything, delete all of the existing progress directories.
    # TODO: Move this somewhere it's not hard-coded
    if args.rerun_all:
        log.ODM_DEBUG("Rerun all -- Removing old data")
        for d in ['images_resize', 'odm_georeferencing', 'odm_meshing',
                  'odm_orthophoto', 'odm_texturing', 'opensfm', 'smvs']:
            os.system("rm -rf " + os.path.join(args.project_path, d))
Example #11
    def setup(self,
              args,
              images_path,
              photos,
              gcp_path=None,
              append_config=[],
              rerun=False):
        """
        Setup a OpenSfM project
        """
        if rerun and io.dir_exists(self.opensfm_project_path):
            shutil.rmtree(self.opensfm_project_path)

        if not io.dir_exists(self.opensfm_project_path):
            system.mkdir_p(self.opensfm_project_path)

        list_path = io.join_paths(self.opensfm_project_path, 'image_list.txt')
        if not io.file_exists(list_path) or rerun:

            # create file list
            has_alt = True
            with open(list_path, 'w') as fout:
                for photo in photos:
                    if not photo.altitude:
                        has_alt = False
                    fout.write('%s\n' %
                               io.join_paths(images_path, photo.filename))

            # create config file for OpenSfM
            config = [
                "use_exif_size: no",
                "feature_process_size: %s" % args.resize_to,
                "feature_min_frames: %s" % args.min_num_features,
                "processes: %s" % args.max_concurrency,
                "matching_gps_neighbors: %s" % args.matcher_neighbors,
                "matching_gps_distance: %s" % args.matcher_distance,
                "depthmap_method: %s" % args.opensfm_depthmap_method,
                "depthmap_resolution: %s" % args.depthmap_resolution,
                "depthmap_min_patch_sd: %s" % args.opensfm_depthmap_min_patch_sd,
                "depthmap_min_consistent_views: %s" % args.opensfm_depthmap_min_consistent_views,
                "optimize_camera_parameters: %s" % ('no' if args.use_fixed_camera_params else 'yes'),
                "undistorted_image_format: png"  # mvs-texturing exhibits artifacts with JPG
            ]

            if has_alt:
                log.ODM_DEBUG(
                    "Altitude data detected, enabling it for GPS alignment")
                config.append("use_altitude_tag: yes")
                config.append("align_method: naive")
            else:
                config.append("align_method: orientation_prior")
                config.append("align_orientation_prior: vertical")

            if args.use_hybrid_bundle_adjustment:
                log.ODM_DEBUG("Enabling hybrid bundle adjustment")
                config.append("bundle_interval: 100")          # Bundle after adding 'bundle_interval' cameras
                config.append("bundle_new_points_ratio: 1.2")  # Bundle when (new points) / (bundled points) > bundle_new_points_ratio
                config.append("local_bundle_radius: 1")        # Max image graph distance for images to be included in local bundle adjustment

            if gcp_path:
                config.append("bundle_use_gcp: yes")
                io.copy(gcp_path, self.path("gcp_list.txt"))

            config = config + append_config

            # write config file
            log.ODM_DEBUG(config)
            config_filename = self.get_config_file_path()
            with open(config_filename, 'w') as fout:
                fout.write("\n".join(config))

            # check for image_groups.txt (split-merge)
            image_groups_file = os.path.join(args.project_path,
                                             "image_groups.txt")
            if io.file_exists(image_groups_file):
                io.copy(
                    image_groups_file,
                    os.path.join(self.opensfm_project_path,
                                 "image_groups.txt"))
                log.ODM_DEBUG("Copied image_groups.txt to OpenSfM directory")
        else:
            log.ODM_WARNING("%s already exists, not rerunning OpenSfM setup" %
                            list_path)
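Since the config list is joined with newlines, the file written by this setup is plain key: value YAML. A minimal sketch of the write (entries illustrative, truncated from the list above):

    config = [
        "use_exif_size: no",
        "feature_min_frames: 8000",
        "align_method: naive",
    ]

    with open('config.yaml', 'w') as fout:
        fout.write("\n".join(config))
    # config.yaml now holds one "key: value" line per entry, which is
    # valid YAML and is what OpenSfM loads as its per-project settings.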
Example #12
    def process(self, args, outputs):
        # get inputs
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']
        photos = reconstruction.photos

        if not photos:
            log.ODM_ERROR('Not enough photos in photos array to start OpenMVS')
            exit(1)

        # check if reconstruction was done before
        if not io.file_exists(tree.openmvs_model) or self.rerun():
            if io.dir_exists(tree.openmvs):
                shutil.rmtree(tree.openmvs)

            # export reconstruction from opensfm
            octx = OSFMContext(tree.opensfm)
            cmd = 'export_openmvs'
            octx.run(cmd)

            self.update_progress(10)

            depthmaps_dir = os.path.join(tree.openmvs, "depthmaps")
            if not io.dir_exists(depthmaps_dir):
                os.mkdir(depthmaps_dir)

            depthmap_resolution = get_depthmap_resolution(args, photos)
            if outputs["undist_image_max_size"] <= depthmap_resolution:
                resolution_level = 0
            else:
                resolution_level = int(
                    round(
                        math.log(outputs['undist_image_max_size'] /
                                 float(depthmap_resolution)) / math.log(2)))

            config = [
                " --resolution-level %s" % int(resolution_level),
                "--min-resolution %s" % depthmap_resolution,
                "--max-resolution %s" % int(outputs['undist_image_max_size']),
                "--max-threads %s" % args.max_concurrency,
                "--number-views-fuse 3",
                '-w "%s"' % depthmaps_dir,
                "-v 0",
            ]

            log.ODM_INFO(
                "Running dense reconstruction. This might take a while.")

            # TODO: add support for image masks

            system.run(
                '%s "%s" %s' %
                (context.omvs_densify_path,
                 os.path.join(tree.openmvs, 'scene.mvs'), ' '.join(config)))

            self.update_progress(85)

            # Filter points
            scene_dense = os.path.join(tree.openmvs, 'scene_dense.mvs')
            if os.path.exists(scene_dense):
                config = [
                    "--filter-point-cloud -1",
                    '-i "%s"' % scene_dense, "-v 0"
                ]
                system.run('%s %s' %
                           (context.omvs_densify_path, ' '.join(config)))
            else:
                log.ODM_WARNING(
                    "Cannot find scene_dense.mvs, dense reconstruction probably failed. Exiting..."
                )
                exit(1)

            self.update_progress(95)

            if args.optimize_disk_space:
                files = [
                    scene_dense,
                    os.path.join(tree.openmvs, 'scene_dense.ply'),
                    os.path.join(tree.openmvs,
                                 'scene_dense_dense_filtered.mvs'),
                    octx.path("undistorted", "tracks.csv"),
                    octx.path("undistorted", "reconstruction.json")
                ]
                for f in files:
                    if os.path.exists(f):
                        os.remove(f)
                shutil.rmtree(depthmaps_dir)
        else:
            log.ODM_WARNING(
                'Found a valid OpenMVS reconstruction file in: %s' %
                tree.openmvs_model)
Example #13
    def process(self, inputs, outputs):
        # Benchmarking
        start_time = system.now_raw()

        log.ODM_INFO('Running ODM DEM Cell')

        # get inputs
        args = self.inputs.args
        tree = self.inputs.tree
        las_model_found = io.file_exists(tree.odm_georeferencing_model_laz)

        # check if we rerun cell or not
        rerun_cell = (args.rerun is not None and
                      args.rerun == 'odm_dem') or \
                     (args.rerun_all) or \
                     (args.rerun_from is not None and
                      'odm_dem' in args.rerun_from)

        log.ODM_INFO('Classify: ' + str(args.pc_classify))
        log.ODM_INFO('Create DSM: ' + str(args.dsm))
        log.ODM_INFO('Create DTM: ' + str(args.dtm))
        log.ODM_INFO('DEM input file {0} found: {1}'.format(
            tree.odm_georeferencing_model_laz, str(las_model_found)))

        slope, cellsize = (0.15, 1)

        # define paths and create working directories
        odm_dem_root = tree.path('odm_dem')
        if not io.dir_exists(odm_dem_root):
            system.mkdir_p(odm_dem_root)

        if args.pc_classify and las_model_found:
            pc_classify_marker = os.path.join(odm_dem_root,
                                              'pc_classify_done.txt')

            if not io.file_exists(pc_classify_marker) or rerun_cell:
                log.ODM_INFO(
                    "Classifying {} using Simple Morphological Filter".format(
                        tree.odm_georeferencing_model_laz))
                commands.classify(tree.odm_georeferencing_model_laz,
                                  slope,
                                  cellsize,
                                  verbose=args.verbose)

                with open(pc_classify_marker, 'w') as f:
                    f.write('Classify: smrf\n')
                    f.write('Slope: {}\n'.format(slope))
                    f.write('Cellsize: {}\n'.format(cellsize))

        # Do we need to process anything here?
        if (args.dsm or args.dtm) and las_model_found:
            dsm_output_filename = os.path.join(odm_dem_root, 'dsm.tif')
            dtm_output_filename = os.path.join(odm_dem_root, 'dtm.tif')

            if (args.dtm and not io.file_exists(dtm_output_filename)) or \
                (args.dsm and not io.file_exists(dsm_output_filename)) or \
                rerun_cell:

                products = []
                if args.dsm: products.append('dsm')
                if args.dtm: products.append('dtm')

                resolution = gsd.cap_resolution(args.dem_resolution,
                                                tree.opensfm_reconstruction,
                                                gsd_error_estimate=-3,
                                                ignore_gsd=args.ignore_gsd)
                radius_steps = [(resolution / 100.0) / 2.0]
                for _ in range(args.dem_gapfill_steps - 1):
                    # 2 is arbitrary, maybe there's a better value?
                    radius_steps.append(radius_steps[-1] * 2)
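                # e.g. resolution=5 (cm) and dem_gapfill_steps=3 yield
                # radius_steps = [0.025, 0.05, 0.1] (in meters)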

                for product in products:
                    commands.create_dems(
                        [tree.odm_georeferencing_model_laz],
                        product,
                        radius=list(map(str, radius_steps)),
                        gapfill=True,
                        outdir=odm_dem_root,
                        resolution=resolution / 100.0,
                        decimation=args.dem_decimation,
                        verbose=args.verbose,
                        max_workers=get_max_concurrency_for_dem(
                            args.max_concurrency,
                            tree.odm_georeferencing_model_laz))

                    if args.crop > 0:
                        bounds_shapefile_path = os.path.join(
                            tree.odm_georeferencing,
                            'odm_georeferenced_model.bounds.shp')
                        if os.path.exists(bounds_shapefile_path):
                            Cropper.crop(
                                bounds_shapefile_path,
                                os.path.join(odm_dem_root,
                                             "{}.tif".format(product)),
                                {
                                    'TILED': 'YES',
                                    'COMPRESS': 'LZW',
                                    'BLOCKXSIZE': 512,
                                    'BLOCKYSIZE': 512,
                                    'NUM_THREADS': self.params.max_concurrency
                                })
            else:
                log.ODM_WARNING('Found existing outputs in: %s' % odm_dem_root)
        else:
            log.ODM_WARNING('DEM will not be generated')

        if args.time:
            system.benchmark(start_time, tree.benchmarking, 'Dem')

        log.ODM_INFO('Running ODM DEM Cell - Finished')
        return ecto.OK if args.end_with != 'odm_dem' else ecto.QUIT
Example #14
    def process(self, inputs, outputs):
        # Benchmarking
        start_time = system.now_raw()

        log.ODM_INFO('Running ODM DEM Cell')

        # get inputs
        args = self.inputs.args
        tree = self.inputs.tree
        las_model_found = io.file_exists(tree.odm_georeferencing_model_laz)

        # check if we rerun cell or not
        rerun_cell = (args.rerun is not None and
                      args.rerun == 'odm_dem') or \
                     (args.rerun_all) or \
                     (args.rerun_from is not None and
                      'odm_dem' in args.rerun_from)

        log.ODM_INFO('Classify: ' + str(args.pc_classify != "none"))
        log.ODM_INFO('Create DSM: ' + str(args.dsm))
        log.ODM_INFO('Create DTM: ' + str(args.dtm))
        log.ODM_INFO('DEM input file {0} found: {1}'.format(
            tree.odm_georeferencing_model_laz, str(las_model_found)))

        # Setup terrain parameters
        terrain_params_map = {
            'flatnonforest': (1, 3),
            'flatforest': (1, 2),
            'complexnonforest': (5, 2),
            'complexforest': (10, 2)
        }
        terrain_params = terrain_params_map[args.dem_terrain_type.lower()]
        slope, cellsize = terrain_params

        # define paths and create working directories
        odm_dem_root = tree.path('odm_dem')
        if not io.dir_exists(odm_dem_root):
            system.mkdir_p(odm_dem_root)

        if args.pc_classify != "none" and las_model_found:
            pc_classify_marker = os.path.join(odm_dem_root,
                                              'pc_classify_done.txt')

            if not io.file_exists(pc_classify_marker) or rerun_cell:
                log.ODM_INFO("Classifying {} using {}".format(
                    tree.odm_georeferencing_model_laz, args.pc_classify))
                commands.classify(tree.odm_georeferencing_model_laz,
                                  args.pc_classify == "smrf",
                                  slope,
                                  cellsize,
                                  approximate=args.dem_approximate,
                                  initialDistance=args.dem_initial_distance,
                                  verbose=args.verbose)
                with open(pc_classify_marker, 'w') as f:
                    f.write('Classify: {}\n'.format(args.pc_classify))
                    f.write('Slope: {}\n'.format(slope))
                    f.write('Cellsize: {}\n'.format(cellsize))
                    f.write('Approximate: {}\n'.format(args.dem_approximate))
                    f.write('InitialDistance: {}\n'.format(
                        args.dem_initial_distance))

        # Do we need to process anything here?
        if (args.dsm or args.dtm) and las_model_found:
            dsm_output_filename = os.path.join(odm_dem_root, 'dsm.tif')
            dtm_output_filename = os.path.join(odm_dem_root, 'dtm.tif')

            if (args.dtm and not io.file_exists(dtm_output_filename)) or \
                (args.dsm and not io.file_exists(dsm_output_filename)) or \
                rerun_cell:

                products = []
                if args.dsm: products.append('dsm')
                if args.dtm: products.append('dtm')

                radius_steps = [args.dem_resolution]
                for _ in range(args.dem_gapfill_steps - 1):
                    # 3 is arbitrary, maybe there's a better value?
                    radius_steps.append(radius_steps[-1] * 3)
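                # e.g. dem_resolution=0.1 and dem_gapfill_steps=3 yield
                # radius_steps = [0.1, 0.3, 0.9]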

                for product in products:
                    commands.create_dems([tree.odm_georeferencing_model_laz],
                                         product,
                                         radius=list(map(str, radius_steps)),
                                         gapfill=True,
                                         outdir=odm_dem_root,
                                         resolution=args.dem_resolution,
                                         maxsd=args.dem_maxsd,
                                         maxangle=args.dem_maxangle,
                                         decimation=args.dem_decimation,
                                         verbose=args.verbose)

                    if args.crop > 0:
                        bounds_shapefile_path = os.path.join(
                            tree.odm_georeferencing,
                            'odm_georeferenced_model.bounds.shp')
                        if os.path.exists(bounds_shapefile_path):
                            Cropper.crop(
                                bounds_shapefile_path,
                                os.path.join(odm_dem_root,
                                             "{}.tif".format(product)), {
                                                 'TILED': 'YES',
                                                 'COMPRESS': 'LZW',
                                                 'BLOCKXSIZE': 512,
                                                 'BLOCKYSIZE': 512,
                                                 'NUM_THREADS': 'ALL_CPUS'
                                             })
            else:
                log.ODM_WARNING('Found existing outputs in: %s' % odm_dem_root)
        else:
            log.ODM_WARNING('DEM will not be generated')

        if args.time:
            system.benchmark(start_time, tree.benchmarking, 'Dem')

        log.ODM_INFO('Running ODM DEM Cell - Finished')
        return ecto.OK if args.end_with != 'odm_dem' else ecto.QUIT
Example #15
    def process(self, args, outputs):
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']
        photos = reconstruction.photos

        if not photos:
            log.ODM_ERROR('Not enough photos in photos array to start OpenSfM')
            exit(1)

        octx = OSFMContext(tree.opensfm)
        octx.setup(args,
                   tree.dataset_raw,
                   photos,
                   reconstruction=reconstruction,
                   rerun=self.rerun())
        octx.extract_metadata(self.rerun())
        self.update_progress(20)
        octx.feature_matching(self.rerun())
        self.update_progress(30)
        octx.reconstruct(self.rerun())
        octx.extract_cameras(tree.path("cameras.json"), self.rerun())
        self.update_progress(70)

        # If we find a special flag file for split/merge we stop right here
        if os.path.exists(octx.path("split_merge_stop_at_reconstruction.txt")):
            log.ODM_INFO("Stopping OpenSfM early because we found: %s" %
                         octx.path("split_merge_stop_at_reconstruction.txt"))
            self.next_stage = None
            return

        if args.fast_orthophoto:
            output_file = octx.path('reconstruction.ply')
        elif args.use_opensfm_dense:
            output_file = tree.opensfm_model
        else:
            output_file = tree.opensfm_reconstruction

        updated_config_flag_file = octx.path('updated_config.txt')

        # Make sure it's at least the depthmap-resolution arg,
        # since the undistorted images are used for MVS
        outputs['undist_image_max_size'] = max(
            gsd.image_max_size(photos,
                               args.orthophoto_resolution,
                               tree.opensfm_reconstruction,
                               ignore_gsd=args.ignore_gsd,
                               has_gcp=reconstruction.has_gcp()),
            args.depthmap_resolution)
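        # e.g. if the GSD estimate would allow 2400px but
        # args.depthmap_resolution is 3200, the undistorted images are kept at
        # 3200px so the depthmap computation is not starved of pixels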

        if not io.file_exists(updated_config_flag_file) or self.rerun():
            octx.update_config({
                'undistorted_image_max_size':
                outputs['undist_image_max_size']
            })
            octx.touch(updated_config_flag_file)

        # These will be used for texturing / MVS
        undistorted_images_path = octx.path("undistorted", "images")

        if not io.dir_exists(undistorted_images_path) or self.rerun():
            octx.run('undistort')
        else:
            log.ODM_WARNING("Found an undistorted directory in %s" %
                            undistorted_images_path)

        self.update_progress(80)

        if reconstruction.multi_camera:
            # Dump band image lists
            log.ODM_INFO("Multiple bands found")
            for band in reconstruction.multi_camera:
                log.ODM_INFO("Exporting %s band" % band['name'])
                image_list_file = octx.path("image_list_%s.txt" %
                                            band['name'].lower())

                if not io.file_exists(image_list_file) or self.rerun():
                    with open(image_list_file, "w") as f:
                        f.write("\n".join([p.filename
                                           for p in band['photos']]))
                        log.ODM_INFO("Wrote %s" % image_list_file)
                else:
                    log.ODM_WARNING(
                        "Found a valid image list in %s for %s band" %
                        (image_list_file, band['name']))

                nvm_file = octx.path(
                    "undistorted",
                    "reconstruction_%s.nvm" % band['name'].lower())
                if not io.file_exists(nvm_file) or self.rerun():
                    octx.run('export_visualsfm --points --image_list "%s"' %
                             image_list_file)
                    os.rename(tree.opensfm_reconstruction_nvm, nvm_file)
                else:
                    log.ODM_WARNING(
                        "Found a valid NVM file in %s for %s band" %
                        (nvm_file, band['name']))

        if not io.file_exists(tree.opensfm_reconstruction_nvm) or self.rerun():
            octx.run('export_visualsfm --points')
        else:
            log.ODM_WARNING(
                'Found a valid OpenSfM NVM reconstruction file in: %s' %
                tree.opensfm_reconstruction_nvm)

        self.update_progress(85)

        # Skip dense reconstruction if necessary and export
        # sparse reconstruction instead
        if args.fast_orthophoto:
            if not io.file_exists(output_file) or self.rerun():
                octx.run('export_ply --no-cameras')
            else:
                log.ODM_WARNING("Found a valid PLY reconstruction in %s" %
                                output_file)

        elif args.use_opensfm_dense:
            if not io.file_exists(output_file) or self.rerun():
                octx.run('compute_depthmaps')
            else:
                log.ODM_WARNING("Found a valid dense reconstruction in %s" %
                                output_file)

        self.update_progress(90)

        if reconstruction.is_georeferenced() and (not io.file_exists(
                tree.opensfm_transformation) or self.rerun()):
            octx.run('export_geocoords --transformation --proj \'%s\'' %
                     reconstruction.georef.proj4())
        else:
            log.ODM_WARNING("Will skip exporting %s" %
                            tree.opensfm_transformation)
Example #16
    def process(self, inputs, outputs):

        # Benchmarking
        start_time = system.now_raw()

        log.ODM_INFO('Running MVS Texturing Cell')

        # get inputs
        args = inputs.args
        tree = inputs.tree
        reconstruction = inputs.reconstruction

        # define paths and create working directories
        system.mkdir_p(tree.odm_texturing)
        if not args.use_3dmesh: system.mkdir_p(tree.odm_25dtexturing)

        # check if we rerun cell or not
        rerun_cell = (args.rerun is not None and
                      args.rerun == 'mvs_texturing') or \
                     (args.rerun_all) or \
                     (args.rerun_from is not None and
                      'mvs_texturing' in args.rerun_from)

        runs = [{
            'out_dir': tree.odm_texturing,
            'model': tree.odm_mesh,
            'nadir': False
        }]

        if args.skip_3dmodel:
            runs = []

        if not args.use_3dmesh:
            runs += [{
                'out_dir': tree.odm_25dtexturing,
                'model': tree.odm_25dmesh,
                'nadir': True
            }]

        for r in runs:
            odm_textured_model_obj = os.path.join(r['out_dir'], tree.odm_textured_model_obj)

            if not io.file_exists(odm_textured_model_obj) or rerun_cell:
                log.ODM_DEBUG('Writing MVS Textured file in: %s'
                              % odm_textured_model_obj)

                # Format arguments to fit Mvs-Texturing app
                skipGeometricVisibilityTest = ""
                skipGlobalSeamLeveling = ""
                skipLocalSeamLeveling = ""
                skipHoleFilling = ""
                keepUnseenFaces = ""
                nadir = ""

                if self.params.skip_vis_test:
                    skipGeometricVisibilityTest = "--skip_geometric_visibility_test"
                if self.params.skip_glob_seam_leveling:
                    skipGlobalSeamLeveling = "--skip_global_seam_leveling"
                if self.params.skip_loc_seam_leveling:
                    skipLocalSeamLeveling = "--skip_local_seam_leveling"
                if self.params.skip_hole_fill:
                    skipHoleFilling = "--skip_hole_filling"
                if self.params.keep_unseen_faces:
                    keepUnseenFaces = "--keep_unseen_faces"
                if r['nadir']:
                    nadir = '--nadir_mode'

                # mvstex definitions
                kwargs = {
                    'bin': context.mvstex_path,
                    'out_dir': io.join_paths(r['out_dir'], "odm_textured_model"),
                    'model': r['model'],
                    'dataTerm': self.params.data_term,
                    'outlierRemovalType': self.params.outlier_rem_type,
                    'skipGeometricVisibilityTest': skipGeometricVisibilityTest,
                    'skipGlobalSeamLeveling': skipGlobalSeamLeveling,
                    'skipLocalSeamLeveling': skipLocalSeamLeveling,
                    'skipHoleFilling': skipHoleFilling,
                    'keepUnseenFaces': keepUnseenFaces,
                    'toneMapping': self.params.tone_mapping,
                    'nadirMode': nadir,
                    'nadirWeight': 2 ** args.texturing_nadir_weight - 1,
                    'nvm_file': io.join_paths(tree.opensfm, "reconstruction.nvm")
                }
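                # nadirWeight maps --texturing-nadir-weight onto mvstex's
                # exponential scale, e.g. a value of 16 becomes 2**16 - 1 = 65535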

                # Make sure tmp directory is empty
                mvs_tmp_dir = os.path.join(r['out_dir'], 'tmp')
                if io.dir_exists(mvs_tmp_dir):
                    log.ODM_INFO("Removing old tmp directory {}".format(mvs_tmp_dir))
                    shutil.rmtree(mvs_tmp_dir)

                # run texturing binary
                system.run('{bin} {nvm_file} {model} {out_dir} '
                           '-d {dataTerm} -o {outlierRemovalType} '
                           '-t {toneMapping} '
                           '{skipGeometricVisibilityTest} '
                           '{skipGlobalSeamLeveling} '
                           '{skipLocalSeamLeveling} '
                           '{skipHoleFilling} '
                           '{keepUnseenFaces} '
                           '{nadirMode} '
                           '-n {nadirWeight}'.format(**kwargs))
            else:
                log.ODM_WARNING('Found a valid ODM Texture file in: %s'
                                % odm_textured_model_obj)

        outputs.reconstruction = reconstruction

        if args.time:
            system.benchmark(start_time, tree.benchmarking, 'Texturing')

        log.ODM_INFO('Running MVS Texturing Cell - Finished')
        return ecto.OK if args.end_with != 'mvs_texturing' else ecto.QUIT
Example #17
    def process(self, args, outputs):
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']
        photos = reconstruction.photos

        if not photos:
            log.ODM_ERROR('Not enough photos in photos array to start OpenSfM')
            exit(1)

        octx = OSFMContext(tree.opensfm)
        octx.setup(args,
                   tree.dataset_raw,
                   photos,
                   gcp_path=tree.odm_georeferencing_gcp,
                   rerun=self.rerun())
        octx.extract_metadata(self.rerun())
        self.update_progress(20)
        octx.feature_matching(self.rerun())
        self.update_progress(30)
        octx.reconstruct(self.rerun())
        self.update_progress(70)

        # If we find a special flag file for split/merge we stop right here
        if os.path.exists(octx.path("split_merge_stop_at_reconstruction.txt")):
            log.ODM_INFO("Stopping OpenSfM early because we found: %s" %
                         octx.path("split_merge_stop_at_reconstruction.txt"))
            self.next_stage = None
            return

        if args.fast_orthophoto:
            output_file = octx.path('reconstruction.ply')
        elif args.use_opensfm_dense:
            output_file = tree.opensfm_model
        else:
            output_file = tree.opensfm_reconstruction

        # Always export VisualSFM's reconstruction and undistort images
        # as we'll use these for texturing (after GSD estimation and resizing)
        if not args.ignore_gsd:
            image_scale = gsd.image_scale_factor(args.orthophoto_resolution,
                                                 tree.opensfm_reconstruction)
        else:
            image_scale = 1.0

        if not io.file_exists(tree.opensfm_reconstruction_nvm) or self.rerun():
            octx.run(
                'export_visualsfm --image_extension png --scale_focal %s' %
                image_scale)
        else:
            log.ODM_WARNING(
                'Found a valid OpenSfM NVM reconstruction file in: %s' %
                tree.opensfm_reconstruction_nvm)

        # These will be used for texturing
        undistorted_images_path = octx.path("undistorted")

        if not io.dir_exists(undistorted_images_path) or self.rerun():
            octx.run('undistort --image_format png --image_scale %s' %
                     image_scale)
        else:
            log.ODM_WARNING("Found an undistorted directory in %s" %
                            undistorted_images_path)

        self.update_progress(80)

        # Skip dense reconstruction if necessary and export
        # sparse reconstruction instead
        if args.fast_orthophoto:
            if not io.file_exists(output_file) or self.rerun():
                octx.run('export_ply --no-cameras')
            else:
                log.ODM_WARNING("Found a valid PLY reconstruction in %s" %
                                output_file)

        elif args.use_opensfm_dense:
            # Undistort images at full scale in JPG
            # (TODO: we could compare the size of the PNGs if they are smaller than depthmap_resolution
            # and use those instead of re-exporting full resolution JPGs)
            if not io.file_exists(output_file) or self.rerun():
                octx.run('undistort')
                octx.run('compute_depthmaps')
            else:
                log.ODM_WARNING("Found a valid dense reconstruction in %s" %
                                output_file)

        # check if reconstruction was exported to bundler before
        octx.export_bundler(tree.opensfm_bundle_list, self.rerun())

        self.update_progress(90)

        if reconstruction.georef and (not io.file_exists(
                tree.opensfm_transformation) or self.rerun()):
            octx.run('export_geocoords --transformation --proj \'%s\'' %
                     reconstruction.georef.projection.srs)
        else:
            log.ODM_WARNING("Will skip exporting %s" %
                            tree.opensfm_transformation)
Example #18
    def process(self, args, outputs):
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']

        dem_input = tree.odm_georeferencing_model_laz
        pc_model_found = io.file_exists(dem_input)
        ignore_resolution = False
        pseudo_georeference = False
        
        if not reconstruction.is_georeferenced():
            log.ODM_WARNING("Not georeferenced, using ungeoreferenced point cloud...")
            ignore_resolution = True
            pseudo_georeference = True

        resolution = gsd.cap_resolution(args.dem_resolution, tree.opensfm_reconstruction, 
                        gsd_error_estimate=-3, 
                        ignore_gsd=args.ignore_gsd,
                        ignore_resolution=ignore_resolution,
                        has_gcp=reconstruction.has_gcp())

        log.ODM_INFO('Classify: ' + str(args.pc_classify))
        log.ODM_INFO('Create DSM: ' + str(args.dsm))
        log.ODM_INFO('Create DTM: ' + str(args.dtm))
        log.ODM_INFO('DEM input file {0} found: {1}'.format(dem_input, str(pc_model_found)))

        # define paths and create working directories
        odm_dem_root = tree.path('odm_dem')
        if not io.dir_exists(odm_dem_root):
            system.mkdir_p(odm_dem_root)

        if args.pc_classify and pc_model_found:
            pc_classify_marker = os.path.join(odm_dem_root, 'pc_classify_done.txt')

            if not io.file_exists(pc_classify_marker) or self.rerun():
                log.ODM_INFO("Classifying {} using Simple Morphological Filter".format(dem_input))
                commands.classify(dem_input,
                                  args.smrf_scalar, 
                                  args.smrf_slope, 
                                  args.smrf_threshold, 
                                  args.smrf_window,
                                  verbose=args.verbose
                                )

                with open(pc_classify_marker, 'w') as f:
                    f.write('Classify: smrf\n')
                    f.write('Scalar: {}\n'.format(args.smrf_scalar))
                    f.write('Slope: {}\n'.format(args.smrf_slope))
                    f.write('Threshold: {}\n'.format(args.smrf_threshold))
                    f.write('Window: {}\n'.format(args.smrf_window))
            
        progress = 20
        self.update_progress(progress)

        if args.pc_rectify:
            commands.rectify(dem_input, args.debug)

        # Do we need to process anything here?
        if (args.dsm or args.dtm) and pc_model_found:
            dsm_output_filename = os.path.join(odm_dem_root, 'dsm.tif')
            dtm_output_filename = os.path.join(odm_dem_root, 'dtm.tif')

            if (args.dtm and not io.file_exists(dtm_output_filename)) or \
                (args.dsm and not io.file_exists(dsm_output_filename)) or \
                self.rerun():

                products = []

                if args.dsm or (args.dtm and args.dem_euclidean_map): products.append('dsm')
                if args.dtm: products.append('dtm')

                radius_steps = [(resolution / 100.0) / 2.0]
                for _ in range(args.dem_gapfill_steps - 1):
                    radius_steps.append(radius_steps[-1] * 2) # 2 is arbitrary, maybe there's a better value?
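                # e.g. resolution=10 (cm) and dem_gapfill_steps=3 yield
                # radius_steps = [0.05, 0.1, 0.2] (in meters)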

                for product in products:
                    commands.create_dem(
                            dem_input,
                            product,
                            output_type='idw' if product == 'dtm' else 'max',
                            radiuses=list(map(str, radius_steps)),
                            gapfill=args.dem_gapfill_steps > 0,
                            outdir=odm_dem_root,
                            resolution=resolution / 100.0,
                            decimation=args.dem_decimation,
                            verbose=args.verbose,
                            max_workers=args.max_concurrency,
                            keep_unfilled_copy=args.dem_euclidean_map
                        )

                    dem_geotiff_path = os.path.join(odm_dem_root, "{}.tif".format(product))
                    bounds_file_path = os.path.join(tree.odm_georeferencing, 'odm_georeferenced_model.bounds.gpkg')

                    if args.crop > 0:
                        # Crop DEM
                        Cropper.crop(bounds_file_path, dem_geotiff_path, utils.get_dem_vars(args), keep_original=not args.optimize_disk_space)

                    if args.dem_euclidean_map:
                        unfilled_dem_path = io.related_file_path(dem_geotiff_path, postfix=".unfilled")
                        
                        if args.crop > 0:
                            # Crop unfilled DEM
                            Cropper.crop(bounds_file_path, unfilled_dem_path, utils.get_dem_vars(args), keep_original=not args.optimize_disk_space)

                        commands.compute_euclidean_map(unfilled_dem_path, 
                                            io.related_file_path(dem_geotiff_path, postfix=".euclideand"), 
                                            overwrite=True)

                    if pseudo_georeference:
                        pseudogeo.add_pseudo_georeferencing(dem_geotiff_path)

                    if args.tiles:
                        generate_dem_tiles(dem_geotiff_path, tree.path("%s_tiles" % product), args.max_concurrency)
                    
                    progress += 30
                    self.update_progress(progress)
            else:
                log.ODM_WARNING('Found existing outputs in: %s' % odm_dem_root)
        else:
            log.ODM_WARNING('DEM will not be generated')
Example #19
    def process(self, args, outputs):
        # get inputs
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']
        photos = reconstruction.photos

        if not photos:
            log.ODM_ERROR('Not enough photos in photos array to start MVE')
            exit(1)

        # check if reconstruction was done before
        if not io.file_exists(tree.mve_model) or self.rerun():
            # cleanup if a rerun
            if io.dir_exists(tree.mve_path) and self.rerun():
                shutil.rmtree(tree.mve_path)

            # make bundle directory
            if not io.file_exists(tree.mve_bundle):
                system.mkdir_p(tree.mve_path)
                system.mkdir_p(io.join_paths(tree.mve_path, 'bundle'))

                octx = OSFMContext(tree.opensfm)
                octx.save_absolute_image_list_to(tree.mve_image_list)
                io.copy(tree.opensfm_bundle, tree.mve_bundle)

            # mve makescene wants the output directory
            # to not exist before executing it (otherwise it
            # will prompt the user for confirmation)
            if io.dir_exists(tree.mve):
                shutil.rmtree(tree.mve)

            # run mve makescene
            if not io.dir_exists(tree.mve_views):
                system.run('%s %s %s' %
                           (context.makescene_path, tree.mve_path, tree.mve),
                           env_vars={'OMP_NUM_THREADS': args.max_concurrency})

            # Compute mve output scale based on depthmap_resolution
            max_width = 0
            max_height = 0
            for photo in photos:
                max_width = max(photo.width, max_width)
                max_height = max(photo.height, max_height)

            max_pixels = args.depthmap_resolution * args.depthmap_resolution
            if max_width * max_height <= max_pixels:
                mve_output_scale = 0
            else:
                ratio = float(max_width * max_height) / float(max_pixels)
                mve_output_scale = int(
                    math.ceil(math.log(ratio) / math.log(4.0)))
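            # e.g. 6000x4000 photos with depthmap_resolution=640 give
            # ratio = 24000000 / 409600 ~= 58.6 and
            # mve_output_scale = ceil(log(58.6) / log(4)) = 3; each scale step
            # quarters the pixel count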

            dmrecon_config = [
                "-s%s" % mve_output_scale,
                "--progress=silent",
                "--local-neighbors=2",
            ]

            # Run MVE's dmrecon
            # ODM dandelion ASCII art banner
            log.ODM_INFO('')
            log.ODM_INFO('                                    ,*/**')
            log.ODM_INFO('                                  ,*@%*/@%*')
            log.ODM_INFO('                                ,/@%******@&*.')
            log.ODM_INFO('                              ,*@&*********/@&*')
            log.ODM_INFO('                            ,*@&**************@&*')
            log.ODM_INFO('                          ,/@&******************@&*.')
            log.ODM_INFO('                        ,*@&*********************/@&*')
            log.ODM_INFO('                      ,*@&**************************@&*.')
            log.ODM_INFO('                    ,/@&******************************&&*,')
            log.ODM_INFO('                  ,*&&**********************************@&*.')
            log.ODM_INFO('                ,*@&**************************************@&*.')
            log.ODM_INFO('              ,*@&***************#@@@@@@@@@%****************&&*,')
            log.ODM_INFO('            .*&&***************&@@@@@@@@@@@@@@****************@@*.')
            log.ODM_INFO('          .*@&***************&@@@@@@@@@@@@@@@@@%****(@@%********@@*.')
            log.ODM_INFO('        .*@@***************%@@@@@@@@@@@@@@@@@@@@@#****&@@@@%******&@*,')
            log.ODM_INFO('      .*&@****************@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@/*****@@*.')
            log.ODM_INFO('    .*@@****************@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@%*************@@*.')
            log.ODM_INFO('  .*@@****/***********@@@@@&**(@@@@@@@@@@@@@@@@@@@@@@@#*****************%@*,')
            log.ODM_INFO(' */@*******@*******#@@@@%*******/@@@@@@@@@@@@@@@@@@@@********************/@(,')
            log.ODM_INFO(' ,*@(********&@@@@@@#**************/@@@@@@@#**(@@&/**********************@&*')
            log.ODM_INFO('   *#@/*******************************@@@@@***&@&**********************&@*,')
            log.ODM_INFO('     *#@#******************************&@@@***@#*********************&@*,')
            log.ODM_INFO('       */@#*****************************@@@************************@@*.')
            log.ODM_INFO('         *#@/***************************/@@/*********************%@*,')
            log.ODM_INFO('           *#@#**************************#@@%******************%@*,')
            log.ODM_INFO('             */@#*************************(@@@@@@@&%/********&@*.')
            log.ODM_INFO('               *(@(*********************************/%@@%**%@*,')
            log.ODM_INFO('                 *(@%************************************%@**')
            log.ODM_INFO('                   **@%********************************&@*,')
            log.ODM_INFO('                     *(@(****************************%@/*')
            log.ODM_INFO('                       ,(@%************************#@/*')
            log.ODM_INFO('                         ,*@%********************&@/,')
            log.ODM_INFO('                           */@#****************#@/*')
            log.ODM_INFO('                             ,/@&************#@/*')
            log.ODM_INFO('                               ,*@&********%@/,')
            log.ODM_INFO('                                 */@#****(@/*')
            log.ODM_INFO('                                   ,/@@@@(*')
            log.ODM_INFO('                                     .**,')
            log.ODM_INFO('')
            log.ODM_INFO("Running dense reconstruction. This might take a while. "
                         "Please be patient, the process is not dead or hung.")
            log.ODM_INFO("                              Process is running")

            # TODO: find out why MVE is crashing at random
            # https://gist.github.com/pierotofy/c49447e86a187e8ede50fb302cf5a47b
            # MVE *seems* to have a race condition, triggered randomly, regardless of dataset
            # size. Core dump stack trace points to patch_sampler.cc:109.
            # Hard to reproduce. Removing -O3 optimizations from dmrecon
            # seems to reduce the chances of hitting the bug.
            # Temporary workaround is to retry the reconstruction until we get it right
            # (up to a certain number of retries).
            retry_count = 1
            while retry_count < 10:
                try:
                    system.run(
                        '%s %s %s' % (context.dmrecon_path,
                                      ' '.join(dmrecon_config), tree.mve),
                        env_vars={'OMP_NUM_THREADS': args.max_concurrency})
                    break
                except Exception as e:
                    if str(e) in ("Child returned 134", "Child returned 1"):
                        retry_count += 1
                        log.ODM_WARNING(
                            "Caught error code, retrying attempt #%s" %
                            retry_count)
                    else:
                        raise e

            scene2pset_config = ["-F%s" % mve_output_scale]

            # run scene2pset
            system.run('%s %s "%s" "%s"' %
                       (context.scene2pset_path, ' '.join(scene2pset_config),
                        tree.mve, tree.mve_model),
                       env_vars={'OMP_NUM_THREADS': args.max_concurrency})
        else:
            log.ODM_WARNING('Found a valid MVE reconstruction file in: %s' %
                            tree.mve_model)
Example #20
    def process(self, args, outputs):
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']

        class nonloc:
            runs = []
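        # Mutable holder so the nested add_run() below can append to runs;
        # a Python 2-compatible stand-in for the 'nonlocal' keyword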

        def add_run(nvm_file, primary=True, band=None):
            subdir = ""
            if not primary and band is not None:
                subdir = band

            if not args.skip_3dmodel and (primary or args.use_3dmesh):
                nonloc.runs += [{
                    'out_dir': os.path.join(tree.odm_texturing, subdir),
                    'model': tree.odm_mesh,
                    'nadir': False,
                    'nvm_file': nvm_file
                }]

            if not args.use_3dmesh:
                nonloc.runs += [{
                    'out_dir': os.path.join(tree.odm_25dtexturing, subdir),
                    'model': tree.odm_25dmesh,
                    'nadir': True,
                    'nvm_file': nvm_file
                }]

        if reconstruction.multi_camera:
            for band in reconstruction.multi_camera:
                primary = band == reconstruction.multi_camera[0]
                nvm_file = os.path.join(tree.opensfm, "undistorted", "reconstruction_%s.nvm" % band['name'].lower())
                add_run(nvm_file, primary, band['name'].lower())
        else:
            add_run(tree.opensfm_reconstruction_nvm)

        progress_per_run = 100.0 / len(nonloc.runs)
        progress = 0.0

        for r in nonloc.runs:
            if not io.dir_exists(r['out_dir']):
                system.mkdir_p(r['out_dir'])

            odm_textured_model_obj = os.path.join(r['out_dir'], tree.odm_textured_model_obj)

            if not io.file_exists(odm_textured_model_obj) or self.rerun():
                log.ODM_INFO('Writing MVS Textured file in: %s'
                              % odm_textured_model_obj)

                # Format arguments to fit Mvs-Texturing app
                skipGeometricVisibilityTest = ""
                skipGlobalSeamLeveling = ""
                skipLocalSeamLeveling = ""
                skipHoleFilling = ""
                keepUnseenFaces = ""
                nadir = ""

                if self.params.get('skip_vis_test'):
                    skipGeometricVisibilityTest = "--skip_geometric_visibility_test"
                if self.params.get('skip_glob_seam_leveling'):
                    skipGlobalSeamLeveling = "--skip_global_seam_leveling"
                if self.params.get('skip_loc_seam_leveling'):
                    skipLocalSeamLeveling = "--skip_local_seam_leveling"
                if self.params.get('skip_hole_fill'):
                    skipHoleFilling = "--skip_hole_filling"
                if self.params.get('keep_unseen_faces'):
                    keepUnseenFaces = "--keep_unseen_faces"
                if r['nadir']:
                    nadir = '--nadir_mode'

                # mvstex definitions
                kwargs = {
                    'bin': context.mvstex_path,
                    'out_dir': io.join_paths(r['out_dir'], "odm_textured_model"),
                    'model': r['model'],
                    'dataTerm': self.params.get('data_term'),
                    'outlierRemovalType': self.params.get('outlier_rem_type'),
                    'skipGeometricVisibilityTest': skipGeometricVisibilityTest,
                    'skipGlobalSeamLeveling': skipGlobalSeamLeveling,
                    'skipLocalSeamLeveling': skipLocalSeamLeveling,
                    'skipHoleFilling': skipHoleFilling,
                    'keepUnseenFaces': keepUnseenFaces,
                    'toneMapping': self.params.get('tone_mapping'),
                    'nadirMode': nadir,
                    'nadirWeight': 2 ** args.texturing_nadir_weight - 1,
                    'nvm_file': r['nvm_file']
                }

                mvs_tmp_dir = os.path.join(r['out_dir'], 'tmp')

                # Make sure tmp directory is empty
                if io.dir_exists(mvs_tmp_dir):
                    log.ODM_INFO("Removing old tmp directory {}".format(mvs_tmp_dir))
                    shutil.rmtree(mvs_tmp_dir)

                # run texturing binary
                system.run('{bin} {nvm_file} {model} {out_dir} '
                        '-d {dataTerm} -o {outlierRemovalType} '
                        '-t {toneMapping} '
                        '{skipGeometricVisibilityTest} '
                        '{skipGlobalSeamLeveling} '
                        '{skipLocalSeamLeveling} '
                        '{skipHoleFilling} '
                        '{keepUnseenFaces} '
                        '{nadirMode} '
                        '-n {nadirWeight}'.format(**kwargs))
                
                progress += progress_per_run
                self.update_progress(progress)
            else:
                log.ODM_WARNING('Found a valid ODM Texture file in: %s'
                                % odm_textured_model_obj)
Example #21
    def setup(self,
              args,
              images_path,
              reconstruction,
              append_config=None,
              rerun=False):
        """
        Set up an OpenSfM project
        """
        # Avoid the mutable default argument pitfall
        if append_config is None:
            append_config = []

        if rerun and io.dir_exists(self.opensfm_project_path):
            shutil.rmtree(self.opensfm_project_path)

        if not io.dir_exists(self.opensfm_project_path):
            system.mkdir_p(self.opensfm_project_path)

        list_path = os.path.join(self.opensfm_project_path, 'image_list.txt')
        if not io.file_exists(list_path) or rerun:

            if reconstruction.multi_camera:
                photos = get_photos_by_band(reconstruction.multi_camera,
                                            args.primary_band)
                if len(photos) < 1:
                    raise Exception("Not enough images in selected band %s" %
                                    args.primary_band.lower())
                log.ODM_INFO("Reconstruction will use %s images from %s band" %
                             (len(photos), args.primary_band.lower()))
            else:
                photos = reconstruction.photos

            # create file list
            num_zero_alt = 0
            has_alt = True
            has_gps = False
            with open(list_path, 'w') as fout:
                for photo in photos:
                    if photo.altitude is None:
                        has_alt = False
                    elif photo.altitude == 0:
                        num_zero_alt += 1
                    if photo.latitude is not None and photo.longitude is not None:
                        has_gps = True

                    fout.write('%s\n' %
                               os.path.join(images_path, photo.filename))

            # If altitude tags are present, distrust them when more than 5% of
            # images report an altitude of exactly zero
            if has_alt and num_zero_alt / len(photos) > 0.05:
                log.ODM_WARNING(
                    "More than 5% of images have zero altitude; this might indicate that the images have no usable altitude information"
                )
                has_alt = False

            # check for image_groups.txt (split-merge)
            image_groups_file = os.path.join(args.project_path,
                                             "image_groups.txt")
            if 'split_image_groups_is_set' in args:
                image_groups_file = os.path.abspath(args.split_image_groups)

            if io.file_exists(image_groups_file):
                dst_groups_file = os.path.join(self.opensfm_project_path,
                                               "image_groups.txt")
                io.copy(image_groups_file, dst_groups_file)
                log.ODM_INFO("Copied %s to %s" %
                             (image_groups_file, dst_groups_file))

            # check for cameras
            if args.cameras:
                try:
                    camera_overrides = camera.get_opensfm_camera_models(
                        args.cameras)
                    with open(
                            os.path.join(self.opensfm_project_path,
                                         "camera_models_overrides.json"),
                            'w') as f:
                        f.write(json.dumps(camera_overrides))
                    log.ODM_INFO(
                        "Wrote camera_models_overrides.json to OpenSfM directory"
                    )
                except Exception as e:
                    log.ODM_WARNING(
                        "Cannot set camera_models_overrides.json: %s" % str(e))

            # Check image masks
            masks = []
            for p in photos:
                if p.mask is not None:
                    masks.append(
                        (p.filename, os.path.join(images_path, p.mask)))

            if masks:
                log.ODM_INFO("Found %s image masks" % len(masks))
                with open(
                        os.path.join(self.opensfm_project_path,
                                     "mask_list.txt"), 'w') as f:
                    for fname, mask in masks:
                        f.write("{} {}\n".format(fname, mask))

            # Compute feature_process_size
            feature_process_size = 2048  # default

            if ('resize_to_is_set' in args) and args.resize_to > 0:
                # Legacy
                log.ODM_WARNING(
                    "Legacy option --resize-to (this might be removed in a future version). Use --feature-quality instead."
                )
                feature_process_size = int(args.resize_to)
            else:
                feature_quality_scale = {
                    'ultra': 1,
                    'high': 0.5,
                    'medium': 0.25,
                    'low': 0.125,
                    'lowest': 0.0675,
                }

                max_dim = find_largest_photo_dim(photos)

                if max_dim > 0:
                    log.ODM_INFO("Maximum photo dimensions: %spx" %
                                 str(max_dim))
                    feature_process_size = int(
                        max_dim * feature_quality_scale[args.feature_quality])
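                    # e.g. max_dim=6000 with --feature-quality high (scale 0.5)
                    # gives feature_process_size = 3000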
                    log.ODM_INFO(
                        "Photo dimensions for feature extraction: %ipx" %
                        feature_process_size)
                else:
                    log.ODM_WARNING(
                        "Cannot compute max image dimensions, going with defaults"
                    )

            # create config file for OpenSfM
            if args.matcher_neighbors > 0:
                matcher_graph_rounds = 0
                matcher_neighbors = args.matcher_neighbors
            else:
                matcher_graph_rounds = 50
                matcher_neighbors = 0

            config = [
                "use_exif_size: no",
                "flann_algorithm: KDTREE",  # more stable, faster than KMEANS
                "feature_process_size: %s" % feature_process_size,
                "feature_min_frames: %s" % args.min_num_features,
                "processes: %s" % args.max_concurrency,
                "matching_gps_neighbors: %s" % matcher_neighbors,
                "matching_gps_distance: 0",
                "matching_graph_rounds: %s" % matcher_graph_rounds,
                "optimize_camera_parameters: %s" %
                ('no' if args.use_fixed_camera_params else 'yes'),
                "reconstruction_algorithm: %s" % (args.sfm_algorithm),
                "undistorted_image_format: tif",
                "bundle_outlier_filtering_type: AUTO",
                "sift_peak_threshold: 0.066",
                "align_orientation_prior: vertical",
                "triangulation_type: ROBUST",
                "retriangulation_ratio: 2",
            ]

            if args.camera_lens != 'auto':
                config.append("camera_projection_type: %s" %
                              args.camera_lens.upper())

            matcher_type = args.matcher_type
            feature_type = args.feature_type.upper()

            osfm_matchers = {
                "bow": "WORDS",
                "flann": "FLANN",
                "bruteforce": "BRUTEFORCE"
            }

            if not has_gps and 'matcher_type_is_set' not in args:
                log.ODM_INFO(
                    "No GPS information, using BOW matching by default (you can override this by setting --matcher-type explicitly)"
                )
                matcher_type = "bow"

            if matcher_type == "bow":
                # Cannot use anything other than HAHOG with BOW
                if feature_type != "HAHOG":
                    log.ODM_WARNING(
                        "Using BOW matching, will use HAHOG feature type, not SIFT"
                    )
                    feature_type = "HAHOG"

            config.append("matcher_type: %s" % osfm_matchers[matcher_type])

            # GPU acceleration?
            if has_gpu(args):
                max_photo = find_largest_photo(photos)
                w, h = max_photo.width, max_photo.height
                if w > h:
                    h = int((h / w) * feature_process_size)
                    w = int(feature_process_size)
                else:
                    w = int((w / h) * feature_process_size)
                    h = int(feature_process_size)
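                # e.g. a 6000x4000 photo with feature_process_size=2448 is
                # scaled (preserving aspect ratio) to 2448x1632 before checking
                # whether the GPU can handle that texture size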

                if has_popsift_and_can_handle_texsize(
                        w, h) and feature_type == "SIFT":
                    log.ODM_INFO("Using GPU for extracting SIFT features")
                    feature_type = "SIFT_GPU"
                    self.gpu_sift_feature_extraction = True

            config.append("feature_type: %s" % feature_type)

            if has_alt:
                log.ODM_INFO(
                    "Altitude data detected, enabling it for GPS alignment")
                config.append("use_altitude_tag: yes")

            gcp_path = reconstruction.gcp.gcp_path
            if has_alt or gcp_path:
                config.append("align_method: auto")
            else:
                config.append("align_method: orientation_prior")

            if args.use_hybrid_bundle_adjustment:
                log.ODM_INFO("Enabling hybrid bundle adjustment")
                config.append(
                    "bundle_interval: 100"
                )  # Bundle after adding 'bundle_interval' cameras
                config.append(
                    "bundle_new_points_ratio: 1.2"
                )  # Bundle when (new points) / (bundled points) > bundle_new_points_ratio
                config.append(
                    "local_bundle_radius: 1"
                )  # Max image graph distance for images to be included in local bundle adjustment
            else:
                config.append("local_bundle_radius: 0")

            if gcp_path:
                config.append("bundle_use_gcp: yes")
                if not args.force_gps:
                    config.append("bundle_use_gps: no")
                else:
                    config.append("bundle_compensate_gps_bias: yes")

                io.copy(gcp_path, self.path("gcp_list.txt"))

            config = config + append_config

            # write config file
            log.ODM_INFO(config)
            config_filename = self.get_config_file_path()
            with open(config_filename, 'w') as fout:
                fout.write("\n".join(config))

            # We impose our own reference_lla
            if reconstruction.is_georeferenced():
                self.write_reference_lla(
                    reconstruction.georef.utm_east_offset,
                    reconstruction.georef.utm_north_offset,
                    reconstruction.georef.proj4())
        else:
            log.ODM_WARNING("%s already exists, not rerunning OpenSfM setup" %
                            list_path)
Example #22
    def process(self, args, outputs):
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']
        photos = reconstruction.photos

        outputs['large'] = len(photos) > args.split

        if outputs['large']:
            # If we have a cluster address, we'll use a distributed workflow
            local_workflow = not bool(args.sm_cluster)

            octx = OSFMContext(tree.opensfm)
            split_done_file = octx.path("split_done.txt")

            if not io.file_exists(split_done_file) or self.rerun():
                orig_max_concurrency = args.max_concurrency
                if not local_workflow:
                    args.max_concurrency = max(1, args.max_concurrency - 1)
                    log.ODM_INFO(
                        "Setting max-concurrency to %s to better handle remote splits"
                        % args.max_concurrency)

                log.ODM_INFO(
                    "Large dataset detected (%s photos) and split set at %s. Preparing split merge."
                    % (len(photos), args.split))
                config = [
                    "submodels_relpath: ../submodels/opensfm",
                    "submodel_relpath_template: ../submodels/submodel_%04d/opensfm",
                    "submodel_images_relpath_template: ../submodels/submodel_%04d/images",
                    "submodel_size: %s" % args.split,
                    "submodel_overlap: %s" % args.split_overlap,
                ]

                octx.setup(args,
                           tree.dataset_raw,
                           photos,
                           gcp_path=reconstruction.gcp.gcp_path,
                           append_config=config,
                           rerun=self.rerun())
                octx.extract_metadata(self.rerun())

                self.update_progress(5)

                if local_workflow:
                    octx.feature_matching(self.rerun())

                self.update_progress(20)

                # Create submodels
                if not io.dir_exists(tree.submodels_path) or self.rerun():
                    if io.dir_exists(tree.submodels_path):
                        log.ODM_WARNING(
                            "Removing existing submodels directory: %s" %
                            tree.submodels_path)
                        shutil.rmtree(tree.submodels_path)

                    octx.run("create_submodels")
                else:
                    log.ODM_WARNING(
                        "Submodels directory already exist at: %s" %
                        tree.submodels_path)

                # Find paths of all submodels
                mds = metadataset.MetaDataSet(tree.opensfm)
                submodel_paths = [
                    os.path.abspath(p) for p in mds.get_submodel_paths()
                ]

                for sp in submodel_paths:
                    sp_octx = OSFMContext(sp)

                    # Copy filtered GCP file if needed
                    # One in OpenSfM's directory, one in the submodel project directory
                    if reconstruction.gcp and reconstruction.gcp.exists():
                        submodel_gcp_file = os.path.abspath(
                            sp_octx.path("..", "gcp_list.txt"))
                        submodel_images_dir = os.path.abspath(
                            sp_octx.path("..", "images"))

                        if reconstruction.gcp.make_filtered_copy(
                                submodel_gcp_file, submodel_images_dir):
                            log.ODM_INFO("Copied filtered GCP file to %s" %
                                         submodel_gcp_file)
                            io.copy(
                                submodel_gcp_file,
                                os.path.abspath(sp_octx.path("gcp_list.txt")))
                        else:
                            log.ODM_INFO(
                                "No GCP will be copied for %s, not enough images in the submodel are referenced by the GCP"
                                % sp_octx.name())

                # Reconstruct each submodel
                log.ODM_INFO(
                    "Dataset has been split into %s submodels. Reconstructing each submodel..."
                    % len(submodel_paths))
                self.update_progress(25)

                if local_workflow:
                    for sp in submodel_paths:
                        log.ODM_INFO("Reconstructing %s" % sp)
                        OSFMContext(sp).reconstruct(self.rerun())
                else:
                    lre = LocalRemoteExecutor(args.sm_cluster, self.rerun())
                    lre.set_projects([
                        os.path.abspath(os.path.join(p, ".."))
                        for p in submodel_paths
                    ])
                    lre.run_reconstruction()

                self.update_progress(50)

                # Align
                octx.align_reconstructions(self.rerun())

                self.update_progress(55)

                # Aligned reconstruction is in reconstruction.aligned.json
                # We need to rename it to reconstruction.json
                remove_paths = []
                for sp in submodel_paths:
                    sp_octx = OSFMContext(sp)

                    aligned_recon = sp_octx.path('reconstruction.aligned.json')
                    unaligned_recon = sp_octx.path(
                        'reconstruction.unaligned.json')
                    main_recon = sp_octx.path('reconstruction.json')

                    if io.file_exists(main_recon) and io.file_exists(
                            unaligned_recon) and not self.rerun():
                        log.ODM_INFO("Submodel %s has already been aligned." %
                                     sp_octx.name())
                        continue

                    if not io.file_exists(aligned_recon):
                        log.ODM_WARNING(
                            "Submodel %s does not have an aligned reconstruction (%s). "
                            "This could mean that the submodel could not be reconstructed "
                            " (are there enough features to reconstruct it?). Skipping."
                            % (sp_octx.name(), aligned_recon))
                        remove_paths.append(sp)
                        continue

                    if io.file_exists(main_recon):
                        shutil.move(main_recon, unaligned_recon)

                    shutil.move(aligned_recon, main_recon)
                    log.ODM_INFO("%s is now %s" % (aligned_recon, main_recon))

                # Remove invalid submodels
                submodel_paths = [
                    p for p in submodel_paths if p not in remove_paths
                ]

                # Run ODM toolchain for each submodel
                if local_workflow:
                    for sp in submodel_paths:
                        sp_octx = OSFMContext(sp)

                        log.ODM_INFO("========================")
                        log.ODM_INFO("Processing %s" % sp_octx.name())
                        log.ODM_INFO("========================")

                        argv = get_submodel_argv(args.name,
                                                 tree.submodels_path,
                                                 sp_octx.name())

                        # Re-run the ODM toolchain on the submodel
                        system.run(" ".join(map(quote, argv)),
                                   env_vars=os.environ.copy())
                else:
                    lre.set_projects([
                        os.path.abspath(os.path.join(p, ".."))
                        for p in submodel_paths
                    ])
                    lre.run_toolchain()

                # Restore max_concurrency value
                args.max_concurrency = orig_max_concurrency

                octx.touch(split_done_file)
            else:
                log.ODM_WARNING('Found a split done file in: %s' %
                                split_done_file)
        else:
            log.ODM_INFO("Normal dataset, will process all at once.")
            self.progress = 0.0
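The stage above gates all split-merge work on a single comparison: the dataset is "large" only when the photo count exceeds --split, and only then are the submodel entries appended to the OpenSfM config. A condensed sketch of that decision (split_merge_config is a hypothetical helper):

def split_merge_config(num_photos, split, split_overlap):
    # Returns None for a normal dataset, or the extra OpenSfM
    # config entries for a split-merge run.
    if num_photos <= split:
        return None
    return [
        "submodels_relpath: ../submodels/opensfm",
        "submodel_relpath_template: ../submodels/submodel_%04d/opensfm",
        "submodel_images_relpath_template: ../submodels/submodel_%04d/images",
        "submodel_size: %s" % split,
        "submodel_overlap: %s" % split_overlap,
    ]

# split_merge_config(1200, 999, 150) -> submodel config entries
# split_merge_config(400, 999, 150)  -> None (process all at once)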
Example No. 23
    def process(self, args, outputs):
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']
        photos = reconstruction.photos

        if not photos:
            log.ODM_ERROR('Not enough photos in photos array to start OpenSfM')
            exit(1)

        octx = OSFMContext(tree.opensfm)
        octx.setup(args,
                   tree.dataset_raw,
                   photos,
                   gcp_path=reconstruction.gcp.gcp_path,
                   rerun=self.rerun())
        octx.extract_metadata(self.rerun())
        self.update_progress(20)
        octx.feature_matching(self.rerun())
        self.update_progress(30)
        octx.reconstruct(self.rerun())
        self.update_progress(70)

        # If we find a special flag file for split/merge we stop right here
        if os.path.exists(octx.path("split_merge_stop_at_reconstruction.txt")):
            log.ODM_INFO("Stopping OpenSfM early because we found: %s" %
                         octx.path("split_merge_stop_at_reconstruction.txt"))
            self.next_stage = None
            return

        if args.fast_orthophoto:
            output_file = octx.path('reconstruction.ply')
        elif args.use_opensfm_dense:
            output_file = tree.opensfm_model
        else:
            output_file = tree.opensfm_reconstruction

        updated_config_flag_file = octx.path('updated_config.txt')
        if not io.file_exists(updated_config_flag_file) or self.rerun():
            octx.update_config({
                'undistorted_image_max_size':
                gsd.image_max_size(photos,
                                   args.orthophoto_resolution,
                                   tree.opensfm_reconstruction,
                                   ignore_gsd=args.ignore_gsd)
            })
            octx.touch(updated_config_flag_file)

        # These will be used for texturing
        undistorted_images_path = octx.path("undistorted")

        if not io.dir_exists(undistorted_images_path) or self.rerun():
            octx.run('undistort')
        else:
            log.ODM_WARNING("Found an undistorted directory in %s" %
                            undistorted_images_path)

        self.update_progress(80)

        if not io.file_exists(tree.opensfm_reconstruction_nvm) or self.rerun():
            octx.run('export_visualsfm --undistorted')
        else:
            log.ODM_WARNING(
                'Found a valid OpenSfM NVM reconstruction file in: %s' %
                tree.opensfm_reconstruction_nvm)

        self.update_progress(85)

        # Skip dense reconstruction if necessary and export
        # sparse reconstruction instead
        if args.fast_orthophoto:
            if not io.file_exists(output_file) or self.rerun():
                octx.run('export_ply --no-cameras')
            else:
                log.ODM_WARNING("Found a valid PLY reconstruction in %s" %
                                output_file)

        elif args.use_opensfm_dense:
            if not io.file_exists(output_file) or self.rerun():
                octx.run('compute_depthmaps')
            else:
                log.ODM_WARNING("Found a valid dense reconstruction in %s" %
                                output_file)

        # check if reconstruction was exported to bundler before
        octx.export_bundler(tree.opensfm_bundle_list, self.rerun())

        self.update_progress(90)

        if reconstruction.is_georeferenced() and (not io.file_exists(
                tree.opensfm_transformation) or self.rerun()):
            octx.run('export_geocoords --transformation --proj \'%s\'' %
                     reconstruction.georef.proj4())
        else:
            log.ODM_WARNING("Will skip exporting %s" %
                            tree.opensfm_transformation)
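Several steps in the stage above use the same idempotency idiom: a flag file marks a completed step, and the step is skipped on later runs unless a rerun is forced. A minimal sketch of the idiom (run_once is a hypothetical helper, not ODM's API):

import os

def run_once(flag_file, rerun, action):
    # Execute `action` only if its flag file is missing or a rerun
    # was requested, then touch the flag so later runs skip it.
    if os.path.exists(flag_file) and not rerun:
        return False
    action()
    with open(flag_file, "w") as f:
        f.write("")
    return True

# run_once("opensfm/updated_config.txt", rerun=False,
#          action=lambda: print("updating config"))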
Example No. 24
    def process(self, args, outputs):
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']
        photos = reconstruction.photos

        if not photos:
            log.ODM_ERROR('Not enough photos in photos array to start OpenSfM')
            exit(1)

        octx = OSFMContext(tree.opensfm)
        octx.setup(args,
                   tree.dataset_raw,
                   reconstruction=reconstruction,
                   rerun=self.rerun())
        octx.extract_metadata(self.rerun())
        self.update_progress(20)
        octx.feature_matching(self.rerun())
        self.update_progress(30)
        octx.reconstruct(self.rerun())
        octx.extract_cameras(tree.path("cameras.json"), self.rerun())
        self.update_progress(70)

        if args.optimize_disk_space:
            for folder in ["features", "matches", "exif", "reports"]:
                folder_path = octx.path(folder)
                if os.path.islink(folder_path):
                    os.unlink(folder_path)
                else:
                    shutil.rmtree(folder_path)

        # If we find a special flag file for split/merge we stop right here
        if os.path.exists(octx.path("split_merge_stop_at_reconstruction.txt")):
            log.ODM_INFO("Stopping OpenSfM early because we found: %s" %
                         octx.path("split_merge_stop_at_reconstruction.txt"))
            self.next_stage = None
            return

        updated_config_flag_file = octx.path('updated_config.txt')

        # Make sure it's capped by the depthmap-resolution arg,
        # since the undistorted images are used for MVS
        outputs['undist_image_max_size'] = max(
            gsd.image_max_size(photos,
                               args.orthophoto_resolution,
                               tree.opensfm_reconstruction,
                               ignore_gsd=args.ignore_gsd,
                               has_gcp=reconstruction.has_gcp()),
            get_depthmap_resolution(args, photos))

        if not io.file_exists(updated_config_flag_file) or self.rerun():
            octx.update_config({
                'undistorted_image_max_size':
                outputs['undist_image_max_size']
            })
            octx.touch(updated_config_flag_file)

        # Undistorted images will be used for texturing / MVS

        alignment_info = None
        primary_band_name = None
        # Initialize the band maps so the NVM-writing step below can
        # recompute them when the added-shots step was skipped on rerun
        s2p, p2s = None, None
        undistort_pipeline = []

        def undistort_callback(shot_id, image):
            for func in undistort_pipeline:
                image = func(shot_id, image)
            return image

        def radiometric_calibrate(shot_id, image):
            photo = reconstruction.get_photo(shot_id)
            return multispectral.dn_to_reflectance(
                photo,
                image,
                use_sun_sensor=args.radiometric_calibration == "camera+sun")

        def align_to_primary_band(shot_id, image):
            photo = reconstruction.get_photo(shot_id)

            # No need to align if requested by user
            if args.skip_band_alignment:
                return image

            # No need to align primary
            if photo.band_name == primary_band_name:
                return image

            ainfo = alignment_info.get(photo.band_name)
            if ainfo is not None:
                return multispectral.align_image(image, ainfo['warp_matrix'],
                                                 ainfo['dimension'])
            else:
                log.ODM_WARNING(
                    "Cannot align %s, no alignment matrix could be computed. Band alignment quality might be affected."
                    % (shot_id))
                return image

        if args.radiometric_calibration != "none":
            undistort_pipeline.append(radiometric_calibrate)

        image_list_override = None

        if reconstruction.multi_camera:

            # Undistort all bands (a p.band_name filter here could
            # restrict this to secondary bands only)
            image_list_override = [
                os.path.join(tree.dataset_raw, p.filename) for p in photos
            ]

            # We backup the original reconstruction.json, tracks.csv
            # then we augment them by duplicating the primary band
            # camera shots with each band, so that exports, undistortion,
            # etc. include all bands
            # We finally restore the original files later

            added_shots_file = octx.path('added_shots_done.txt')

            if not io.file_exists(added_shots_file) or self.rerun():
                primary_band_name = multispectral.get_primary_band_name(
                    reconstruction.multi_camera, args.primary_band)
                s2p, p2s = multispectral.compute_band_maps(
                    reconstruction.multi_camera, primary_band_name)

                if not args.skip_band_alignment:
                    alignment_info = multispectral.compute_alignment_matrices(
                        reconstruction.multi_camera,
                        primary_band_name,
                        tree.dataset_raw,
                        s2p,
                        p2s,
                        max_concurrency=args.max_concurrency)
                else:
                    log.ODM_WARNING("Skipping band alignment")
                    alignment_info = {}

                log.ODM_INFO("Adding shots to reconstruction")

                octx.backup_reconstruction()
                octx.add_shots_to_reconstruction(p2s)
                octx.touch(added_shots_file)

            undistort_pipeline.append(align_to_primary_band)

        octx.convert_and_undistort(self.rerun(), undistort_callback,
                                   image_list_override)

        self.update_progress(80)

        if reconstruction.multi_camera:
            octx.restore_reconstruction_backup()

            # Undistort primary band and write undistorted
            # reconstruction.json, tracks.csv
            octx.convert_and_undistort(self.rerun(),
                                       undistort_callback,
                                       runId='primary')

        if not io.file_exists(tree.opensfm_reconstruction_nvm) or self.rerun():
            octx.run('export_visualsfm --points')
        else:
            log.ODM_WARNING(
                'Found a valid OpenSfM NVM reconstruction file in: %s' %
                tree.opensfm_reconstruction_nvm)

        if reconstruction.multi_camera:
            log.ODM_INFO("Multiple bands found")

            # Write NVM files for the various bands
            for band in reconstruction.multi_camera:
                nvm_file = octx.path(
                    "undistorted",
                    "reconstruction_%s.nvm" % band['name'].lower())

                if not io.file_exists(nvm_file) or self.rerun():
                    img_map = {}

                    if primary_band_name is None:
                        primary_band_name = multispectral.get_primary_band_name(
                            reconstruction.multi_camera, args.primary_band)
                    if p2s is None:
                        s2p, p2s = multispectral.compute_band_maps(
                            reconstruction.multi_camera, primary_band_name)

                    for fname in p2s:

                        # Primary band maps to itself
                        if band['name'] == primary_band_name:
                            img_map[fname + '.tif'] = fname + '.tif'
                        else:
                            band_filename = next(
                                (p.filename for p in p2s[fname]
                                 if p.band_name == band['name']), None)

                            if band_filename is not None:
                                img_map[fname +
                                        '.tif'] = band_filename + '.tif'
                            else:
                                log.ODM_WARNING(
                                    "Cannot find %s band equivalent for %s" %
                                    (band['name'], fname))

                    nvm.replace_nvm_images(tree.opensfm_reconstruction_nvm,
                                           img_map, nvm_file)
                else:
                    log.ODM_WARNING("Found existing NVM file %s" % nvm_file)

        self.update_progress(85)

        # Skip dense reconstruction if necessary and export
        # sparse reconstruction instead
        if args.fast_orthophoto:
            output_file = octx.path('reconstruction.ply')

            if not io.file_exists(output_file) or self.rerun():
                octx.run('export_ply --no-cameras')
            else:
                log.ODM_WARNING("Found a valid PLY reconstruction in %s" %
                                output_file)

        elif args.use_opensfm_dense:
            output_file = tree.opensfm_model

            if not io.file_exists(output_file) or self.rerun():
                octx.run('compute_depthmaps')
            else:
                log.ODM_WARNING("Found a valid dense reconstruction in %s" %
                                output_file)

        self.update_progress(90)

        if reconstruction.is_georeferenced() and (not io.file_exists(
                tree.opensfm_transformation) or self.rerun()):
            octx.run('export_geocoords --transformation --proj \'%s\'' %
                     reconstruction.georef.proj4())
        else:
            log.ODM_WARNING("Will skip exporting %s" %
                            tree.opensfm_transformation)

        if args.optimize_disk_space:
            os.remove(octx.path("tracks.csv"))
            if io.file_exists(octx.recon_backup_file()):
                os.remove(octx.recon_backup_file())

            if io.dir_exists(octx.path("undistorted", "depthmaps")):
                files = glob.glob(
                    octx.path("undistorted", "depthmaps", "*.npz"))
                for f in files:
                    os.remove(f)

            # Keep these if using OpenMVS
            if args.fast_orthophoto or args.use_opensfm_dense:
                files = [
                    octx.path("undistorted", "tracks.csv"),
                    octx.path("undistorted", "reconstruction.json")
                ]
                for f in files:
                    if os.path.exists(f):
                        os.remove(f)
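The undistort_callback above composes a small pipeline: each stage takes (shot_id, image) and returns a possibly transformed image, so radiometric calibration and band alignment can be chained freely. A stripped-down sketch of the pattern, assuming image is whatever array type the caller passes through:

def make_pipeline_callback(stages):
    # Each stage has the signature (shot_id, image) -> image.
    def callback(shot_id, image):
        for stage in stages:
            image = stage(shot_id, image)
        return image
    return callback

# pipeline = []
# pipeline.append(radiometric_calibrate)   # optional, per args
# pipeline.append(align_to_primary_band)   # multi-camera only
# undistort_callback = make_pipeline_callback(pipeline)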
Example No. 25
    def process(self, args, outputs):
        # get inputs
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']
        photos = reconstruction.photos

        if not photos:
            log.ODM_ERROR('Not enough photos in photos array to start MVE')
            exit(1)

        # check if reconstruction was done before
        if not io.file_exists(tree.mve_model) or self.rerun():
            # cleanup if a rerun
            if io.dir_exists(tree.mve_path) and self.rerun():
                shutil.rmtree(tree.mve_path)

            # make bundle directory
            if not io.file_exists(tree.mve_bundle):
                system.mkdir_p(tree.mve_path)
                system.mkdir_p(io.join_paths(tree.mve_path, 'bundle'))

                octx = OSFMContext(tree.opensfm)
                octx.save_absolute_image_list_to(tree.mve_image_list)
                io.copy(tree.opensfm_bundle, tree.mve_bundle)

            # mve makescene wants the output directory
            # to not exists before executing it (otherwise it
            # will prompt the user for confirmation)
            if io.dir_exists(tree.mve):
                shutil.rmtree(tree.mve)

            # run mve makescene
            if not io.dir_exists(tree.mve_views):
                system.run('%s "%s" "%s"' % (context.makescene_path, tree.mve_path, tree.mve), env_vars={'OMP_NUM_THREADS': args.max_concurrency})

            self.update_progress(10)

            # Compute mve output scale based on depthmap_resolution
            max_width = 0
            max_height = 0
            for photo in photos:
                max_width = max(photo.width, max_width)
                max_height = max(photo.height, max_height)

            max_pixels = args.depthmap_resolution * args.depthmap_resolution
            if max_width * max_height <= max_pixels:
                mve_output_scale = 0
            else:
                ratio = float(max_width * max_height) / float(max_pixels)
                mve_output_scale = int(math.ceil(math.log(ratio) / math.log(4.0)))

            dmrecon_config = [
                "-s%s" % mve_output_scale,
                "--progress=silent",
                "--local-neighbors=2",
                # "--filter-width=3",
            ]

            # Run MVE's dmrecon
            log.ODM_INFO('                                                                               ')
            log.ODM_INFO('                                    ,*/**                                      ')
            log.ODM_INFO('                                  ,*@%*/@%*                                    ')
            log.ODM_INFO('                                ,/@%******@&*.                                 ')
            log.ODM_INFO('                              ,*@&*********/@&*                                ')
            log.ODM_INFO('                            ,*@&**************@&*                              ')
            log.ODM_INFO('                          ,/@&******************@&*.                           ')
            log.ODM_INFO('                        ,*@&*********************/@&*                          ')
            log.ODM_INFO('                      ,*@&**************************@&*.                       ')
            log.ODM_INFO('                    ,/@&******************************&&*,                     ')
            log.ODM_INFO('                  ,*&&**********************************@&*.                   ')
            log.ODM_INFO('                ,*@&**************************************@&*.                 ')
            log.ODM_INFO('              ,*@&***************#@@@@@@@@@%****************&&*,               ')
            log.ODM_INFO('            .*&&***************&@@@@@@@@@@@@@@****************@@*.             ')
            log.ODM_INFO('          .*@&***************&@@@@@@@@@@@@@@@@@%****(@@%********@@*.           ')
            log.ODM_INFO('        .*@@***************%@@@@@@@@@@@@@@@@@@@@@#****&@@@@%******&@*,         ')
            log.ODM_INFO('      .*&@****************@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@/*****@@*.       ')
            log.ODM_INFO('    .*@@****************@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@%*************@@*.     ')
            log.ODM_INFO('  .*@@****/***********@@@@@&**(@@@@@@@@@@@@@@@@@@@@@@@#*****************%@*,   ')
            log.ODM_INFO(' */@*******@*******#@@@@%*******/@@@@@@@@@@@@@@@@@@@@********************/@(,  ')
            log.ODM_INFO(' ,*@(********&@@@@@@#**************/@@@@@@@#**(@@&/**********************@&*   ')
            log.ODM_INFO('   *#@/*******************************@@@@@***&@&**********************&@*,    ')
            log.ODM_INFO('     *#@#******************************&@@@***@#*********************&@*,      ')
            log.ODM_INFO('       */@#*****************************@@@************************@@*.        ')
            log.ODM_INFO('         *#@/***************************/@@/*********************%@*,          ')
            log.ODM_INFO('           *#@#**************************#@@%******************%@*,            ')
            log.ODM_INFO('             */@#*************************(@@@@@@@&%/********&@*.              ')
            log.ODM_INFO('               *(@(*********************************/%@@%**%@*,                ')
            log.ODM_INFO('                 *(@%************************************%@**                  ')
            log.ODM_INFO('                   **@%********************************&@*,                    ')
            log.ODM_INFO('                     *(@(****************************%@/*                      ')
            log.ODM_INFO('                       ,(@%************************#@/*                        ')
            log.ODM_INFO('                         ,*@%********************&@/,                          ')
            log.ODM_INFO('                           */@#****************#@/*                            ')
            log.ODM_INFO('                             ,/@&************#@/*                              ')
            log.ODM_INFO('                               ,*@&********%@/,                                ')
            log.ODM_INFO('                                 */@#****(@/*                                  ')
            log.ODM_INFO('                                   ,/@@@@(*                                    ')
            log.ODM_INFO('                                     .**,                                      ')
            log.ODM_INFO('')
            log.ODM_INFO("Running dense reconstruction. This might take a while. Please be patient, the process is not dead or hung.")
            log.ODM_INFO("                              Process is running")
            
            # TODO: find out why MVE is crashing at random
            # MVE *seems* to have a race condition, triggered randomly, regardless of dataset
            # https://gist.github.com/pierotofy/6c9ce93194ba510b61e42e3698cfbb89
            # Temporary workaround is to retry the reconstruction until we get it right
            # (up to a certain number of retries).
            retry_count = 1
            while retry_count < 10:
                try:
                    system.run('%s "%s" "%s"' % (context.dmrecon_path, ' '.join(dmrecon_config), tree.mve), env_vars={'OMP_NUM_THREADS': args.max_concurrency})
                    break
                except Exception as e:
                    if str(e) == "Child returned 134" or str(e) == "Child returned 1":
                        retry_count += 1
                        log.ODM_WARNING("Caught error code, retrying attempt #%s" % retry_count)
                    else:
                        raise e

            self.update_progress(90)

            scene2pset_config = [
                "-F%s" % mve_output_scale
            ]

            # run scene2pset
            system.run('%s %s "%s" "%s"' % (context.scene2pset_path, ' '.join(scene2pset_config), tree.mve, tree.mve_model), env_vars={'OMP_NUM_THREADS': args.max_concurrency})
        
            # run cleanmesh (filter points by MVE confidence threshold)
            if args.mve_confidence > 0:
                mve_filtered_model = io.related_file_path(tree.mve_model, postfix=".filtered")
                system.run('%s -t%s --no-clean --component-size=0 "%s" "%s"' % (context.meshclean_path, min(1.0, args.mve_confidence), tree.mve_model, mve_filtered_model), env_vars={'OMP_NUM_THREADS': args.max_concurrency})

                if io.file_exists(mve_filtered_model):
                    os.remove(tree.mve_model)
                    os.rename(mve_filtered_model, tree.mve_model)
                else:
                    log.ODM_WARNING("Couldn't filter MVE model (%s does not exist)." % mve_filtered_model)
        else:
            log.ODM_WARNING('Found a valid MVE reconstruction file in: %s' %
                            tree.mve_model)
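The MVE output scale computed above deserves a worked example: each scale level halves both image dimensions, i.e. divides the pixel count by 4, so the smallest scale whose downsampled size fits within depthmap_resolution squared is ceil(log4(pixels / budget)). A self-contained sketch of the same computation:

import math

def mve_output_scale(max_width, max_height, depthmap_resolution):
    # Scale s divides each dimension by 2**s, so pixel count drops
    # by a factor of 4 per level; pick the smallest s that fits the
    # depthmap_resolution^2 pixel budget.
    max_pixels = depthmap_resolution * depthmap_resolution
    if max_width * max_height <= max_pixels:
        return 0
    ratio = float(max_width * max_height) / float(max_pixels)
    return int(math.ceil(math.log(ratio) / math.log(4.0)))

# 6000x4000 images with depthmap_resolution=640:
# ratio ~ 58.6, log4(58.6) ~ 2.94 -> scale 3 (an 8x reduction per side)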
Example No. 27
def process(args, tree, reconstruction, current_path):
    odm_orthophoto = io.join_paths(current_path, 'orthophoto')
    odm_orthophoto_path = odm_orthophoto
    odm_orthophoto_render = io.join_paths(odm_orthophoto_path, 'odm_orthophoto_render.tif')
    odm_orthophoto_tif = io.join_paths(odm_orthophoto_path, 'odm_orthophoto.tif')
    odm_orthophoto_corners = io.join_paths(odm_orthophoto_path, 'odm_orthophoto_corners.tif')
    odm_orthophoto_log = io.join_paths(odm_orthophoto_path, 'odm_orthophoto_log.txt')
    odm_orthophoto_tif_log = io.join_paths(odm_orthophoto_path, 'gdal_translate_log.txt')
    odm_25dgeoreferencing = io.join_paths(current_path, 'odm_georeferencing')
    odm_georeferencing = io.join_paths(current_path, 'odm_georeferencing')

    odm_georeferencing_coords = io.join_paths(odm_georeferencing, 'coords.txt')
    odm_georeferencing_gcp = io.find('gcp_list.txt', current_path)
    odm_georeferencing_gcp_utm = io.join_paths(odm_georeferencing, 'gcp_list_utm.txt')
    odm_georeferencing_utm_log = io.join_paths(odm_georeferencing, 'odm_georeferencing_utm_log.txt')
    odm_georeferencing_log = 'odm_georeferencing_log.txt'
    odm_georeferencing_transform_file = 'odm_georeferencing_transform.txt'
    odm_georeferencing_proj = 'proj.txt'
    odm_georeferencing_model_txt_geo = 'odm_georeferencing_model_geo.txt'
    odm_georeferencing_model_obj_geo = 'odm_textured_model_geo.obj'
    odm_georeferencing_xyz_file = io.join_paths(odm_georeferencing, 'odm_georeferenced_model.csv')
    odm_georeferencing_las_json = io.join_paths(odm_georeferencing, 'las.json')
    odm_georeferencing_model_laz = io.join_paths(odm_georeferencing, 'odm_georeferenced_model.laz')
    odm_georeferencing_model_las = io.join_paths(odm_georeferencing, 'odm_georeferenced_model.las')
    odm_georeferencing_dem = io.join_paths(odm_georeferencing, 'odm_georeferencing_model_dem.tif')

    opensfm_reconstruction = io.join_paths(current_path, 'reconstruction.json')
    odm_texturing = io.join_paths(current_path, 'mvs')
    odm_textured_model_obj = io.join_paths(odm_texturing, 'odm_textured_model.obj')
    images_dir = io.join_paths(current_path, 'images')
    opensfm_bundle = os.path.join(current_path, 'opensfm', 'bundle_r000.out')
    opensfm_bundle_list = os.path.join(current_path, 'opensfm', 'list_r000.out')
    opensfm_transformation = os.path.join(current_path, 'opensfm', 'geocoords_transformation.txt')
    filtered_point_cloud = os.path.join(current_path, 'filterpoints', 'point_cloud.ply')

    doPointCloudGeo = True
    transformPointCloud = True
    verbose = ''

    class nonloc:
        runs = []

    def add_run(primary=True, band=None):
        subdir = ""
        if not primary and band is not None:
            subdir = band

        # Make sure the 2.5D mesh is georeferenced before the 3D mesh,
        # because it will be used to calculate a transform
        # for the point cloud. If we use the 3D model transform,
        # DEMs and orthophoto might not align!
        nonloc.runs += [{
            'georeferencing_dir': os.path.join(odm_georeferencing, subdir),
            'texturing_dir': os.path.join(odm_texturing, subdir),
        }]

        if not args.skip_3dmodel and (primary or args.use_3dmesh):
            nonloc.runs += [{
                'georeferencing_dir': odm_georeferencing,
                'texturing_dir': os.path.join(odm_texturing, subdir),
            }]

    if reconstruction.multi_camera:
        for band in reconstruction.multi_camera:
            primary = band == reconstruction.multi_camera[0]
            add_run(primary, band['name'].lower())
    else:
        add_run()

    # progress_per_run = 100.0 / len(nonloc.runs)
    # progress = 0.0

    for r in nonloc.runs:
        if not io.dir_exists(r['georeferencing_dir']):
            system.mkdir_p(r['georeferencing_dir'])

        # Per-run paths (avoid rebinding the shared names across runs)
        odm_georeferencing_model_obj_geo_file = os.path.join(r['texturing_dir'], odm_georeferencing_model_obj_geo)
        odm_textured_model_obj_file = os.path.join(r['texturing_dir'], odm_textured_model_obj)
        odm_georeferencing_log_file = os.path.join(r['georeferencing_dir'], odm_georeferencing_log)
        odm_georeferencing_transform_file_path = os.path.join(r['georeferencing_dir'], odm_georeferencing_transform_file)
        odm_georeferencing_model_txt_geo_file = os.path.join(r['georeferencing_dir'], odm_georeferencing_model_txt_geo)

        pio = True
        # Originally gated on missing outputs:
        # if not io.file_exists(odm_georeferencing_model_obj_geo_file) or \
        #    not io.file_exists(odm_georeferencing_model_laz):
        if pio:
            # odm_georef definitions
            kwargs = {
                'bin': context.odm_modules_path,
                'input_pc_file': filtered_point_cloud,
                'bundle': opensfm_bundle,
                'imgs': images_dir,
                'imgs_list': opensfm_bundle_list,
                'model': odm_textured_model_obj_file,
                'log': odm_georeferencing_log_file,
                'input_trans_file': opensfm_transformation,
                'transform_file': odm_georeferencing_transform_file_path,
                'coords': odm_georeferencing_coords,
                'output_pc_file': odm_georeferencing_model_laz,
                'geo_sys': odm_georeferencing_model_txt_geo_file,
                'model_geo': odm_georeferencing_model_obj_geo_file,
                'verbose': verbose
            }

            if transformPointCloud:
                kwargs['pc_params'] = '-inputPointCloudFile {input_pc_file} -outputPointCloudFile {output_pc_file}'.format(**kwargs)

                if reconstruction.is_georeferenced():
                    kwargs['pc_params'] += ' -outputPointCloudSrs %s' % pipes.quote(reconstruction.georef.proj4())
                else:
                    log.ODM_WARNING('NO SRS: The output point cloud will not have a SRS.')
            else:
                kwargs['pc_params'] = ''

            if io.file_exists(opensfm_transformation) and io.file_exists(odm_georeferencing_coords):
                log.ODM_INFO('Running georeferencing with OpenSfM transformation matrix')
                system.run('{bin}/odm_georef -bundleFile {bundle} -inputTransformFile {input_trans_file} -inputCoordFile {coords} '
                           '-inputFile {model} -outputFile {model_geo} '
                           '{pc_params} {verbose} '
                           '-logFile {log} -outputTransformFile {transform_file} -georefFileOutputPath {geo_sys}'.format(**kwargs))
            elif io.file_exists(odm_georeferencing_coords):
                log.ODM_INFO('Running georeferencing with generated coords file.')
                system.run('{bin}/odm_georef -bundleFile {bundle} -inputCoordFile {coords} '
                           '-inputFile {model} -outputFile {model_geo} '
                           '{pc_params} {verbose} '
                           '-logFile {log} -outputTransformFile {transform_file} -georefFileOutputPath {geo_sys}'.format(**kwargs))
            else:
                log.ODM_WARNING('Georeferencing failed. Make sure your '
                                'photos have geotags in the EXIF or you have '
                                'provided a GCP file. ')
                doPointCloudGeo = False  # skip the rest of the georeferencing

            if doPointCloudGeo:
                reconstruction.georef.extract_offsets(odm_georeferencing_model_txt_geo_file)
                point_cloud.post_point_cloud_steps(args, tree)

                if args.crop > 0:
                    log.ODM_INFO("Calculating cropping area and generating bounds shapefile from point cloud")
                    cropper = Cropper(odm_georeferencing, 'odm_georeferenced_model')

                    decimation_step = 40 if args.fast_orthophoto or args.use_opensfm_dense else 90

                    # More aggressive decimation for large datasets
                    if not args.fast_orthophoto:
                        decimation_step *= int(len(reconstruction.photos) / 1000) + 1

                    cropper.create_bounds_gpkg(odm_georeferencing_model_laz, args.crop,
                                               decimation_step=decimation_step)

                # Do not execute a second time, since
                # we might be doing georeferencing for
                # multiple models (3D, 2.5D, ...)
                doPointCloudGeo = False
                transformPointCloud = False
        else:
            log.ODM_WARNING('Found a valid georeferenced model in: %s'
                            % odm_georeferencing_model_laz)

        if args.optimize_disk_space and io.file_exists(odm_georeferencing_model_laz) and io.file_exists(filtered_point_cloud):
            os.remove(filtered_point_cloud)
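The odm_georef invocations above are built by expanding a single kwargs dict into a command template with str.format(**kwargs), which keeps the two nearly identical command lines in sync. A minimal sketch of the pattern with illustrative paths (not real defaults):

# Illustrative paths only; the flags mirror the odm_georef calls above.
kwargs = {
    'bin': '/code/build/modules',
    'bundle': '/data/opensfm/bundle_r000.out',
    'coords': '/data/odm_georeferencing/coords.txt',
    'model': '/data/mvs/odm_textured_model.obj',
    'model_geo': '/data/mvs/odm_textured_model_geo.obj',
}
cmd = ('{bin}/odm_georef -bundleFile {bundle} -inputCoordFile {coords} '
       '-inputFile {model} -outputFile {model_geo}').format(**kwargs)
# system.run(cmd)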
Example No. 28
File: osfm.py Project: tub-sgg/ODM
    def extract_metadata(self, rerun=False):
        metadata_dir = self.path("exif")
        if not io.dir_exists(metadata_dir) or rerun:
            self.run('extract_metadata')
Example No. 29
    def process(self, args, outputs):
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']

        class nonloc:
            runs = []

        def add_run(nvm_file, primary=True, band=None):
            subdir = ""
            if not primary and band is not None:
                subdir = band

            if not args.skip_3dmodel and (primary or args.use_3dmesh):
                nonloc.runs += [{
                    'out_dir': os.path.join(tree.odm_texturing, subdir),
                    'model': tree.odm_mesh,
                    'nadir': False,
                    'primary': primary,
                    'nvm_file': nvm_file,
                    'labeling_file': os.path.join(tree.odm_texturing, "odm_textured_model_geo_labeling.vec") if subdir else None
                }]

            if not args.use_3dmesh:
                nonloc.runs += [{
                    'out_dir': os.path.join(tree.odm_25dtexturing, subdir),
                    'model': tree.odm_25dmesh,
                    'nadir': True,
                    'primary': primary,
                    'nvm_file': nvm_file,
                    'labeling_file': os.path.join(tree.odm_25dtexturing, "odm_textured_model_geo_labeling.vec") if subdir else None
                }]

        if reconstruction.multi_camera:

            for band in reconstruction.multi_camera:
                primary = band['name'] == get_primary_band_name(reconstruction.multi_camera, args.primary_band)
                nvm_file = os.path.join(tree.opensfm, "undistorted", "reconstruction_%s.nvm" % band['name'].lower())
                add_run(nvm_file, primary, band['name'].lower())
            
            # Sort to make sure primary band is processed first
            nonloc.runs.sort(key=lambda r: r['primary'], reverse=True)
        else:
            add_run(tree.opensfm_reconstruction_nvm)
        
        progress_per_run = 100.0 / len(nonloc.runs)
        progress = 0.0

        for r in nonloc.runs:
            if not io.dir_exists(r['out_dir']):
                system.mkdir_p(r['out_dir'])

            odm_textured_model_obj = os.path.join(r['out_dir'], tree.odm_textured_model_obj)

            if not io.file_exists(odm_textured_model_obj) or self.rerun():
                log.ODM_INFO('Writing MVS Textured file in: %s'
                              % odm_textured_model_obj)

                # Format arguments to fit Mvs-Texturing app
                skipGlobalSeamLeveling = ""
                skipLocalSeamLeveling = ""
                keepUnseenFaces = ""
                nadir = ""

                if (self.params.get('skip_glob_seam_leveling')):
                    skipGlobalSeamLeveling = "--skip_global_seam_leveling"
                if (self.params.get('skip_loc_seam_leveling')):
                    skipLocalSeamLeveling = "--skip_local_seam_leveling"
                if (self.params.get('keep_unseen_faces')):
                    keepUnseenFaces = "--keep_unseen_faces"
                if (r['nadir']):
                    nadir = '--nadir_mode'

                # mvstex definitions
                kwargs = {
                    'bin': context.mvstex_path,
                    'out_dir': os.path.join(r['out_dir'], "odm_textured_model_geo"),
                    'model': r['model'],
                    'dataTerm': self.params.get('data_term'),
                    'outlierRemovalType': self.params.get('outlier_rem_type'),
                    'skipGlobalSeamLeveling': skipGlobalSeamLeveling,
                    'skipLocalSeamLeveling': skipLocalSeamLeveling,
                    'keepUnseenFaces': keepUnseenFaces,
                    'toneMapping': self.params.get('tone_mapping'),
                    'nadirMode': nadir,
                    'nvm_file': r['nvm_file'],
                    'intermediate': '--no_intermediate_results' if (r['labeling_file'] or not reconstruction.multi_camera) else '',
                    'labelingFile': '-L "%s"' % r['labeling_file'] if r['labeling_file'] else ''
                }

                mvs_tmp_dir = os.path.join(r['out_dir'], 'tmp')

                # Make sure tmp directory is empty
                if io.dir_exists(mvs_tmp_dir):
                    log.ODM_INFO("Removing old tmp directory {}".format(mvs_tmp_dir))
                    shutil.rmtree(mvs_tmp_dir)

                # run texturing binary
                system.run('"{bin}" "{nvm_file}" "{model}" "{out_dir}" '
                        '-d {dataTerm} -o {outlierRemovalType} '
                        '-t {toneMapping} '
                        '{intermediate} '
                        '{skipGlobalSeamLeveling} '
                        '{skipLocalSeamLeveling} '
                        '{keepUnseenFaces} '
                        '{nadirMode} '
                        '{labelingFile} '.format(**kwargs))
                
                # Backward compatibility: copy odm_textured_model_geo.mtl to odm_textured_model.mtl
                # for certain older WebODM clients which expect a odm_textured_model.mtl
                # to be present for visualization
                # We should remove this at some point in the future
                geo_mtl = os.path.join(r['out_dir'], 'odm_textured_model_geo.mtl')
                if io.file_exists(geo_mtl):
                    nongeo_mtl = os.path.join(r['out_dir'], 'odm_textured_model.mtl')
                    shutil.copy(geo_mtl, nongeo_mtl)
                
                progress += progress_per_run
                self.update_progress(progress)
            else:
                log.ODM_WARNING('Found a valid ODM Texture file in: %s'
                                % odm_textured_model_obj)
        
        if args.optimize_disk_space:
            for r in nonloc.runs:
                if io.file_exists(r['model']):
                    os.remove(r['model'])
            
            undistorted_images_path = os.path.join(tree.opensfm, "undistorted", "images")
            if io.dir_exists(undistorted_images_path):
                shutil.rmtree(undistorted_images_path)
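The `class nonloc` trick above (also used in Example No. 27) is a workaround dating from Python 2, which lacked the nonlocal keyword: an inner function cannot rebind a variable in the enclosing scope, but it can freely reassign an attribute on a shared class object. It still works in Python 3. A small self-contained demonstration:

def collect():
    class nonloc:  # shared, mutable stand-in for `nonlocal`
        items = []

    def add(x):
        # Rebinding nonloc.items is fine; rebinding a plain local
        # from the enclosing scope would raise UnboundLocalError.
        nonloc.items += [x]

    add(1)
    add(2)
    return nonloc.items

assert collect() == [1, 2]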
Example No. 30
File: osfm.py Project: tub-sgg/ODM
    def setup(self,
              args,
              images_path,
              photos,
              reconstruction,
              append_config=[],
              rerun=False):
        """
        Setup a OpenSfM project
        """
        if rerun and io.dir_exists(self.opensfm_project_path):
            shutil.rmtree(self.opensfm_project_path)

        if not io.dir_exists(self.opensfm_project_path):
            system.mkdir_p(self.opensfm_project_path)

        list_path = io.join_paths(self.opensfm_project_path, 'image_list.txt')
        if not io.file_exists(list_path) or rerun:

            # create file list
            has_alt = True
            has_gps = False
            with open(list_path, 'w') as fout:
                for photo in photos:
                    if not photo.altitude:
                        has_alt = False
                    if photo.latitude is not None and photo.longitude is not None:
                        has_gps = True
                    fout.write('%s\n' %
                               io.join_paths(images_path, photo.filename))

            # check for image_groups.txt (split-merge)
            image_groups_file = os.path.join(args.project_path,
                                             "image_groups.txt")
            if io.file_exists(image_groups_file):
                log.ODM_INFO("Copied image_groups.txt to OpenSfM directory")
                io.copy(
                    image_groups_file,
                    os.path.join(self.opensfm_project_path,
                                 "image_groups.txt"))

            # check for cameras
            if args.cameras:
                try:
                    camera_overrides = camera.get_opensfm_camera_models(
                        args.cameras)
                    with open(
                            os.path.join(self.opensfm_project_path,
                                         "camera_models_overrides.json"),
                            'w') as f:
                        f.write(json.dumps(camera_overrides))
                    log.ODM_INFO(
                        "Wrote camera_models_overrides.json to OpenSfM directory"
                    )
                except Exception as e:
                    log.ODM_WARNING(
                        "Cannot set camera_models_overrides.json: %s" % str(e))

            use_bow = False
            feature_type = "SIFT"

            matcher_neighbors = args.matcher_neighbors
            if matcher_neighbors != 0 and reconstruction.multi_camera is not None:
                matcher_neighbors *= len(reconstruction.multi_camera)
                log.ODM_INFO(
                    "Increasing matcher neighbors to %s to accomodate multi-camera setup"
                    % matcher_neighbors)
                log.ODM_INFO("Multi-camera setup, using BOW matching")
                use_bow = True

            # create config file for OpenSfM
            config = [
                "use_exif_size: no",
                "feature_process_size: %s" % args.resize_to,
                "feature_min_frames: %s" % args.min_num_features,
                "processes: %s" % args.max_concurrency,
                "matching_gps_neighbors: %s" % matcher_neighbors,
                "matching_gps_distance: %s" % args.matcher_distance,
                "depthmap_method: %s" % args.opensfm_depthmap_method,
                "depthmap_resolution: %s" % args.depthmap_resolution,
                "depthmap_min_patch_sd: %s" %
                args.opensfm_depthmap_min_patch_sd,
                "depthmap_min_consistent_views: %s" %
                args.opensfm_depthmap_min_consistent_views,
                "optimize_camera_parameters: %s" %
                ('no'
                 if args.use_fixed_camera_params or args.cameras else 'yes'),
                "undistorted_image_format: tif",
                "bundle_outlier_filtering_type: AUTO",
                "align_orientation_prior: vertical",
                "triangulation_type: ROBUST",
                "bundle_common_position_constraints: %s" %
                ('no' if reconstruction.multi_camera is None else 'yes'),
            ]

            if args.camera_lens != 'auto':
                config.append("camera_projection_type: %s" %
                              args.camera_lens.upper())

            if not has_gps:
                log.ODM_INFO("No GPS information, using BOW matching")
                use_bow = True

            feature_type = args.feature_type.upper()

            if use_bow:
                config.append("matcher_type: WORDS")

                # Cannot use SIFT with BOW
                if feature_type == "SIFT":
                    log.ODM_WARNING(
                        "Using BOW matching, will use HAHOG feature type, not SIFT"
                    )
                    feature_type = "HAHOG"

            config.append("feature_type: %s" % feature_type)

            if has_alt:
                log.ODM_INFO(
                    "Altitude data detected, enabling it for GPS alignment")
                config.append("use_altitude_tag: yes")

            gcp_path = reconstruction.gcp.gcp_path
            if has_alt or gcp_path:
                config.append("align_method: auto")
            else:
                config.append("align_method: orientation_prior")

            if args.use_hybrid_bundle_adjustment:
                log.ODM_INFO("Enabling hybrid bundle adjustment")
                config.append(
                    "bundle_interval: 100"
                )  # Bundle after adding 'bundle_interval' cameras
                config.append(
                    "bundle_new_points_ratio: 1.2"
                )  # Bundle when (new points) / (bundled points) > bundle_new_points_ratio
                config.append(
                    "local_bundle_radius: 1"
                )  # Max image graph distance for images to be included in local bundle adjustment
            else:
                config.append("local_bundle_radius: 0")

            if gcp_path:
                config.append("bundle_use_gcp: yes")
                if not args.force_gps:
                    config.append("bundle_use_gps: no")
                io.copy(gcp_path, self.path("gcp_list.txt"))

            config = config + append_config

            # write config file
            log.ODM_INFO(config)
            config_filename = self.get_config_file_path()
            with open(config_filename, 'w') as fout:
                fout.write("\n".join(config))
        else:
            log.ODM_WARNING("%s already exists, not rerunning OpenSfM setup" %
                            list_path)
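setup() above falls back to bag-of-words (WORDS) matching whenever GPS data is missing or a multi-camera rig is detected, and BOW matching is incompatible with SIFT, which forces the HAHOG feature type. A condensed sketch of that decision logic (choose_matching is illustrative, not ODM's API):

def choose_matching(has_gps, multi_camera, feature_type):
    # No GPS (or a multi-camera rig) forces bag-of-words matching,
    # and BOW cannot be combined with SIFT, so HAHOG is used instead.
    use_bow = (not has_gps) or (multi_camera is not None)
    if use_bow and feature_type == "SIFT":
        feature_type = "HAHOG"
    matcher_type = "WORDS" if use_bow else None
    return matcher_type, feature_type

# choose_matching(False, None, "SIFT") -> ("WORDS", "HAHOG")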
Example No. 31
    def process(self, args, outputs):
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']
        photos = reconstruction.photos

        outputs['large'] = len(photos) > args.split

        if outputs['large']:
            # If we have a cluster address, we'll use a distributed workflow
            local_workflow = not bool(args.sm_cluster)

            octx = OSFMContext(tree.opensfm)
            split_done_file = octx.path("split_done.txt")

            if not io.file_exists(split_done_file) or self.rerun():
                orig_max_concurrency = args.max_concurrency
                if not local_workflow:
                    args.max_concurrency = max(1, args.max_concurrency - 1)
                    log.ODM_INFO(
                        "Setting max-concurrency to %s to better handle remote splits"
                        % args.max_concurrency)

                log.ODM_INFO(
                    "Large dataset detected (%s photos) and split set at %s. Preparing split merge."
                    % (len(photos), args.split))
                config = [
                    "submodels_relpath: " +
                    os.path.join("..", "submodels", "opensfm"),
                    "submodel_relpath_template: " + os.path.join(
                        "..", "submodels", "submodel_%04d", "opensfm"),
                    "submodel_images_relpath_template: " +
                    os.path.join("..", "submodels", "submodel_%04d", "images"),
                    "submodel_size: %s" % args.split,
                    "submodel_overlap: %s" % args.split_overlap,
                ]

                octx.setup(args,
                           tree.dataset_raw,
                           reconstruction=reconstruction,
                           append_config=config,
                           rerun=self.rerun())
                octx.extract_metadata(self.rerun())

                self.update_progress(5)

                if local_workflow:
                    octx.feature_matching(self.rerun())

                self.update_progress(20)

                # Create submodels
                if not io.dir_exists(tree.submodels_path) or self.rerun():
                    if io.dir_exists(tree.submodels_path):
                        log.ODM_WARNING(
                            "Removing existing submodels directory: %s" %
                            tree.submodels_path)
                        shutil.rmtree(tree.submodels_path)

                    octx.run("create_submodels")
                else:
                    log.ODM_WARNING(
                        "Submodels directory already exist at: %s" %
                        tree.submodels_path)

                # Find paths of all submodels
                mds = metadataset.MetaDataSet(tree.opensfm)
                submodel_paths = [
                    os.path.abspath(p) for p in mds.get_submodel_paths()
                ]

                for sp in submodel_paths:
                    sp_octx = OSFMContext(sp)

                    # Copy filtered GCP file if needed
                    # One in OpenSfM's directory, one in the submodel project directory
                    if reconstruction.gcp and reconstruction.gcp.exists():
                        submodel_gcp_file = os.path.abspath(
                            sp_octx.path("..", "gcp_list.txt"))
                        submodel_images_dir = os.path.abspath(
                            sp_octx.path("..", "images"))

                        if reconstruction.gcp.make_filtered_copy(
                                submodel_gcp_file, submodel_images_dir):
                            log.ODM_INFO("Copied filtered GCP file to %s" %
                                         submodel_gcp_file)
                            io.copy(
                                submodel_gcp_file,
                                os.path.abspath(sp_octx.path("gcp_list.txt")))
                        else:
                            log.ODM_INFO(
                                "No GCP will be copied for %s, not enough images in the submodel are referenced by the GCP"
                                % sp_octx.name())

                # Reconstruct each submodel
                log.ODM_INFO(
                    "Dataset has been split into %s submodels. Reconstructing each submodel..."
                    % len(submodel_paths))
                self.update_progress(25)

                if local_workflow:
                    for sp in submodel_paths:
                        log.ODM_INFO("Reconstructing %s" % sp)
                        OSFMContext(sp).reconstruct(self.rerun())
                else:
                    lre = LocalRemoteExecutor(args.sm_cluster, self.rerun())
                    lre.set_projects([
                        os.path.abspath(os.path.join(p, ".."))
                        for p in submodel_paths
                    ])
                    lre.run_reconstruction()

                self.update_progress(50)

                # TODO: this is currently not working and needs a champion to fix it
                # https://community.opendronemap.org/t/filenotfound-error-cameras-json/6047/2

                # resplit_done_file = octx.path('resplit_done.txt')
                # if not io.file_exists(resplit_done_file) and bool(args.split_multitracks):
                #     submodels = mds.get_submodel_paths()
                #     i = 0
                #     for s in submodels:
                #         template = octx.path("../aligned_submodels/submodel_%04d")
                #         with open(s+"/reconstruction.json", "r") as f:
                #             j = json.load(f)
                #         for k in range(0, len(j)):
                #             v = j[k]
                #             path = template % i

                #             #Create the submodel path up to opensfm
                #             os.makedirs(path+"/opensfm")
                #             os.makedirs(path+"/images")

                #             #symlinks for common data
                #             images = os.listdir(octx.path("../images"))
                #             for image in images:
                #                 os.symlink("../../../images/"+image, path+"/images/"+image)
                #             os.symlink("../../../opensfm/exif", path+"/opensfm/exif")
                #             os.symlink("../../../opensfm/features", path+"/opensfm/features")
                #             os.symlink("../../../opensfm/matches", path+"/opensfm/matches")
                #             os.symlink("../../../opensfm/reference_lla.json", path+"/opensfm/reference_lla.json")
                #             os.symlink("../../../opensfm/camera_models.json", path+"/opensfm/camera_models.json")

                #             shutil.copy(s+"/../cameras.json", path+"/cameras.json")

                #             shutil.copy(s+"/../images.json", path+"/images.json")

                #             with open(octx.path("config.yaml")) as f:
                #                 doc = yaml.safe_load(f)

                #             dmcv = "depthmap_min_consistent_views"
                #             if dmcv in doc:
                #                 if len(v["shots"]) < doc[dmcv]:
                #                     doc[dmcv] = len(v["shots"])
                #                     print("WARNING: Reduced "+dmcv+" to accommodate short track")

                #             with open(path+"/opensfm/config.yaml", "w") as f:
                #                 yaml.dump(doc, f)

                #             #We need the original tracks file for the visualsfm export, since
                #             #there may still be point matches between the tracks
                #             shutil.copy(s+"/tracks.csv", path+"/opensfm/tracks.csv")

                #             #Create our new reconstruction file with only the relevant track
                #             with open(path+"/opensfm/reconstruction.json", "w") as o:
                #                 json.dump([v], o)

                #             #Create image lists
                #             with open(path+"/opensfm/image_list.txt", "w") as o:
                #                 o.writelines(list(map(lambda x: "../images/"+x+'\n', v["shots"].keys())))
                #             with open(path+"/img_list.txt", "w") as o:
                #                 o.writelines(list(map(lambda x: x+'\n', v["shots"].keys())))

                #             i+=1
                #     os.rename(octx.path("../submodels"), octx.path("../unaligned_submodels"))
                #     os.rename(octx.path("../aligned_submodels"), octx.path("../submodels"))
                #     octx.touch(resplit_done_file)

                mds = metadataset.MetaDataSet(tree.opensfm)
                submodel_paths = [
                    os.path.abspath(p) for p in mds.get_submodel_paths()
                ]

                # Align
                octx.align_reconstructions(self.rerun())

                self.update_progress(55)

                # Aligned reconstruction is in reconstruction.aligned.json
                # We need to rename it to reconstruction.json
                remove_paths = []
                for sp in submodel_paths:
                    sp_octx = OSFMContext(sp)

                    aligned_recon = sp_octx.path('reconstruction.aligned.json')
                    unaligned_recon = sp_octx.path(
                        'reconstruction.unaligned.json')
                    main_recon = sp_octx.path('reconstruction.json')

                    if io.file_exists(main_recon) and io.file_exists(
                            unaligned_recon) and not self.rerun():
                        log.ODM_INFO("Submodel %s has already been aligned." %
                                     sp_octx.name())
                        continue

                    if not io.file_exists(aligned_recon):
                        log.ODM_WARNING(
                            "Submodel %s does not have an aligned reconstruction (%s). "
                            "This could mean that the submodel could not be reconstructed "
                            " (are there enough features to reconstruct it?). Skipping."
                            % (sp_octx.name(), aligned_recon))
                        remove_paths.append(sp)
                        continue

                    if io.file_exists(main_recon):
                        shutil.move(main_recon, unaligned_recon)

                    shutil.move(aligned_recon, main_recon)
                    log.ODM_INFO("%s is now %s" % (aligned_recon, main_recon))

                # Remove invalid submodels
                submodel_paths = [
                    p for p in submodel_paths if p not in remove_paths
                ]

                # Run ODM toolchain for each submodel
                if local_workflow:
                    for sp in submodel_paths:
                        sp_octx = OSFMContext(sp)

                        log.ODM_INFO("========================")
                        log.ODM_INFO("Processing %s" % sp_octx.name())
                        log.ODM_INFO("========================")

                        argv = get_submodel_argv(args, tree.submodels_path,
                                                 sp_octx.name())

                        # Re-run the ODM toolchain on the submodel
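                        # double_quote wraps each token so paths with spaces survive the shell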
                        system.run(" ".join(map(double_quote, map(str, argv))),
                                   env_vars=os.environ.copy())
                else:
                    lre.set_projects([
                        os.path.abspath(os.path.join(p, ".."))
                        for p in submodel_paths
                    ])
                    lre.run_toolchain()

                # Restore max_concurrency value
                args.max_concurrency = orig_max_concurrency

                octx.touch(split_done_file)
            else:
                log.ODM_WARNING('Found a split done file in: %s' %
                                split_done_file)
        else:
            log.ODM_INFO("Normal dataset, will process all at once.")
            self.progress = 0.0
Ejemplo n.º 32
0
    def process(self, inputs, outputs):
        # Benchmarking
        start_time = system.now_raw()

        log.ODM_INFO('Running MVE Cell')

        # get inputs
        tree = inputs.tree
        args = inputs.args
        reconstruction = inputs.reconstruction
        photos = reconstruction.photos

        if not photos:
            log.ODM_ERROR('Not enough photos in photos array to start MVE')
            return ecto.QUIT

        # check if we rerun cell or not
        rerun_cell = (args.rerun is not None and
                      args.rerun == 'mve') or \
                     (args.rerun_all) or \
                     (args.rerun_from is not None and
                      'mve' in args.rerun_from)

        # check if reconstruction was done before
        if not io.file_exists(tree.mve_model) or rerun_cell:
            # cleanup if a rerun
            if io.dir_exists(tree.mve_path) and rerun_cell:
                shutil.rmtree(tree.mve_path)

            # make bundle directory
            if not io.file_exists(tree.mve_bundle):
                system.mkdir_p(tree.mve_path)
                system.mkdir_p(io.join_paths(tree.mve_path, 'bundle'))
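                # Seed the MVE scene with OpenSfM's image list and bundle file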
                io.copy(tree.opensfm_image_list, tree.mve_image_list)
                io.copy(tree.opensfm_bundle, tree.mve_bundle)

            # mve makescene wants the output directory
            # to not exist before executing it (otherwise it
            # will prompt the user for confirmation)
            if io.dir_exists(tree.mve):
                shutil.rmtree(tree.mve)

            # run mve makescene
            if not io.dir_exists(tree.mve_views):
                system.run('%s %s %s' % (context.makescene_path, tree.mve_path, tree.mve), env_vars={'OMP_NUM_THREADS': args.max_concurrency})

            # Compute mve output scale based on depthmap_resolution
            max_width = 0
            max_height = 0
            for photo in photos:
                max_width = max(photo.width, max_width)
                max_height = max(photo.height, max_height)

            max_pixels = args.depthmap_resolution * args.depthmap_resolution
            if max_width * max_height <= max_pixels:
                mve_output_scale = 0
            else:
                ratio = float(max_width * max_height) / float(max_pixels)
                mve_output_scale = int(math.ceil(math.log(ratio) / math.log(4.0)))
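            # Worked example: a 4000x3000 image with depthmap_resolution=640 gives
            # ratio = 12000000 / 409600 ~= 29.3, so mve_output_scale = ceil(log4(29.3)) = 3;
            # each scale level halves both image dimensions (a factor of 4 in pixel count).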

            dmrecon_config = [
                "-s%s" % mve_output_scale,  # depthmap scale level computed above
                "--progress=silent",
                "--local-neighbors=2",      # number of neighboring views used per depthmap
                "--force",                  # overwrite any existing depthmaps
            ]

            # Run MVE's dmrecon
            log.ODM_INFO('                                                                               ')
            log.ODM_INFO('                                    ,*/**                                      ')
            log.ODM_INFO('                                  ,*@%*/@%*                                    ')
            log.ODM_INFO('                                ,/@%******@&*.                                 ')
            log.ODM_INFO('                              ,*@&*********/@&*                                ')
            log.ODM_INFO('                            ,*@&**************@&*                              ')
            log.ODM_INFO('                          ,/@&******************@&*.                           ')
            log.ODM_INFO('                        ,*@&*********************/@&*                          ')
            log.ODM_INFO('                      ,*@&**************************@&*.                       ')
            log.ODM_INFO('                    ,/@&******************************&&*,                     ')
            log.ODM_INFO('                  ,*&&**********************************@&*.                   ')
            log.ODM_INFO('                ,*@&**************************************@&*.                 ')
            log.ODM_INFO('              ,*@&***************#@@@@@@@@@%****************&&*,               ')
            log.ODM_INFO('            .*&&***************&@@@@@@@@@@@@@@****************@@*.             ')
            log.ODM_INFO('          .*@&***************&@@@@@@@@@@@@@@@@@%****(@@%********@@*.           ')
            log.ODM_INFO('        .*@@***************%@@@@@@@@@@@@@@@@@@@@@#****&@@@@%******&@*,         ')
            log.ODM_INFO('      .*&@****************@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@/*****@@*.       ')
            log.ODM_INFO('    .*@@****************@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@%*************@@*.     ')
            log.ODM_INFO('  .*@@****/***********@@@@@&**(@@@@@@@@@@@@@@@@@@@@@@@#*****************%@*,   ')
            log.ODM_INFO(' */@*******@*******#@@@@%*******/@@@@@@@@@@@@@@@@@@@@********************/@(,  ')
            log.ODM_INFO(' ,*@(********&@@@@@@#**************/@@@@@@@#**(@@&/**********************@&*   ')
            log.ODM_INFO('   *#@/*******************************@@@@@***&@&**********************&@*,    ')
            log.ODM_INFO('     *#@#******************************&@@@***@#*********************&@*,      ')
            log.ODM_INFO('       */@#*****************************@@@************************@@*.        ')
            log.ODM_INFO('         *#@/***************************/@@/*********************%@*,          ')
            log.ODM_INFO('           *#@#**************************#@@%******************%@*,            ')
            log.ODM_INFO('             */@#*************************(@@@@@@@&%/********&@*.              ')
            log.ODM_INFO('               *(@(*********************************/%@@%**%@*,                ')
            log.ODM_INFO('                 *(@%************************************%@**                  ')
            log.ODM_INFO('                   **@%********************************&@*,                    ')
            log.ODM_INFO('                     *(@(****************************%@/*                      ')
            log.ODM_INFO('                       ,(@%************************#@/*                        ')
            log.ODM_INFO('                         ,*@%********************&@/,                          ')
            log.ODM_INFO('                           */@#****************#@/*                            ')
            log.ODM_INFO('                             ,/@&************#@/*                              ')
            log.ODM_INFO('                               ,*@&********%@/,                                ')
            log.ODM_INFO('                                 */@#****(@/*                                  ')
            log.ODM_INFO('                                   ,/@@@@(*                                    ')
            log.ODM_INFO('                                     .**,                                      ')
            log.ODM_INFO('')
            log.ODM_INFO("Running dense reconstruction. This might take a while. Please be patient, the process is not dead or hung.")
            log.ODM_INFO("                              Process is running")
            system.run('%s %s %s' % (context.dmrecon_path, ' '.join(dmrecon_config), tree.mve), env_vars={'OMP_NUM_THREADS': args.max_concurrency})

            scene2pset_config = [
                "-F%s" % mve_output_scale
            ]

            # run scene2pset
            system.run('%s %s "%s" "%s"' % (context.scene2pset_path, ' '.join(scene2pset_config), tree.mve, tree.mve_model), env_vars={'OMP_NUM_THREADS': args.max_concurrency})
        else:
            log.ODM_WARNING('Found a valid MVE reconstruction file in: %s' %
                            tree.mve_model)

        outputs.reconstruction = reconstruction

        if args.time:
            system.benchmark(start_time, tree.benchmarking, 'MVE')

        log.ODM_INFO('Running ODM MVE Cell - Finished')
        return ecto.OK if args.end_with != 'mve' else ecto.QUIT
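
The rerun check above is a recurring pattern in these cells. A condensed sketch
of the same logic, with an illustrative helper name:

    def should_rerun(args, cell_name):
        # Rerun when this cell is named by --rerun, when --rerun-all is set,
        # or when the cell is listed at or after the stage given by --rerun-from.
        return (args.rerun == cell_name
                or args.rerun_all
                or (args.rerun_from is not None and cell_name in args.rerun_from))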
Ejemplo n.º 33
0
    def process(self, args, outputs):
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']

        if outputs['large']:
            if not os.path.exists(tree.submodels_path):
                log.ODM_ERROR(
                    "We reached the merge stage, but %s folder does not exist. Something must have gone wrong at an earlier stage. Check the log and fix possible problem before restarting?"
                    % tree.submodels_path)
                exit(1)

            # Merge point clouds
            if args.merge in ['all', 'pointcloud']:
                if not io.file_exists(
                        tree.odm_georeferencing_model_laz) or self.rerun():
                    all_point_clouds = get_submodel_paths(
                        tree.submodels_path, "odm_georeferencing",
                        "odm_georeferenced_model.laz")

                    try:
                        point_cloud.merge(all_point_clouds,
                                          tree.odm_georeferencing_model_laz,
                                          rerun=self.rerun())
                        point_cloud.post_point_cloud_steps(
                            args, tree, self.rerun())
                    except Exception as e:
                        log.ODM_WARNING(
                            "Could not merge point cloud: %s (skipping)" %
                            str(e))
                else:
                    log.ODM_WARNING("Found merged point cloud in %s" %
                                    tree.odm_georeferencing_model_laz)

            self.update_progress(25)

            # Merge crop bounds
            merged_bounds_file = os.path.join(
                tree.odm_georeferencing, 'odm_georeferenced_model.bounds.gpkg')
            if not io.file_exists(merged_bounds_file) or self.rerun():
                all_bounds = get_submodel_paths(
                    tree.submodels_path, 'odm_georeferencing',
                    'odm_georeferenced_model.bounds.gpkg')
                log.ODM_INFO("Merging all crop bounds: %s" % all_bounds)
                if len(all_bounds) > 0:
                    # Calculate a new crop area
                    # based on the convex hull of all crop areas of all submodels
                    # (without a buffer, otherwise we are double-cropping)
                    Cropper.merge_bounds(all_bounds, merged_bounds_file, 0)
                else:
                    log.ODM_WARNING("No bounds found for any submodel.")

            # Merge orthophotos
            if args.merge in ['all', 'orthophoto']:
                if not io.dir_exists(tree.odm_orthophoto):
                    system.mkdir_p(tree.odm_orthophoto)

                if not io.file_exists(tree.odm_orthophoto_tif) or self.rerun():
                    all_orthos_and_ortho_cuts = get_all_submodel_paths(
                        tree.submodels_path,
                        os.path.join("odm_orthophoto",
                                     "odm_orthophoto_feathered.tif"),
                        os.path.join("odm_orthophoto",
                                     "odm_orthophoto_cut.tif"),
                    )

                    if len(all_orthos_and_ortho_cuts) > 1:
                        log.ODM_INFO(
                            "Found %s submodels with valid orthophotos and cutlines"
                            % len(all_orthos_and_ortho_cuts))

                        # TODO: histogram matching via rasterio
                        # currently parts have different color tones

                        if io.file_exists(tree.odm_orthophoto_tif):
                            os.remove(tree.odm_orthophoto_tif)

                        orthophoto_vars = orthophoto.get_orthophoto_vars(args)
                        orthophoto.merge(all_orthos_and_ortho_cuts,
                                         tree.odm_orthophoto_tif,
                                         orthophoto_vars)
                        orthophoto.post_orthophoto_steps(
                            args, merged_bounds_file, tree.odm_orthophoto_tif,
                            tree.orthophoto_tiles)
                    elif len(all_orthos_and_ortho_cuts) == 1:
                        # Simply copy
                        log.ODM_WARNING(
                            "A single orthophoto/cutline pair was found between all submodels."
                        )
                        shutil.copyfile(all_orthos_and_ortho_cuts[0][0],
                                        tree.odm_orthophoto_tif)
                    else:
                        log.ODM_WARNING(
                            "No orthophoto/cutline pairs were found in any of the submodels. No orthophoto will be generated."
                        )
                else:
                    log.ODM_WARNING("Found merged orthophoto in %s" %
                                    tree.odm_orthophoto_tif)

            self.update_progress(75)

            # Merge DEMs
            def merge_dems(dem_filename, human_name):
                if not io.dir_exists(tree.path('odm_dem')):
                    system.mkdir_p(tree.path('odm_dem'))

                dem_file = tree.path("odm_dem", dem_filename)
                if not io.file_exists(dem_file) or self.rerun():
                    all_dems = get_submodel_paths(tree.submodels_path,
                                                  "odm_dem", dem_filename)
                    log.ODM_INFO("Merging %ss" % human_name)

                    # Merge
                    dem_vars = utils.get_dem_vars(args)
                    eu_map_source = None  # Default

                    # Use DSM's euclidean map for DTMs
                    # (requires the DSM to be computed)
                    if human_name == "DTM":
                        eu_map_source = "dsm"

                    euclidean_merge_dems(all_dems,
                                         dem_file,
                                         dem_vars,
                                         euclidean_map_source=eu_map_source)

                    if io.file_exists(dem_file):
                        # Crop
                        if args.crop > 0:
                            Cropper.crop(
                                merged_bounds_file,
                                dem_file,
                                dem_vars,
                                keep_original=not args.optimize_disk_space)
                        log.ODM_INFO("Created %s" % dem_file)

                        if args.tiles:
                            generate_dem_tiles(
                                dem_file,
                                tree.path("%s_tiles" % human_name.lower()),
                                args.max_concurrency)

                        if args.cog:
                            convert_to_cogeo(dem_file,
                                             max_workers=args.max_concurrency)
                    else:
                        log.ODM_WARNING("Cannot merge %s, %s was not created" %
                                        (human_name, dem_file))

                else:
                    log.ODM_WARNING("Found merged %s in %s" %
                                    (human_name, dem_filename))

            if args.merge in ['all', 'dem'] and args.dsm:
                merge_dems("dsm.tif", "DSM")

            if args.merge in ['all', 'dem'] and args.dtm:
                merge_dems("dtm.tif", "DTM")

            self.update_progress(95)

            # Merge reports
            if not io.dir_exists(tree.odm_report):
                system.mkdir_p(tree.odm_report)

            geojson_shots = tree.path(tree.odm_report, "shots.geojson")
            if not io.file_exists(geojson_shots) or self.rerun():
                geojson_shots_files = get_submodel_paths(
                    tree.submodels_path, "odm_report", "shots.geojson")
                log.ODM_INFO("Merging %s shots.geojson files" %
                             len(geojson_shots_files))
                merge_geojson_shots(geojson_shots_files, geojson_shots)
            else:
                log.ODM_WARNING("Found merged shots.geojson in %s" %
                                tree.odm_report)

            # Stop the pipeline short! We're done.
            self.next_stage = None
        else:
            log.ODM_INFO("Normal dataset, nothing to merge.")
            self.progress = 0.0
Ejemplo n.º 34
0
from opendm import config
from opendm import io
from opendm import log
from opendm import system

import ecto
import os
import shutil

from scripts.odm_app import ODMApp

if __name__ == '__main__':

    args = config.config()

    log.ODM_INFO('Initializing OpenDroneMap app - %s' % system.now())

    # Create the project directory if it doesn't exist
    args.project_path = io.join_paths(args.project_path, args.name)
    if not io.dir_exists(args.project_path):
        log.ODM_WARNING('Directory %s does not exist. Creating it now.' % args.name)
        system.mkdir_p(os.path.abspath(args.project_path))

    # If the user asks to rerun everything, delete all of the existing progress directories.
    # TODO: Move this somewhere it's not hard-coded
    if args.rerun_all:
        for stage_dir in ('images_resize', 'odm_georeferencing', 'odm_meshing',
                          'odm_orthophoto', 'odm_texturing', 'opensfm', 'pmvs'):
            full_path = io.join_paths(args.project_path, stage_dir)
            if io.dir_exists(full_path):
                shutil.rmtree(full_path)
Ejemplo n.º 35
0
def compute_cutline(orthophoto_file,
                    crop_area_file,
                    destination,
                    max_concurrency=1,
                    tmpdir=None,
                    scale=1):
    if io.file_exists(orthophoto_file) and io.file_exists(crop_area_file):
        from opendm.grass_engine import grass
        log.ODM_INFO("Computing cutline")

        if tmpdir and not io.dir_exists(tmpdir):
            system.mkdir_p(tmpdir)

        scale = max(0.0001, min(1, scale))
        scaled_orthophoto = None

        if scale < 1:
            log.ODM_INFO("Scaling orthophoto to %s%% to compute cutline" %
                         (scale * 100))

            scaled_orthophoto = os.path.join(
                tmpdir,
                os.path.basename(
                    io.related_file_path(orthophoto_file, postfix=".scaled")))
            # Scale orthophoto before computing cutline
            system.run("gdal_translate -outsize {}% 0 "
                       "-co NUM_THREADS={} "
                       "--config GDAL_CACHEMAX {}% "
                       "{} {}".format(scale * 100, max_concurrency,
                                      concurrency.get_max_memory(),
                                      orthophoto_file, scaled_orthophoto))
            orthophoto_file = scaled_orthophoto

        try:
            ortho_width, ortho_height = get_image_size.get_image_size(
                orthophoto_file, fallback_on_error=False)
            log.ODM_INFO("Orthophoto dimensions are %sx%s" %
                         (ortho_width, ortho_height))
            number_lines = int(
                max(8, math.ceil(min(ortho_width, ortho_height) / 256.0)))
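            # Heuristic: roughly one line per 256 px of the smaller dimension, never fewer than 8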
        except Exception:
            log.ODM_INFO(
                "Cannot compute orthophoto dimensions, setting an arbitrary number of lines."
            )
            number_lines = 32

        log.ODM_INFO("Number of lines: %s" % number_lines)

        gctx = grass.create_context({'auto_cleanup': False, 'tmpdir': tmpdir})
        gctx.add_param('orthophoto_file', orthophoto_file)
        gctx.add_param('crop_area_file', crop_area_file)
        gctx.add_param('number_lines', number_lines)
        gctx.add_param('max_concurrency', max_concurrency)
        gctx.add_param('memory', int(concurrency.get_max_memory_mb(300)))
        gctx.set_location(orthophoto_file)

        cutline_file = gctx.execute(
            os.path.join("opendm", "grass", "compute_cutline.grass"))
        if cutline_file != 'error':
            if io.file_exists(cutline_file):
                shutil.move(cutline_file, destination)
                log.ODM_INFO("Generated cutline file: %s --> %s" %
                             (cutline_file, destination))
                gctx.cleanup()
                return destination
            else:
                log.ODM_WARNING(
                    "Unexpected script result: %s. No cutline file has been generated."
                    % cutline_file)
        else:
            log.ODM_WARNING(
                "Could not generate orthophoto cutline. An error occured when running GRASS. No orthophoto will be generated."
            )
    else:
        log.ODM_WARNING(
            "We've been asked to compute cutline, but either %s or %s is missing. Skipping..."
            % (orthophoto_file, crop_area_file))
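
A hypothetical invocation, with path arguments chosen to match file names used
elsewhere in these examples (the exact paths are placeholders):

    cutline_file = compute_cutline(
        "odm_orthophoto/odm_orthophoto.tif",                       # orthophoto to trace
        "odm_georeferencing/odm_georeferenced_model.bounds.gpkg",  # crop bounds
        "odm_orthophoto/cutline.gpkg",                             # destination
        max_concurrency=4,
        tmpdir="odm_orthophoto/grass",
        scale=0.25,  # compute the cutline on a quarter-size copy of the orthophoto
    )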