Code Example #1
File: resize.py  Project: tspeedy/OpenDroneMap
def no_resize(src_dir, target_dir, rerun_cell, photo):
    # define image paths
    path_file = photo.path_file
    new_path_file = io.join_paths(target_dir, photo.filename)
    # set raw image path in case we want to rerun cell
    if io.file_exists(new_path_file) and rerun_cell:
        path_file = io.join_paths(src_dir, photo.filename)

    if not io.file_exists(new_path_file) or rerun_cell:
        img = cv2.imread(path_file)
        io.copy(path_file, new_path_file)
        photo.path_file = new_path_file
        photo.width = img.shape[1]   # cv2 images are (height, width, channels)
        photo.height = img.shape[0]
        photo.update_focal()

        # log message
        log.ODM_DEBUG('Copied %s | dimensions: %s' %
                      (photo.filename, img.shape))
    else:
        # log message
        log.ODM_WARNING('Already copied %s | dimensions: %s x %s' %
                        (photo.filename, photo.width, photo.height))

    return photo
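For orientation, here is a minimal usage sketch (not from the project): it would run inside the same module as the function above, relying on its io/cv2/log imports, and uses a hypothetical stand-in photo object that only provides the attributes no_resize actually touches. Paths are illustrative.

class _StubPhoto:
    """Hypothetical stand-in for the project's photo type (illustration only)."""
    def __init__(self, path_file, filename):
        self.path_file = path_file
        self.filename = filename
        self.width = None
        self.height = None

    def update_focal(self):
        pass  # the real photo type would recompute focal length from the new dimensions


photo = _StubPhoto('/data/images/IMG_0001.JPG', 'IMG_0001.JPG')
photo = no_resize('/data/images', '/data/images_resize', rerun_cell=False, photo=photo)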
Code Example #2
File: resize.py  Project: pierotofy/OpenDroneMap
def no_resize(src_dir, target_dir, rerun_cell, photo):
    # define image paths
    path_file = photo.path_file
    new_path_file = io.join_paths(target_dir, photo.filename)
    # set raw image path in case we want to rerun cell
    if io.file_exists(new_path_file) and rerun_cell:
        path_file = io.join_paths(src_dir, photo.filename)

    if not io.file_exists(new_path_file) or rerun_cell:
        img = cv2.imread(path_file)
        io.copy(path_file, new_path_file)
        photo.path_file = new_path_file
        photo.width = img.shape[1]
        photo.height = img.shape[0]
        photo.update_focal()
        
        # log message
        log.ODM_DEBUG('Copied %s | dimensions: %s' %
                      (photo.filename, img.shape))
    else:
        # log message
        log.ODM_WARNING('Already copied %s | dimensions: %s x %s' %
                        (photo.filename, photo.width, photo.height))
                        
    return photo
Code Example #3
    def process(self, inputs, outputs):

        # Benchmarking
        start_time = system.now_raw()

        log.ODM_INFO('Running ODM OpenSfM Cell')

        # get inputs
        tree = inputs.tree
        args = inputs.args
        reconstruction = inputs.reconstruction
        photos = reconstruction.photos

        if not photos:
            log.ODM_ERROR('Not enough photos in photos array to start OpenSfM')
            return ecto.QUIT

        # create working directories
        system.mkdir_p(tree.opensfm)
        system.mkdir_p(tree.pmvs)

        # check if we rerun cell or not
        rerun_cell = (args.rerun is not None and
                      args.rerun == 'opensfm') or \
                     (args.rerun_all) or \
                     (args.rerun_from is not None and
                      'opensfm' in args.rerun_from)

        if not args.use_pmvs:
            output_file = tree.opensfm_model
            if args.fast_orthophoto:
                output_file = io.join_paths(tree.opensfm, 'reconstruction.ply')
        else:
            output_file = tree.opensfm_reconstruction

        # check if reconstruction was done before
        if not io.file_exists(output_file) or rerun_cell:
            # create file list
            list_path = io.join_paths(tree.opensfm, 'image_list.txt')
            has_alt = True
            with open(list_path, 'w') as fout:
                for photo in photos:
                    if not photo.altitude:
                        has_alt = False
                    fout.write('%s\n' % photo.path_file)

            # create config file for OpenSfM
            config = [
                "use_exif_size: %s" %
                ('no' if not self.params.use_exif_size else 'yes'),
                "feature_process_size: %s" % self.params.feature_process_size,
                "feature_min_frames: %s" % self.params.feature_min_frames,
                "processes: %s" % self.params.processes,
                "matching_gps_neighbors: %s" %
                self.params.matching_gps_neighbors,
                "depthmap_method: %s" % args.opensfm_depthmap_method,
                "depthmap_resolution: %s" % args.opensfm_depthmap_resolution,
                "depthmap_min_patch_sd: %s" %
                args.opensfm_depthmap_min_patch_sd,
                "depthmap_min_consistent_views: %s" %
                args.opensfm_depthmap_min_consistent_views,
                "optimize_camera_parameters: %s" %
                ('no' if self.params.fixed_camera_params else 'yes')
            ]

            if has_alt:
                log.ODM_DEBUG(
                    "Altitude data detected, enabling it for GPS alignment")
                config.append("use_altitude_tag: True")
                config.append("align_method: naive")

            if args.use_hybrid_bundle_adjustment:
                log.ODM_DEBUG("Enabling hybrid bundle adjustment")
                config.append(
                    "bundle_interval: 100"
                )  # Bundle after adding 'bundle_interval' cameras
                config.append(
                    "bundle_new_points_ratio: 1.2"
                )  # Bundle when (new points) / (bundled points) > bundle_new_points_ratio
                config.append(
                    "local_bundle_radius: 1"
                )  # Max image graph distance for images to be included in local bundle adjustment

            if args.matcher_distance > 0:
                config.append("matching_gps_distance: %s" %
                              self.params.matching_gps_distance)

            if tree.odm_georeferencing_gcp:
                config.append("bundle_use_gcp: yes")
                io.copy(tree.odm_georeferencing_gcp, tree.opensfm)

            # write config file
            log.ODM_DEBUG(config)
            config_filename = io.join_paths(tree.opensfm, 'config.yaml')
            with open(config_filename, 'w') as fout:
                fout.write("\n".join(config))

            # run OpenSfM reconstruction
            matched_done_file = io.join_paths(tree.opensfm,
                                              'matching_done.txt')
            if not io.file_exists(matched_done_file) or rerun_cell:
                system.run('PYTHONPATH=%s %s/bin/opensfm extract_metadata %s' %
                           (context.pyopencv_path, context.opensfm_path,
                            tree.opensfm))
                system.run('PYTHONPATH=%s %s/bin/opensfm detect_features %s' %
                           (context.pyopencv_path, context.opensfm_path,
                            tree.opensfm))
                system.run('PYTHONPATH=%s %s/bin/opensfm match_features %s' %
                           (context.pyopencv_path, context.opensfm_path,
                            tree.opensfm))
                with open(matched_done_file, 'w') as fout:
                    fout.write("Matching done!\n")
            else:
                log.ODM_WARNING(
                    'Found a feature matching done progress file in: %s' %
                    matched_done_file)

            if not io.file_exists(tree.opensfm_tracks) or rerun_cell:
                system.run('PYTHONPATH=%s %s/bin/opensfm create_tracks %s' %
                           (context.pyopencv_path, context.opensfm_path,
                            tree.opensfm))
            else:
                log.ODM_WARNING('Found a valid OpenSfM tracks file in: %s' %
                                tree.opensfm_tracks)

            if not io.file_exists(tree.opensfm_reconstruction) or rerun_cell:
                system.run('PYTHONPATH=%s %s/bin/opensfm reconstruct %s' %
                           (context.pyopencv_path, context.opensfm_path,
                            tree.opensfm))
            else:
                log.ODM_WARNING(
                    'Found a valid OpenSfM reconstruction file in: %s' %
                    tree.opensfm_reconstruction)

            if not io.file_exists(
                    tree.opensfm_reconstruction_meshed) or rerun_cell:
                system.run('PYTHONPATH=%s %s/bin/opensfm mesh %s' %
                           (context.pyopencv_path, context.opensfm_path,
                            tree.opensfm))
            else:
                log.ODM_WARNING(
                    'Found a valid OpenSfM meshed reconstruction file in: %s' %
                    tree.opensfm_reconstruction_meshed)

            if not args.use_pmvs:
                if not io.file_exists(
                        tree.opensfm_reconstruction_nvm) or rerun_cell:
                    system.run(
                        'PYTHONPATH=%s %s/bin/opensfm export_visualsfm %s' %
                        (context.pyopencv_path, context.opensfm_path,
                         tree.opensfm))
                else:
                    log.ODM_WARNING(
                        'Found a valid OpenSfM NVM reconstruction file in: %s'
                        % tree.opensfm_reconstruction_nvm)

                system.run('PYTHONPATH=%s %s/bin/opensfm undistort %s' %
                           (context.pyopencv_path, context.opensfm_path,
                            tree.opensfm))

                # Skip dense reconstruction if necessary and export
                # sparse reconstruction instead
                if args.fast_orthophoto:
                    system.run(
                        'PYTHONPATH=%s %s/bin/opensfm export_ply --no-cameras %s'
                        % (context.pyopencv_path, context.opensfm_path,
                           tree.opensfm))
                else:
                    system.run(
                        'PYTHONPATH=%s %s/bin/opensfm compute_depthmaps %s' %
                        (context.pyopencv_path, context.opensfm_path,
                         tree.opensfm))

        else:
            log.ODM_WARNING(
                'Found a valid OpenSfM reconstruction file in: %s' %
                tree.opensfm_reconstruction)

        # check if reconstruction was exported to bundler before
        if not io.file_exists(tree.opensfm_bundle_list) or rerun_cell:
            # convert back to bundler's format
            system.run(
                'PYTHONPATH=%s %s/bin/export_bundler %s' %
                (context.pyopencv_path, context.opensfm_path, tree.opensfm))
        else:
            log.ODM_WARNING('Found a valid Bundler file in: %s' %
                            tree.opensfm_reconstruction)

        if args.use_pmvs:
            # check if reconstruction was exported to pmvs before
            if not io.file_exists(tree.pmvs_visdat) or rerun_cell:
                # run PMVS converter
                system.run('PYTHONPATH=%s %s/bin/export_pmvs %s --output %s' %
                           (context.pyopencv_path, context.opensfm_path,
                            tree.opensfm, tree.pmvs))
            else:
                log.ODM_WARNING('Found a valid CMVS file in: %s' %
                                tree.pmvs_visdat)

        if reconstruction.georef:
            system.run(
                'PYTHONPATH=%s %s/bin/opensfm export_geocoords %s --transformation --proj \'%s\''
                % (context.pyopencv_path, context.opensfm_path, tree.opensfm,
                   reconstruction.georef.projection.srs))

        outputs.reconstruction = reconstruction

        if args.time:
            system.benchmark(start_time, tree.benchmarking, 'OpenSfM')

        log.ODM_INFO('Running ODM OpenSfM Cell - Finished')
        return ecto.OK if args.end_with != 'opensfm' else ecto.QUIT
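Every OpenSfM step in the cell above is launched with the same shell pattern; the hypothetical helper below (not part of the project) just makes that pattern explicit and assumes the same system and context modules used above.

def run_opensfm(command, project_path):
    # Same pattern as the cell above: expose the bundled OpenCV bindings via
    # PYTHONPATH and invoke the opensfm CLI from the bundled OpenSfM checkout.
    system.run('PYTHONPATH=%s %s/bin/opensfm %s %s' %
               (context.pyopencv_path, context.opensfm_path, command, project_path))

# e.g. run_opensfm('detect_features', tree.opensfm)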
Code Example #4
    def setup(self,
              args,
              images_path,
              reconstruction,
              append_config=[],
              rerun=False):
        """
        Setup a OpenSfM project
        """
        if rerun and io.dir_exists(self.opensfm_project_path):
            shutil.rmtree(self.opensfm_project_path)

        if not io.dir_exists(self.opensfm_project_path):
            system.mkdir_p(self.opensfm_project_path)

        list_path = os.path.join(self.opensfm_project_path, 'image_list.txt')
        if not io.file_exists(list_path) or rerun:

            if reconstruction.multi_camera:
                photos = get_photos_by_band(reconstruction.multi_camera,
                                            args.primary_band)
                if len(photos) < 1:
                    raise Exception("Not enough images in selected band %s" %
                                    args.primary_band.lower())
                log.ODM_INFO("Reconstruction will use %s images from %s band" %
                             (len(photos), args.primary_band.lower()))
            else:
                photos = reconstruction.photos

            # create file list
            has_alt = True
            has_gps = False
            with open(list_path, 'w') as fout:
                for photo in photos:
                    if not photo.altitude:
                        has_alt = False
                    if photo.latitude is not None and photo.longitude is not None:
                        has_gps = True

                    fout.write('%s\n' %
                               os.path.join(images_path, photo.filename))

            # check for image_groups.txt (split-merge)
            image_groups_file = os.path.join(args.project_path,
                                             "image_groups.txt")
            if 'split_image_groups_is_set' in args:
                image_groups_file = os.path.abspath(args.split_image_groups)

            if io.file_exists(image_groups_file):
                dst_groups_file = os.path.join(self.opensfm_project_path,
                                               "image_groups.txt")
                io.copy(image_groups_file, dst_groups_file)
                log.ODM_INFO("Copied %s to %s" %
                             (image_groups_file, dst_groups_file))

            # check for cameras
            if args.cameras:
                try:
                    camera_overrides = camera.get_opensfm_camera_models(
                        args.cameras)
                    with open(
                            os.path.join(self.opensfm_project_path,
                                         "camera_models_overrides.json"),
                            'w') as f:
                        f.write(json.dumps(camera_overrides))
                    log.ODM_INFO(
                        "Wrote camera_models_overrides.json to OpenSfM directory"
                    )
                except Exception as e:
                    log.ODM_WARNING(
                        "Cannot set camera_models_overrides.json: %s" % str(e))

            use_bow = args.matcher_type == "bow"
            feature_type = "SIFT"

            # GPSDOP override if we have GPS accuracy information (such as RTK)
            if 'gps_accuracy_is_set' in args:
                log.ODM_INFO("Forcing GPS DOP to %s for all images" %
                             args.gps_accuracy)

            log.ODM_INFO("Writing exif overrides")

            exif_overrides = {}
            for p in photos:
                if 'gps_accuracy_is_set' in args:
                    dop = args.gps_accuracy
                elif p.get_gps_dop() is not None:
                    dop = p.get_gps_dop()
                else:
                    dop = args.gps_accuracy  # default value

                if p.latitude is not None and p.longitude is not None:
                    exif_overrides[p.filename] = {
                        'gps': {
                            'latitude': p.latitude,
                            'longitude': p.longitude,
                            'altitude':
                            p.altitude if p.altitude is not None else 0,
                            'dop': dop,
                        }
                    }

            with open(
                    os.path.join(self.opensfm_project_path,
                                 "exif_overrides.json"), 'w') as f:
                f.write(json.dumps(exif_overrides))

            # Check image masks
            masks = []
            for p in photos:
                if p.mask is not None:
                    masks.append(
                        (p.filename, os.path.join(images_path, p.mask)))

            if masks:
                log.ODM_INFO("Found %s image masks" % len(masks))
                with open(
                        os.path.join(self.opensfm_project_path,
                                     "mask_list.txt"), 'w') as f:
                    for fname, mask in masks:
                        f.write("{} {}\n".format(fname, mask))

            # Compute feature_process_size
            feature_process_size = 2048  # default

            if 'resize_to_is_set' in args:
                # Legacy
                log.ODM_WARNING(
                    "Legacy option --resize-to (this might be removed in a future version). Use --feature-quality instead."
                )
                feature_process_size = int(args.resize_to)
            else:
                feature_quality_scale = {
                    'ultra': 1,
                    'high': 0.5,
                    'medium': 0.25,
                    'low': 0.125,
                    'lowest': 0.0675,
                }

                max_dim = find_largest_photo_dim(photos)

                if max_dim > 0:
                    log.ODM_INFO("Maximum photo dimensions: %spx" %
                                 str(max_dim))
                    feature_process_size = int(
                        max_dim * feature_quality_scale[args.feature_quality])
                else:
                    log.ODM_WARNING(
                        "Cannot compute max image dimensions, going with defaults"
                    )

            depthmap_resolution = get_depthmap_resolution(args, photos)

            # create config file for OpenSfM
            config = [
                "use_exif_size: no",
                "flann_algorithm: KDTREE",  # more stable, faster than KMEANS
                "feature_process_size: %s" % feature_process_size,
                "feature_min_frames: %s" % args.min_num_features,
                "processes: %s" % args.max_concurrency,
                "matching_gps_neighbors: %s" % args.matcher_neighbors,
                "matching_gps_distance: %s" % args.matcher_distance,
                "optimize_camera_parameters: %s" %
                ('no'
                 if args.use_fixed_camera_params or args.cameras else 'yes'),
                "undistorted_image_format: tif",
                "bundle_outlier_filtering_type: AUTO",
                "align_orientation_prior: vertical",
                "triangulation_type: ROBUST",
                "retriangulation_ratio: 2",
            ]

            if args.camera_lens != 'auto':
                config.append("camera_projection_type: %s" %
                              args.camera_lens.upper())

            if not has_gps:
                log.ODM_INFO("No GPS information, using BOW matching")
                use_bow = True

            feature_type = args.feature_type.upper()

            if use_bow:
                config.append("matcher_type: WORDS")

                # Cannot use SIFT with BOW
                if feature_type == "SIFT":
                    log.ODM_WARNING(
                        "Using BOW matching, will use HAHOG feature type, not SIFT"
                    )
                    feature_type = "HAHOG"

            # GPU acceleration?
            if has_gpus() and feature_type == "SIFT":
                log.ODM_INFO("Using GPU for extracting SIFT features")
                log.ODM_INFO("--min-num-features will be ignored")
                feature_type = "SIFT_GPU"

            config.append("feature_type: %s" % feature_type)

            if has_alt:
                log.ODM_INFO(
                    "Altitude data detected, enabling it for GPS alignment")
                config.append("use_altitude_tag: yes")

            gcp_path = reconstruction.gcp.gcp_path
            if has_alt or gcp_path:
                config.append("align_method: auto")
            else:
                config.append("align_method: orientation_prior")

            if args.use_hybrid_bundle_adjustment:
                log.ODM_INFO("Enabling hybrid bundle adjustment")
                config.append(
                    "bundle_interval: 100"
                )  # Bundle after adding 'bundle_interval' cameras
                config.append(
                    "bundle_new_points_ratio: 1.2"
                )  # Bundle when (new points) / (bundled points) > bundle_new_points_ratio
                config.append(
                    "local_bundle_radius: 1"
                )  # Max image graph distance for images to be included in local bundle adjustment
            else:
                config.append("local_bundle_radius: 0")

            if gcp_path:
                config.append("bundle_use_gcp: yes")
                if not args.force_gps:
                    config.append("bundle_use_gps: no")
                io.copy(gcp_path, self.path("gcp_list.txt"))

            config = config + append_config

            # write config file
            log.ODM_INFO(config)
            config_filename = self.get_config_file_path()
            with open(config_filename, 'w') as fout:
                fout.write("\n".join(config))
        else:
            log.ODM_WARNING("%s already exists, not rerunning OpenSfM setup" %
                            list_path)
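To make the feature_process_size logic above concrete, here is a standalone sketch; the scale table and formula are copied from the code, while the 6000 px input is purely illustrative.

FEATURE_QUALITY_SCALE = {
    'ultra': 1, 'high': 0.5, 'medium': 0.25, 'low': 0.125, 'lowest': 0.0675,
}

def compute_feature_process_size(max_dim, quality, default=2048):
    # Unknown or invalid photo dimensions fall back to the default, as above.
    if max_dim > 0:
        return int(max_dim * FEATURE_QUALITY_SCALE[quality])
    return default

print(compute_feature_process_size(6000, 'medium'))  # -> 1500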
Code Example #5
    def setup(self, args, images_path, photos, gcp_path=None, append_config = [], rerun=False):
        """
        Setup a OpenSfM project
        """
        if rerun and io.dir_exists(self.opensfm_project_path):
            shutil.rmtree(self.opensfm_project_path)

        if not io.dir_exists(self.opensfm_project_path):
            system.mkdir_p(self.opensfm_project_path)

        list_path = io.join_paths(self.opensfm_project_path, 'image_list.txt')
        if not io.file_exists(list_path) or rerun:
            
            # create file list
            has_alt = True
            with open(list_path, 'w') as fout:
                for photo in photos:
                    if not photo.altitude:
                        has_alt = False
                    fout.write('%s\n' % io.join_paths(images_path, photo.filename))

            # create config file for OpenSfM
            config = [
                "use_exif_size: no",
                "feature_process_size: %s" % args.resize_to,
                "feature_min_frames: %s" % args.min_num_features,
                "processes: %s" % args.max_concurrency,
                "matching_gps_neighbors: %s" % args.matcher_neighbors,
                "matching_gps_distance: %s" % args.matcher_distance,
                "depthmap_method: %s" % args.opensfm_depthmap_method,
                "depthmap_resolution: %s" % args.depthmap_resolution,
                "depthmap_min_patch_sd: %s" % args.opensfm_depthmap_min_patch_sd,
                "depthmap_min_consistent_views: %s" % args.opensfm_depthmap_min_consistent_views,
                "optimize_camera_parameters: %s" % ('no' if args.use_fixed_camera_params else 'yes'),
            ]

            if has_alt:
                log.ODM_DEBUG("Altitude data detected, enabling it for GPS alignment")
                config.append("use_altitude_tag: yes")
                config.append("align_method: naive")
            else:
                config.append("align_method: orientation_prior")
                config.append("align_orientation_prior: vertical")

            if args.use_hybrid_bundle_adjustment:
                log.ODM_DEBUG("Enabling hybrid bundle adjustment")
                config.append("bundle_interval: 100")          # Bundle after adding 'bundle_interval' cameras
                config.append("bundle_new_points_ratio: 1.2")  # Bundle when (new points) / (bundled points) > bundle_new_points_ratio
                config.append("local_bundle_radius: 1")        # Max image graph distance for images to be included in local bundle adjustment

            if gcp_path:
                config.append("bundle_use_gcp: yes")
                io.copy(gcp_path, self.path("gcp_list.txt"))
            
            config = config + append_config

            # write config file
            log.ODM_DEBUG(config)
            config_filename = io.join_paths(self.opensfm_project_path, 'config.yaml')
            with open(config_filename, 'w') as fout:
                fout.write("\n".join(config))

            # check for image_groups.txt (split-merge)
            image_groups_file = os.path.join(args.project_path, "image_groups.txt")
            if io.file_exists(image_groups_file):
                log.ODM_DEBUG("Copied image_groups.txt to OpenSfM directory")
                io.copy(image_groups_file, os.path.join(self.opensfm_project_path, "image_groups.txt"))
        else:
            log.ODM_WARNING("%s already exists, not rerunning OpenSfM setup" % list_path)
Code Example #6
    def setup(self, args, images_path, photos, gcp_path=None, append_config = [], rerun=False):
        """
        Setup a OpenSfM project
        """
        if rerun and io.dir_exists(self.opensfm_project_path):
            shutil.rmtree(self.opensfm_project_path)

        if not io.dir_exists(self.opensfm_project_path):
            system.mkdir_p(self.opensfm_project_path)

        list_path = io.join_paths(self.opensfm_project_path, 'image_list.txt')
        if not io.file_exists(list_path) or rerun:
            
            # create file list
            has_alt = True
            with open(list_path, 'w') as fout:
                for photo in photos:
                    if not photo.altitude:
                        has_alt = False
                    fout.write('%s\n' % io.join_paths(images_path, photo.filename))
            
            # check for image_groups.txt (split-merge)
            image_groups_file = os.path.join(args.project_path, "image_groups.txt")
            if io.file_exists(image_groups_file):
                log.ODM_INFO("Copied image_groups.txt to OpenSfM directory")
                io.copy(image_groups_file, os.path.join(self.opensfm_project_path, "image_groups.txt"))
        
            # check for cameras
            if args.cameras:
                try:
                    camera_overrides = camera.get_opensfm_camera_models(args.cameras)
                    with open(os.path.join(self.opensfm_project_path, "camera_models_overrides.json"), 'w') as f:
                        f.write(json.dumps(camera_overrides))
                    log.ODM_INFO("Wrote camera_models_overrides.json to OpenSfM directory")
                except Exception as e:
                    log.ODM_WARNING("Cannot set camera_models_overrides.json: %s" % str(e))

            # create config file for OpenSfM
            config = [
                "use_exif_size: no",
                "feature_process_size: %s" % args.resize_to,
                "feature_min_frames: %s" % args.min_num_features,
                "processes: %s" % args.max_concurrency,
                "matching_gps_neighbors: %s" % args.matcher_neighbors,
                "matching_gps_distance: %s" % args.matcher_distance,
                "depthmap_method: %s" % args.opensfm_depthmap_method,
                "depthmap_resolution: %s" % args.depthmap_resolution,
                "depthmap_min_patch_sd: %s" % args.opensfm_depthmap_min_patch_sd,
                "depthmap_min_consistent_views: %s" % args.opensfm_depthmap_min_consistent_views,
                "optimize_camera_parameters: %s" % ('no' if args.use_fixed_camera_params or args.cameras else 'yes'),
                "undistorted_image_format: png", # mvs-texturing exhibits artifacts with JPG
                "bundle_outlier_filtering_type: AUTO",
            ]

            # TODO: add BOW matching when dataset is not georeferenced (no gps)

            if has_alt:
                log.ODM_INFO("Altitude data detected, enabling it for GPS alignment")
                config.append("use_altitude_tag: yes")

            if has_alt or gcp_path:
                config.append("align_method: naive")
            else:
                config.append("align_method: orientation_prior")
                config.append("align_orientation_prior: vertical")
            
            if args.use_hybrid_bundle_adjustment:
                log.ODM_INFO("Enabling hybrid bundle adjustment")
                config.append("bundle_interval: 100")          # Bundle after adding 'bundle_interval' cameras
                config.append("bundle_new_points_ratio: 1.2")  # Bundle when (new points) / (bundled points) > bundle_new_points_ratio
                config.append("local_bundle_radius: 1")        # Max image graph distance for images to be included in local bundle adjustment

            if gcp_path:
                config.append("bundle_use_gcp: yes")
                config.append("bundle_use_gps: no")
                io.copy(gcp_path, self.path("gcp_list.txt"))
            
            config = config + append_config

            # write config file
            log.ODM_INFO(config)
            config_filename = self.get_config_file_path()
            with open(config_filename, 'w') as fout:
                fout.write("\n".join(config))
        else:
            log.ODM_WARNING("%s already exists, not rerunning OpenSfM setup" % list_path)
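A hedged sketch of how this older setup() signature might be driven from a pipeline stage; OSFMContext and the tree/args/photos objects come from the surrounding ODM pipeline and are assumed here, and the GCP argument is optional.

octx = OSFMContext(tree.opensfm)
octx.setup(args, tree.dataset_raw, photos,
           gcp_path=tree.odm_georeferencing_gcp,  # or None when no GCP file is used
           rerun=False)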
Code Example #7
File: run.py  Project: sheex2018/NodeMICMAC
            'num_cores': args.max_concurrency,
            'ext': image_ext,
            'zoom': args.zoom,
            'mm3d': mm3d
        }
        system.run(
            '{mm3d} Malt Ortho .*.{ext} Ground_UTM EZA=1 ZoomI=64 ZoomF={zoom} NbVI=2 HrOr=1 '
            'RoundResol=0 ResolOrtho=1 DefCor=0.0005 NbProc={num_cores}'.
            format(**kwargs_malt))

        progressbc.send_update(80)

        # build ORTHO
        porto_src = '/code/micmac/include/XML_MicMac/Param-Tawny.xml'
        porto_dst = 'Ortho-MEC-Malt/Param-Tawny.xml'
        io.copy(porto_src, porto_dst)
        system.run('{mm3d} Porto Ortho-MEC-Malt/Param-Tawny.xml'.format(
            **kwargs_malt))

        progressbc.send_update(90)

        # build PLY
        kwargs_nuage = {
            'mm3d':
            mm3d,
            'ply':
            io.join_paths(project_dir,
                          'odm_georeferencing/odm_georeferenced_model.ply'),
            'nuage':
            get_last_etape('MEC-Malt/NuageImProf_STD-MALT_Etape_*.xml')
        }
Code Example #8
File: osfm.py  Project: MobileScientists/ODM
    def setup(self,
              args,
              images_path,
              reconstruction,
              append_config=[],
              rerun=False):
        """
        Setup a OpenSfM project
        """
        if rerun and io.dir_exists(self.opensfm_project_path):
            shutil.rmtree(self.opensfm_project_path)

        if not io.dir_exists(self.opensfm_project_path):
            system.mkdir_p(self.opensfm_project_path)

        list_path = os.path.join(self.opensfm_project_path, 'image_list.txt')
        if not io.file_exists(list_path) or rerun:

            if reconstruction.multi_camera:
                photos = get_photos_by_band(reconstruction.multi_camera,
                                            args.primary_band)
                if len(photos) < 1:
                    raise Exception("Not enough images in selected band %s" %
                                    args.primary_band.lower())
                log.ODM_INFO("Reconstruction will use %s images from %s band" %
                             (len(photos), args.primary_band.lower()))
            else:
                photos = reconstruction.photos

            # create file list
            has_alt = True
            has_gps = False
            with open(list_path, 'w') as fout:
                for photo in photos:
                    if not photo.altitude:
                        has_alt = False
                    if photo.latitude is not None and photo.longitude is not None:
                        has_gps = True

                    fout.write('%s\n' %
                               os.path.join(images_path, photo.filename))

            # check for image_groups.txt (split-merge)
            image_groups_file = os.path.join(args.project_path,
                                             "image_groups.txt")
            if 'split_image_groups_is_set' in args:
                image_groups_file = os.path.abspath(args.split_image_groups)

            if io.file_exists(image_groups_file):
                dst_groups_file = os.path.join(self.opensfm_project_path,
                                               "image_groups.txt")
                io.copy(image_groups_file, dst_groups_file)
                log.ODM_INFO("Copied %s to %s" %
                             (image_groups_file, dst_groups_file))

            # check for cameras
            if args.cameras:
                try:
                    camera_overrides = camera.get_opensfm_camera_models(
                        args.cameras)
                    with open(
                            os.path.join(self.opensfm_project_path,
                                         "camera_models_overrides.json"),
                            'w') as f:
                        f.write(json.dumps(camera_overrides))
                    log.ODM_INFO(
                        "Wrote camera_models_overrides.json to OpenSfM directory"
                    )
                except Exception as e:
                    log.ODM_WARNING(
                        "Cannot set camera_models_overrides.json: %s" % str(e))

            # Check image masks
            masks = []
            for p in photos:
                if p.mask is not None:
                    masks.append(
                        (p.filename, os.path.join(images_path, p.mask)))

            if masks:
                log.ODM_INFO("Found %s image masks" % len(masks))
                with open(
                        os.path.join(self.opensfm_project_path,
                                     "mask_list.txt"), 'w') as f:
                    for fname, mask in masks:
                        f.write("{} {}\n".format(fname, mask))

            # Compute feature_process_size
            feature_process_size = 2048  # default

            if ('resize_to_is_set' in args) and args.resize_to > 0:
                # Legacy
                log.ODM_WARNING(
                    "Legacy option --resize-to (this might be removed in a future version). Use --feature-quality instead."
                )
                feature_process_size = int(args.resize_to)
            else:
                feature_quality_scale = {
                    'ultra': 1,
                    'high': 0.5,
                    'medium': 0.25,
                    'low': 0.125,
                    'lowest': 0.0675,
                }

                max_dim = find_largest_photo_dim(photos)

                if max_dim > 0:
                    log.ODM_INFO("Maximum photo dimensions: %spx" %
                                 str(max_dim))
                    feature_process_size = int(
                        max_dim * feature_quality_scale[args.feature_quality])
                    log.ODM_INFO(
                        "Photo dimensions for feature extraction: %ipx" %
                        feature_process_size)
                else:
                    log.ODM_WARNING(
                        "Cannot compute max image dimensions, going with defaults"
                    )

            depthmap_resolution = get_depthmap_resolution(args, photos)

            # create config file for OpenSfM
            config = [
                "use_exif_size: no",
                "flann_algorithm: KDTREE",  # more stable, faster than KMEANS
                "feature_process_size: %s" % feature_process_size,
                "feature_min_frames: %s" % args.min_num_features,
                "processes: %s" % args.max_concurrency,
                "matching_gps_neighbors: %s" % args.matcher_neighbors,
                "matching_gps_distance: 0",
                "matching_graph_rounds: 50",
                "optimize_camera_parameters: %s" %
                ('no'
                 if args.use_fixed_camera_params or args.cameras else 'yes'),
                "reconstruction_algorithm: %s" % (args.sfm_algorithm),
                "undistorted_image_format: tif",
                "bundle_outlier_filtering_type: AUTO",
                "sift_peak_threshold: 0.066",
                "align_orientation_prior: vertical",
                "triangulation_type: ROBUST",
                "retriangulation_ratio: 2",
            ]

            if args.camera_lens != 'auto':
                config.append("camera_projection_type: %s" %
                              args.camera_lens.upper())

            matcher_type = args.matcher_type
            feature_type = args.feature_type.upper()

            osfm_matchers = {
                "bow": "WORDS",
                "flann": "FLANN",
                "bruteforce": "BRUTEFORCE"
            }

            if not has_gps and not 'matcher_type_is_set' in args:
                log.ODM_INFO(
                    "No GPS information, using BOW matching by default (you can override this by setting --matcher-type explicitly)"
                )
                matcher_type = "bow"

            if matcher_type == "bow":
                # Cannot use anything other than HAHOG with BOW
                if feature_type != "HAHOG":
                    log.ODM_WARNING(
                        "Using BOW matching, will use HAHOG feature type, not SIFT"
                    )
                    feature_type = "HAHOG"

            config.append("matcher_type: %s" % osfm_matchers[matcher_type])

            # GPU acceleration?
            if has_gpu():
                max_photo = find_largest_photo(photos)
                w, h = max_photo.width, max_photo.height
                if w > h:
                    h = int((h / w) * feature_process_size)
                    w = int(feature_process_size)
                else:
                    w = int((w / h) * feature_process_size)
                    h = int(feature_process_size)

                if has_popsift_and_can_handle_texsize(
                        w, h) and feature_type == "SIFT":
                    log.ODM_INFO("Using GPU for extracting SIFT features")
                    feature_type = "SIFT_GPU"

            config.append("feature_type: %s" % feature_type)

            if has_alt:
                log.ODM_INFO(
                    "Altitude data detected, enabling it for GPS alignment")
                config.append("use_altitude_tag: yes")

            gcp_path = reconstruction.gcp.gcp_path
            if has_alt or gcp_path:
                config.append("align_method: auto")
            else:
                config.append("align_method: orientation_prior")

            if args.use_hybrid_bundle_adjustment:
                log.ODM_INFO("Enabling hybrid bundle adjustment")
                config.append(
                    "bundle_interval: 100"
                )  # Bundle after adding 'bundle_interval' cameras
                config.append(
                    "bundle_new_points_ratio: 1.2"
                )  # Bundle when (new points) / (bundled points) > bundle_new_points_ratio
                config.append(
                    "local_bundle_radius: 1"
                )  # Max image graph distance for images to be included in local bundle adjustment
            else:
                config.append("local_bundle_radius: 0")

            if gcp_path:
                config.append("bundle_use_gcp: yes")
                if not args.force_gps:
                    config.append("bundle_use_gps: no")
                io.copy(gcp_path, self.path("gcp_list.txt"))

            config = config + append_config

            # write config file
            log.ODM_INFO(config)
            config_filename = self.get_config_file_path()
            with open(config_filename, 'w') as fout:
                fout.write("\n".join(config))

            # We impose our own reference_lla
            if reconstruction.is_georeferenced():
                self.write_reference_lla(
                    reconstruction.georef.utm_east_offset,
                    reconstruction.georef.utm_north_offset,
                    reconstruction.georef.proj4())
        else:
            log.ODM_WARNING("%s already exists, not rerunning OpenSfM setup" %
                            list_path)
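The GPU branch above shrinks the largest photo so its longer side equals feature_process_size before checking PopSift's texture limits; the same arithmetic as a small standalone sketch.

def scaled_dims(width, height, feature_process_size):
    # Preserve aspect ratio; the longer side becomes feature_process_size.
    if width > height:
        return feature_process_size, int((height / width) * feature_process_size)
    return int((width / height) * feature_process_size), feature_process_size

print(scaled_dims(6000, 4000, 2048))  # -> (2048, 1365)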
Code Example #9
    def process(self, args, outputs):
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']
        photos = reconstruction.photos

        outputs['large'] = len(photos) > args.split

        if outputs['large']:
            # If we have a cluster address, we'll use a distributed workflow
            local_workflow = not bool(args.sm_cluster)

            octx = OSFMContext(tree.opensfm)
            split_done_file = octx.path("split_done.txt")

            if not io.file_exists(split_done_file) or self.rerun():
                orig_max_concurrency = args.max_concurrency
                if not local_workflow:
                    args.max_concurrency = max(1, args.max_concurrency - 1)
                    log.ODM_INFO(
                        "Setting max-concurrency to %s to better handle remote splits"
                        % args.max_concurrency)

                log.ODM_INFO(
                    "Large dataset detected (%s photos) and split set at %s. Preparing split merge."
                    % (len(photos), args.split))
                config = [
                    "submodels_relpath: ../submodels/opensfm",
                    "submodel_relpath_template: ../submodels/submodel_%04d/opensfm",
                    "submodel_images_relpath_template: ../submodels/submodel_%04d/images",
                    "submodel_size: %s" % args.split,
                    "submodel_overlap: %s" % args.split_overlap,
                ]

                octx.setup(args,
                           tree.dataset_raw,
                           photos,
                           reconstruction=reconstruction,
                           append_config=config,
                           rerun=self.rerun())
                octx.extract_metadata(self.rerun())

                self.update_progress(5)

                if local_workflow:
                    octx.feature_matching(self.rerun())

                self.update_progress(20)

                # Create submodels
                if not io.dir_exists(tree.submodels_path) or self.rerun():
                    if io.dir_exists(tree.submodels_path):
                        log.ODM_WARNING(
                            "Removing existing submodels directory: %s" %
                            tree.submodels_path)
                        shutil.rmtree(tree.submodels_path)

                    octx.run("create_submodels")
                else:
                    log.ODM_WARNING(
                        "Submodels directory already exist at: %s" %
                        tree.submodels_path)

                # Find paths of all submodels
                mds = metadataset.MetaDataSet(tree.opensfm)
                submodel_paths = [
                    os.path.abspath(p) for p in mds.get_submodel_paths()
                ]

                for sp in submodel_paths:
                    sp_octx = OSFMContext(sp)

                    # Copy filtered GCP file if needed
                    # One in OpenSfM's directory, one in the submodel project directory
                    if reconstruction.gcp and reconstruction.gcp.exists():
                        submodel_gcp_file = os.path.abspath(
                            sp_octx.path("..", "gcp_list.txt"))
                        submodel_images_dir = os.path.abspath(
                            sp_octx.path("..", "images"))

                        if reconstruction.gcp.make_filtered_copy(
                                submodel_gcp_file, submodel_images_dir):
                            log.ODM_INFO("Copied filtered GCP file to %s" %
                                         submodel_gcp_file)
                            io.copy(
                                submodel_gcp_file,
                                os.path.abspath(sp_octx.path("gcp_list.txt")))
                        else:
                            log.ODM_INFO(
                                "No GCP will be copied for %s, not enough images in the submodel are referenced by the GCP"
                                % sp_octx.name())

                # Reconstruct each submodel
                log.ODM_INFO(
                    "Dataset has been split into %s submodels. Reconstructing each submodel..."
                    % len(submodel_paths))
                self.update_progress(25)

                if local_workflow:
                    for sp in submodel_paths:
                        log.ODM_INFO("Reconstructing %s" % sp)
                        OSFMContext(sp).reconstruct(self.rerun())
                else:
                    lre = LocalRemoteExecutor(args.sm_cluster, self.rerun())
                    lre.set_projects([
                        os.path.abspath(os.path.join(p, ".."))
                        for p in submodel_paths
                    ])
                    lre.run_reconstruction()

                self.update_progress(50)

                # Align
                octx.align_reconstructions(self.rerun())

                self.update_progress(55)

                # Aligned reconstruction is in reconstruction.aligned.json
                # We need to rename it to reconstruction.json
                remove_paths = []
                for sp in submodel_paths:
                    sp_octx = OSFMContext(sp)

                    aligned_recon = sp_octx.path('reconstruction.aligned.json')
                    unaligned_recon = sp_octx.path(
                        'reconstruction.unaligned.json')
                    main_recon = sp_octx.path('reconstruction.json')

                    if io.file_exists(main_recon) and io.file_exists(
                            unaligned_recon) and not self.rerun():
                        log.ODM_INFO("Submodel %s has already been aligned." %
                                     sp_octx.name())
                        continue

                    if not io.file_exists(aligned_recon):
                        log.ODM_WARNING(
                            "Submodel %s does not have an aligned reconstruction (%s). "
                            "This could mean that the submodel could not be reconstructed "
                            " (are there enough features to reconstruct it?). Skipping."
                            % (sp_octx.name(), aligned_recon))
                        remove_paths.append(sp)
                        continue

                    if io.file_exists(main_recon):
                        shutil.move(main_recon, unaligned_recon)

                    shutil.move(aligned_recon, main_recon)
                    log.ODM_INFO("%s is now %s" % (aligned_recon, main_recon))

                # Remove invalid submodels
                submodel_paths = [
                    p for p in submodel_paths if not p in remove_paths
                ]

                # Run ODM toolchain for each submodel
                if local_workflow:
                    for sp in submodel_paths:
                        sp_octx = OSFMContext(sp)

                        log.ODM_INFO("========================")
                        log.ODM_INFO("Processing %s" % sp_octx.name())
                        log.ODM_INFO("========================")

                        argv = get_submodel_argv(args, tree.submodels_path,
                                                 sp_octx.name())

                        # Re-run the ODM toolchain on the submodel
                        system.run(" ".join(map(quote, argv)),
                                   env_vars=os.environ.copy())
                else:
                    lre.set_projects([
                        os.path.abspath(os.path.join(p, ".."))
                        for p in submodel_paths
                    ])
                    lre.run_toolchain()

                # Restore max_concurrency value
                args.max_concurrency = orig_max_concurrency

                octx.touch(split_done_file)
            else:
                log.ODM_WARNING('Found a split done file in: %s' %
                                split_done_file)
        else:
            log.ODM_INFO("Normal dataset, will process all at once.")
            self.progress = 0.0
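The per-submodel alignment bookkeeping above boils down to a rename step (the aligned result becomes the main reconstruction, the previous one is kept as unaligned); a condensed, hypothetical sketch of just that step, with the file names taken from the code.

import os
import shutil

def promote_aligned(submodel_opensfm_dir):
    aligned = os.path.join(submodel_opensfm_dir, 'reconstruction.aligned.json')
    unaligned = os.path.join(submodel_opensfm_dir, 'reconstruction.unaligned.json')
    main = os.path.join(submodel_opensfm_dir, 'reconstruction.json')

    if not os.path.exists(aligned):
        return False  # submodel was never aligned (or not reconstructed at all)
    if os.path.exists(main):
        shutil.move(main, unaligned)  # keep the unaligned reconstruction around
    shutil.move(aligned, main)
    return True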
Code Example #10
    def process(self, inputs, outputs):

        # Benchmarking
        start_time = system.now_raw()

        log.ODM_INFO('Running ODM OpenSfM Cell')

        # get inputs
        tree = inputs.tree
        args = inputs.args
        reconstruction = inputs.reconstruction
        photos = reconstruction.photos

        if not photos:
            log.ODM_ERROR('Not enough photos in photos array to start OpenSfM')
            return ecto.QUIT

        # create working directories
        system.mkdir_p(tree.opensfm)

        # check if we rerun cell or not
        rerun_cell = (args.rerun is not None and
                      args.rerun == 'opensfm') or \
                     (args.rerun_all) or \
                     (args.rerun_from is not None and
                      'opensfm' in args.rerun_from)

        if args.fast_orthophoto:
            output_file = io.join_paths(tree.opensfm, 'reconstruction.ply')
        elif args.use_opensfm_dense:
            output_file = tree.opensfm_model
        else:
            output_file = tree.opensfm_reconstruction

        # check if reconstruction was done before
        if not io.file_exists(output_file) or rerun_cell:
            # create file list
            list_path = io.join_paths(tree.opensfm, 'image_list.txt')
            has_alt = True
            with open(list_path, 'w') as fout:
                for photo in photos:
                    if not photo.altitude:
                        has_alt = False
                    fout.write('%s\n' %
                               io.join_paths(tree.dataset_raw, photo.filename))

            # create config file for OpenSfM
            config = [
                "use_exif_size: %s" %
                ('no' if not self.params.use_exif_size else 'yes'),
                "feature_process_size: %s" % self.params.feature_process_size,
                "feature_min_frames: %s" % self.params.feature_min_frames,
                "processes: %s" % self.params.processes,
                "matching_gps_neighbors: %s" %
                self.params.matching_gps_neighbors,
                "depthmap_method: %s" % args.opensfm_depthmap_method,
                "depthmap_resolution: %s" % args.depthmap_resolution,
                "depthmap_min_patch_sd: %s" %
                args.opensfm_depthmap_min_patch_sd,
                "depthmap_min_consistent_views: %s" %
                args.opensfm_depthmap_min_consistent_views,
                "optimize_camera_parameters: %s" %
                ('no' if self.params.fixed_camera_params else 'yes')
            ]

            if has_alt:
                log.ODM_DEBUG(
                    "Altitude data detected, enabling it for GPS alignment")
                config.append("use_altitude_tag: yes")
                config.append("align_method: naive")
            else:
                config.append("align_method: orientation_prior")
                config.append("align_orientation_prior: vertical")

            if args.use_hybrid_bundle_adjustment:
                log.ODM_DEBUG("Enabling hybrid bundle adjustment")
                config.append(
                    "bundle_interval: 100"
                )  # Bundle after adding 'bundle_interval' cameras
                config.append(
                    "bundle_new_points_ratio: 1.2"
                )  # Bundle when (new points) / (bundled points) > bundle_new_points_ratio
                config.append(
                    "local_bundle_radius: 1"
                )  # Max image graph distance for images to be included in local bundle adjustment

            if args.matcher_distance > 0:
                config.append("matching_gps_distance: %s" %
                              self.params.matching_gps_distance)

            if tree.odm_georeferencing_gcp:
                config.append("bundle_use_gcp: yes")
                io.copy(tree.odm_georeferencing_gcp, tree.opensfm)

            # write config file
            log.ODM_DEBUG(config)
            config_filename = io.join_paths(tree.opensfm, 'config.yaml')
            with open(config_filename, 'w') as fout:
                fout.write("\n".join(config))

            # run OpenSfM reconstruction
            matched_done_file = io.join_paths(tree.opensfm,
                                              'matching_done.txt')
            if not io.file_exists(matched_done_file) or rerun_cell:
                system.run('PYTHONPATH=%s %s/bin/opensfm extract_metadata %s' %
                           (context.pyopencv_path, context.opensfm_path,
                            tree.opensfm))
                system.run('PYTHONPATH=%s %s/bin/opensfm detect_features %s' %
                           (context.pyopencv_path, context.opensfm_path,
                            tree.opensfm))
                system.run('PYTHONPATH=%s %s/bin/opensfm match_features %s' %
                           (context.pyopencv_path, context.opensfm_path,
                            tree.opensfm))
                with open(matched_done_file, 'w') as fout:
                    fout.write("Matching done!\n")
            else:
                log.ODM_WARNING(
                    'Found a feature matching done progress file in: %s' %
                    matched_done_file)

            if not io.file_exists(tree.opensfm_tracks) or rerun_cell:
                system.run('PYTHONPATH=%s %s/bin/opensfm create_tracks %s' %
                           (context.pyopencv_path, context.opensfm_path,
                            tree.opensfm))
            else:
                log.ODM_WARNING('Found a valid OpenSfM tracks file in: %s' %
                                tree.opensfm_tracks)

            if not io.file_exists(tree.opensfm_reconstruction) or rerun_cell:
                system.run('PYTHONPATH=%s %s/bin/opensfm reconstruct %s' %
                           (context.pyopencv_path, context.opensfm_path,
                            tree.opensfm))
            else:
                log.ODM_WARNING(
                    'Found a valid OpenSfM reconstruction file in: %s' %
                    tree.opensfm_reconstruction)

            # Check that a reconstruction file has been created
            if not io.file_exists(tree.opensfm_reconstruction):
                log.ODM_ERROR(
                    "The program could not process this dataset using the current settings. "
                    "Check that the images have enough overlap, "
                    "that there are enough recognizable features "
                    "and that the images are in focus. "
                    "You could also try to increase the --min-num-features parameter. "
                    "The program will now exit.")
                sys.exit(1)

            # Always export VisualSFM's reconstruction and undistort images
            # as we'll use these for texturing (after GSD estimation and resizing)
            if not args.ignore_gsd:
                image_scale = gsd.image_scale_factor(
                    args.orthophoto_resolution, tree.opensfm_reconstruction)
            else:
                image_scale = 1.0

            if not io.file_exists(
                    tree.opensfm_reconstruction_nvm) or rerun_cell:
                system.run(
                    'PYTHONPATH=%s %s/bin/opensfm export_visualsfm --image_extension png --scale_focal %s %s'
                    % (context.pyopencv_path, context.opensfm_path,
                       image_scale, tree.opensfm))
            else:
                log.ODM_WARNING(
                    'Found a valid OpenSfM NVM reconstruction file in: %s' %
                    tree.opensfm_reconstruction_nvm)

            # These will be used for texturing
            system.run(
                'PYTHONPATH=%s %s/bin/opensfm undistort --image_format png --image_scale %s %s'
                % (context.pyopencv_path, context.opensfm_path, image_scale,
                   tree.opensfm))

            # Skip dense reconstruction if necessary and export
            # sparse reconstruction instead
            if args.fast_orthophoto:
                system.run(
                    'PYTHONPATH=%s %s/bin/opensfm export_ply --no-cameras %s' %
                    (context.pyopencv_path, context.opensfm_path,
                     tree.opensfm))
            elif args.use_opensfm_dense:
                # Undistort images at full scale in JPG
                # (TODO: we could compare the size of the PNGs if they are < than depthmap_resolution
                # and use those instead of re-exporting full resolution JPGs)
                system.run('PYTHONPATH=%s %s/bin/opensfm undistort %s' %
                           (context.pyopencv_path, context.opensfm_path,
                            tree.opensfm))
                system.run(
                    'PYTHONPATH=%s %s/bin/opensfm compute_depthmaps %s' %
                    (context.pyopencv_path, context.opensfm_path,
                     tree.opensfm))
        else:
            log.ODM_WARNING(
                'Found a valid OpenSfM reconstruction file in: %s' %
                tree.opensfm_reconstruction)

        # check if reconstruction was exported to bundler before
        if not io.file_exists(tree.opensfm_bundle_list) or rerun_cell:
            # convert back to bundler's format
            system.run(
                'PYTHONPATH=%s %s/bin/export_bundler %s' %
                (context.pyopencv_path, context.opensfm_path, tree.opensfm))
        else:
            log.ODM_WARNING('Found a valid Bundler file in: %s' %
                            tree.opensfm_bundle_list)

        if reconstruction.georef:
            system.run(
                'PYTHONPATH=%s %s/bin/opensfm export_geocoords %s --transformation --proj \'%s\''
                % (context.pyopencv_path, context.opensfm_path, tree.opensfm,
                   reconstruction.georef.projection.srs))

        outputs.reconstruction = reconstruction

        if args.time:
            system.benchmark(start_time, tree.benchmarking, 'OpenSfM')

        log.ODM_INFO('Running ODM OpenSfM Cell - Finished')
        return ecto.OK if args.end_with != 'opensfm' else ecto.QUIT
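
The --scale_focal / --image_scale value passed to the two export commands above comes from gsd.image_scale_factor, which caps the export resolution at what the requested orthophoto resolution actually needs. A minimal sketch of the idea behind that helper (the estimated_gsd_cm argument is a stand-in; the real function derives the GSD from the reconstruction file):

def image_scale_factor(target_resolution_cm, estimated_gsd_cm):
    # If the reconstruction's ground sampling distance is already finer than
    # the requested orthophoto resolution, the undistorted images can be
    # downscaled by the ratio. Never upscale: the factor is capped at 1.0.
    if estimated_gsd_cm is None or estimated_gsd_cm <= 0:
        return 1.0
    return min(1.0, estimated_gsd_cm / float(target_resolution_cm))

# e.g. a 2 cm/px reconstruction with a 5 cm/px orthophoto target gives 0.4,
# so images are exported at 40% of their original size.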
Code Example #11
0
        if args.gcp:
            system.run(
                '{mm3d} Campari .*.{ext} Ground_Init_RTL Ground_RTL '
                'GCP=[ground.xml,0.005,images.xml,0.01] NbIterEnd=6 AllFree=1 DetGCP=1'
                .format(**kwargs_campari))
        else:
            system.run(
                '{mm3d} Campari .*.{ext} Ground_Init_RTL Ground_RTL '
                'EmGPS=[RAWGNSS_N,5] AllFree=0'.format(**kwargs_campari))

        progressbc.send_update(60)

        # change projection and coordinate system from relative (RTL) to UTM
        kwargs_chg = {'ext': image_ext, 'mm3d': mm3d}
        if args.gcp:
            io.copy('Ori-Ground_RTL', 'Ori-Ground_UTM')
            system.run(
                '{mm3d} GCPCtrl .*.{ext} Ground_UTM ground.xml images.xml'.
                format(**kwargs_chg))
        else:
            system.run(
                '{mm3d} ChgSysCo .*.{ext} Ground_RTL [email protected] Ground_UTM'
                .format(**kwargs_chg))

        progressbc.send_update(65)

        # camera cloud
        if args.camera_cloud:
            kwargs_camera_cloud = {
                'ext': image_ext,
                'mm3d': mm3d,
Code Example #12
0
File: mve.py Project: pengbao4386/ODM
    def process(self, args, outputs):
        # get inputs
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']
        photos = reconstruction.photos

        if not photos:
            log.ODM_ERROR('Not enough photos in photos array to start MVE')
            exit(1)

        # check if reconstruction was done before
        if not io.file_exists(tree.mve_model) or self.rerun():
            # cleanup if a rerun
            if io.dir_exists(tree.mve_path) and self.rerun():
                shutil.rmtree(tree.mve_path)

            # make bundle directory
            if not io.file_exists(tree.mve_bundle):
                system.mkdir_p(tree.mve_path)
                system.mkdir_p(io.join_paths(tree.mve_path, 'bundle'))

                octx = OSFMContext(tree.opensfm)
                octx.save_absolute_image_list_to(tree.mve_image_list)
                io.copy(tree.opensfm_bundle, tree.mve_bundle)

            # mve makescene wants the output directory
            # to not exist before executing it (otherwise it
            # will prompt the user for confirmation)
            if io.dir_exists(tree.mve):
                shutil.rmtree(tree.mve)

            # run mve makescene
            if not io.dir_exists(tree.mve_views):
                system.run('%s %s %s' %
                           (context.makescene_path, tree.mve_path, tree.mve),
                           env_vars={'OMP_NUM_THREADS': args.max_concurrency})

            self.update_progress(10)

            # Compute mve output scale based on depthmap_resolution
            max_width = 0
            max_height = 0
            for photo in photos:
                max_width = max(photo.width, max_width)
                max_height = max(photo.height, max_height)

            max_pixels = args.depthmap_resolution * args.depthmap_resolution
            if max_width * max_height <= max_pixels:
                mve_output_scale = 0
            else:
                ratio = float(max_width * max_height) / float(max_pixels)
                mve_output_scale = int(
                    math.ceil(math.log(ratio) / math.log(4.0)))

            dmrecon_config = [
                "-s%s" % mve_output_scale,
                "--progress=silent",
                "--local-neighbors=2",
            ]

            # Run MVE's dmrecon
            log.ODM_INFO(
                '                                                                               '
            )
            log.ODM_INFO(
                '                                    ,*/**                                      '
            )
            log.ODM_INFO(
                '                                  ,*@%*/@%*                                    '
            )
            log.ODM_INFO(
                '                                ,/@%******@&*.                                 '
            )
            log.ODM_INFO(
                '                              ,*@&*********/@&*                                '
            )
            log.ODM_INFO(
                '                            ,*@&**************@&*                              '
            )
            log.ODM_INFO(
                '                          ,/@&******************@&*.                           '
            )
            log.ODM_INFO(
                '                        ,*@&*********************/@&*                          '
            )
            log.ODM_INFO(
                '                      ,*@&**************************@&*.                       '
            )
            log.ODM_INFO(
                '                    ,/@&******************************&&*,                     '
            )
            log.ODM_INFO(
                '                  ,*&&**********************************@&*.                   '
            )
            log.ODM_INFO(
                '                ,*@&**************************************@&*.                 '
            )
            log.ODM_INFO(
                '              ,*@&***************#@@@@@@@@@%****************&&*,               '
            )
            log.ODM_INFO(
                '            .*&&***************&@@@@@@@@@@@@@@****************@@*.             '
            )
            log.ODM_INFO(
                '          .*@&***************&@@@@@@@@@@@@@@@@@%****(@@%********@@*.           '
            )
            log.ODM_INFO(
                '        .*@@***************%@@@@@@@@@@@@@@@@@@@@@#****&@@@@%******&@*,         '
            )
            log.ODM_INFO(
                '      .*&@****************@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@/*****@@*.       '
            )
            log.ODM_INFO(
                '    .*@@****************@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@%*************@@*.     '
            )
            log.ODM_INFO(
                '  .*@@****/***********@@@@@&**(@@@@@@@@@@@@@@@@@@@@@@@#*****************%@*,   '
            )
            log.ODM_INFO(
                ' */@*******@*******#@@@@%*******/@@@@@@@@@@@@@@@@@@@@********************/@(,  '
            )
            log.ODM_INFO(
                ' ,*@(********&@@@@@@#**************/@@@@@@@#**(@@&/**********************@&*   '
            )
            log.ODM_INFO(
                '   *#@/*******************************@@@@@***&@&**********************&@*,    '
            )
            log.ODM_INFO(
                '     *#@#******************************&@@@***@#*********************&@*,      '
            )
            log.ODM_INFO(
                '       */@#*****************************@@@************************@@*.        '
            )
            log.ODM_INFO(
                '         *#@/***************************/@@/*********************%@*,          '
            )
            log.ODM_INFO(
                '           *#@#**************************#@@%******************%@*,            '
            )
            log.ODM_INFO(
                '             */@#*************************(@@@@@@@&%/********&@*.              '
            )
            log.ODM_INFO(
                '               *(@(*********************************/%@@%**%@*,                '
            )
            log.ODM_INFO(
                '                 *(@%************************************%@**                  '
            )
            log.ODM_INFO(
                '                   **@%********************************&@*,                    '
            )
            log.ODM_INFO(
                '                     *(@(****************************%@/*                      '
            )
            log.ODM_INFO(
                '                       ,(@%************************#@/*                        '
            )
            log.ODM_INFO(
                '                         ,*@%********************&@/,                          '
            )
            log.ODM_INFO(
                '                           */@#****************#@/*                            '
            )
            log.ODM_INFO(
                '                             ,/@&************#@/*                              '
            )
            log.ODM_INFO(
                '                               ,*@&********%@/,                                '
            )
            log.ODM_INFO(
                '                                 */@#****(@/*                                  '
            )
            log.ODM_INFO(
                '                                   ,/@@@@(*                                    '
            )
            log.ODM_INFO(
                '                                     .**,                                      '
            )
            log.ODM_INFO('')
            log.ODM_INFO(
                "Running dense reconstruction. This might take a while. Please be patient, the process is not dead or hung."
            )
            log.ODM_INFO("                              Process is running")

            # TODO: find out why MVE is crashing at random
            # MVE *seems* to have a race condition, triggered randomly, regardless of dataset
            # https://gist.github.com/pierotofy/6c9ce93194ba510b61e42e3698cfbb89
            # Temporary workaround is to retry the reconstruction until we get it right
            # (up to a certain number of retries).
            retry_count = 1
            while retry_count < 10:
                try:
                    system.run(
                        '%s %s %s' % (context.dmrecon_path,
                                      ' '.join(dmrecon_config), tree.mve),
                        env_vars={'OMP_NUM_THREADS': args.max_concurrency})
                    break
                except Exception as e:
                    if str(e) == "Child returned 134" or str(
                            e) == "Child returned 1":
                        retry_count += 1
                        log.ODM_WARNING(
                            "Caught error code, retrying attempt #%s" %
                            retry_count)
                    else:
                        raise e

            self.update_progress(90)

            scene2pset_config = ["-F%s" % mve_output_scale]

            # run scene2pset
            system.run('%s %s "%s" "%s"' %
                       (context.scene2pset_path, ' '.join(scene2pset_config),
                        tree.mve, tree.mve_model),
                       env_vars={'OMP_NUM_THREADS': args.max_concurrency})
        else:
            log.ODM_WARNING('Found a valid MVE reconstruction file in: %s' %
                            tree.mve_model)
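
The mve_output_scale computed above selects the smallest MVE scale level whose downsampled images fit inside depthmap_resolution squared; every level halves width and height, so the pixel count drops by a factor of four per level. A standalone sketch of that relationship (not ODM code, same arithmetic):

import math

def mve_scale_for(max_width, max_height, depthmap_resolution):
    # Each MVE scale level halves both image dimensions (4x fewer pixels),
    # so we need the smallest s with pixels / 4**s <= depthmap_resolution**2.
    max_pixels = depthmap_resolution * depthmap_resolution
    pixels = max_width * max_height
    if pixels <= max_pixels:
        return 0
    return int(math.ceil(math.log(float(pixels) / max_pixels, 4)))

# e.g. 4000x3000 images with depthmap_resolution=640:
# ratio ~= 29.3, log4(29.3) ~= 2.44, so scale level 3 is used.
print(mve_scale_for(4000, 3000, 640))  # -> 3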
Code Example #13
0
File: mve.py Project: OpenDroneMap/OpenDroneMap
    def process(self, inputs, outputs):
        # Benchmarking
        start_time = system.now_raw()

        log.ODM_INFO('Running MVE Cell')

        # get inputs
        tree = inputs.tree
        args = inputs.args
        reconstruction = inputs.reconstruction
        photos = reconstruction.photos

        if not photos:
            log.ODM_ERROR('Not enough photos in photos array to start MVE')
            return ecto.QUIT

        # check if we rerun cell or not
        rerun_cell = (args.rerun is not None and
                      args.rerun == 'mve') or \
                     (args.rerun_all) or \
                     (args.rerun_from is not None and
                      'mve' in args.rerun_from)

        # check if reconstruction was done before
        if not io.file_exists(tree.mve_model) or rerun_cell:
            # cleanup if a rerun
            if io.dir_exists(tree.mve_path) and rerun_cell:
                shutil.rmtree(tree.mve_path)

            # make bundle directory
            if not io.file_exists(tree.mve_bundle):
                system.mkdir_p(tree.mve_path)
                system.mkdir_p(io.join_paths(tree.mve_path, 'bundle'))
                io.copy(tree.opensfm_image_list, tree.mve_image_list)
                io.copy(tree.opensfm_bundle, tree.mve_bundle)

            # mve makescene wants the output directory
            # to not exist before executing it (otherwise it
            # will prompt the user for confirmation)
            if io.dir_exists(tree.mve):
                shutil.rmtree(tree.mve)

            # run mve makescene
            if not io.dir_exists(tree.mve_views):
                system.run('%s %s %s' % (context.makescene_path, tree.mve_path, tree.mve), env_vars={'OMP_NUM_THREADS': args.max_concurrency})

            # Compute mve output scale based on depthmap_resolution
            max_width = 0
            max_height = 0
            for photo in photos:
                max_width = max(photo.width, max_width)
                max_height = max(photo.height, max_height)

            max_pixels = args.depthmap_resolution * args.depthmap_resolution
            if max_width * max_height <= max_pixels:
                mve_output_scale = 0
            else:
                ratio = float(max_width * max_height) / float(max_pixels)
                mve_output_scale = int(math.ceil(math.log(ratio) / math.log(4.0)))

            dmrecon_config = [
                "-s%s" % mve_output_scale,
	            "--progress=silent",
                "--local-neighbors=2",
                "--force",
            ]

            # Run MVE's dmrecon
            log.ODM_INFO('                                                                               ')
            log.ODM_INFO('                                    ,*/**                                      ')
            log.ODM_INFO('                                  ,*@%*/@%*                                    ')
            log.ODM_INFO('                                ,/@%******@&*.                                 ')
            log.ODM_INFO('                              ,*@&*********/@&*                                ')
            log.ODM_INFO('                            ,*@&**************@&*                              ')
            log.ODM_INFO('                          ,/@&******************@&*.                           ')
            log.ODM_INFO('                        ,*@&*********************/@&*                          ')
            log.ODM_INFO('                      ,*@&**************************@&*.                       ')
            log.ODM_INFO('                    ,/@&******************************&&*,                     ')
            log.ODM_INFO('                  ,*&&**********************************@&*.                   ')
            log.ODM_INFO('                ,*@&**************************************@&*.                 ')
            log.ODM_INFO('              ,*@&***************#@@@@@@@@@%****************&&*,               ')
            log.ODM_INFO('            .*&&***************&@@@@@@@@@@@@@@****************@@*.             ')
            log.ODM_INFO('          .*@&***************&@@@@@@@@@@@@@@@@@%****(@@%********@@*.           ')
            log.ODM_INFO('        .*@@***************%@@@@@@@@@@@@@@@@@@@@@#****&@@@@%******&@*,         ')
            log.ODM_INFO('      .*&@****************@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@/*****@@*.       ')
            log.ODM_INFO('    .*@@****************@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@%*************@@*.     ')
            log.ODM_INFO('  .*@@****/***********@@@@@&**(@@@@@@@@@@@@@@@@@@@@@@@#*****************%@*,   ')
            log.ODM_INFO(' */@*******@*******#@@@@%*******/@@@@@@@@@@@@@@@@@@@@********************/@(,  ')
            log.ODM_INFO(' ,*@(********&@@@@@@#**************/@@@@@@@#**(@@&/**********************@&*   ')
            log.ODM_INFO('   *#@/*******************************@@@@@***&@&**********************&@*,    ')
            log.ODM_INFO('     *#@#******************************&@@@***@#*********************&@*,      ')
            log.ODM_INFO('       */@#*****************************@@@************************@@*.        ')
            log.ODM_INFO('         *#@/***************************/@@/*********************%@*,          ')
            log.ODM_INFO('           *#@#**************************#@@%******************%@*,            ')
            log.ODM_INFO('             */@#*************************(@@@@@@@&%/********&@*.              ')
            log.ODM_INFO('               *(@(*********************************/%@@%**%@*,                ')
            log.ODM_INFO('                 *(@%************************************%@**                  ')
            log.ODM_INFO('                   **@%********************************&@*,                    ')
            log.ODM_INFO('                     *(@(****************************%@/*                      ')
            log.ODM_INFO('                       ,(@%************************#@/*                        ')
            log.ODM_INFO('                         ,*@%********************&@/,                          ')
            log.ODM_INFO('                           */@#****************#@/*                            ')
            log.ODM_INFO('                             ,/@&************#@/*                              ')
            log.ODM_INFO('                               ,*@&********%@/,                                ')
            log.ODM_INFO('                                 */@#****(@/*                                  ')
            log.ODM_INFO('                                   ,/@@@@(*                                    ')
            log.ODM_INFO('                                     .**,                                      ')
            log.ODM_INFO('')
            log.ODM_INFO("Running dense reconstruction. This might take a while. Please be patient, the process is not dead or hung.")
            log.ODM_INFO("                              Process is running")
            system.run('%s %s %s' % (context.dmrecon_path, ' '.join(dmrecon_config), tree.mve), env_vars={'OMP_NUM_THREADS': args.max_concurrency})

            scene2pset_config = [
                "-F%s" % mve_output_scale
            ]

            # run scene2pset
            system.run('%s %s "%s" "%s"' % (context.scene2pset_path, ' '.join(scene2pset_config), tree.mve, tree.mve_model), env_vars={'OMP_NUM_THREADS': args.max_concurrency})
        else:
            log.ODM_WARNING('Found a valid MVE reconstruction file in: %s' %
                            tree.mve_model)

        outputs.reconstruction = reconstruction

        if args.time:
            system.benchmark(start_time, tree.benchmarking, 'MVE')

        log.ODM_INFO('Running ODM MVE Cell - Finished')
        return ecto.OK if args.end_with != 'mve' else ecto.QUIT
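
The rerun_cell expression repeated in these ecto cells folds three CLI options into a single boolean. A hypothetical standalone version of the same predicate makes the precedence easier to read:

def should_rerun(args, stage):
    # Rerun the stage if it was requested explicitly (--rerun), if the whole
    # pipeline is being rerun (--rerun-all), or if --rerun-from includes it.
    return ((args.rerun is not None and args.rerun == stage) or
            args.rerun_all or
            (args.rerun_from is not None and stage in args.rerun_from))

# rerun_cell = should_rerun(args, 'mve')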
Code Example #14
0
    def process(self, inputs, outputs):
        # Benchmarking
        start_time = system.now_raw()

        log.ODM_INFO('Running MVE Cell')

        # get inputs
        tree = inputs.tree
        args = inputs.args
        reconstruction = inputs.reconstruction
        photos = reconstruction.photos

        if not photos:
            log.ODM_ERROR('Not enough photos in photos array to start MVE')
            return ecto.QUIT

        # check if we rerun cell or not
        rerun_cell = (args.rerun is not None and
                      args.rerun == 'mve') or \
                     (args.rerun_all) or \
                     (args.rerun_from is not None and
                      'mve' in args.rerun_from)

        # check if reconstruction was done before
        if not io.file_exists(tree.mve_model) or rerun_cell:
            # cleanup if a rerun
            if io.dir_exists(tree.mve_path) and rerun_cell:
                shutil.rmtree(tree.mve_path)

            # make bundle directory
            if not io.file_exists(tree.mve_bundle):
                system.mkdir_p(tree.mve_path)
                system.mkdir_p(io.join_paths(tree.mve_path, 'bundle'))
                io.copy(tree.opensfm_image_list, tree.mve_image_list)
                io.copy(tree.opensfm_bundle, tree.mve_bundle)

            # mve makescene wants the output directory
            # to not exist before executing it (otherwise it
            # will prompt the user for confirmation)
            if io.dir_exists(tree.mve):
                shutil.rmtree(tree.mve)

            # run mve makescene
            if not io.dir_exists(tree.mve_views):
                system.run('%s %s %s' %
                           (context.makescene_path, tree.mve_path, tree.mve),
                           env_vars={'OMP_NUM_THREADS': args.max_concurrency})

            # Compute mve output scale based on depthmap_resolution
            max_width = 0
            max_height = 0
            for photo in photos:
                max_width = max(photo.width, max_width)
                max_height = max(photo.height, max_height)

            max_pixels = args.depthmap_resolution * args.depthmap_resolution
            if max_width * max_height <= max_pixels:
                mve_output_scale = 0
            else:
                ratio = float(max_width * max_height) / float(max_pixels)
                mve_output_scale = int(
                    math.ceil(math.log(ratio) / math.log(4.0)))

            dmrecon_config = [
                "-s%s" % mve_output_scale,
                "--progress=silent",
                "--local-neighbors=2",
                "--force",
            ]

            # Run MVE's dmrecon
            log.ODM_INFO(
                '                                                                               '
            )
            log.ODM_INFO(
                '                                    ,*/**                                      '
            )
            log.ODM_INFO(
                '                                  ,*@%*/@%*                                    '
            )
            log.ODM_INFO(
                '                                ,/@%******@&*.                                 '
            )
            log.ODM_INFO(
                '                              ,*@&*********/@&*                                '
            )
            log.ODM_INFO(
                '                            ,*@&**************@&*                              '
            )
            log.ODM_INFO(
                '                          ,/@&******************@&*.                           '
            )
            log.ODM_INFO(
                '                        ,*@&*********************/@&*                          '
            )
            log.ODM_INFO(
                '                      ,*@&**************************@&*.                       '
            )
            log.ODM_INFO(
                '                    ,/@&******************************&&*,                     '
            )
            log.ODM_INFO(
                '                  ,*&&**********************************@&*.                   '
            )
            log.ODM_INFO(
                '                ,*@&**************************************@&*.                 '
            )
            log.ODM_INFO(
                '              ,*@&***************#@@@@@@@@@%****************&&*,               '
            )
            log.ODM_INFO(
                '            .*&&***************&@@@@@@@@@@@@@@****************@@*.             '
            )
            log.ODM_INFO(
                '          .*@&***************&@@@@@@@@@@@@@@@@@%****(@@%********@@*.           '
            )
            log.ODM_INFO(
                '        .*@@***************%@@@@@@@@@@@@@@@@@@@@@#****&@@@@%******&@*,         '
            )
            log.ODM_INFO(
                '      .*&@****************@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@/*****@@*.       '
            )
            log.ODM_INFO(
                '    .*@@****************@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@%*************@@*.     '
            )
            log.ODM_INFO(
                '  .*@@****/***********@@@@@&**(@@@@@@@@@@@@@@@@@@@@@@@#*****************%@*,   '
            )
            log.ODM_INFO(
                ' */@*******@*******#@@@@%*******/@@@@@@@@@@@@@@@@@@@@********************/@(,  '
            )
            log.ODM_INFO(
                ' ,*@(********&@@@@@@#**************/@@@@@@@#**(@@&/**********************@&*   '
            )
            log.ODM_INFO(
                '   *#@/*******************************@@@@@***&@&**********************&@*,    '
            )
            log.ODM_INFO(
                '     *#@#******************************&@@@***@#*********************&@*,      '
            )
            log.ODM_INFO(
                '       */@#*****************************@@@************************@@*.        '
            )
            log.ODM_INFO(
                '         *#@/***************************/@@/*********************%@*,          '
            )
            log.ODM_INFO(
                '           *#@#**************************#@@%******************%@*,            '
            )
            log.ODM_INFO(
                '             */@#*************************(@@@@@@@&%/********&@*.              '
            )
            log.ODM_INFO(
                '               *(@(*********************************/%@@%**%@*,                '
            )
            log.ODM_INFO(
                '                 *(@%************************************%@**                  '
            )
            log.ODM_INFO(
                '                   **@%********************************&@*,                    '
            )
            log.ODM_INFO(
                '                     *(@(****************************%@/*                      '
            )
            log.ODM_INFO(
                '                       ,(@%************************#@/*                        '
            )
            log.ODM_INFO(
                '                         ,*@%********************&@/,                          '
            )
            log.ODM_INFO(
                '                           */@#****************#@/*                            '
            )
            log.ODM_INFO(
                '                             ,/@&************#@/*                              '
            )
            log.ODM_INFO(
                '                               ,*@&********%@/,                                '
            )
            log.ODM_INFO(
                '                                 */@#****(@/*                                  '
            )
            log.ODM_INFO(
                '                                   ,/@@@@(*                                    '
            )
            log.ODM_INFO(
                '                                     .**,                                      '
            )
            log.ODM_INFO('')
            log.ODM_INFO(
                "Running dense reconstruction. This might take a while. Please be patient, the process is not dead or hung."
            )
            log.ODM_INFO("                              Process is running")
            system.run(
                '%s %s %s' %
                (context.dmrecon_path, ' '.join(dmrecon_config), tree.mve),
                env_vars={'OMP_NUM_THREADS': args.max_concurrency})

            scene2pset_config = ["-F%s" % mve_output_scale]

            # run scene2pset
            system.run('%s %s "%s" "%s"' %
                       (context.scene2pset_path, ' '.join(scene2pset_config),
                        tree.mve, tree.mve_model),
                       env_vars={'OMP_NUM_THREADS': args.max_concurrency})
        else:
            log.ODM_WARNING('Found a valid MVE reconstruction file in: %s' %
                            tree.mve_model)

        outputs.reconstruction = reconstruction

        if args.time:
            system.benchmark(start_time, tree.benchmarking, 'MVE')

        log.ODM_INFO('Running ODM MVE Cell - Finished')
        return ecto.OK if args.end_with != 'mve' else ecto.QUIT
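
dmrecon and scene2pset are throttled through the OMP_NUM_THREADS environment variable rather than a command-line flag, which is why system.run receives env_vars. A minimal sketch of the same mechanism using only the standard library (plain subprocess instead of ODM's system.run; the example command is a placeholder):

import os
import subprocess

def run_with_omp_threads(cmd, max_concurrency):
    # OpenMP-based binaries read OMP_NUM_THREADS from the environment, so the
    # concurrency limit is passed there instead of on the command line.
    env = os.environ.copy()
    env['OMP_NUM_THREADS'] = str(max_concurrency)
    subprocess.check_call(cmd, shell=True, env=env)

# run_with_omp_threads('dmrecon -s2 --progress=silent /path/to/scene', 8)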
Code Example #15
0
File: smvs.py Project: sentimentanalyser/ODM
    def process(self, inputs, outputs):

        # Benchmarking
        start_time = system.now_raw()

        log.ODM_INFO('Running SMVS Cell')

        # get inputs
        tree = inputs.tree
        args = inputs.args
        reconstruction = inputs.reconstruction
        photos = reconstruction.photos

        if not photos:
            log.ODM_ERROR('Not enough photos in photos array to start SMVS')
            return ecto.QUIT

        # check if we rerun cell or not
        rerun_cell = (args.rerun is not None and
                      args.rerun == 'smvs') or \
                     (args.rerun_all) or \
                     (args.rerun_from is not None and
                      'smvs' in args.rerun_from)

        # check if reconstruction was done before
        if not io.file_exists(tree.smvs_model) or rerun_cell:
            # cleanup if a rerun
            if io.dir_exists(tree.mve_path) and rerun_cell:
                shutil.rmtree(tree.mve_path)

            # make bundle directory
            if not io.file_exists(tree.mve_bundle):
                system.mkdir_p(tree.mve_path)
                system.mkdir_p(io.join_paths(tree.mve_path, 'bundle'))
                io.copy(tree.opensfm_image_list, tree.mve_image_list)
                io.copy(tree.opensfm_bundle, tree.mve_bundle)

            # mve makescene wants the output directory
            # to not exist before executing it (otherwise it
            # will prompt the user for confirmation)
            if io.dir_exists(tree.smvs):
                shutil.rmtree(tree.smvs)

            # run mve makescene
            if not io.dir_exists(tree.mve_views):
                system.run('%s %s %s' % (context.makescene_path, tree.mve_path, tree.smvs))

            # config
            config = [
                "-t%s" % self.params.threads,
                "-a%s" % self.params.alpha,
                "--max-pixels=%s" % int(self.params.max_pixels),
                "-o%s" % self.params.output_scale,
                "--debug-lvl=%s" % ('1' if self.params.verbose else '0'),
                "%s" % '-S' if self.params.shading else '',
                "%s" % '-g' if self.params.gamma_srgb and self.params.shading else '',
                "--force" if rerun_cell else ''
            ]

            # run smvs
            system.run('%s %s %s' % (context.smvs_path, ' '.join(config), tree.smvs))
            
            # find and rename the output file for simplicity
            smvs_files = glob.glob(os.path.join(tree.smvs, 'smvs-*'))
            smvs_files.sort(key=os.path.getmtime) # sort by last modified date
            if len(smvs_files) > 0:
                old_file = smvs_files[-1]
                if not io.rename_file(old_file, tree.smvs_model):
                    log.ODM_WARNING("File %s does not exist and cannot be renamed." % old_file)

                # Filter
                point_cloud.filter(tree.smvs_model, standard_deviation=args.pc_filter, verbose=args.verbose)
            else:
                log.ODM_WARNING("Cannot find a valid point cloud (smvs-XX.ply) in %s. Check the console output for errors." % tree.smvs)
        else:
            log.ODM_WARNING('Found a valid SMVS reconstruction file in: %s' %
                            tree.smvs_model)

        outputs.reconstruction = reconstruction

        if args.time:
            system.benchmark(start_time, tree.benchmarking, 'SMVS')

        log.ODM_INFO('Running ODM SMVS Cell - Finished')
        return ecto.OK if args.end_with != 'smvs' else ecto.QUIT
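
The SMVS cell locates its output by globbing for smvs-* files and keeping the most recently modified one before renaming it to tree.smvs_model. A compact standalone version of that selection (the directory path is a placeholder):

import glob
import os

def newest_smvs_output(smvs_dir):
    # SMVS writes its point cloud as smvs-XX.ply; pick the newest file so a
    # rerun with different settings still resolves to the latest result.
    candidates = glob.glob(os.path.join(smvs_dir, 'smvs-*'))
    if not candidates:
        return None
    return max(candidates, key=os.path.getmtime)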
Code Example #16
0
    def setup(self,
              args,
              images_path,
              photos,
              reconstruction,
              append_config=[],
              rerun=False):
        """
        Setup a OpenSfM project
        """
        if rerun and io.dir_exists(self.opensfm_project_path):
            shutil.rmtree(self.opensfm_project_path)

        if not io.dir_exists(self.opensfm_project_path):
            system.mkdir_p(self.opensfm_project_path)

        list_path = io.join_paths(self.opensfm_project_path, 'image_list.txt')
        if not io.file_exists(list_path) or rerun:

            # create file list
            has_alt = True
            has_gps = False
            with open(list_path, 'w') as fout:
                for photo in photos:
                    if not photo.altitude:
                        has_alt = False
                    if photo.latitude is not None and photo.longitude is not None:
                        has_gps = True
                    fout.write('%s\n' %
                               io.join_paths(images_path, photo.filename))

            # check for image_groups.txt (split-merge)
            image_groups_file = os.path.join(args.project_path,
                                             "image_groups.txt")
            if io.file_exists(image_groups_file):
                log.ODM_INFO("Copied image_groups.txt to OpenSfM directory")
                io.copy(
                    image_groups_file,
                    os.path.join(self.opensfm_project_path,
                                 "image_groups.txt"))

            # check for cameras
            if args.cameras:
                try:
                    camera_overrides = camera.get_opensfm_camera_models(
                        args.cameras)
                    with open(
                            os.path.join(self.opensfm_project_path,
                                         "camera_models_overrides.json"),
                            'w') as f:
                        f.write(json.dumps(camera_overrides))
                    log.ODM_INFO(
                        "Wrote camera_models_overrides.json to OpenSfM directory"
                    )
                except Exception as e:
                    log.ODM_WARNING(
                        "Cannot set camera_models_overrides.json: %s" % str(e))

            use_bow = False
            feature_type = "SIFT"

            matcher_neighbors = args.matcher_neighbors
            if matcher_neighbors != 0 and reconstruction.multi_camera is not None:
                matcher_neighbors *= len(reconstruction.multi_camera)
                log.ODM_INFO(
                    "Increasing matcher neighbors to %s to accomodate multi-camera setup"
                    % matcher_neighbors)
                log.ODM_INFO("Multi-camera setup, using BOW matching")
                use_bow = True

            # GPSDOP override if we have GPS accuracy information (such as RTK)
            override_gps_dop = 'gps_accuracy_is_set' in args
            for p in photos:
                if p.get_gps_dop() is not None:
                    override_gps_dop = True
                    break

            if override_gps_dop:
                if 'gps_accuracy_is_set' in args:
                    log.ODM_INFO("Forcing GPS DOP to %s for all images" %
                                 args.gps_accuracy)
                else:
                    log.ODM_INFO(
                        "Looks like we have RTK accuracy info for some photos. Good! We'll use it."
                    )

                exif_overrides = {}
                for p in photos:
                    dop = args.gps_accuracy if 'gps_accuracy_is_set' in args else p.get_gps_dop(
                    )
                    if dop is not None and p.latitude is not None and p.longitude is not None:
                        exif_overrides[p.filename] = {
                            'gps': {
                                'latitude':
                                p.latitude,
                                'longitude':
                                p.longitude,
                                'altitude':
                                p.altitude if p.altitude is not None else 0,
                                'dop':
                                dop,
                            }
                        }

                with open(
                        os.path.join(self.opensfm_project_path,
                                     "exif_overrides.json"), 'w') as f:
                    f.write(json.dumps(exif_overrides))

            # create config file for OpenSfM
            config = [
                "use_exif_size: no",
                "flann_algorithm: KDTREE",  # more stable, faster than KMEANS
                "feature_process_size: %s" % args.resize_to,
                "feature_min_frames: %s" % args.min_num_features,
                "processes: %s" % args.max_concurrency,
                "matching_gps_neighbors: %s" % matcher_neighbors,
                "matching_gps_distance: %s" % args.matcher_distance,
                "depthmap_method: %s" % args.opensfm_depthmap_method,
                "depthmap_resolution: %s" % args.depthmap_resolution,
                "depthmap_min_patch_sd: %s" %
                args.opensfm_depthmap_min_patch_sd,
                "depthmap_min_consistent_views: %s" %
                args.opensfm_depthmap_min_consistent_views,
                "optimize_camera_parameters: %s" %
                ('no'
                 if args.use_fixed_camera_params or args.cameras else 'yes'),
                "undistorted_image_format: tif",
                "bundle_outlier_filtering_type: AUTO",
                "align_orientation_prior: vertical",
                "triangulation_type: ROBUST",
                "bundle_common_position_constraints: %s" %
                ('no' if reconstruction.multi_camera is None else 'yes'),
            ]

            if args.camera_lens != 'auto':
                config.append("camera_projection_type: %s" %
                              args.camera_lens.upper())

            if not has_gps:
                log.ODM_INFO("No GPS information, using BOW matching")
                use_bow = True

            feature_type = args.feature_type.upper()

            if use_bow:
                config.append("matcher_type: WORDS")

                # Cannot use SIFT with BOW
                if feature_type == "SIFT":
                    log.ODM_WARNING(
                        "Using BOW matching, will use HAHOG feature type, not SIFT"
                    )
                    feature_type = "HAHOG"

            config.append("feature_type: %s" % feature_type)

            if has_alt:
                log.ODM_INFO(
                    "Altitude data detected, enabling it for GPS alignment")
                config.append("use_altitude_tag: yes")

            gcp_path = reconstruction.gcp.gcp_path
            if has_alt or gcp_path:
                config.append("align_method: auto")
            else:
                config.append("align_method: orientation_prior")

            if args.use_hybrid_bundle_adjustment:
                log.ODM_INFO("Enabling hybrid bundle adjustment")
                config.append(
                    "bundle_interval: 100"
                )  # Bundle after adding 'bundle_interval' cameras
                config.append(
                    "bundle_new_points_ratio: 1.2"
                )  # Bundle when (new points) / (bundled points) > bundle_new_points_ratio
                config.append(
                    "local_bundle_radius: 1"
                )  # Max image graph distance for images to be included in local bundle adjustment
            else:
                config.append("local_bundle_radius: 0")

            if gcp_path:
                config.append("bundle_use_gcp: yes")
                if not args.force_gps:
                    config.append("bundle_use_gps: no")
                io.copy(gcp_path, self.path("gcp_list.txt"))

            config = config + append_config

            # write config file
            log.ODM_INFO(config)
            config_filename = self.get_config_file_path()
            with open(config_filename, 'w') as fout:
                fout.write("\n".join(config))
        else:
            log.ODM_WARNING("%s already exists, not rerunning OpenSfM setup" %
                            list_path)
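
The exif_overrides.json written above maps each geotagged filename to a GPS block whose dop is either the forced --gps-accuracy value or the photo's own DOP (e.g. from RTK metadata). A trimmed illustration of building that mapping (the photo objects and the forced_dop argument are stand-ins for the attributes used in the code above):

def build_exif_overrides(photos, forced_dop=None):
    # One entry per photo with GPS coordinates; OpenSfM reads this file to
    # override the EXIF-derived positions and their accuracy.
    overrides = {}
    for p in photos:
        dop = forced_dop if forced_dop is not None else p.get_gps_dop()
        if dop is not None and p.latitude is not None and p.longitude is not None:
            overrides[p.filename] = {
                'gps': {
                    'latitude': p.latitude,
                    'longitude': p.longitude,
                    'altitude': p.altitude if p.altitude is not None else 0,
                    'dop': dop,
                }
            }
    return overrides

# json.dumps(build_exif_overrides(photos)) is what lands in exif_overrides.json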
Code Example #17
0
    def process(self, args, outputs):
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']
        photos = reconstruction.photos

        outputs['large'] = len(photos) > args.split

        if outputs['large']:
            # If we have a cluster address, we'll use a distributed workflow
            local_workflow = not bool(args.sm_cluster)

            octx = OSFMContext(tree.opensfm)
            split_done_file = octx.path("split_done.txt")

            if not io.file_exists(split_done_file) or self.rerun():
                orig_max_concurrency = args.max_concurrency
                if not local_workflow:
                    args.max_concurrency = max(1, args.max_concurrency - 1)
                    log.ODM_INFO("Setting max-concurrency to %s to better handle remote splits" % args.max_concurrency)

                log.ODM_INFO("Large dataset detected (%s photos) and split set at %s. Preparing split merge." % (
                    len(photos), args.split))
                config = [
                    "submodels_relpath: ../submodels/opensfm",
                    "submodel_relpath_template: ../submodels/submodel_%04d/opensfm",
                    "submodel_images_relpath_template: ../submodels/submodel_%04d/images",
                    "submodel_size: %s" % args.split,
                    "submodel_overlap: %s" % args.split_overlap,
                ]

                octx.setup(args, tree.dataset_raw, reconstruction=reconstruction, append_config=config,
                           rerun=self.rerun())
                octx.extract_metadata(self.rerun())

                self.update_progress(5)

                if local_workflow:
                    octx.feature_matching(self.rerun())

                self.update_progress(20)

                # Create submodels
                if not io.dir_exists(tree.submodels_path) or self.rerun():
                    if io.dir_exists(tree.submodels_path):
                        log.ODM_WARNING("Removing existing submodels directory: %s" % tree.submodels_path)
                        shutil.rmtree(tree.submodels_path)

                    octx.run("create_submodels")
                else:
                    log.ODM_WARNING("Submodels directory already exist at: %s" % tree.submodels_path)

                # Find paths of all submodels
                mds = metadataset.MetaDataSet(tree.opensfm)
                submodel_paths = [os.path.abspath(p) for p in mds.get_submodel_paths()]

                for sp in submodel_paths:
                    sp_octx = OSFMContext(sp)

                    # Copy filtered GCP file if needed
                    # One in OpenSfM's directory, one in the submodel project directory
                    if reconstruction.gcp and reconstruction.gcp.exists():
                        submodel_gcp_file = os.path.abspath(sp_octx.path("..", "gcp_list.txt"))
                        submodel_images_dir = os.path.abspath(sp_octx.path("..", "images"))

                        if reconstruction.gcp.make_filtered_copy(submodel_gcp_file, submodel_images_dir):
                            log.ODM_INFO("Copied filtered GCP file to %s" % submodel_gcp_file)
                            io.copy(submodel_gcp_file, os.path.abspath(sp_octx.path("gcp_list.txt")))
                        else:
                            log.ODM_INFO(
                                "No GCP will be copied for %s, not enough images in the submodel are referenced by the GCP" % sp_octx.name())

                # Reconstruct each submodel
                log.ODM_INFO(
                    "Dataset has been split into %s submodels. Reconstructing each submodel..." % len(submodel_paths))
                self.update_progress(25)

                if local_workflow:
                    for sp in submodel_paths:
                        log.ODM_INFO("Reconstructing %s" % sp)
                        OSFMContext(sp).reconstruct(self.rerun())
                else:
                    lre = LocalRemoteExecutor(args.sm_cluster, self.rerun())
                    lre.set_projects([os.path.abspath(os.path.join(p, "..")) for p in submodel_paths])
                    lre.run_reconstruction()

                self.update_progress(50)

                # TODO: this is currently not working and needs a champion to fix it
                # https://community.opendronemap.org/t/filenotfound-error-cameras-json/6047/2

                # resplit_done_file = octx.path('resplit_done.txt')
                # if not io.file_exists(resplit_done_file) and bool(args.split_multitracks):
                #     submodels = mds.get_submodel_paths()
                #     i = 0
                #     for s in submodels:
                #         template = octx.path("../aligned_submodels/submodel_%04d")
                #         with open(s+"/reconstruction.json", "r") as f:
                #             j = json.load(f)
                #         for k in range(0, len(j)):
                #             v = j[k]
                #             path = template % i

                #             #Create the submodel path up to opensfm
                #             os.makedirs(path+"/opensfm")
                #             os.makedirs(path+"/images")

                #             #symlinks for common data
                #             images = os.listdir(octx.path("../images"))
                #             for image in images:
                #                 os.symlink("../../../images/"+image, path+"/images/"+image)
                #             os.symlink("../../../opensfm/exif", path+"/opensfm/exif")
                #             os.symlink("../../../opensfm/features", path+"/opensfm/features")
                #             os.symlink("../../../opensfm/matches", path+"/opensfm/matches")
                #             os.symlink("../../../opensfm/reference_lla.json", path+"/opensfm/reference_lla.json")
                #             os.symlink("../../../opensfm/camera_models.json", path+"/opensfm/camera_models.json")

                #             shutil.copy(s+"/../cameras.json", path+"/cameras.json")

                #             shutil.copy(s+"/../images.json", path+"/images.json")

                #             with open(octx.path("config.yaml")) as f:
                #                 doc = yaml.safe_load(f)

                #             dmcv = "depthmap_min_consistent_views"
                #             if dmcv in doc:
                #                 if len(v["shots"]) < doc[dmcv]:
                #                     doc[dmcv] = len(v["shots"])
                #                     print("WARNING: Reduced "+dmcv+" to accommodate short track")

                #             with open(path+"/opensfm/config.yaml", "w") as f:
                #                 yaml.dump(doc, f)

                #             #We need the original tracks file for the visualsfm export, since
                #             #there may still be point matches between the tracks
                #             shutil.copy(s+"/tracks.csv", path+"/opensfm/tracks.csv")

                #             #Create our new reconstruction file with only the relevant track
                #             with open(path+"/opensfm/reconstruction.json", "w") as o:
                #                 json.dump([v], o)

                #             #Create image lists
                #             with open(path+"/opensfm/image_list.txt", "w") as o:
                #                 o.writelines(list(map(lambda x: "../images/"+x+'\n', v["shots"].keys())))
                #             with open(path+"/img_list.txt", "w") as o:
                #                 o.writelines(list(map(lambda x: x+'\n', v["shots"].keys())))

                #             i+=1
                #     os.rename(octx.path("../submodels"), octx.path("../unaligned_submodels"))
                #     os.rename(octx.path("../aligned_submodels"), octx.path("../submodels"))
                #     octx.touch(resplit_done_file)

                mds = metadataset.MetaDataSet(tree.opensfm)
                submodel_paths = [os.path.abspath(p) for p in mds.get_submodel_paths()]

                # Align
                octx.align_reconstructions(self.rerun())

                self.update_progress(55)

                # Aligned reconstruction is in reconstruction.aligned.json
                # We need to rename it to reconstruction.json
                remove_paths = []
                for sp in submodel_paths:
                    sp_octx = OSFMContext(sp)

                    aligned_recon = sp_octx.path('reconstruction.aligned.json')
                    unaligned_recon = sp_octx.path('reconstruction.unaligned.json')
                    main_recon = sp_octx.path('reconstruction.json')

                    if io.file_exists(main_recon) and io.file_exists(unaligned_recon) and not self.rerun():
                        log.ODM_INFO("Submodel %s has already been aligned." % sp_octx.name())
                        continue

                    if not io.file_exists(aligned_recon):
                        log.ODM_WARNING("Submodel %s does not have an aligned reconstruction (%s). "
                                        "This could mean that the submodel could not be reconstructed "
                                        " (are there enough features to reconstruct it?). Skipping." % (
                                            sp_octx.name(), aligned_recon))
                        remove_paths.append(sp)
                        continue

                    if io.file_exists(main_recon):
                        shutil.move(main_recon, unaligned_recon)

                    shutil.move(aligned_recon, main_recon)
                    log.ODM_INFO("%s is now %s" % (aligned_recon, main_recon))

                # Remove invalid submodels
                submodel_paths = [p for p in submodel_paths if p not in remove_paths]

                # Run ODM toolchain for each submodel
                if local_workflow:
                    for sp in submodel_paths:
                        sp_octx = OSFMContext(sp)

                        log.ODM_INFO("========================")
                        log.ODM_INFO("Processing %s" % sp_octx.name())
                        log.ODM_INFO("========================")

                        argv = get_submodel_argv(args, tree.submodels_path, sp_octx.name())

                        # Re-run the ODM toolchain on the submodel
                        system.run(" ".join(map(quote, map(str, argv))), env_vars=os.environ.copy())
                else:
                    lre.set_projects([os.path.abspath(os.path.join(p, "..")) for p in submodel_paths])
                    lre.run_toolchain()

                # Restore max_concurrency value
                args.max_concurrency = orig_max_concurrency

                octx.touch(split_done_file)
            else:
                log.ODM_WARNING('Found a split done file in: %s' % split_done_file)
        else:
            log.ODM_INFO("Normal dataset, will process all at once.")
            self.progress = 0.0
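
Each submodel is processed by re-invoking the ODM toolchain with a rebuilt argv; the quote/map/join chain above is simply safe shell quoting before handing the string to system.run. A minimal standalone equivalent using shlex (the argv contents here are placeholders):

from shlex import quote

argv = ['python', 'run.py', '--project-path', '/data/sub models', 'submodel_0003']
# Quote every token so paths with spaces or shell metacharacters survive the
# round trip through the shell.
cmd = " ".join(map(quote, map(str, argv)))
print(cmd)  # python run.py --project-path '/data/sub models' submodel_0003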
Code Example #18
0
    def process(self, inputs, outputs):

        # Benchmarking
        start_time = system.now_raw()

        log.ODM_INFO('Running ODM OpenSfM Cell')

        # get inputs
        tree = inputs.tree
        args = inputs.args
        reconstruction = inputs.reconstruction
        photos = reconstruction.photos

        if not photos:
            log.ODM_ERROR('Not enough photos in photos array to start OpenSfM')
            return ecto.QUIT

        # create working directories
        system.mkdir_p(tree.opensfm)

        # check if we rerun cell or not
        rerun_cell = (args.rerun is not None and
                      args.rerun == 'opensfm') or \
                     (args.rerun_all) or \
                     (args.rerun_from is not None and
                      'opensfm' in args.rerun_from)

        if args.fast_orthophoto:
            output_file = io.join_paths(tree.opensfm, 'reconstruction.ply')
        elif args.use_opensfm_dense:
            output_file = tree.opensfm_model
        else:
            output_file = tree.opensfm_reconstruction

        # check if reconstruction was done before
        if not io.file_exists(output_file) or rerun_cell:
            # create file list
            list_path = io.join_paths(tree.opensfm, 'image_list.txt')
            has_alt = True
            with open(list_path, 'w') as fout:
                for photo in photos:
                    if not photo.altitude:
                        has_alt = False
                    fout.write('%s\n' % io.join_paths(tree.dataset_raw, photo.filename))

            # create config file for OpenSfM
            config = [
                "use_exif_size: %s" % ('no' if not self.params.use_exif_size else 'yes'),
                "feature_process_size: %s" % self.params.feature_process_size,
                "feature_min_frames: %s" % self.params.feature_min_frames,
                "processes: %s" % self.params.processes,
                "matching_gps_neighbors: %s" % self.params.matching_gps_neighbors,
                "depthmap_method: %s" % args.opensfm_depthmap_method,
                "depthmap_resolution: %s" % args.depthmap_resolution,
                "depthmap_min_patch_sd: %s" % args.opensfm_depthmap_min_patch_sd,
                "depthmap_min_consistent_views: %s" % args.opensfm_depthmap_min_consistent_views,
                "optimize_camera_parameters: %s" % ('no' if self.params.fixed_camera_params else 'yes')
            ]

            if has_alt:
                log.ODM_DEBUG("Altitude data detected, enabling it for GPS alignment")
                config.append("use_altitude_tag: yes")
                config.append("align_method: naive")
            else:
                config.append("align_method: orientation_prior")
                config.append("align_orientation_prior: vertical")

            if args.use_hybrid_bundle_adjustment:
                log.ODM_DEBUG("Enabling hybrid bundle adjustment")
                config.append("bundle_interval: 100")          # Bundle after adding 'bundle_interval' cameras
                config.append("bundle_new_points_ratio: 1.2")  # Bundle when (new points) / (bundled points) > bundle_new_points_ratio
                config.append("local_bundle_radius: 1")        # Max image graph distance for images to be included in local bundle adjustment

            if args.matcher_distance > 0:
                config.append("matching_gps_distance: %s" % self.params.matching_gps_distance)

            if tree.odm_georeferencing_gcp:
                config.append("bundle_use_gcp: yes")
                io.copy(tree.odm_georeferencing_gcp, tree.opensfm)

            # write config file
            log.ODM_DEBUG(config)
            config_filename = io.join_paths(tree.opensfm, 'config.yaml')
            with open(config_filename, 'w') as fout:
                fout.write("\n".join(config))

            # run OpenSfM reconstruction
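            # matching_done.txt is a sentinel file: feature extraction and matching
            # are skipped on later runs unless rerun_cell is set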
            matched_done_file = io.join_paths(tree.opensfm, 'matching_done.txt')
            if not io.file_exists(matched_done_file) or rerun_cell:
                system.run('PYTHONPATH=%s %s/bin/opensfm extract_metadata %s' %
                           (context.pyopencv_path, context.opensfm_path, tree.opensfm))
                system.run('PYTHONPATH=%s %s/bin/opensfm detect_features %s' %
                           (context.pyopencv_path, context.opensfm_path, tree.opensfm))
                system.run('PYTHONPATH=%s %s/bin/opensfm match_features %s' %
                           (context.pyopencv_path, context.opensfm_path, tree.opensfm))
                with open(matched_done_file, 'w') as fout:
                    fout.write("Matching done!\n")
            else:
                log.ODM_WARNING('Found a feature matching done progress file in: %s' %
                                matched_done_file)

            if not io.file_exists(tree.opensfm_tracks) or rerun_cell:
                system.run('PYTHONPATH=%s %s/bin/opensfm create_tracks %s' %
                           (context.pyopencv_path, context.opensfm_path, tree.opensfm))
            else:
                log.ODM_WARNING('Found a valid OpenSfM tracks file in: %s' %
                                tree.opensfm_tracks)

            if not io.file_exists(tree.opensfm_reconstruction) or rerun_cell:
                system.run('PYTHONPATH=%s %s/bin/opensfm reconstruct %s' %
                           (context.pyopencv_path, context.opensfm_path, tree.opensfm))
            else:
                log.ODM_WARNING('Found a valid OpenSfM reconstruction file in: %s' %
                                tree.opensfm_reconstruction)

            # Check that a reconstruction file has been created
            if not io.file_exists(tree.opensfm_reconstruction):
                log.ODM_ERROR("The program could not process this dataset using the current settings. "
                              "Check that the images have enough overlap, "
                              "that there are enough recognizable features "
                              "and that the images are in focus. "
                              "You could also try to increase the --min-num-features parameter. "
                              "The program will now exit.")
                sys.exit(1)

            # Always export VisualSFM's reconstruction and undistort images
            # as we'll use these for texturing (after GSD estimation and resizing)
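            # Estimate how much the undistorted images can be downscaled given the
            # requested orthophoto resolution (ground sampling distance), unless
            # --ignore-gsd is set; the factor feeds --scale_focal and --image_scale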
            if not args.ignore_gsd:
                image_scale = gsd.image_scale_factor(args.orthophoto_resolution, tree.opensfm_reconstruction)
            else:
                image_scale = 1.0

            if not io.file_exists(tree.opensfm_reconstruction_nvm) or rerun_cell:
                system.run('PYTHONPATH=%s %s/bin/opensfm export_visualsfm --image_extension png --scale_focal %s %s' %
                            (context.pyopencv_path, context.opensfm_path, image_scale, tree.opensfm))
            else:
                log.ODM_WARNING('Found a valid OpenSfM NVM reconstruction file in: %s' %
                                tree.opensfm_reconstruction_nvm)

            # These will be used for texturing
            system.run('PYTHONPATH=%s %s/bin/opensfm undistort --image_format png --image_scale %s %s' %
                        (context.pyopencv_path, context.opensfm_path, image_scale, tree.opensfm))

            # Skip dense reconstruction if necessary and export
            # sparse reconstruction instead
            if args.fast_orthophoto:
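                # Export the sparse point cloud directly; no depthmaps are computed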
                system.run('PYTHONPATH=%s %s/bin/opensfm export_ply --no-cameras %s' %
                        (context.pyopencv_path, context.opensfm_path, tree.opensfm))
            elif args.use_opensfm_dense:
                # Undistort images at full scale in JPG
                # (TODO: we could compare the size of the PNGs if they are < than depthmap_resolution
                # and use those instead of re-exporting full resolution JPGs)
                system.run('PYTHONPATH=%s %s/bin/opensfm undistort %s' %
                        (context.pyopencv_path, context.opensfm_path, tree.opensfm))
                system.run('PYTHONPATH=%s %s/bin/opensfm compute_depthmaps %s' %
                        (context.pyopencv_path, context.opensfm_path, tree.opensfm))
        else:
            log.ODM_WARNING('Found a valid OpenSfM reconstruction file in: %s' %
                            tree.opensfm_reconstruction)

        # check if reconstruction was exported to bundler before
        if not io.file_exists(tree.opensfm_bundle_list) or rerun_cell:
            # convert back to bundler's format
            system.run('PYTHONPATH=%s %s/bin/export_bundler %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
        else:
            log.ODM_WARNING('Found a valid Bundler file in: %s' %
                            tree.opensfm_bundle_list)

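        # For georeferenced datasets, export the transformation between OpenSfM's
        # internal coordinates and the dataset's projection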
        if reconstruction.georef:
            system.run('PYTHONPATH=%s %s/bin/opensfm export_geocoords %s --transformation --proj \'%s\'' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm, reconstruction.georef.projection.srs))

        outputs.reconstruction = reconstruction

        if args.time:
            system.benchmark(start_time, tree.benchmarking, 'OpenSfM')

        log.ODM_INFO('Running ODM OpenSfM Cell - Finished')
        return ecto.OK if args.end_with != 'opensfm' else ecto.QUIT
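
Several steps above guard expensive work behind a small "done file" check (matching_done.txt) or an output-file check combined with rerun_cell, so the cell can be rerun without repeating finished stages. Below is a minimal, self-contained sketch of that idiom; run_once, match_features and the sentinel file name are illustrative only, not part of ODM or OpenSfM.

import os


def run_once(done_file, step, rerun=False):
    # Skip the step when its sentinel file exists, unless a rerun was requested.
    if os.path.isfile(done_file) and not rerun:
        print('Skipping, found %s' % done_file)
        return
    step()
    # Record completion so the next invocation can skip this step.
    with open(done_file, 'w') as fout:
        fout.write('done\n')


def match_features():
    print('matching features...')


run_once('matching_done.txt', match_features)               # runs and writes the sentinel
run_once('matching_done.txt', match_features)               # skipped this time
run_once('matching_done.txt', match_features, rerun=True)   # forced rerun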