def process(self, inputs, outputs):
    """Load the input dataset and publish the list of usable photos.

    On first run, copies supported images from the input directory into the
    project's images directory; then builds ODM photo objects in parallel
    and stores them on ``outputs.photos``.

    Returns ``ecto.OK`` on success, ``ecto.QUIT`` if no usable images exist.
    """
    # check if the extension is supported
    def supported_extension(file_name):
        (pathfn, ext) = os.path.splitext(file_name)
        return ext.lower() in context.supported_extensions

    # Get supported images from dir
    def get_images(in_dir):
        # filter images for its extension type
        log.ODM_DEBUG(in_dir)
        return [f for f in io.get_files_list(in_dir) if supported_extension(f)]

    log.ODM_INFO('Running ODM Load Dataset Cell')

    # get inputs
    tree = self.inputs.tree

    # get images directory
    input_dir = tree.input_images
    images_dir = tree.dataset_raw
    resize_dir = tree.dataset_resize

    # Check first if a project already exists. This is a mediocre way to check, by checking the resize dir
    if io.dir_exists(resize_dir):
        log.ODM_DEBUG("resize dir: %s" % resize_dir)
        images_dir = resize_dir
    # if first time running, create project directory and copy images over to project/images
    else:
        if not io.dir_exists(images_dir):
            log.ODM_INFO("Project directory %s doesn't exist. Creating it now. " % images_dir)
            system.mkdir_p(images_dir)
            copied = [copyfile(io.join_paths(input_dir, f),
                               io.join_paths(images_dir, f))
                      for f in get_images(input_dir)]

    log.ODM_DEBUG('Loading dataset from: %s' % images_dir)

    files = get_images(images_dir)

    if files:
        # create ODMPhoto list
        path_files = [io.join_paths(images_dir, f) for f in files]

        # Fix: the multiprocessing pool was created inline and never
        # closed/joined, leaking worker processes on every run. Ensure
        # cleanup even if map() raises.
        pool = Pool()
        try:
            photos = pool.map(
                partial(make_odm_photo,
                        self.params.force_focal,
                        self.params.force_ccd),
                path_files
            )
        finally:
            pool.close()
            pool.join()

        log.ODM_INFO('Found %s usable images' % len(photos))
    else:
        log.ODM_ERROR('Not enough supported images in %s' % images_dir)
        return ecto.QUIT

    # append photos to cell output
    outputs.photos = photos

    log.ODM_INFO('Running ODM Load Dataset Cell - Finished')
    return ecto.OK
def process(self, inputs, outputs):
    """Build a mesh from the reconstructed point cloud via the odm_meshing binary."""
    # Benchmarking
    cell_started = system.now_raw()
    log.ODM_INFO('Running ODM Meshing Cell')

    # gather cell inputs
    args = self.inputs.args
    tree = self.inputs.tree
    verbose = '-verbose' if self.params.verbose else ''

    # make sure the working directory exists
    system.mkdir_p(tree.odm_meshing)

    # decide whether this cell must be (re)run
    rerun_cell = ((args.rerun is not None and args.rerun == 'odm_meshing')
                  or args.rerun_all
                  or (args.rerun_from is not None
                      and 'odm_meshing' in args.rerun_from))

    if io.file_exists(tree.odm_mesh) and not rerun_cell:
        log.ODM_WARNING('Found a valid ODM Mesh file in: %s' % tree.odm_mesh)
    else:
        log.ODM_DEBUG('Writing ODM Mesh file in: %s' % tree.odm_mesh)

        # arguments for the meshing binary; the input model depends on
        # which densifier produced the point cloud (PMVS vs OpenSfM)
        mesh_args = {
            'bin': context.odm_modules_path,
            'infile': tree.pmvs_model if args.use_pmvs else tree.opensfm_model,
            'outfile': tree.odm_mesh,
            'log': tree.odm_meshing_log,
            'max_vertex': self.params.max_vertex,
            'oct_tree': self.params.oct_tree,
            'samples': self.params.samples,
            'solver': self.params.solver,
            'verbose': verbose
        }

        # run meshing binary
        system.run('{bin}/odm_meshing -inputFile {infile} '
                   '-outputFile {outfile} -logFile {log} '
                   '-maxVertexCount {max_vertex} -octreeDepth {oct_tree} {verbose} '
                   '-samplesPerNode {samples} -solverDivide {solver}'.format(**mesh_args))

    if args.time:
        system.benchmark(cell_started, tree.benchmarking, 'Meshing')

    log.ODM_INFO('Running ODM Meshing Cell - Finished')
    return ecto.QUIT if args.end_with == 'odm_meshing' else ecto.OK
def process(self, inputs, outputs):
    """Texture the ODM mesh by invoking the odm_texturing binary.

    Skips the work if a textured model already exists and no rerun
    was requested. Returns ecto.OK, or ecto.QUIT when this is the
    user-selected final stage.
    """
    # Benchmarking
    start_time = system.now_raw()
    log.ODM_INFO('Running ODM Texturing Cell')

    # get inputs
    args = self.inputs.args
    tree = self.inputs.tree

    # define paths and create working directories
    system.mkdir_p(tree.odm_texturing)

    # check if we rerun cell or not
    rerun_cell = (args.rerun is not None and
                  args.rerun == 'odm_texturing') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and
                  'odm_texturing' in args.rerun_from)

    if not io.file_exists(tree.odm_textured_model_obj) or rerun_cell:
        log.ODM_DEBUG('Writing ODM Textured file in: %s'
                      % tree.odm_textured_model_obj)

        # odm_texturing definitions
        kwargs = {
            'bin': context.odm_modules_path,
            'out_dir': tree.odm_texturing,
            'bundle': tree.opensfm_bundle,
            'imgs_path': tree.dataset_resize,
            'imgs_list': tree.opensfm_bundle_list,
            'model': tree.odm_mesh,
            # NOTE(review): attribute is spelled 'odm_texuring_log' (missing
            # 't') — presumably it matches the tree definition elsewhere in
            # the project; verify before renaming.
            'log': tree.odm_texuring_log,
            'resize': self.params.resize,
            'resolution': self.params.resolution,
            'size': self.params.size
        }

        # run texturing binary
        system.run('{bin}/odm_texturing -bundleFile {bundle} '
                   '-imagesPath {imgs_path} -imagesListPath {imgs_list} '
                   '-inputModelPath {model} -outputFolder {out_dir}/ '
                   '-textureResolution {resolution} -bundleResizedTo {resize} '
                   '-textureWithSize {size} -logFile {log}'.format(**kwargs))
    else:
        log.ODM_WARNING('Found a valid ODM Texture file in: %s'
                        % tree.odm_textured_model_obj)

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'Texturing')

    log.ODM_INFO('Running ODM Texturing Cell - Finished')
    return ecto.OK if args.end_with != 'odm_texturing' else ecto.QUIT
def process(self, inputs, outputs):
    """Filter the input point cloud (outlier removal) before meshing.

    Picks the input cloud according to the pipeline flavor
    (fast-orthophoto / OpenSfM dense / MVE) and writes the filtered
    result to ``tree.filtered_point_cloud``.
    """
    # Benchmarking
    start_time = system.now_raw()
    log.ODM_INFO('Running ODM FilterPoints Cell')

    # get inputs
    tree = inputs.tree
    args = inputs.args
    reconstruction = inputs.reconstruction

    # check if we rerun cell or not
    rerun_cell = (args.rerun is not None and args.rerun == 'odm_filterpoints') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and 'odm_filterpoints' in args.rerun_from)

    if not os.path.exists(tree.odm_filterpoints):
        system.mkdir_p(tree.odm_filterpoints)

    # check if reconstruction was done before
    if not io.file_exists(tree.filtered_point_cloud) or rerun_cell:
        # choose the input point cloud based on which densifier produced it
        if args.fast_orthophoto:
            inputPointCloud = os.path.join(tree.opensfm, 'reconstruction.ply')
        elif args.use_opensfm_dense:
            inputPointCloud = tree.opensfm_model
        else:
            inputPointCloud = tree.mve_model

        # confidence-based filtering only applies to MVE point clouds
        confidence = None
        if not args.use_opensfm_dense and not args.fast_orthophoto:
            confidence = args.mve_confidence

        point_cloud.filter(inputPointCloud, tree.filtered_point_cloud,
                           standard_deviation=args.pc_filter,
                           confidence=confidence,
                           verbose=args.verbose)
    else:
        log.ODM_WARNING('Found a valid point cloud file in: %s' %
                        tree.filtered_point_cloud)

    outputs.reconstruction = reconstruction

    if args.time:
        # Fix: the benchmark label previously said 'MVE' — a copy-paste
        # remnant from the MVE cell; label this cell's own timing instead.
        system.benchmark(start_time, tree.benchmarking, 'Filter Points')

    log.ODM_INFO('Running ODM FilterPoints Cell - Finished')
    return ecto.OK if args.end_with != 'odm_filterpoints' else ecto.QUIT
def process(self, inputs, outputs):
    """Georeferencing cell entry: ensure a coordinates file exists.

    When no GCP file is in use and no coordinates file exists yet,
    tries to generate one from image EXIF metadata via the
    odm_extract_utm binary (which internally calls jhead).
    """
    # Benchmarking
    start_time = system.now_raw()
    log.ODM_INFO('Running ODM Georeferencing Cell')

    # get inputs
    args = self.inputs.args
    tree = self.inputs.tree
    gcpfile = io.join_paths(tree.root_path, self.params.gcp_file)

    # define paths and create working directories
    system.mkdir_p(tree.odm_georeferencing)

    # in case a gcp file it's not provided, let's try to generate it using
    # images metadata. Internally calls jhead.
    if not self.params.use_gcp and \
       not io.file_exists(tree.odm_georeferencing_coords):

        log.ODM_WARNING('Warning: No coordinates file. '
                        'Generating coordinates file in: %s'
                        % tree.odm_georeferencing_coords)
        try:
            # odm_georeference definitions
            kwargs = {
                'bin': context.odm_modules_path,
                'imgs': tree.dataset_raw,
                'imgs_list': tree.opensfm_bundle_list,
                'coords': tree.odm_georeferencing_coords,
                'log': tree.odm_georeferencing_utm_log
            }

            # run UTM extraction binary
            system.run('{bin}/odm_extract_utm -imagesPath {imgs}/ '
                       '-imageListFile {imgs_list} -outputCoordFile {coords} '
                       '-logFile {log}'.format(**kwargs))
        # Fix: replaced Python-2-only "except Exception, e:" with the
        # "as" form used elsewhere in this file (valid on Py2.6+ and Py3).
        except Exception as e:
            # Fix: added the missing space between the two implicitly
            # concatenated sentences ("metadata.Consider" -> "metadata. Consider").
            log.ODM_ERROR('Could not generate GCP file from images metadata. '
                          'Consider rerunning with argument --odm_georeferencing-useGcp'
                          ' and provide a proper GCP file')
            log.ODM_ERROR(e)
            return ecto.QUIT
def process(self, args, outputs):
    """Merge stage for split-merge (large) datasets.

    Combines per-submodel outputs (point clouds, crop bounds,
    orthophotos, DEMs) into a single set of project-level results,
    then stops the pipeline. For normal datasets this is a no-op.
    """
    tree = outputs['tree']
    reconstruction = outputs['reconstruction']

    if outputs['large']:
        if not os.path.exists(tree.submodels_path):
            log.ODM_ERROR(
                "We reached the merge stage, but %s folder does not exist. Something must have gone wrong at an earlier stage. Check the log and fix possible problem before restarting?"
                % tree.submodels_path)
            exit(1)

        # Merge point clouds
        if args.merge in ['all', 'pointcloud']:
            if not io.file_exists(tree.odm_georeferencing_model_laz) or self.rerun():
                all_point_clouds = get_submodel_paths(
                    tree.submodels_path, "odm_georeferencing",
                    "odm_georeferenced_model.laz")

                try:
                    point_cloud.merge(all_point_clouds,
                                      tree.odm_georeferencing_model_laz,
                                      rerun=self.rerun())
                    point_cloud.post_point_cloud_steps(args, tree)
                except Exception as e:
                    # best-effort: a failed point cloud merge does not abort
                    # the remaining merge steps
                    log.ODM_WARNING(
                        "Could not merge point cloud: %s (skipping)" % str(e))
            else:
                log.ODM_WARNING("Found merged point cloud in %s"
                                % tree.odm_georeferencing_model_laz)

        self.update_progress(25)

        # Merge crop bounds
        merged_bounds_file = os.path.join(
            tree.odm_georeferencing, 'odm_georeferenced_model.bounds.gpkg')
        if not io.file_exists(merged_bounds_file) or self.rerun():
            all_bounds = get_submodel_paths(
                tree.submodels_path, 'odm_georeferencing',
                'odm_georeferenced_model.bounds.gpkg')
            log.ODM_INFO("Merging all crop bounds: %s" % all_bounds)
            if len(all_bounds) > 0:
                # Calculate a new crop area
                # based on the convex hull of all crop areas of all submodels
                # (without a buffer, otherwise we are double-cropping)
                Cropper.merge_bounds(all_bounds, merged_bounds_file, 0)
            else:
                log.ODM_WARNING("No bounds found for any submodel.")

        # Merge orthophotos
        if args.merge in ['all', 'orthophoto']:
            if not io.dir_exists(tree.odm_orthophoto):
                system.mkdir_p(tree.odm_orthophoto)

            if not io.file_exists(tree.odm_orthophoto_tif) or self.rerun():
                # each entry pairs a feathered orthophoto with its cutline raster
                all_orthos_and_ortho_cuts = get_all_submodel_paths(
                    tree.submodels_path,
                    os.path.join("odm_orthophoto",
                                 "odm_orthophoto_feathered.tif"),
                    os.path.join("odm_orthophoto",
                                 "odm_orthophoto_cut.tif"),
                )

                if len(all_orthos_and_ortho_cuts) > 1:
                    log.ODM_INFO(
                        "Found %s submodels with valid orthophotos and cutlines"
                        % len(all_orthos_and_ortho_cuts))

                    # TODO: histogram matching via rasterio
                    # currently parts have different color tones

                    if io.file_exists(tree.odm_orthophoto_tif):
                        os.remove(tree.odm_orthophoto_tif)

                    orthophoto_vars = orthophoto.get_orthophoto_vars(args)
                    orthophoto.merge(all_orthos_and_ortho_cuts,
                                     tree.odm_orthophoto_tif,
                                     orthophoto_vars)
                    orthophoto.post_orthophoto_steps(
                        args, merged_bounds_file, tree.odm_orthophoto_tif)
                elif len(all_orthos_and_ortho_cuts) == 1:
                    # Simply copy
                    log.ODM_WARNING(
                        "A single orthophoto/cutline pair was found between all submodels."
                    )
                    shutil.copyfile(all_orthos_and_ortho_cuts[0][0],
                                    tree.odm_orthophoto_tif)
                else:
                    log.ODM_WARNING(
                        "No orthophoto/cutline pairs were found in any of the submodels. No orthophoto will be generated."
                    )
            else:
                log.ODM_WARNING("Found merged orthophoto in %s"
                                % tree.odm_orthophoto_tif)

        self.update_progress(75)

        # Merge DEMs
        def merge_dems(dem_filename, human_name):
            # merges one DEM flavor (dsm.tif / dtm.tif) from all submodels
            # into tree/odm_dem, optionally cropping to the merged bounds
            if not io.dir_exists(tree.path('odm_dem')):
                system.mkdir_p(tree.path('odm_dem'))

            dem_file = tree.path("odm_dem", dem_filename)
            if not io.file_exists(dem_file) or self.rerun():
                all_dems = get_submodel_paths(tree.submodels_path, "odm_dem",
                                              dem_filename)
                log.ODM_INFO("Merging %ss" % human_name)

                # Merge
                dem_vars = utils.get_dem_vars(args)
                eu_map_source = None  # Default

                # Use DSM's euclidean map for DTMs
                # (requires the DSM to be computed)
                if human_name == "DTM":
                    eu_map_source = "dsm"

                euclidean_merge_dems(all_dems, dem_file, dem_vars,
                                     euclidean_map_source=eu_map_source)

                if io.file_exists(dem_file):
                    # Crop
                    if args.crop > 0:
                        Cropper.crop(
                            merged_bounds_file, dem_file, dem_vars,
                            keep_original=not args.optimize_disk_space)
                    log.ODM_INFO("Created %s" % dem_file)
                else:
                    log.ODM_WARNING("Cannot merge %s, %s was not created"
                                    % (human_name, dem_file))
            else:
                log.ODM_WARNING("Found merged %s in %s"
                                % (human_name, dem_filename))

        if args.merge in ['all', 'dem'] and args.dsm:
            merge_dems("dsm.tif", "DSM")

        if args.merge in ['all', 'dem'] and args.dtm:
            merge_dems("dtm.tif", "DTM")

        # Stop the pipeline short! We're done.
        self.next_stage = None
    else:
        log.ODM_INFO("Normal dataset, nothing to merge.")
        self.progress = 0.0
def process(self, args, outputs):
    """Texture the 3D and/or 2.5D meshes with Mvs-Texturing.

    Builds a list of texturing "runs" (one per mesh, and per band for
    multi-camera datasets), then invokes the mvstex binary for each.
    """
    tree = outputs['tree']
    reconstruction = outputs['reconstruction']

    # Mutable container so the nested add_run() can append to a shared
    # list (closure-write workaround, avoids `nonlocal`).
    class nonloc:
        runs = []

    def add_run(nvm_file, primary=True, band=None):
        # secondary bands are textured into a per-band subdirectory
        subdir = ""
        if not primary and band is not None:
            subdir = band

        # full 3D model run (skipped when only the 2.5D pipeline is used)
        if not args.skip_3dmodel and (primary or args.use_3dmesh):
            nonloc.runs += [{
                'out_dir': os.path.join(tree.odm_texturing, subdir),
                'model': tree.odm_mesh,
                'nadir': False,
                'primary': primary,
                'nvm_file': nvm_file,
                'labeling_file': os.path.join(tree.odm_texturing, "odm_textured_model_geo_labeling.vec") if subdir else None
            }]

        # 2.5D model run (nadir mode)
        if not args.use_3dmesh:
            nonloc.runs += [{
                'out_dir': os.path.join(tree.odm_25dtexturing, subdir),
                'model': tree.odm_25dmesh,
                'nadir': True,
                'primary': primary,
                'nvm_file': nvm_file,
                'labeling_file': os.path.join(tree.odm_25dtexturing, "odm_textured_model_geo_labeling.vec") if subdir else None
            }]

    if reconstruction.multi_camera:
        # one run per band; the primary band is identified by name
        for band in reconstruction.multi_camera:
            primary = band['name'] == get_primary_band_name(
                reconstruction.multi_camera, args.primary_band)
            nvm_file = os.path.join(
                tree.opensfm, "undistorted",
                "reconstruction_%s.nvm" % band['name'].lower())
            add_run(nvm_file, primary, band['name'].lower())

        # Sort to make sure primary band is processed first
        nonloc.runs.sort(key=lambda r: r['primary'], reverse=True)
    else:
        add_run(tree.opensfm_reconstruction_nvm)

    progress_per_run = 100.0 / len(nonloc.runs)
    progress = 0.0

    for r in nonloc.runs:
        if not io.dir_exists(r['out_dir']):
            system.mkdir_p(r['out_dir'])

        odm_textured_model_obj = os.path.join(r['out_dir'],
                                              tree.odm_textured_model_obj)

        if not io.file_exists(odm_textured_model_obj) or self.rerun():
            log.ODM_INFO('Writing MVS Textured file in: %s'
                         % odm_textured_model_obj)

            # Format arguments to fit Mvs-Texturing app
            skipGlobalSeamLeveling = ""
            skipLocalSeamLeveling = ""
            keepUnseenFaces = ""
            nadir = ""

            if (self.params.get('skip_glob_seam_leveling')):
                skipGlobalSeamLeveling = "--skip_global_seam_leveling"
            if (self.params.get('skip_loc_seam_leveling')):
                skipLocalSeamLeveling = "--skip_local_seam_leveling"
            if (self.params.get('keep_unseen_faces')):
                keepUnseenFaces = "--keep_unseen_faces"
            if (r['nadir']):
                nadir = '--nadir_mode'

            # mvstex definitions
            kwargs = {
                'bin': context.mvstex_path,
                'out_dir': os.path.join(r['out_dir'], "odm_textured_model_geo"),
                'model': r['model'],
                'dataTerm': self.params.get('data_term'),
                'outlierRemovalType': self.params.get('outlier_rem_type'),
                'skipGlobalSeamLeveling': skipGlobalSeamLeveling,
                'skipLocalSeamLeveling': skipLocalSeamLeveling,
                'keepUnseenFaces': keepUnseenFaces,
                'toneMapping': self.params.get('tone_mapping'),
                'nadirMode': nadir,
                'nvm_file': r['nvm_file'],
                # intermediate results are kept only when a labeling file
                # will NOT be produced and this is a multi-camera dataset
                'intermediate': '--no_intermediate_results' if (r['labeling_file'] or not reconstruction.multi_camera) else '',
                'labelingFile': '-L "%s"' % r['labeling_file'] if r['labeling_file'] else ''
            }

            mvs_tmp_dir = os.path.join(r['out_dir'], 'tmp')

            # Make sure tmp directory is empty
            if io.dir_exists(mvs_tmp_dir):
                log.ODM_INFO(
                    "Removing old tmp directory {}".format(mvs_tmp_dir))
                shutil.rmtree(mvs_tmp_dir)

            # run texturing binary
            system.run('{bin} {nvm_file} {model} {out_dir} '
                       '-d {dataTerm} -o {outlierRemovalType} '
                       '-t {toneMapping} '
                       '{intermediate} '
                       '{skipGlobalSeamLeveling} '
                       '{skipLocalSeamLeveling} '
                       '{keepUnseenFaces} '
                       '{nadirMode} '
                       '{labelingFile} '.format(**kwargs))

            # Backward compatibility: copy odm_textured_model_geo.mtl to odm_textured_model.mtl
            # for certain older WebODM clients which expect a odm_textured_model.mtl
            # to be present for visualization
            # We should remove this at some point in the future
            geo_mtl = os.path.join(r['out_dir'], 'odm_textured_model_geo.mtl')
            if io.file_exists(geo_mtl):
                nongeo_mtl = os.path.join(r['out_dir'], 'odm_textured_model.mtl')
                shutil.copy(geo_mtl, nongeo_mtl)

            progress += progress_per_run
            self.update_progress(progress)
        else:
            log.ODM_WARNING('Found a valid ODM Texture file in: %s'
                            % odm_textured_model_obj)

    if args.optimize_disk_space:
        # remove inputs no longer needed once texturing is done
        for r in nonloc.runs:
            if io.file_exists(r['model']):
                os.remove(r['model'])

        undistorted_images_path = os.path.join(tree.opensfm, "undistorted", "images")
        if io.dir_exists(undistorted_images_path):
            shutil.rmtree(undistorted_images_path)
def setup(self, args, images_path, photos, reconstruction, append_config=None, rerun=False):
    """
    Setup a OpenSfM project.

    Creates the project directory, writes the image list, copies
    image_groups.txt / camera overrides / GCP files when present, and
    generates the OpenSfM config.yaml. Skipped if image_list.txt
    already exists and rerun is False.

    :param append_config: optional list of extra config lines appended
        verbatim to the generated config (defaults to none).
    """
    # Fix: mutable default argument ([]) replaced with a None sentinel;
    # behavior is unchanged for all callers.
    if append_config is None:
        append_config = []

    if rerun and io.dir_exists(self.opensfm_project_path):
        shutil.rmtree(self.opensfm_project_path)

    if not io.dir_exists(self.opensfm_project_path):
        system.mkdir_p(self.opensfm_project_path)

    list_path = io.join_paths(self.opensfm_project_path, 'image_list.txt')
    if not io.file_exists(list_path) or rerun:

        # create file list; while writing it, detect whether every photo
        # has altitude and whether any photo has GPS coordinates
        has_alt = True
        has_gps = False
        with open(list_path, 'w') as fout:
            for photo in photos:
                if not photo.altitude:
                    has_alt = False
                if photo.latitude is not None and photo.longitude is not None:
                    has_gps = True
                fout.write('%s\n' % io.join_paths(images_path, photo.filename))

        # check for image_groups.txt (split-merge)
        image_groups_file = os.path.join(args.project_path, "image_groups.txt")
        if io.file_exists(image_groups_file):
            log.ODM_INFO("Copied image_groups.txt to OpenSfM directory")
            io.copy(image_groups_file,
                    os.path.join(self.opensfm_project_path, "image_groups.txt"))

        # check for cameras
        if args.cameras:
            try:
                camera_overrides = camera.get_opensfm_camera_models(args.cameras)
                with open(os.path.join(self.opensfm_project_path,
                                       "camera_models_overrides.json"), 'w') as f:
                    f.write(json.dumps(camera_overrides))
                log.ODM_INFO(
                    "Wrote camera_models_overrides.json to OpenSfM directory")
            except Exception as e:
                log.ODM_WARNING(
                    "Cannot set camera_models_overrides.json: %s" % str(e))

        use_bow = False
        feature_type = "SIFT"

        matcher_neighbors = args.matcher_neighbors
        if matcher_neighbors != 0 and reconstruction.multi_camera is not None:
            matcher_neighbors *= len(reconstruction.multi_camera)
            log.ODM_INFO(
                "Increasing matcher neighbors to %s to accomodate multi-camera setup"
                % matcher_neighbors)
            log.ODM_INFO("Multi-camera setup, using BOW matching")
            use_bow = True

        # create config file for OpenSfM
        config = [
            "use_exif_size: no",
            "feature_process_size: %s" % args.resize_to,
            "feature_min_frames: %s" % args.min_num_features,
            "processes: %s" % args.max_concurrency,
            "matching_gps_neighbors: %s" % matcher_neighbors,
            "matching_gps_distance: %s" % args.matcher_distance,
            "depthmap_method: %s" % args.opensfm_depthmap_method,
            "depthmap_resolution: %s" % args.depthmap_resolution,
            "depthmap_min_patch_sd: %s" % args.opensfm_depthmap_min_patch_sd,
            "depthmap_min_consistent_views: %s" % args.opensfm_depthmap_min_consistent_views,
            "optimize_camera_parameters: %s" % ('no' if args.use_fixed_camera_params or args.cameras else 'yes'),
            "undistorted_image_format: tif",
            "bundle_outlier_filtering_type: AUTO",
            "align_orientation_prior: vertical",
            "triangulation_type: ROBUST",
            "bundle_common_position_constraints: %s" % ('no' if reconstruction.multi_camera is None else 'yes'),
        ]

        if args.camera_lens != 'auto':
            config.append("camera_projection_type: %s" % args.camera_lens.upper())

        if not has_gps:
            log.ODM_INFO("No GPS information, using BOW matching")
            use_bow = True

        feature_type = args.feature_type.upper()

        if use_bow:
            config.append("matcher_type: WORDS")

            # Cannot use SIFT with BOW
            if feature_type == "SIFT":
                log.ODM_WARNING(
                    "Using BOW matching, will use HAHOG feature type, not SIFT")
                feature_type = "HAHOG"

        config.append("feature_type: %s" % feature_type)

        if has_alt:
            log.ODM_INFO("Altitude data detected, enabling it for GPS alignment")
            config.append("use_altitude_tag: yes")

        gcp_path = reconstruction.gcp.gcp_path
        if has_alt or gcp_path:
            config.append("align_method: auto")
        else:
            config.append("align_method: orientation_prior")

        if args.use_hybrid_bundle_adjustment:
            log.ODM_INFO("Enabling hybrid bundle adjustment")
            config.append(
                "bundle_interval: 100"
            )  # Bundle after adding 'bundle_interval' cameras
            config.append(
                "bundle_new_points_ratio: 1.2"
            )  # Bundle when (new points) / (bundled points) > bundle_new_points_ratio
            config.append(
                "local_bundle_radius: 1"
            )  # Max image graph distance for images to be included in local bundle adjustment
        else:
            config.append("local_bundle_radius: 0")

        if gcp_path:
            config.append("bundle_use_gcp: yes")
            if not args.force_gps:
                config.append("bundle_use_gps: no")
            io.copy(gcp_path, self.path("gcp_list.txt"))

        config = config + append_config

        # write config file
        log.ODM_INFO(config)
        config_filename = self.get_config_file_path()
        with open(config_filename, 'w') as fout:
            fout.write("\n".join(config))
    else:
        log.ODM_WARNING("%s already exists, not rerunning OpenSfM setup"
                        % list_path)
def process(self, inputs, outputs):
    """Run the OpenSfM reconstruction and export it to Bundler and PMVS formats."""
    # Benchmarking
    start_time = system.now_raw()
    log.ODM_INFO('Running ODM OpenSfM Cell')

    # get inputs
    tree = self.inputs.tree
    args = self.inputs.args
    photos = self.inputs.photos

    if not photos:
        log.ODM_ERROR('Not enough photos in photos array to start OpenSfM')
        return ecto.QUIT

    # create working directories
    system.mkdir_p(tree.opensfm)
    system.mkdir_p(tree.pmvs)

    # check if we rerun cell or not
    rerun_cell = (args.rerun is not None and
                  args.rerun == 'opensfm') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and
                  'opensfm' in args.rerun_from)

    # check if reconstruction was done before
    if not io.file_exists(tree.opensfm_reconstruction) or rerun_cell:
        # create file list
        list_path = io.join_paths(tree.opensfm, 'image_list.txt')
        with open(list_path, 'w') as fout:
            for photo in photos:
                fout.write('%s\n' % photo.path_file)

        # create config file for OpenSfM
        config = [
            "use_exif_size: %s" % ('no' if not self.params.use_exif_size else 'yes'),
            "feature_process_size: %s" % self.params.feature_process_size,
            "feature_min_frames: %s" % self.params.feature_min_frames,
            "processes: %s" % self.params.processes,
            "matching_gps_neighbors: %s" % self.params.matching_gps_neighbors
        ]

        # NOTE(review): the condition checks args.matcher_distance but the
        # appended value reads self.params.matching_gps_distance — presumably
        # the params value mirrors the arg; verify against the cell wiring.
        if args.matcher_distance > 0:
            config.append("matching_gps_distance: %s" % self.params.matching_gps_distance)

        # write config file
        config_filename = io.join_paths(tree.opensfm, 'config.yaml')
        with open(config_filename, 'w') as fout:
            fout.write("\n".join(config))

        # run OpenSfM reconstruction
        system.run('PYTHONPATH=%s %s/bin/run_all %s' %
                   (context.pyopencv_path, context.opensfm_path, tree.opensfm))
    else:
        log.ODM_WARNING('Found a valid OpenSfM file in: %s' %
                        tree.opensfm_reconstruction)

    # check if reconstruction was exported to bundler before
    if not io.file_exists(tree.opensfm_bundle_list) or rerun_cell:
        # convert back to bundler's format
        system.run('PYTHONPATH=%s %s/bin/export_bundler %s' %
                   (context.pyopencv_path, context.opensfm_path, tree.opensfm))
    else:
        # NOTE(review): this warning prints the reconstruction path, not the
        # bundler list path that was actually checked above.
        log.ODM_WARNING('Found a valid Bundler file in: %s' %
                        tree.opensfm_reconstruction)

    # check if reconstruction was exported to pmvs before
    if not io.file_exists(tree.pmvs_visdat) or rerun_cell:
        # run PMVS converter
        system.run('PYTHONPATH=%s %s/bin/export_pmvs %s --output %s' %
                   (context.pyopencv_path, context.opensfm_path,
                    tree.opensfm, tree.pmvs))
    else:
        log.ODM_WARNING('Found a valid CMVS file in: %s' % tree.pmvs_visdat)

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'OpenSfM')

    log.ODM_INFO('Running ODM OpenSfM Cell - Finished')
    return ecto.OK if args.end_with != 'opensfm' else ecto.QUIT
def process(self, inputs, outputs):
    """Load the input dataset and publish the list of usable photos on outputs.photos."""
    # check if the extension is supported
    def supported_extension(file_name):
        (pathfn, ext) = os.path.splitext(file_name)
        return ext.lower() in context.supported_extensions

    # Get supported images from dir
    def get_images(in_dir):
        # filter images for its extension type
        log.ODM_DEBUG(in_dir)
        return [
            f for f in io.get_files_list(in_dir) if supported_extension(f)
        ]

    log.ODM_INFO('Running ODM Load Dataset Cell')

    # get inputs
    tree = self.inputs.tree

    # get images directory
    input_dir = tree.input_images
    images_dir = tree.dataset_raw
    resize_dir = tree.dataset_resize

    # Check first if a project already exists. This is a mediocre way to check, by checking the resize dir
    if io.dir_exists(resize_dir):
        log.ODM_DEBUG("resize dir: %s" % resize_dir)
        images_dir = resize_dir
    # if first time running, create project directory and copy images over to project/images
    else:
        if not io.dir_exists(images_dir):
            log.ODM_INFO(
                "Project directory %s doesn't exist. Creating it now. " %
                images_dir)
            system.mkdir_p(images_dir)
            copied = [
                copyfile(io.join_paths(input_dir, f),
                         io.join_paths(images_dir, f))
                for f in get_images(input_dir)
            ]

    log.ODM_DEBUG('Loading dataset from: %s' % images_dir)

    files = get_images(images_dir)

    if files:
        # create ODMPhoto list
        path_files = [io.join_paths(images_dir, f) for f in files]
        # NOTE(review): the worker pool is never closed/joined here —
        # consider pool.close()/pool.join() to avoid leaking processes.
        photos = Pool().map(
            partial(make_odm_photo, self.params.force_focal,
                    self.params.force_ccd), path_files)

        log.ODM_INFO('Found %s usable images' % len(photos))
    else:
        log.ODM_ERROR('Not enough supported images in %s' % images_dir)
        return ecto.QUIT

    # append photos to cell output
    outputs.photos = photos

    log.ODM_INFO('Running ODM Load Dataset Cell - Finished')
    return ecto.OK
def process(self, inputs, outputs):
    """Render the orthophoto from the textured model and georeference it as GeoTIFF."""
    # Benchmarking
    start_time = system.now_raw()
    log.ODM_INFO('Running ODM Orthophoto Cell')

    # get inputs
    args = self.inputs.args
    tree = self.inputs.tree
    reconstruction = inputs.reconstruction
    verbose = '-verbose' if self.params.verbose else ''

    # define paths and create working directories
    system.mkdir_p(tree.odm_orthophoto)

    # check if we rerun cell or not
    rerun_cell = (args.rerun is not None and
                  args.rerun == 'odm_orthophoto') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and
                  'odm_orthophoto' in args.rerun_from)

    if not io.file_exists(tree.odm_orthophoto_file) or rerun_cell:

        # odm_orthophoto definitions
        kwargs = {
            'bin': context.odm_modules_path,
            'log': tree.odm_orthophoto_log,
            'ortho': tree.odm_orthophoto_file,
            'corners': tree.odm_orthophoto_corners,
            # resolution is pixels/meter derived from the (GSD-capped) cm/pixel value
            'res': 1.0 / (gsd.cap_resolution(self.params.resolution,
                                             tree.opensfm_reconstruction,
                                             ignore_gsd=args.ignore_gsd) / 100.0),
            'verbose': verbose
        }

        # Have geo coordinates?
        georef = reconstruction.georef

        # Check if the georef object is initialized
        # (during a --rerun this might not be)
        # TODO: we should move this to a more central
        # location (perhaps during the dataset initialization)
        if georef and not georef.utm_east_offset:
            odm_georeferencing_model_txt_geo_file = os.path.join(
                tree.odm_georeferencing,
                tree.odm_georeferencing_model_txt_geo)
            if io.file_exists(odm_georeferencing_model_txt_geo_file):
                georef.extract_offsets(
                    odm_georeferencing_model_txt_geo_file)
            else:
                log.ODM_WARNING(
                    'Cannot read UTM offset from {}. An orthophoto will not be generated.'
                    .format(odm_georeferencing_model_txt_geo_file))

        # pick the input model: georeferenced when available, 3D or 2.5D mesh
        if georef:
            if args.use_3dmesh:
                kwargs['model_geo'] = os.path.join(
                    tree.odm_texturing,
                    tree.odm_georeferencing_model_obj_geo)
            else:
                kwargs['model_geo'] = os.path.join(
                    tree.odm_25dtexturing,
                    tree.odm_georeferencing_model_obj_geo)
        else:
            if args.use_3dmesh:
                kwargs['model_geo'] = os.path.join(
                    tree.odm_texturing, tree.odm_textured_model_obj)
            else:
                kwargs['model_geo'] = os.path.join(
                    tree.odm_25dtexturing, tree.odm_textured_model_obj)

        # run odm_orthophoto
        system.run(
            '{bin}/odm_orthophoto -inputFile {model_geo} '
            '-logFile {log} -outputFile {ortho} -resolution {res} {verbose} '
            '-outputCornerFile {corners}'.format(**kwargs))

        # Create georeferenced GeoTiff
        geotiffcreated = False

        if georef and georef.projection and georef.utm_east_offset and georef.utm_north_offset:
            ulx = uly = lrx = lry = 0.0
            # read the corner coordinates (first line of the corners file)
            # and shift them by the UTM offsets
            with open(tree.odm_orthophoto_corners) as f:
                for lineNumber, line in enumerate(f):
                    if lineNumber == 0:
                        tokens = line.split(' ')
                        if len(tokens) == 4:
                            ulx = float(tokens[0]) + \
                                float(georef.utm_east_offset)
                            lry = float(tokens[1]) + \
                                float(georef.utm_north_offset)
                            lrx = float(tokens[2]) + \
                                float(georef.utm_east_offset)
                            uly = float(tokens[3]) + \
                                float(georef.utm_north_offset)
            log.ODM_INFO('Creating GeoTIFF')

            kwargs = {
                'ulx': ulx,
                'uly': uly,
                'lrx': lrx,
                'lry': lry,
                'tiled': '' if self.params.no_tiled else '-co TILED=yes ',
                'compress': self.params.compress,
                'predictor': '-co PREDICTOR=2 ' if self.params.compress in
                             ['LZW', 'DEFLATE'] else '',
                'proj': georef.projection.srs,
                'bigtiff': self.params.bigtiff,
                'png': tree.odm_orthophoto_file,
                'tiff': tree.odm_orthophoto_tif,
                'log': tree.odm_orthophoto_tif_log,
                'max_memory': get_max_memory(),
                'threads': self.params.max_concurrency
            }

            system.run('gdal_translate -a_ullr {ulx} {uly} {lrx} {lry} '
                       '{tiled} '
                       '-co BIGTIFF={bigtiff} '
                       '-co COMPRESS={compress} '
                       '{predictor} '
                       '-co BLOCKXSIZE=512 '
                       '-co BLOCKYSIZE=512 '
                       '-co NUM_THREADS={threads} '
                       '-a_srs \"{proj}\" '
                       '--config GDAL_CACHEMAX {max_memory}% '
                       '{png} {tiff} > {log}'.format(**kwargs))

            if args.crop > 0:
                shapefile_path = os.path.join(
                    tree.odm_georeferencing,
                    'odm_georeferenced_model.bounds.shp')
                Cropper.crop(
                    shapefile_path, tree.odm_orthophoto_tif, {
                        'TILED': 'NO' if self.params.no_tiled else 'YES',
                        'COMPRESS': self.params.compress,
                        'PREDICTOR': '2' if self.params.compress in
                                     ['LZW', 'DEFLATE'] else '1',
                        'BIGTIFF': self.params.bigtiff,
                        'BLOCKXSIZE': 512,
                        'BLOCKYSIZE': 512,
                        'NUM_THREADS': self.params.max_concurrency
                    })

            if self.params.build_overviews:
                log.ODM_DEBUG("Building Overviews")
                kwargs = {
                    'orthophoto': tree.odm_orthophoto_tif,
                    'log': tree.odm_orthophoto_gdaladdo_log
                }

                # Run gdaladdo
                system.run(
                    'gdaladdo -ro -r average '
                    '--config BIGTIFF_OVERVIEW IF_SAFER '
                    '--config COMPRESS_OVERVIEW JPEG '
                    '{orthophoto} 2 4 8 16 > {log}'.format(**kwargs))

            geotiffcreated = True
        if not geotiffcreated:
            log.ODM_WARNING(
                'No geo-referenced orthophoto created due '
                'to missing geo-referencing or corner coordinates.')
    else:
        log.ODM_WARNING('Found a valid orthophoto in: %s' %
                        tree.odm_orthophoto_file)

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'Orthophoto')

    log.ODM_INFO('Running ODM OrthoPhoto Cell - Finished')
    return ecto.OK if args.end_with != 'odm_orthophoto' else ecto.QUIT
def process(self, inputs, outputs):
    """Georeference the textured model and point cloud via odm_georef.

    Uses a GCP file when provided/found, otherwise falls back to UTM
    coordinates extracted from image EXIF. Also converts outputs to
    LAS and an XYZ CSV.
    """
    # find a file in the root directory
    def find(file, dir):
        # only inspects the first directory yielded by os.walk (the root itself)
        for root, dirs, files in os.walk(dir):
            return '/'.join((root, file)) if file in files else None

    # Benchmarking
    start_time = system.now_raw()
    log.ODM_INFO('Running ODM Georeferencing Cell')

    # get inputs
    args = self.inputs.args
    tree = self.inputs.tree
    gcpfile = io.join_paths(tree.root_path, self.params.gcp_file) \
        if self.params.gcp_file else find('gcp_list.txt', tree.root_path)
    geocreated = True
    verbose = '-verbose' if self.params.verbose else ''

    # define paths and create working directories
    system.mkdir_p(tree.odm_georeferencing)

    # in case a gcp file it's not provided, let's try to generate it using
    # images metadata. Internally calls jhead.
    log.ODM_DEBUG(self.params.gcp_file)
    if not self.params.gcp_file:  # and \
        # not io.file_exists(tree.odm_georeferencing_coords):

        log.ODM_WARNING('No coordinates file. '
                        'Generating coordinates file: %s'
                        % tree.odm_georeferencing_coords)

        # odm_georeference definitions
        kwargs = {
            'bin': context.odm_modules_path,
            'imgs': tree.dataset_resize,
            'imgs_list': tree.opensfm_bundle_list,
            'coords': tree.odm_georeferencing_coords,
            'log': tree.odm_georeferencing_utm_log,
            'verbose': verbose
        }

        # run UTM extraction binary
        extract_utm = system.run_and_return(
            '{bin}/odm_extract_utm -imagesPath {imgs}/ '
            '-imageListFile {imgs_list} -outputCoordFile {coords} {verbose} '
            '-logFile {log}'.format(**kwargs))

        # non-empty output from the binary signals a failure
        if extract_utm != '':
            log.ODM_WARNING('Could not generate coordinates file. '
                            'Ignore if there is a GCP file. Error: %s'
                            % extract_utm)

    # check if we rerun cell or not
    rerun_cell = (args.rerun is not None and
                  args.rerun == 'odm_georeferencing') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and
                  'odm_georeferencing' in args.rerun_from)

    if not io.file_exists(tree.odm_georeferencing_model_obj_geo) or \
       not io.file_exists(tree.odm_georeferencing_model_ply_geo) or rerun_cell:

        # odm_georeference definitions
        kwargs = {
            'bin': context.odm_modules_path,
            'bundle': tree.opensfm_bundle,
            'imgs': tree.dataset_resize,
            'imgs_list': tree.opensfm_bundle_list,
            'model': tree.odm_textured_model_obj,
            'log': tree.odm_georeferencing_log,
            'coords': tree.odm_georeferencing_coords,
            'pc_geo': tree.odm_georeferencing_model_ply_geo,
            'geo_sys': tree.odm_georeferencing_model_txt_geo,
            'model_geo': tree.odm_georeferencing_model_obj_geo,
            'size': self.params.img_size,
            'gcp': gcpfile,
            'verbose': verbose
        }

        # point cloud source depends on the densifier used
        if args.use_opensfm_pointcloud:
            kwargs['pc'] = tree.opensfm_model
        else:
            kwargs['pc'] = tree.pmvs_model

        # Check to see if the GCP file exists
        if not self.params.use_exif and \
           (self.params.gcp_file or find('gcp_list.txt', tree.root_path)):
            log.ODM_INFO('Found %s' % gcpfile)
            try:
                system.run('{bin}/odm_georef -bundleFile {bundle} -imagesPath {imgs} -imagesListPath {imgs_list} '
                           '-bundleResizedTo {size} -inputFile {model} -outputFile {model_geo} '
                           '-inputPointCloudFile {pc} -outputPointCloudFile {pc_geo} {verbose} '
                           '-logFile {log} -georefFileOutputPath {geo_sys} -gcpFile {gcp} '
                           '-outputCoordFile {coords}'.format(**kwargs))
            except Exception:
                log.ODM_EXCEPTION('Georeferencing failed. ')
                return ecto.QUIT
        elif io.file_exists(tree.odm_georeferencing_coords):
            log.ODM_INFO('Running georeferencing with generated coords file.')
            system.run('{bin}/odm_georef -bundleFile {bundle} -inputCoordFile {coords} '
                       '-inputFile {model} -outputFile {model_geo} '
                       '-inputPointCloudFile {pc} -outputPointCloudFile {pc_geo} {verbose} '
                       '-logFile {log} -georefFileOutputPath {geo_sys}'.format(**kwargs))
        else:
            log.ODM_WARNING('Georeferencing failed. Make sure your '
                            'photos have geotags in the EXIF or you have '
                            'provided a GCP file. ')
            geocreated = False  # skip the rest of the georeferencing

        if geocreated:
            # update images metadata
            geo_ref = types.ODM_GeoRef()
            geo_ref.parse_coordinate_system(tree.odm_georeferencing_coords)

            for idx, photo in enumerate(self.inputs.photos):
                geo_ref.utm_to_latlon(tree.odm_georeferencing_latlon, photo, idx)

            # convert ply model to LAS reference system
            geo_ref.convert_to_las(tree.odm_georeferencing_model_ply_geo,
                                   tree.odm_georeferencing_pdal)

            # XYZ point cloud output
            log.ODM_INFO("Creating geo-referenced CSV file (XYZ format, can be used with GRASS to create DEM)")
            # NOTE(review): binary mode ("wb") with csv.writer is Python 2
            # behavior; on Python 3 this would need text mode + newline=''.
            with open(tree.odm_georeferencing_xyz_file, "wb") as csvfile:
                csvfile_writer = csv.writer(csvfile, delimiter=",")
                reachedpoints = False
                with open(tree.odm_georeferencing_model_ply_geo) as f:
                    # vertex data starts after the PLY "end_header" line;
                    # shift X/Y back to absolute UTM by adding the offsets
                    for lineNumber, line in enumerate(f):
                        if reachedpoints:
                            tokens = line.split(" ")
                            csv_line = [float(tokens[0]) + geo_ref.utm_east_offset,
                                        float(tokens[1]) + geo_ref.utm_north_offset,
                                        tokens[2]]
                            csvfile_writer.writerow(csv_line)
                        if line.startswith("end_header"):
                            reachedpoints = True
            # NOTE(review): redundant — the with-statement already closed the file
            csvfile.close()
    else:
        log.ODM_WARNING('Found a valid georeferenced model in: %s'
                        % tree.odm_georeferencing_model_ply_geo)

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'Georeferencing')

    log.ODM_INFO('Running ODM Georeferencing Cell - Finished')
    return ecto.OK if args.end_with != 'odm_georeferencing' else ecto.QUIT
def process(self, args, outputs):
    """Report stage: export camera shots, augment OpenSfM stats with ODM's
    own point cloud / runtime / DEM statistics, render the overlap diagram
    and DEM previews, then export the final PDF report.

    Fixes over the previous version:
    - `odm_stats` could remain None (OpenSfM stats missing) and the later
      `odm_stats.get(...)` raised AttributeError; now guarded.
    - when no point cloud was found, `odm_stats['point_cloud_statistics']`
      was never created and the 'dense' assignment raised KeyError; now
      uses setdefault.
    """
    tree = outputs['tree']
    reconstruction = outputs['reconstruction']

    if not os.path.exists(tree.odm_report):
        system.mkdir_p(tree.odm_report)

    log.ODM_INFO("Exporting shots.geojson")

    shots_geojson = os.path.join(tree.odm_report, "shots.geojson")
    if not io.file_exists(shots_geojson) or self.rerun():
        # Extract geographical camera shots
        if reconstruction.is_georeferenced():
            shots = get_geojson_shots_from_opensfm(
                tree.opensfm_reconstruction,
                tree.opensfm_transformation,
                reconstruction.get_proj_srs())
        else:
            # Pseudo geo: anchor the shots to the orthophoto instead
            shots = get_geojson_shots_from_opensfm(
                tree.opensfm_reconstruction,
                pseudo_geotiff=tree.odm_orthophoto_tif)

        if shots:
            with open(shots_geojson, "w") as fout:
                fout.write(json.dumps(shots))
            log.ODM_INFO("Wrote %s" % shots_geojson)
        else:
            log.ODM_WARNING("Cannot extract shots")
    else:
        log.ODM_WARNING('Found a valid shots file in: %s' % shots_geojson)

    if args.skip_report:
        # Stop right here
        log.ODM_WARNING("Skipping report generation as requested")
        return

    # Augment OpenSfM stats file with our own stats
    odm_stats_json = os.path.join(tree.odm_report, "stats.json")
    octx = OSFMContext(tree.opensfm)
    osfm_stats_json = octx.path("stats", "stats.json")
    odm_stats = None
    point_cloud_file = None
    views_dimension = None

    if not os.path.exists(odm_stats_json) or self.rerun():
        if os.path.exists(osfm_stats_json):
            with open(osfm_stats_json, 'r') as f:
                odm_stats = json.loads(f.read())

            # Add point cloud stats (prefer the georeferenced LAZ,
            # fall back to the filtered PLY)
            if os.path.exists(tree.odm_georeferencing_model_laz):
                point_cloud_file = tree.odm_georeferencing_model_laz
                views_dimension = "UserData"

                # pc_info_file should have been generated by cropper
                pc_info_file = os.path.join(tree.odm_georeferencing, "odm_georeferenced_model.info.json")
                odm_stats['point_cloud_statistics'] = generate_point_cloud_stats(tree.odm_georeferencing_model_laz, pc_info_file)
            else:
                ply_pc = os.path.join(tree.odm_filterpoints, "point_cloud.ply")
                if os.path.exists(ply_pc):
                    point_cloud_file = ply_pc
                    views_dimension = "views"

                    pc_info_file = os.path.join(tree.odm_filterpoints, "point_cloud.info.json")
                    odm_stats['point_cloud_statistics'] = generate_point_cloud_stats(ply_pc, pc_info_file)
                else:
                    log.ODM_WARNING("No point cloud found")

            # setdefault: 'point_cloud_statistics' does not exist when no
            # point cloud was found above (previously a KeyError)
            odm_stats.setdefault('point_cloud_statistics', {})['dense'] = not args.fast_orthophoto

            # Add runtime stats
            total_time = (system.now_raw() - outputs['start_time']).total_seconds()
            odm_stats['odm_processing_statistics'] = {
                'total_time': total_time,
                'total_time_human': hms(total_time),
                'average_gsd': gsd.opensfm_reconstruction_average_gsd(octx.recon_file(), use_all_shots=reconstruction.has_gcp()),
            }

            with open(odm_stats_json, 'w') as f:
                f.write(json.dumps(odm_stats))
        else:
            log.ODM_WARNING("Cannot generate report, OpenSfM stats are missing")
    else:
        log.ODM_WARNING("Reading existing stats %s" % odm_stats_json)
        with open(odm_stats_json, 'r') as f:
            odm_stats = json.loads(f.read())

    # Generate overlap diagram. Guard on odm_stats itself: it is still None
    # when the OpenSfM stats file was missing (previously an AttributeError).
    if odm_stats and odm_stats.get('point_cloud_statistics') and point_cloud_file and views_dimension:
        bounds = odm_stats['point_cloud_statistics'].get('stats', {}).get('bbox', {}).get('native', {}).get('bbox')
        if bounds:
            image_target_size = 1400  # pixels
            osfm_stats_dir = os.path.join(tree.opensfm, "stats")
            diagram_tiff = os.path.join(osfm_stats_dir, "overlap.tif")
            diagram_png = os.path.join(osfm_stats_dir, "overlap.png")

            width = bounds.get('maxx') - bounds.get('minx')
            height = bounds.get('maxy') - bounds.get('miny')
            max_dim = max(width, height)
            resolution = float(max_dim) / float(image_target_size)
            radius = resolution * math.sqrt(2)

            # Larger radius for sparse point cloud diagram
            if not odm_stats['point_cloud_statistics']['dense']:
                radius *= 10

            # Rasterize the chosen per-point dimension (views/UserData) to
            # a TIFF whose max value per cell approximates overlap count
            system.run("pdal translate -i \"{}\" "
                       "-o \"{}\" "
                       "--writer gdal "
                       "--writers.gdal.resolution={} "
                       "--writers.gdal.data_type=uint8_t "
                       "--writers.gdal.dimension={} "
                       "--writers.gdal.output_type=max "
                       "--writers.gdal.radius={} ".format(
                           point_cloud_file, diagram_tiff,
                           resolution, views_dimension, radius))

            report_assets = os.path.abspath(os.path.join(os.path.dirname(__file__), "../opendm/report"))
            overlap_color_map = os.path.join(report_assets, "overlap_color_map.txt")

            bounds_file_path = os.path.join(tree.odm_georeferencing, 'odm_georeferenced_model.bounds.gpkg')
            if args.crop > 0 and os.path.isfile(bounds_file_path):
                Cropper.crop(bounds_file_path, diagram_tiff, get_orthophoto_vars(args), keep_original=False)

            system.run("gdaldem color-relief \"{}\" \"{}\" \"{}\" -of PNG -alpha".format(
                diagram_tiff, overlap_color_map, diagram_png))

            # Copy assets
            for asset in ["overlap_diagram_legend.png", "dsm_gradient.png"]:
                shutil.copy(os.path.join(report_assets, asset),
                            os.path.join(osfm_stats_dir, asset))

            # Generate previews of ortho/dsm
            if os.path.isfile(tree.odm_orthophoto_tif):
                osfm_ortho = os.path.join(osfm_stats_dir, "ortho.png")
                generate_png(tree.odm_orthophoto_tif, osfm_ortho, image_target_size)

            dems = []
            if args.dsm:
                dems.append("dsm")
            if args.dtm:
                dems.append("dtm")

            for dem in dems:
                dem_file = tree.path("odm_dem", "%s.tif" % dem)
                if os.path.isfile(dem_file):
                    # Resize first (faster)
                    resized_dem_file = io.related_file_path(dem_file, postfix=".preview")
                    system.run("gdal_translate -outsize {} 0 \"{}\" \"{}\" --config GDAL_CACHEMAX {}%".format(
                        image_target_size, dem_file, resized_dem_file, get_max_memory()))

                    log.ODM_INFO("Computing raster stats for %s" % resized_dem_file)
                    dem_stats = get_raster_stats(resized_dem_file)
                    if len(dem_stats) > 0:
                        odm_stats[dem + '_statistics'] = dem_stats[0]

                    osfm_dem = os.path.join(osfm_stats_dir, "%s.png" % dem)
                    colored_dem, hillshade_dem, colored_hillshade_dem = generate_colored_hillshade(resized_dem_file)
                    system.run("gdal_translate -outsize {} 0 -of png \"{}\" \"{}\" --config GDAL_CACHEMAX {}%".format(
                        image_target_size, colored_hillshade_dem, osfm_dem, get_max_memory()))

                    # Clean up intermediate preview files
                    for f in [resized_dem_file, colored_dem, hillshade_dem, colored_hillshade_dem]:
                        if os.path.isfile(f):
                            os.remove(f)
        else:
            log.ODM_WARNING("Cannot generate overlap diagram, cannot compute point cloud bounds")
    else:
        log.ODM_WARNING("Cannot generate overlap diagram, point cloud stats missing")

    octx.export_report(os.path.join(tree.odm_report, "report.pdf"), odm_stats, self.rerun())
def process(self, inputs, outputs):
    """Ecto cell: render the orthophoto PNG and, when a coordinates file
    exists, wrap it into a georeferenced, tiled, compressed GeoTIFF."""
    # Benchmarking
    start_time = system.now_raw()

    log.ODM_INFO('Running ODM Orthophoto Cell')

    # get inputs
    args = self.inputs.args
    tree = self.inputs.tree
    verbose = '-verbose' if self.params.verbose else ''

    # define paths and create working directories
    system.mkdir_p(tree.odm_orthophoto)

    # check if we rerun cell or not
    rerun_cell = (args.rerun is not None and
                  args.rerun == 'odm_orthophoto') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and
                  'odm_orthophoto' in args.rerun_from)

    if not io.file_exists(tree.odm_orthophoto_file) or rerun_cell:

        # odm_orthophoto definitions
        kwargs = {
            'bin': context.odm_modules_path,
            'log': tree.odm_orthophoto_log,
            'ortho': tree.odm_orthophoto_file,
            'corners': tree.odm_orthophoto_corners,
            'res': self.params.resolution,
            'verbose': verbose
        }

        # Use the georeferenced model when a coords file exists,
        # otherwise fall back to the plain textured model
        kwargs['model_geo'] = tree.odm_georeferencing_model_obj_geo \
            if io.file_exists(tree.odm_georeferencing_coords) \
            else tree.odm_textured_model_obj

        # run odm_orthophoto
        system.run('{bin}/odm_orthophoto -inputFile {model_geo} '
                   '-logFile {log} -outputFile {ortho} -resolution {res} {verbose} '
                   '-outputCornerFile {corners}'.format(**kwargs))

        if not io.file_exists(tree.odm_georeferencing_coords):
            log.ODM_WARNING('No coordinates file. A georeferenced raster '
                            'will not be created')
        else:
            # Create georeferenced GeoTiff
            geotiffcreated = False
            georef = types.ODM_GeoRef()
            # creates the coord refs
            # TODO I don't want to have to do this twice- after odm_georef
            georef.parse_coordinate_system(tree.odm_georeferencing_coords)

            if georef.epsg and georef.utm_east_offset and georef.utm_north_offset:
                ulx = uly = lrx = lry = 0.0
                # Corner file: first line holds four offset-local UTM values;
                # add the UTM offsets back to get absolute coordinates
                with open(tree.odm_orthophoto_corners) as f:
                    for lineNumber, line in enumerate(f):
                        if lineNumber == 0:
                            tokens = line.split(' ')
                            if len(tokens) == 4:
                                ulx = float(tokens[0]) + \
                                    float(georef.utm_east_offset)
                                lry = float(tokens[1]) + \
                                    float(georef.utm_north_offset)
                                lrx = float(tokens[2]) + \
                                    float(georef.utm_east_offset)
                                uly = float(tokens[3]) + \
                                    float(georef.utm_north_offset)

                log.ODM_INFO('Creating GeoTIFF')

                kwargs = {
                    'ulx': ulx,
                    'uly': uly,
                    'lrx': lrx,
                    'lry': lry,
                    'tiled': '' if self.params.no_tiled else '-co TILED=yes ',
                    'compress': self.params.compress,
                    # PREDICTOR=2 only helps LZW/DEFLATE compression
                    'predictor': '-co PREDICTOR=2 ' if self.params.compress in
                                 ['LZW', 'DEFLATE'] else '',
                    'epsg': georef.epsg,
                    # NOTE(review): 't_srs' is computed here but the command
                    # below uses {epsg} directly — confirm whether t_srs is
                    # consumed elsewhere or is dead.
                    't_srs': self.params.t_srs or "EPSG:{0}".format(georef.epsg),
                    'bigtiff': self.params.bigtiff,
                    'png': tree.odm_orthophoto_file,
                    'tiff': tree.odm_orthophoto_tif,
                    'log': tree.odm_orthophoto_tif_log
                }

                system.run('gdal_translate -a_ullr {ulx} {uly} {lrx} {lry} '
                           '{tiled} '
                           '-co BIGTIFF={bigtiff} '
                           '-co COMPRESS={compress} '
                           '{predictor} '
                           '-co BLOCKXSIZE=512 '
                           '-co BLOCKYSIZE=512 '
                           '-co NUM_THREADS=ALL_CPUS '
                           '-a_srs \"EPSG:{epsg}\" '
                           '{png} {tiff} > {log}'.format(**kwargs))

                if self.params.build_overviews:
                    log.ODM_DEBUG("Building Overviews")
                    kwargs = {
                        'orthophoto': tree.odm_orthophoto_tif,
                        'log': tree.odm_orthophoto_gdaladdo_log
                    }
                    # Run gdaladdo
                    system.run('gdaladdo -ro -r average '
                               '--config BIGTIFF_OVERVIEW IF_SAFER '
                               '--config COMPRESS_OVERVIEW JPEG '
                               '{orthophoto} 2 4 8 16 > {log}'.format(**kwargs))

                geotiffcreated = True

            if not geotiffcreated:
                log.ODM_WARNING('No geo-referenced orthophoto created due '
                                'to missing geo-referencing or corner coordinates.')
    else:
        log.ODM_WARNING('Found a valid orthophoto in: %s' % tree.odm_orthophoto_file)

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'Orthophoto')

    log.ODM_INFO('Running ODM OrthoPhoto Cell - Finished')
    return ecto.OK if args.end_with != 'odm_orthophoto' else ecto.QUIT
def process(args, tree, reconstruction, current_path):
    """Georeference the textured model(s) and point cloud found under
    ``current_path``.

    Builds one georeferencing "run" per output model (primary band plus,
    for multi-camera rigs, one per secondary band), invokes odm_georef for
    each run, then post-processes the georeferenced point cloud once
    (offset extraction, LAS export steps, optional cropping bounds).

    Fixes over the previous version:
    - the per-run loop rebound the shared file-name variables
      (e.g. odm_georeferencing_log) to fully joined paths, so a second run
      nested the first run's path inside its own; paths are now loop-local.
    - removed dead debug flags (`b = True`, `pio = True`), the dead
      'Found a valid georeferenced model' branch they guarded, and a stray
      `print()` call; also removed never-referenced path variables.
    """
    # Path definitions (only names used below are kept)
    odm_georeferencing = io.join_paths(current_path, 'odm_georeferencing')
    odm_georeferencing_coords = io.join_paths(odm_georeferencing, 'coords.txt')
    odm_georeferencing_model_laz = io.join_paths(odm_georeferencing, 'odm_georeferenced_model.laz')

    # Bare file names — joined with each run's directory inside the loop
    odm_georeferencing_log = 'odm_georeferencing_log.txt'
    odm_georeferencing_transform_file = 'odm_georeferencing_transform.txt'
    odm_georeferencing_model_txt_geo = 'odm_georeferencing_model_geo.txt'
    odm_georeferencing_model_obj_geo = 'odm_textured_model_geo.obj'

    odm_texturing = io.join_paths(current_path, 'mvs')
    odm_textured_model_obj = io.join_paths(odm_texturing, 'odm_textured_model.obj')
    images_dir = io.join_paths(current_path, 'images')

    opensfm_bundle = os.path.join(current_path, 'opensfm', 'bundle_r000.out')
    opensfm_bundle_list = os.path.join(current_path, 'opensfm', 'list_r000.out')
    opensfm_transformation = os.path.join(current_path, 'opensfm', 'geocoords_transformation.txt')
    filtered_point_cloud = os.path.join(current_path, 'filterpoints', 'point_cloud.ply')

    # Point cloud work must happen exactly once even though several models
    # (3D, 2.5D, per-band) may be georeferenced below
    doPointCloudGeo = True
    transformPointCloud = True
    verbose = ''

    # Mutable cell shared with add_run (pre-`nonlocal` idiom)
    class nonloc:
        runs = []

    def add_run(primary=True, band=None):
        # Secondary bands get their own subdirectory
        subdir = ""
        if not primary and band is not None:
            subdir = band

        # Make sure 2.5D mesh is georeferenced before the 3D mesh
        # Because it will be used to calculate a transform
        # for the point cloud. If we use the 3D model transform,
        # DEMs and orthophoto might not align!
        nonloc.runs += [{
            'georeferencing_dir': os.path.join(odm_georeferencing, subdir),
            'texturing_dir': os.path.join(odm_texturing, subdir),
        }]

        if not args.skip_3dmodel and (primary or args.use_3dmesh):
            nonloc.runs += [{
                'georeferencing_dir': odm_georeferencing,
                'texturing_dir': os.path.join(odm_texturing, subdir),
            }]

    if reconstruction.multi_camera:
        for band in reconstruction.multi_camera:
            primary = band == reconstruction.multi_camera[0]
            add_run(primary, band['name'].lower())
    else:
        add_run()

    for r in nonloc.runs:
        if not io.dir_exists(r['georeferencing_dir']):
            system.mkdir_p(r['georeferencing_dir'])

        # Loop-local paths (see docstring: previously these rebound the
        # shared names and corrupted the second run's paths)
        model_obj_geo = os.path.join(r['texturing_dir'], odm_georeferencing_model_obj_geo)
        # NOTE(review): odm_textured_model_obj is already a full path, so
        # this join only works as intended when current_path is absolute —
        # confirm against callers.
        model_obj = os.path.join(r['texturing_dir'], odm_textured_model_obj)
        run_log = os.path.join(r['georeferencing_dir'], odm_georeferencing_log)
        run_transform_file = os.path.join(r['georeferencing_dir'], odm_georeferencing_transform_file)
        model_txt_geo_file = os.path.join(r['georeferencing_dir'], odm_georeferencing_model_txt_geo)

        # NOTE(review): the original file-existence rerun check was disabled
        # (hardcoded flag); georeferencing always runs for every invocation.

        # odm_georeference definitions
        kwargs = {
            'bin': context.odm_modules_path,
            'input_pc_file': filtered_point_cloud,
            'bundle': opensfm_bundle,
            'imgs': images_dir,
            'imgs_list': opensfm_bundle_list,
            'model': model_obj,
            'log': run_log,
            'input_trans_file': opensfm_transformation,
            'transform_file': run_transform_file,
            'coords': odm_georeferencing_coords,
            'output_pc_file': odm_georeferencing_model_laz,
            'geo_sys': model_txt_geo_file,
            'model_geo': model_obj_geo,
            'verbose': verbose
        }

        if transformPointCloud:
            kwargs['pc_params'] = '-inputPointCloudFile {input_pc_file} -outputPointCloudFile {output_pc_file}'.format(**kwargs)

            if reconstruction.is_georeferenced():
                # quote: the proj4 string contains spaces
                kwargs['pc_params'] += ' -outputPointCloudSrs %s' % pipes.quote(reconstruction.georef.proj4())
            else:
                log.ODM_WARNING('NO SRS: The output point cloud will not have a SRS.')
        else:
            kwargs['pc_params'] = ''

        if io.file_exists(opensfm_transformation) and io.file_exists(odm_georeferencing_coords):
            log.ODM_INFO('Running georeferencing with OpenSfM transformation matrix')
            system.run('{bin}/odm_georef -bundleFile {bundle} -inputTransformFile {input_trans_file} -inputCoordFile {coords} '
                       '-inputFile {model} -outputFile {model_geo} '
                       '{pc_params} {verbose} '
                       '-logFile {log} -outputTransformFile {transform_file} -georefFileOutputPath {geo_sys}'.format(**kwargs))
        elif io.file_exists(odm_georeferencing_coords):
            log.ODM_INFO('Running georeferencing with generated coords file.')
            system.run('{bin}/odm_georef -bundleFile {bundle} -inputCoordFile {coords} '
                       '-inputFile {model} -outputFile {model_geo} '
                       '{pc_params} {verbose} '
                       '-logFile {log} -outputTransformFile {transform_file} -georefFileOutputPath {geo_sys}'.format(**kwargs))
        else:
            log.ODM_WARNING('Georeferencing failed. Make sure your '
                            'photos have geotags in the EXIF or you have '
                            'provided a GCP file. ')
            doPointCloudGeo = False  # skip the rest of the georeferencing

        if doPointCloudGeo:
            reconstruction.georef.extract_offsets(model_txt_geo_file)
            point_cloud.post_point_cloud_steps(args, tree)

            if args.crop > 0:
                log.ODM_INFO("Calculating cropping area and generating bounds shapefile from point cloud")
                cropper = Cropper(odm_georeferencing, 'odm_georeferenced_model')

                decimation_step = 40 if args.fast_orthophoto or args.use_opensfm_dense else 90

                # More aggressive decimation for large datasets
                if not args.fast_orthophoto:
                    decimation_step *= int(len(reconstruction.photos) / 1000) + 1

                cropper.create_bounds_gpkg(odm_georeferencing_model_laz, args.crop,
                                           decimation_step=decimation_step)

            # Do not execute a second time, since
            # We might be doing georeferencing for
            # multiple models (3D, 2.5D, ...)
            doPointCloudGeo = False
            transformPointCloud = False

        if args.optimize_disk_space and io.file_exists(odm_georeferencing_model_laz) and io.file_exists(filtered_point_cloud):
            os.remove(filtered_point_cloud)
def process(self, args, outputs):
    """MVE stage: prepare an MVE scene from the OpenSfM bundle, run dense
    reconstruction (dmrecon), export the point set (scene2pset) and
    optionally filter it by confidence (meshclean)."""
    # get inputs
    tree = outputs['tree']
    reconstruction = outputs['reconstruction']
    photos = reconstruction.photos

    if not photos:
        log.ODM_ERROR('Not enough photos in photos array to start MVE')
        exit(1)

    # check if reconstruction was done before
    if not io.file_exists(tree.mve_model) or self.rerun():
        # cleanup if a rerun
        if io.dir_exists(tree.mve_path) and self.rerun():
            shutil.rmtree(tree.mve_path)

        # make bundle directory
        if not io.file_exists(tree.mve_bundle):
            system.mkdir_p(tree.mve_path)
            system.mkdir_p(io.join_paths(tree.mve_path, 'bundle'))

            octx = OSFMContext(tree.opensfm)
            octx.save_absolute_image_list_to(tree.mve_image_list)
            io.copy(tree.opensfm_bundle, tree.mve_bundle)

        # mve makescene wants the output directory
        # to not exists before executing it (otherwise it
        # will prompt the user for confirmation)
        if io.dir_exists(tree.mve):
            shutil.rmtree(tree.mve)

        # run mve makescene
        if not io.dir_exists(tree.mve_views):
            system.run('%s "%s" "%s"' % (context.makescene_path, tree.mve_path, tree.mve),
                       env_vars={'OMP_NUM_THREADS': args.max_concurrency})

        self.update_progress(10)

        # Compute mve output scale based on depthmap_resolution:
        # scale 0 = full resolution; each +1 halves each image dimension
        # (hence log base 4 of the pixel-count ratio)
        max_width = 0
        max_height = 0
        for photo in photos:
            max_width = max(photo.width, max_width)
            max_height = max(photo.height, max_height)

        max_pixels = args.depthmap_resolution * args.depthmap_resolution
        if max_width * max_height <= max_pixels:
            mve_output_scale = 0
        else:
            ratio = float(max_width * max_height) / float(max_pixels)
            mve_output_scale = int(math.ceil(math.log(ratio) / math.log(4.0)))

        dmrecon_config = [
            "-s%s" % mve_output_scale,
            "--progress=silent",
            "--local-neighbors=2",
            # "--filter-width=3",
        ]

        # Run MVE's dmrecon
        log.ODM_INFO(' ')
        log.ODM_INFO('                                               ,*/**')
        log.ODM_INFO('                                             ,*@%*/@%*')
        log.ODM_INFO('                                           ,/@%******@&*.')
        log.ODM_INFO('                                        ,*@&*********/@&*')
        log.ODM_INFO('                                      ,*@&**************@&*')
        log.ODM_INFO('                                   ,/@&******************@&*.')
        log.ODM_INFO('                                 ,*@&*********************/@&*')
        log.ODM_INFO('                              ,*@&**************************@&*.')
        log.ODM_INFO('                            ,/@&******************************&&*,')
        log.ODM_INFO('                         ,*&&**********************************@&*.')
        log.ODM_INFO('                       ,*@&**************************************@&*.')
        log.ODM_INFO('                    ,*@&***************#@@@@@@@@@%****************&&*,')
        log.ODM_INFO('                  .*&&***************&@@@@@@@@@@@@@@****************@@*.')
        log.ODM_INFO('                .*@&***************&@@@@@@@@@@@@@@@@@%****(@@%********@@*.')
        log.ODM_INFO('              .*@@***************%@@@@@@@@@@@@@@@@@@@@@#****&@@@@%******&@*,')
        log.ODM_INFO('            .*&@****************@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@/*****@@*.')
        log.ODM_INFO('          .*@@****************@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@%*************@@*.')
        log.ODM_INFO('        .*@@****/***********@@@@@&**(@@@@@@@@@@@@@@@@@@@@@@@#*****************%@*,')
        log.ODM_INFO('      */@*******@*******#@@@@%*******/@@@@@@@@@@@@@@@@@@@@********************/@(,')
        log.ODM_INFO('      ,*@(********&@@@@@@#**************/@@@@@@@#**(@@&/**********************@&*')
        log.ODM_INFO('        *#@/*******************************@@@@@***&@&**********************&@*,')
        log.ODM_INFO('          *#@#******************************&@@@***@#*********************&@*,')
        log.ODM_INFO('            */@#*****************************@@@************************@@*.')
        log.ODM_INFO('              *#@/***************************/@@/*********************%@*,')
        log.ODM_INFO('                *#@#**************************#@@%******************%@*,')
        log.ODM_INFO('                  */@#*************************(@@@@@@@&%/********&@*.')
        log.ODM_INFO('                    *(@(*********************************/%@@%**%@*,')
        log.ODM_INFO('                      *(@%************************************%@**')
        log.ODM_INFO('                         **@%********************************&@*,')
        log.ODM_INFO('                           *(@(****************************%@/*')
        log.ODM_INFO('                             ,(@%************************#@/*')
        log.ODM_INFO('                               ,*@%********************&@/,')
        log.ODM_INFO('                                 */@#****************#@/*')
        log.ODM_INFO('                                   ,/@&************#@/*')
        log.ODM_INFO('                                      ,*@&********%@/,')
        log.ODM_INFO('                                        */@#****(@/*')
        log.ODM_INFO('                                          ,/@@@@(*')
        log.ODM_INFO('                                            .**,')
        log.ODM_INFO('')
        log.ODM_INFO("Running dense reconstruction. This might take a while. Please be patient, the process is not dead or hung.")
        log.ODM_INFO("                              Process is running")

        # TODO: find out why MVE is crashing at random
        # MVE *seems* to have a race condition, triggered randomly, regardless of dataset
        # https://gist.github.com/pierotofy/6c9ce93194ba510b61e42e3698cfbb89
        # Temporary workaround is to retry the reconstruction until we get it right
        # (up to a certain number of retries).
        retry_count = 1
        while retry_count < 10:
            try:
                system.run('%s "%s" "%s"' % (context.dmrecon_path, ' '.join(dmrecon_config), tree.mve),
                           env_vars={'OMP_NUM_THREADS': args.max_concurrency})
                break
            except Exception as e:
                # 134 = SIGABRT; retry only on the known crash signatures
                if str(e) == "Child returned 134" or str(e) == "Child returned 1":
                    retry_count += 1
                    log.ODM_WARNING("Caught error code, retrying attempt #%s" % retry_count)
                else:
                    raise e

        self.update_progress(90)

        scene2pset_config = [
            "-F%s" % mve_output_scale
        ]

        # run scene2pset
        system.run('%s %s "%s" "%s"' % (context.scene2pset_path, ' '.join(scene2pset_config), tree.mve, tree.mve_model),
                   env_vars={'OMP_NUM_THREADS': args.max_concurrency})

        # run cleanmesh (filter points by MVE confidence threshold)
        if args.mve_confidence > 0:
            mve_filtered_model = io.related_file_path(tree.mve_model, postfix=".filtered")
            system.run('%s -t%s --no-clean --component-size=0 "%s" "%s"' % (
                context.meshclean_path, min(1.0, args.mve_confidence), tree.mve_model, mve_filtered_model),
                env_vars={'OMP_NUM_THREADS': args.max_concurrency})

            if io.file_exists(mve_filtered_model):
                # replace the unfiltered model with the filtered one
                os.remove(tree.mve_model)
                os.rename(mve_filtered_model, tree.mve_model)
            else:
                log.ODM_WARNING("Couldn't filter MVE model (%s does not exist)." % mve_filtered_model)
    else:
        log.ODM_WARNING('Found a valid MVE reconstruction file in: %s' % tree.mve_model)
def process(self, inputs, outputs):
    """Ecto cell: render the orthophoto PNG and, when georeferencing data
    is available, convert it into a georeferenced GeoTIFF.

    Fix over the previous version: the opening log message read
    'Running OMD OrthoPhoto Cell' (typo, did not match the closing message
    or the sibling cells); corrected to 'ODM'.
    """
    # Benchmarking
    start_time = system.now_raw()

    log.ODM_INFO('Running ODM OrthoPhoto Cell')

    # get inputs
    args = self.inputs.args
    tree = self.inputs.tree

    # define paths and create working directories
    system.mkdir_p(tree.odm_orthophoto)

    # check if we rerun cell or not
    rerun_cell = (args.rerun is not None and
                  args.rerun == 'odm_orthophoto') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and
                  'odm_orthophoto' in args.rerun_from)

    if not io.file_exists(tree.odm_orthophoto_file) or rerun_cell:

        # odm_orthophoto definitions
        kwargs = {
            'bin': context.odm_modules_path,
            'model_geo': tree.odm_georeferencing_model_obj_geo,
            'log': tree.odm_orthophoto_log,
            'ortho': tree.odm_orthophoto_file,
            'corners': tree.odm_orthophoto_corners,
            'res': self.params.resolution
        }

        # run odm_orthophoto
        system.run('{bin}/odm_orthophoto -inputFile {model_geo} '
                   '-logFile {log} -outputFile {ortho} -resolution {res} '
                   '-outputCornerFile {corners}'.format(**kwargs))

        # Create georeferenced GeoTiff
        geotiffcreated = False
        georef = types.ODM_GeoRef()
        # creates the coord refs
        # TODO I don't want to have to do this twice- after odm_georef
        georef.parse_coordinate_system(tree.odm_georeferencing_coords)

        if georef.epsg and georef.utm_east_offset and georef.utm_north_offset:
            ulx = uly = lrx = lry = 0.0
            # Corner file: first line holds four offset-local UTM values;
            # add the UTM offsets back to get absolute coordinates
            with open(tree.odm_orthophoto_corners) as f:
                for lineNumber, line in enumerate(f):
                    if lineNumber == 0:
                        tokens = line.split(' ')
                        if len(tokens) == 4:
                            ulx = float(tokens[0]) + \
                                float(georef.utm_east_offset)
                            lry = float(tokens[1]) + \
                                float(georef.utm_north_offset)
                            lrx = float(tokens[2]) + \
                                float(georef.utm_east_offset)
                            uly = float(tokens[3]) + \
                                float(georef.utm_north_offset)

            log.ODM_INFO('Creating GeoTIFF')

            kwargs = {
                'ulx': ulx,
                'uly': uly,
                'lrx': lrx,
                'lry': lry,
                'epsg': georef.epsg,
                'png': tree.odm_orthophoto_file,
                'tiff': tree.odm_orthophoto_tif,
                'log': tree.odm_orthophoto_tif_log
            }

            system.run('gdal_translate -a_ullr {ulx} {uly} {lrx} {lry} '
                       '-co TILED=yes '
                       '-co COMPRESS=DEFLATE '
                       '-co PREDICTOR=2 '
                       '-co BLOCKXSIZE=512 '
                       '-co BLOCKYSIZE=512 '
                       '-co NUM_THREADS=ALL_CPUS '
                       '-a_srs \"EPSG:{epsg}\" {png} {tiff} > {log}'.format(**kwargs))

            geotiffcreated = True

        if not geotiffcreated:
            log.ODM_WARNING('No geo-referenced orthophoto created due '
                            'to missing geo-referencing or corner coordinates.')
    else:
        log.ODM_WARNING('Found a valid orthophoto in: %s' % tree.odm_orthophoto_file)

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'Orthophoto')

    log.ODM_INFO('Running ODM OrthoPhoto Cell - Finished')
    return ecto.OK if args.end_with != 'odm_orthophoto' else ecto.QUIT
def process(self, args, outputs):
    """Dataset stage: build the project tree, copy supported images into the
    project, construct (or reload) the photo list, and initialize the
    georeferenced reconstruction object."""
    # Load tree
    tree = types.ODM_Tree(args.project_path, args.images, args.gcp)
    outputs['tree'] = tree

    if args.time and io.file_exists(tree.benchmarking):
        # Delete the previously made file
        os.remove(tree.benchmarking)
        with open(tree.benchmarking, 'a') as b:
            b.write('ODM Benchmarking file created %s\nNumber of Cores: %s\n\n' % (system.now(), context.num_cores))

    # check if the extension is supported
    def supported_extension(file_name):
        (pathfn, ext) = os.path.splitext(file_name)
        return ext.lower() in context.supported_extensions

    # Get supported images from dir
    def get_images(in_dir):
        # filter images for its extension type
        log.ODM_DEBUG(in_dir)
        return [f for f in io.get_files_list(in_dir) if supported_extension(f)]

    # get images directory
    input_dir = tree.input_images
    images_dir = tree.dataset_raw

    if not io.dir_exists(images_dir):
        log.ODM_INFO("Project directory %s doesn't exist. Creating it now. " % images_dir)
        system.mkdir_p(images_dir)
        # copy every supported image from the input dir into the project
        copied = [copyfile(io.join_paths(input_dir, f), io.join_paths(images_dir, f)) for f in get_images(input_dir)]

    # define paths and create working directories
    system.mkdir_p(tree.odm_georeferencing)
    if not args.use_3dmesh:
        system.mkdir_p(tree.odm_25dgeoreferencing)

    log.ODM_DEBUG('Loading dataset from: %s' % images_dir)

    # check if we rerun cell or not
    images_database_file = io.join_paths(tree.root_path, 'images.json')
    if not io.file_exists(images_database_file) or self.rerun():
        files = get_images(images_dir)
        if files:
            # create ODMPhoto list
            path_files = [io.join_paths(images_dir, f) for f in files]
            photos = []
            with open(tree.dataset_list, 'w') as dataset_list:
                for f in path_files:
                    photos += [types.ODM_Photo(f)]
                    dataset_list.write(photos[-1].filename + '\n')

            # Save image database for faster restart
            save_images_database(photos, images_database_file)
        else:
            log.ODM_ERROR('Not enough supported images in %s' % images_dir)
            exit(1)
    else:
        # We have an images database, just load it
        photos = load_images_database(images_database_file)

    log.ODM_INFO('Found %s usable images' % len(photos))

    # Create reconstruction object
    reconstruction = types.ODM_Reconstruction(photos)

    # Georeference from GCPs when a gcp_list.txt was found, else from EXIF GPS
    if tree.odm_georeferencing_gcp:
        reconstruction.georeference_with_gcp(tree.odm_georeferencing_gcp,
                                             tree.odm_georeferencing_coords,
                                             tree.odm_georeferencing_gcp_utm,
                                             rerun=self.rerun())
    else:
        reconstruction.georeference_with_gps(tree.dataset_raw,
                                             tree.odm_georeferencing_coords,
                                             rerun=self.rerun())

    reconstruction.save_proj_srs(io.join_paths(tree.odm_georeferencing,
                                               tree.odm_georeferencing_proj))
    outputs['reconstruction'] = reconstruction
def compute_cutline(orthophoto_file, crop_area_file, destination, max_concurrency=1, tmpdir=None, scale=1):
    """Compute an orthophoto cutline via a GRASS script and move it to
    ``destination``.

    :param orthophoto_file: path to the orthophoto raster
    :param crop_area_file: path to the crop area vector file
    :param destination: where the generated cutline file is moved
    :param max_concurrency: worker/thread count passed to gdal and GRASS
    :param tmpdir: optional working directory (created if missing)
    :param scale: 0-1 downscale factor applied before computing the cutline
    :return: ``destination`` on success, ``None`` otherwise

    Fixes over the previous version:
    - ``os.path.join(tmpdir, ...)`` raised TypeError when ``scale < 1`` and
      no ``tmpdir`` was given; now falls back to the orthophoto's directory.
    - the bare ``except:`` around the dimension probe is narrowed to
      ``except Exception`` so SystemExit/KeyboardInterrupt propagate.
    """
    if io.file_exists(orthophoto_file) and io.file_exists(crop_area_file):
        from opendm.grass_engine import grass
        log.ODM_INFO("Computing cutline")

        if tmpdir and not io.dir_exists(tmpdir):
            system.mkdir_p(tmpdir)

        scale = max(0.0001, min(1, scale))
        scaled_orthophoto = None
        if scale < 1:
            log.ODM_INFO("Scaling orthophoto to %s%% to compute cutline" % (scale * 100))

            # Fall back to the orthophoto's own directory when no tmpdir was
            # provided (os.path.join(None, ...) would raise TypeError)
            scale_dir = tmpdir if tmpdir is not None else os.path.dirname(os.path.abspath(orthophoto_file))
            scaled_orthophoto = os.path.join(
                scale_dir,
                os.path.basename(io.related_file_path(orthophoto_file, postfix=".scaled")))

            # Scale orthophoto before computing cutline
            system.run("gdal_translate -outsize {}% 0 "
                       "-co NUM_THREADS={} "
                       "--config GDAL_CACHEMAX {}% "
                       "{} {}".format(scale * 100, max_concurrency,
                                      concurrency.get_max_memory(),
                                      orthophoto_file, scaled_orthophoto))
            orthophoto_file = scaled_orthophoto

        try:
            ortho_width, ortho_height = get_image_size.get_image_size(orthophoto_file, fallback_on_error=False)
            log.ODM_INFO("Orthophoto dimensions are %sx%s" % (ortho_width, ortho_height))
            # One cutline segment per ~256px of the smaller dimension, min 8
            number_lines = int(max(8, math.ceil(min(ortho_width, ortho_height) / 256.0)))
        except Exception:
            log.ODM_INFO("Cannot compute orthophoto dimensions, setting arbitrary number of lines.")
            number_lines = 32

        log.ODM_INFO("Number of lines: %s" % number_lines)

        gctx = grass.create_context({'auto_cleanup': False, 'tmpdir': tmpdir})
        gctx.add_param('orthophoto_file', orthophoto_file)
        gctx.add_param('crop_area_file', crop_area_file)
        gctx.add_param('number_lines', number_lines)
        gctx.add_param('max_concurrency', max_concurrency)
        gctx.add_param('memory', int(concurrency.get_max_memory_mb(300)))
        gctx.set_location(orthophoto_file)

        cutline_file = gctx.execute(os.path.join("opendm", "grass", "compute_cutline.grass"))
        if cutline_file != 'error':
            if io.file_exists(cutline_file):
                shutil.move(cutline_file, destination)
                log.ODM_INFO("Generated cutline file: %s --> %s" % (cutline_file, destination))
                gctx.cleanup()
                return destination
            else:
                log.ODM_WARNING("Unexpected script result: %s. No cutline file has been generated." % cutline_file)
        else:
            log.ODM_WARNING("Could not generate orthophoto cutline. An error occured when running GRASS. No orthophoto will be generated.")
    else:
        log.ODM_WARNING("We've been asked to compute cutline, "
                        "but either %s or %s is missing. Skipping..." % (orthophoto_file, crop_area_file))
def setup(self, args, images_path, reconstruction, append_config=[], rerun=False):
    """
    Setup an OpenSfM project: write the image list, EXIF/camera overrides,
    mask list and config.yaml into self.opensfm_project_path.

    :param args: parsed pipeline arguments (namespace-like, supports `in`)
    :param images_path: directory containing the source images
    :param reconstruction: project reconstruction object (photos, gcp, multi_camera)
    :param append_config: extra config lines appended verbatim to config.yaml
                          (NOTE(review): mutable default argument; safe here
                          because it is only read, never mutated)
    :param rerun: when True, wipe and rebuild the OpenSfM project directory
    """
    if rerun and io.dir_exists(self.opensfm_project_path):
        shutil.rmtree(self.opensfm_project_path)

    if not io.dir_exists(self.opensfm_project_path):
        system.mkdir_p(self.opensfm_project_path)

    list_path = os.path.join(self.opensfm_project_path, 'image_list.txt')
    # The image list doubles as the "setup already done" marker
    if not io.file_exists(list_path) or rerun:
        # Multi-camera (multispectral) rigs reconstruct from a single band
        if reconstruction.multi_camera:
            photos = get_photos_by_band(reconstruction.multi_camera, args.primary_band)
            if len(photos) < 1:
                raise Exception("Not enough images in selected band %s" % args.primary_band.lower())
            log.ODM_INFO("Reconstruction will use %s images from %s band" % (len(photos), args.primary_band.lower()))
        else:
            photos = reconstruction.photos

        # create file list; track whether all photos carry altitude and
        # whether any photo carries a GPS fix (drives config choices below)
        has_alt = True
        has_gps = False
        with open(list_path, 'w') as fout:
            for photo in photos:
                if not photo.altitude:
                    has_alt = False
                if photo.latitude is not None and photo.longitude is not None:
                    has_gps = True
                fout.write('%s\n' % os.path.join(images_path, photo.filename))

        # check for image_groups.txt (split-merge)
        image_groups_file = os.path.join(args.project_path, "image_groups.txt")
        if 'split_image_groups_is_set' in args:
            image_groups_file = os.path.abspath(args.split_image_groups)

        if io.file_exists(image_groups_file):
            dst_groups_file = os.path.join(self.opensfm_project_path, "image_groups.txt")
            io.copy(image_groups_file, dst_groups_file)
            log.ODM_INFO("Copied %s to %s" % (image_groups_file, dst_groups_file))

        # check for cameras (user-provided camera model overrides)
        if args.cameras:
            try:
                camera_overrides = camera.get_opensfm_camera_models(args.cameras)
                with open(os.path.join(self.opensfm_project_path, "camera_models_overrides.json"), 'w') as f:
                    f.write(json.dumps(camera_overrides))
                log.ODM_INFO("Wrote camera_models_overrides.json to OpenSfM directory")
            except Exception as e:
                # Best-effort: a bad cameras file should not abort setup
                log.ODM_WARNING("Cannot set camera_models_overrides.json: %s" % str(e))

        use_bow = args.matcher_type == "bow"
        feature_type = "SIFT"

        # GPSDOP override if we have GPS accuracy information (such as RTK)
        if 'gps_accuracy_is_set' in args:
            log.ODM_INFO("Forcing GPS DOP to %s for all images" % args.gps_accuracy)

        log.ODM_INFO("Writing exif overrides")
        exif_overrides = {}
        for p in photos:
            # DOP priority: explicit --gps-accuracy > per-photo DOP > default
            if 'gps_accuracy_is_set' in args:
                dop = args.gps_accuracy
            elif p.get_gps_dop() is not None:
                dop = p.get_gps_dop()
            else:
                dop = args.gps_accuracy  # default value

            if p.latitude is not None and p.longitude is not None:
                exif_overrides[p.filename] = {
                    'gps': {
                        'latitude': p.latitude,
                        'longitude': p.longitude,
                        'altitude': p.altitude if p.altitude is not None else 0,
                        'dop': dop,
                    }
                }

        with open(os.path.join(self.opensfm_project_path, "exif_overrides.json"), 'w') as f:
            f.write(json.dumps(exif_overrides))

        # Check image masks
        masks = []
        for p in photos:
            if p.mask is not None:
                masks.append((p.filename, os.path.join(images_path, p.mask)))

        if masks:
            log.ODM_INFO("Found %s image masks" % len(masks))
            with open(os.path.join(self.opensfm_project_path, "mask_list.txt"), 'w') as f:
                for fname, mask in masks:
                    f.write("{} {}\n".format(fname, mask))

        # Compute feature_process_size
        feature_process_size = 2048  # default
        if ('resize_to_is_set' in args) and args.resize_to > 0:
            # Legacy
            log.ODM_WARNING("Legacy option --resize-to (this might be removed in a future version). Use --feature-quality instead.")
            feature_process_size = int(args.resize_to)
        else:
            # Scale the largest photo dimension by the requested quality level
            feature_quality_scale = {
                'ultra': 1,
                'high': 0.5,
                'medium': 0.25,
                'low': 0.125,
                'lowest': 0.0675,
            }
            max_dim = find_largest_photo_dim(photos)
            if max_dim > 0:
                log.ODM_INFO("Maximum photo dimensions: %spx" % str(max_dim))
                feature_process_size = int(max_dim * feature_quality_scale[args.feature_quality])
                log.ODM_INFO("Photo dimensions for feature extraction: %ipx" % feature_process_size)
            else:
                log.ODM_WARNING("Cannot compute max image dimensions, going with defaults")

        # NOTE(review): depthmap_resolution is computed but never used in this
        # method — possibly leftover from an earlier version; confirm before removing.
        depthmap_resolution = get_depthmap_resolution(args, photos)

        # create config file for OpenSfM
        config = [
            "use_exif_size: no",
            "flann_algorithm: KDTREE",  # more stable, faster than KMEANS
            "feature_process_size: %s" % feature_process_size,
            "feature_min_frames: %s" % args.min_num_features,
            "processes: %s" % args.max_concurrency,
            "matching_gps_neighbors: %s" % args.matcher_neighbors,
            "matching_gps_distance: %s" % args.matcher_distance,
            "optimize_camera_parameters: %s" % ('no' if args.use_fixed_camera_params or args.cameras else 'yes'),
            "undistorted_image_format: tif",
            "bundle_outlier_filtering_type: AUTO",
            "align_orientation_prior: vertical",
            "triangulation_type: ROBUST",
            "retriangulation_ratio: 2",
        ]

        if args.camera_lens != 'auto':
            config.append("camera_projection_type: %s" % args.camera_lens.upper())

        # Without GPS, neighbor-based matching is impossible: fall back to BOW
        if not has_gps:
            log.ODM_INFO("No GPS information, using BOW matching")
            use_bow = True

        feature_type = args.feature_type.upper()

        if use_bow:
            config.append("matcher_type: WORDS")

            # Cannot use SIFT with BOW
            if feature_type == "SIFT":
                log.ODM_WARNING("Using BOW matching, will use HAHOG feature type, not SIFT")
                feature_type = "HAHOG"

        # GPU acceleration?
        if has_gpus() and feature_type == "SIFT":
            log.ODM_INFO("Using GPU for extracting SIFT features")
            log.ODM_INFO("--min-num-features will be ignored")
            feature_type = "SIFT_GPU"

        config.append("feature_type: %s" % feature_type)

        if has_alt:
            log.ODM_INFO("Altitude data detected, enabling it for GPS alignment")
            config.append("use_altitude_tag: yes")

        gcp_path = reconstruction.gcp.gcp_path
        if has_alt or gcp_path:
            config.append("align_method: auto")
        else:
            config.append("align_method: orientation_prior")

        if args.use_hybrid_bundle_adjustment:
            log.ODM_INFO("Enabling hybrid bundle adjustment")
            config.append("bundle_interval: 100")  # Bundle after adding 'bundle_interval' cameras
            config.append("bundle_new_points_ratio: 1.2")  # Bundle when (new points) / (bundled points) > bundle_new_points_ratio
            config.append("local_bundle_radius: 1")  # Max image graph distance for images to be included in local bundle adjustment
        else:
            config.append("local_bundle_radius: 0")

        if gcp_path:
            config.append("bundle_use_gcp: yes")
            if not args.force_gps:
                config.append("bundle_use_gps: no")
            io.copy(gcp_path, self.path("gcp_list.txt"))

        config = config + append_config

        # write config file
        log.ODM_INFO(config)
        config_filename = self.get_config_file_path()
        with open(config_filename, 'w') as fout:
            fout.write("\n".join(config))
    else:
        log.ODM_WARNING("%s already exists, not rerunning OpenSfM setup" % list_path)
def process(self, args, outputs):
    """
    DEM stage: optionally classify/rectify the georeferenced point cloud,
    then generate DSM and/or DTM rasters (with gap filling, cropping,
    euclidean maps and tiles as requested by args).
    """
    tree = outputs['tree']
    reconstruction = outputs['reconstruction']

    dem_input = tree.odm_georeferencing_model_laz
    pc_model_found = io.file_exists(dem_input)
    ignore_resolution = False
    pseudo_georeference = False

    if not reconstruction.is_georeferenced():
        log.ODM_WARNING("Not georeferenced, using ungeoreferenced point cloud...")
        ignore_resolution = True
        pseudo_georeference = True

    resolution = gsd.cap_resolution(args.dem_resolution, tree.opensfm_reconstruction,
                                    gsd_error_estimate=-3,
                                    ignore_gsd=args.ignore_gsd,
                                    ignore_resolution=ignore_resolution,
                                    has_gcp=reconstruction.has_gcp())

    log.ODM_INFO('Classify: ' + str(args.pc_classify))
    log.ODM_INFO('Create DSM: ' + str(args.dsm))
    log.ODM_INFO('Create DTM: ' + str(args.dtm))
    log.ODM_INFO('DEM input file {0} found: {1}'.format(dem_input, str(pc_model_found)))

    # define paths and create working directories
    odm_dem_root = tree.path('odm_dem')
    if not io.dir_exists(odm_dem_root):
        system.mkdir_p(odm_dem_root)

    if args.pc_classify and pc_model_found:
        pc_classify_marker = os.path.join(odm_dem_root, 'pc_classify_done.txt')

        if not io.file_exists(pc_classify_marker) or self.rerun():
            log.ODM_INFO("Classifying {} using Simple Morphological Filter".format(dem_input))
            commands.classify(dem_input,
                              args.smrf_scalar,
                              args.smrf_slope,
                              args.smrf_threshold,
                              args.smrf_window,
                              verbose=args.verbose)

            # The marker records the parameters used so reruns can be skipped
            with open(pc_classify_marker, 'w') as f:
                f.write('Classify: smrf\n')
                f.write('Scalar: {}\n'.format(args.smrf_scalar))
                f.write('Slope: {}\n'.format(args.smrf_slope))
                f.write('Threshold: {}\n'.format(args.smrf_threshold))
                f.write('Window: {}\n'.format(args.smrf_window))

    progress = 20
    self.update_progress(progress)

    if args.pc_rectify:
        commands.rectify(dem_input, args.debug)

    # Do we need to process anything here?
    if (args.dsm or args.dtm) and pc_model_found:
        dsm_output_filename = os.path.join(odm_dem_root, 'dsm.tif')
        dtm_output_filename = os.path.join(odm_dem_root, 'dtm.tif')

        if (args.dtm and not io.file_exists(dtm_output_filename)) or \
           (args.dsm and not io.file_exists(dsm_output_filename)) or \
           self.rerun():

            products = []

            # A DSM is also needed to build the euclidean map for the DTM
            if args.dsm or (args.dtm and args.dem_euclidean_map):
                products.append('dsm')
            if args.dtm:
                products.append('dtm')

            radius_steps = [(resolution / 100.0) / 2.0]
            for _ in range(args.dem_gapfill_steps - 1):
                radius_steps.append(radius_steps[-1] * 2)  # 2 is arbitrary, maybe there's a better value?

            for product in products:
                commands.create_dem(
                    dem_input,
                    product,
                    output_type='idw' if product == 'dtm' else 'max',
                    radiuses=list(map(str, radius_steps)),
                    gapfill=args.dem_gapfill_steps > 0,
                    outdir=odm_dem_root,
                    resolution=resolution / 100.0,
                    decimation=args.dem_decimation,
                    verbose=args.verbose,
                    max_workers=args.max_concurrency,
                    keep_unfilled_copy=args.dem_euclidean_map
                )

                dem_geotiff_path = os.path.join(odm_dem_root, "{}.tif".format(product))
                bounds_file_path = os.path.join(tree.odm_georeferencing, 'odm_georeferenced_model.bounds.gpkg')

                if args.crop > 0:
                    # Crop DEM
                    Cropper.crop(bounds_file_path, dem_geotiff_path, utils.get_dem_vars(args),
                                 keep_original=not args.optimize_disk_space)

                if args.dem_euclidean_map:
                    unfilled_dem_path = io.related_file_path(dem_geotiff_path, postfix=".unfilled")

                    if args.crop > 0:
                        # Crop unfilled DEM
                        Cropper.crop(bounds_file_path, unfilled_dem_path, utils.get_dem_vars(args),
                                     keep_original=not args.optimize_disk_space)

                    # NOTE(review): ".euclideand" postfix looks like a typo but is
                    # kept as-is since downstream consumers may rely on the name.
                    commands.compute_euclidean_map(unfilled_dem_path,
                                                   io.related_file_path(dem_geotiff_path, postfix=".euclideand"),
                                                   overwrite=True)

                if pseudo_georeference:
                    # BUGFIX: pseudo-georeferencing was applied twice per product
                    # (once with an extra 0.1 argument, then again with defaults);
                    # apply it exactly once.
                    pseudogeo.add_pseudo_georeferencing(dem_geotiff_path)

                if args.tiles:
                    generate_dem_tiles(dem_geotiff_path, tree.path("%s_tiles" % product), args.max_concurrency)

                progress += 30
                self.update_progress(progress)
        else:
            log.ODM_WARNING('Found existing outputs in: %s' % odm_dem_root)
    else:
        log.ODM_WARNING('DEM will not be generated')
def process(self, inputs, outputs):
    """
    OrthoPhoto cell: render an orthophoto from the georeferenced textured
    model, then (if geo-referencing data is available) convert it to a
    georeferenced GeoTIFF via gdal_translate.
    """
    # Benchmarking
    start_time = system.now_raw()

    # BUGFIX: was 'Running OMD OrthoPhoto Cell' — inconsistent with the
    # matching finish message below ('Running ODM OrthoPhoto Cell - Finished').
    log.ODM_INFO('Running ODM OrthoPhoto Cell')

    # get inputs
    args = self.inputs.args
    tree = self.inputs.tree

    # define paths and create working directories
    system.mkdir_p(tree.odm_orthophoto)

    # check if we rerun cell or not
    rerun_cell = (args.rerun is not None and args.rerun == 'odm_orthophoto') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and 'odm_orthophoto' in args.rerun_from)

    if not io.file_exists(tree.odm_orthophoto_file) or rerun_cell:
        # odm_orthophoto definitions
        kwargs = {
            'bin': context.odm_modules_path,
            'model_geo': tree.odm_georeferencing_model_obj_geo,
            'log': tree.odm_orthophoto_log,
            'ortho': tree.odm_orthophoto_file,
            'corners': tree.odm_orthophoto_corners,
            'res': self.params.resolution
        }

        # run odm_orthophoto
        system.run('{bin}/odm_orthophoto -inputFile {model_geo} '
                   '-logFile {log} -outputFile {ortho} -resolution {res} '
                   '-outputCornerFile {corners}'.format(**kwargs))

        # Create georeferenced GeoTiff
        geotiffcreated = False
        georef = types.ODM_GeoRef()
        # creates the coord refs
        # TODO I don't want to have to do this twice- after odm_georef
        georef.parse_coordinate_system(tree.odm_georeferencing_coords)

        if georef.epsg and georef.utm_east_offset and georef.utm_north_offset:
            ulx = uly = lrx = lry = 0.0
            # Corners file: first line holds "ulx lry lrx uly" in local
            # coordinates; add the UTM offsets to get absolute coordinates
            with open(tree.odm_orthophoto_corners) as f:
                for lineNumber, line in enumerate(f):
                    if lineNumber == 0:
                        tokens = line.split(' ')
                        if len(tokens) == 4:
                            ulx = float(tokens[0]) + \
                                float(georef.utm_east_offset)
                            lry = float(tokens[1]) + \
                                float(georef.utm_north_offset)
                            lrx = float(tokens[2]) + \
                                float(georef.utm_east_offset)
                            uly = float(tokens[3]) + \
                                float(georef.utm_north_offset)

            log.ODM_INFO('Creating GeoTIFF')

            kwargs = {
                'ulx': ulx,
                'uly': uly,
                'lrx': lrx,
                'lry': lry,
                'epsg': georef.epsg,
                'png': tree.odm_orthophoto_file,
                'tiff': tree.odm_orthophoto_tif,
                'log': tree.odm_orthophoto_tif_log
            }

            system.run('gdal_translate -a_ullr {ulx} {uly} {lrx} {lry} '
                       '-a_srs \"EPSG:{epsg}\" {png} {tiff} > {log}'.format(**kwargs))

            geotiffcreated = True
        if not geotiffcreated:
            log.ODM_WARNING('No geo-referenced orthophoto created due '
                            'to missing geo-referencing or corner coordinates.')
    else:
        log.ODM_WARNING('Found a valid orthophoto in: %s' % tree.odm_orthophoto_file)

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'Orthophoto')

    log.ODM_INFO('Running ODM OrthoPhoto Cell - Finished')
    return ecto.OK if args.end_with != 'odm_orthophoto' else ecto.QUIT
def process(self, inputs, outputs):
    """
    DEM cell (lidar2dems-based): generate DSM and/or DTM rasters from the
    georeferenced LAS point cloud using pdal, ogr2ogr and the l2d_* tools.
    """
    # Benchmarking
    start_time = system.now_raw()

    log.ODM_INFO('Running ODM DEM Cell')

    # get inputs
    args = self.inputs.args
    tree = self.inputs.tree
    las_model_found = io.file_exists(tree.odm_georeferencing_model_las)
    env_paths = [context.superbuild_bin_path]

    # Just to make sure (probe that lidar2dems is callable before committing)
    l2d_module_installed = True
    try:
        system.run('l2d_classify --help > /dev/null', env_paths)
    except:
        log.ODM_WARNING('lidar2dems is not installed properly')
        l2d_module_installed = False

    log.ODM_INFO('Create DSM: ' + str(args.dsm))
    log.ODM_INFO('Create DTM: ' + str(args.dtm))
    log.ODM_INFO('DEM input file {0} found: {1}'.format(tree.odm_georeferencing_model_las, str(las_model_found)))

    # Do we need to process anything here?
    if (args.dsm or args.dtm) and las_model_found and l2d_module_installed:

        # define paths and create working directories
        odm_dem_root = tree.path('odm_dem')
        system.mkdir_p(odm_dem_root)

        dsm_output_filename = os.path.join(odm_dem_root, 'dsm.tif')
        dtm_output_filename = os.path.join(odm_dem_root, 'dtm.tif')

        # check if we rerun cell or not
        rerun_cell = (args.rerun is not None and args.rerun == 'odm_dem') or \
                     (args.rerun_all) or \
                     (args.rerun_from is not None and 'odm_dem' in args.rerun_from)

        if (args.dtm and not io.file_exists(dtm_output_filename)) or \
           (args.dsm and not io.file_exists(dsm_output_filename)) or \
           rerun_cell:

            # Extract boundaries and srs of point cloud
            summary_file_path = os.path.join(odm_dem_root, 'odm_georeferenced_model.summary.json')
            boundary_file_path = os.path.join(odm_dem_root, 'odm_georeferenced_model.boundary.json')

            system.run('pdal info --summary {0} > {1}'.format(tree.odm_georeferencing_model_las, summary_file_path), env_paths)
            system.run('pdal info --boundary {0} > {1}'.format(tree.odm_georeferencing_model_las, boundary_file_path), env_paths)

            pc_proj4 = ""
            # NOTE(review): pc_geojson_bounds_feature appears unused — the code
            # below assigns pc_geojson_boundary_feature instead; likely a typo
            # left over from a rename.
            pc_geojson_bounds_feature = None

            with open(summary_file_path, 'r') as f:
                json_f = json.loads(f.read())
                pc_proj4 = json_f['summary']['srs']['proj4']

            with open(boundary_file_path, 'r') as f:
                json_f = json.loads(f.read())
                pc_geojson_boundary_feature = json_f['boundary']['boundary_json']

            # Write bounds to GeoJSON
            bounds_geojson_path = os.path.join(odm_dem_root, 'odm_georeferenced_model.bounds.geojson')
            with open(bounds_geojson_path, "w") as f:
                f.write(json.dumps({
                    "type": "FeatureCollection",
                    "features": [{
                        "type": "Feature",
                        "geometry": pc_geojson_boundary_feature
                    }]
                }))

            bounds_shapefile_path = os.path.join(odm_dem_root, 'bounds.shp')

            # Convert bounds to Shapefile
            kwargs = {
                'input': bounds_geojson_path,
                'output': bounds_shapefile_path,
                'proj4': pc_proj4
            }
            system.run('ogr2ogr -overwrite -a_srs "{proj4}" {output} {input}'.format(**kwargs))

            # Process with lidar2dems
            # Map terrain type to (slope, cellsize) parameters for l2d
            terrain_params_map = {
                'flatnonforest': (1, 3),
                'flatforest': (1, 2),
                'complexnonforest': (5, 2),
                'complexforest': (10, 2)
            }
            terrain_params = terrain_params_map[args.dem_terrain_type.lower()]

            kwargs = {
                'verbose': '-v' if self.params.verbose else '',
                'slope': terrain_params[0],
                'cellsize': terrain_params[1],
                'outdir': odm_dem_root,
                'site': bounds_shapefile_path
            }

            l2d_params = '--slope {slope} --cellsize {cellsize} ' \
                         '{verbose} ' \
                         '-o -s {site} ' \
                         '--outdir {outdir}'.format(**kwargs)

            approximate = '--approximate' if args.dem_approximate else ''

            # Classify only if we need a DTM
            run_classification = args.dtm

            if run_classification:
                system.run('l2d_classify {0} --decimation {1} '
                           '{2} --initialDistance {3} {4}'.format(
                    l2d_params, args.dem_decimation, approximate,
                    args.dem_initial_distance, tree.odm_georeferencing), env_paths)
            else:
                log.ODM_INFO("Will skip classification, only DSM is needed")
                # Place the LAS under the name l2d_dems expects from classification
                copyfile(tree.odm_georeferencing_model_las,
                         os.path.join(odm_dem_root, 'bounds-0_l2d_s{slope}c{cellsize}.las'.format(**kwargs)))

            products = []
            if args.dsm: products.append('dsm')
            if args.dtm: products.append('dtm')

            # Increasing search radii for the gap-filling passes
            radius_steps = [args.dem_resolution]
            for _ in range(args.dem_gapfill_steps - 1):
                radius_steps.append(radius_steps[-1] * 3)  # 3 is arbitrary, maybe there's a better value?

            for product in products:
                demargs = {
                    'product': product,
                    'indir': odm_dem_root,
                    'l2d_params': l2d_params,
                    'maxsd': args.dem_maxsd,
                    'maxangle': args.dem_maxangle,
                    'resolution': args.dem_resolution,
                    'radius_steps': ' '.join(map(str, radius_steps)),
                    'gapfill': '--gapfill' if args.dem_gapfill_steps > 0 else '',
                    # If we didn't run a classification, we should pass the decimate parameter here
                    'decimation': '--decimation {0}'.format(args.dem_decimation) if not run_classification else ''
                }

                system.run('l2d_dems {product} {indir} {l2d_params} '
                           '--maxsd {maxsd} --maxangle {maxangle} '
                           '--resolution {resolution} --radius {radius_steps} '
                           '{decimation} '
                           '{gapfill} '.format(**demargs), env_paths)

                # Rename final output
                if product == 'dsm':
                    os.rename(os.path.join(odm_dem_root, 'bounds-0_dsm.idw.tif'), dsm_output_filename)
                elif product == 'dtm':
                    os.rename(os.path.join(odm_dem_root, 'bounds-0_dtm.idw.tif'), dtm_output_filename)
        else:
            log.ODM_WARNING('Found existing outputs in: %s' % odm_dem_root)
    else:
        log.ODM_WARNING('DEM will not be generated')

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'Dem')

    log.ODM_INFO('Running ODM DEM Cell - Finished')
    return ecto.OK if args.end_with != 'odm_dem' else ecto.QUIT
def process(self, inputs, outputs):
    """
    SLAM cell: run ORB_SLAM2 on a video, convert the resulting trajectory to
    an OpenSfM reconstruction, then export it to Bundler and PMVS formats.
    """
    log.ODM_INFO('Running OMD Slam Cell')

    # get inputs
    tree = self.inputs.tree
    args = self.inputs.args

    # BUGFIX: validate the argument before building the path. The old
    # `if not video:` check after os.path.join could never trigger — the
    # joined path is always non-empty, and join raises TypeError first
    # when args.video is None.
    if not args.video:
        log.ODM_ERROR('No video provided')
        return ecto.QUIT

    video = os.path.join(tree.root_path, args.video)
    slam_config = os.path.join(tree.root_path, args.slam_config)

    # create working directories
    system.mkdir_p(tree.opensfm)
    system.mkdir_p(tree.pmvs)

    vocabulary = os.path.join(context.orb_slam2_path, 'Vocabulary/ORBvoc.txt')
    orb_slam_cmd = os.path.join(context.odm_modules_path, 'odm_slam')
    trajectory = os.path.join(tree.opensfm, 'KeyFrameTrajectory.txt')
    map_points = os.path.join(tree.opensfm, 'MapPoints.txt')

    # check if we rerun cell or not
    rerun_cell = args.rerun == 'slam'

    # check if slam was run before
    if not io.file_exists(trajectory) or rerun_cell:
        # run slam binary
        system.run(' '.join([
            'cd {} &&'.format(tree.opensfm),
            orb_slam_cmd,
            vocabulary,
            slam_config,
            video,
        ]))
    else:
        log.ODM_WARNING('Found a valid slam trajectory in: {}'.format(
            trajectory))

    # check if trajectory was exported to opensfm before
    if not io.file_exists(tree.opensfm_reconstruction) or rerun_cell:
        # convert slam to opensfm
        system.run(' '.join([
            'cd {} &&'.format(tree.opensfm),
            'PYTHONPATH={}:{}'.format(context.pyopencv_path, context.opensfm_path),
            'python',
            os.path.join(context.odm_modules_src_path, 'odm_slam/src/orb_slam_to_opensfm.py'),
            video,
            trajectory,
            map_points,
            slam_config,
        ]))
        # link opensfm images to resized images
        os.symlink(tree.opensfm + '/images', tree.dataset_resize)
    else:
        log.ODM_WARNING('Found a valid OpenSfM file in: {}'.format(
            tree.opensfm_reconstruction))

    # check if reconstruction was exported to bundler before
    if not io.file_exists(tree.opensfm_bundle_list) or rerun_cell:
        # convert back to bundler's format
        system.run(
            'PYTHONPATH={} {}/bin/export_bundler {}'.format(
                context.pyopencv_path, context.opensfm_path, tree.opensfm))
    else:
        log.ODM_WARNING(
            'Found a valid Bundler file in: {}'.format(
                tree.opensfm_reconstruction))

    # check if reconstruction was exported to pmvs before
    if not io.file_exists(tree.pmvs_visdat) or rerun_cell:
        # run PMVS converter
        system.run(
            'PYTHONPATH={} {}/bin/export_pmvs {} --output {}'.format(
                context.pyopencv_path, context.opensfm_path, tree.opensfm, tree.pmvs))
    else:
        log.ODM_WARNING('Found a valid CMVS file in: {}'.format(
            tree.pmvs_visdat))

    log.ODM_INFO('Running OMD Slam Cell - Finished')
    return ecto.OK if args.end_with != 'odm_slam' else ecto.QUIT
def process(self, args, outputs):
    """
    Orthophoto stage: render an orthophoto from the textured model, then
    (when geo-referencing data is available) produce a georeferenced GeoTIFF
    with optional cutline computation, cropping and overviews.
    """
    tree = outputs['tree']
    reconstruction = outputs['reconstruction']

    verbose = '-verbose' if args.verbose else ''

    # define paths and create working directories
    system.mkdir_p(tree.odm_orthophoto)

    if not io.file_exists(tree.odm_orthophoto_file) or self.rerun():

        # odm_orthophoto definitions
        # 'res' converts cm/pixel ground resolution into the odm_orthophoto
        # -resolution unit (pixels per meter)
        kwargs = {
            'bin': context.odm_modules_path,
            'log': tree.odm_orthophoto_log,
            'ortho': tree.odm_orthophoto_file,
            'corners': tree.odm_orthophoto_corners,
            'res': 1.0 / (gsd.cap_resolution(args.orthophoto_resolution, tree.opensfm_reconstruction, ignore_gsd=args.ignore_gsd) / 100.0),
            'verbose': verbose
        }

        # Have geo coordinates?
        georef = reconstruction.georef

        # Check if the georef object is initialized
        # (during a --rerun this might not be)
        # TODO: we should move this to a more central
        # location (perhaps during the dataset initialization)
        if georef and not georef.utm_east_offset:
            georeferencing_dir = tree.odm_georeferencing if args.use_3dmesh and not args.skip_3dmodel else tree.odm_25dgeoreferencing
            odm_georeferencing_model_txt_geo_file = os.path.join(
                georeferencing_dir, tree.odm_georeferencing_model_txt_geo)

            if io.file_exists(odm_georeferencing_model_txt_geo_file):
                georef.extract_offsets(odm_georeferencing_model_txt_geo_file)
            else:
                log.ODM_WARNING(
                    'Cannot read UTM offset from {}. An orthophoto will not be generated.'
                    .format(odm_georeferencing_model_txt_geo_file))

        # Choose the input model: georeferenced vs plain textured model,
        # and 3D vs 2.5D texturing directory
        if georef:
            if args.use_3dmesh:
                kwargs['model_geo'] = os.path.join(
                    tree.odm_texturing, tree.odm_georeferencing_model_obj_geo)
            else:
                kwargs['model_geo'] = os.path.join(
                    tree.odm_25dtexturing, tree.odm_georeferencing_model_obj_geo)
        else:
            if args.use_3dmesh:
                kwargs['model_geo'] = os.path.join(
                    tree.odm_texturing, tree.odm_textured_model_obj)
            else:
                kwargs['model_geo'] = os.path.join(
                    tree.odm_25dtexturing, tree.odm_textured_model_obj)

        # run odm_orthophoto
        system.run(
            '{bin}/odm_orthophoto -inputFile {model_geo} '
            '-logFile {log} -outputFile {ortho} -resolution {res} {verbose} '
            '-outputCornerFile {corners}'.format(**kwargs))

        # Create georeferenced GeoTiff
        geotiffcreated = False

        if georef and georef.projection and georef.utm_east_offset and georef.utm_north_offset:
            ulx = uly = lrx = lry = 0.0
            # Corners file: first line holds "ulx lry lrx uly" in local
            # coordinates; add the UTM offsets to get absolute coordinates
            with open(tree.odm_orthophoto_corners) as f:
                for lineNumber, line in enumerate(f):
                    if lineNumber == 0:
                        tokens = line.split(' ')
                        if len(tokens) == 4:
                            ulx = float(tokens[0]) + \
                                float(georef.utm_east_offset)
                            lry = float(tokens[1]) + \
                                float(georef.utm_north_offset)
                            lrx = float(tokens[2]) + \
                                float(georef.utm_east_offset)
                            uly = float(tokens[3]) + \
                                float(georef.utm_north_offset)

            log.ODM_INFO('Creating GeoTIFF')

            orthophoto_vars = orthophoto.get_orthophoto_vars(args)

            kwargs = {
                'ulx': ulx,
                'uly': uly,
                'lrx': lrx,
                'lry': lry,
                'vars': ' '.join([
                    '-co %s=%s' % (k, orthophoto_vars[k])
                    for k in orthophoto_vars
                ]),
                'proj': georef.projection.srs,
                'png': tree.odm_orthophoto_file,
                'tiff': tree.odm_orthophoto_tif,
                'log': tree.odm_orthophoto_tif_log,
                'max_memory': get_max_memory(),
            }

            system.run('gdal_translate -a_ullr {ulx} {uly} {lrx} {lry} '
                       '{vars} '
                       '-a_srs \"{proj}\" '
                       '--config GDAL_CACHEMAX {max_memory}% '
                       '{png} {tiff} > {log}'.format(**kwargs))

            bounds_file_path = os.path.join(
                tree.odm_georeferencing, 'odm_georeferenced_model.bounds.gpkg')

            # Cutline computation, before cropping
            # We want to use the full orthophoto, not the cropped one.
            if args.orthophoto_cutline:
                compute_cutline(tree.odm_orthophoto_tif,
                                bounds_file_path,
                                os.path.join(tree.odm_orthophoto, "cutline.gpkg"),
                                args.max_concurrency,
                                tmpdir=os.path.join(
                                    tree.odm_orthophoto, "grass_cutline_tmpdir"))

            if args.crop > 0:
                Cropper.crop(bounds_file_path, tree.odm_orthophoto_tif, orthophoto_vars)

            if args.build_overviews:
                orthophoto.build_overviews(tree.odm_orthophoto_tif)

            geotiffcreated = True
        if not geotiffcreated:
            log.ODM_WARNING(
                'No geo-referenced orthophoto created due '
                'to missing geo-referencing or corner coordinates.')
    else:
        log.ODM_WARNING('Found a valid orthophoto in: %s' % tree.odm_orthophoto_file)
def process(self, args, outputs):
    """
    Dense reconstruction stage: undistort images, then build a dense point
    cloud either with COLMAP (GPU available) or with MVE via a PMVS/bundler
    workspace (CPU fallback or --use-mve-dense).
    """
    cm = outputs["cm"]
    georeconstruction_dir = outputs["georeconstruction_dir"]

    # Create dense dir
    outputs["dense_dir"] = os.path.join(outputs["project_path"], "dense")
    if not os.path.exists(outputs["dense_dir"]):
        system.mkdir_p(outputs["dense_dir"])

    # Pick the dense pipeline: MVE needs a PMVS-style workspace, COLMAP its own
    if not has_gpus() or args.use_mve_dense:
        output_type = "PMVS"
        outputs["dense_workspace_dir"] = os.path.join(outputs["dense_dir"], "pmvs")
        already_run_undistortion = os.path.exists(outputs["dense_workspace_dir"])
    else:
        output_type = "COLMAP"
        outputs["dense_workspace_dir"] = outputs["dense_dir"]
        already_run_undistortion = os.path.exists(os.path.join(outputs["dense_dir"], "images"))

    if not already_run_undistortion or self.rerun():
        log.ODM_INFO("Undistorting images using a %s workspace" % output_type.lower())

        # Undistort images
        cm.run("image_undistorter", image_path=outputs["images_dir"],
                                    input_path=georeconstruction_dir,
                                    output_path=outputs["dense_dir"],
                                    output_type=output_type)

    if output_type == "COLMAP":
        outputs["point_cloud_ply_file"] = os.path.join(outputs["dense_workspace_dir"], "fused.ply")
        outputs["undistorted_dir"] = os.path.join(outputs["dense_workspace_dir"], "images")
    else:
        outputs["dense_mve_dir"] = os.path.join(outputs["dense_workspace_dir"], "mve")
        outputs["point_cloud_ply_file"] = os.path.join(outputs["dense_mve_dir"], "mve_dense_point_cloud.ply")
        outputs["undistorted_dir"] = os.path.join(outputs["dense_workspace_dir"], "bundler")

    if not os.path.exists(outputs["point_cloud_ply_file"]) or self.rerun():
        if output_type == "COLMAP":
            # Use COLMAP, easy
            kwargs = {
                'PatchMatchStereo.geom_consistency': 'true'
            }
            cm.run("patch_match_stereo", workspace_path=outputs["dense_workspace_dir"],
                                         workspace_format="COLMAP",
                                         **kwargs)
            kwargs = {}
            cm.run("stereo_fusion", workspace_path=outputs["dense_workspace_dir"],
                                    workspace_format="COLMAP",
                                    input_type="geometric",
                                    output_path=outputs["point_cloud_ply_file"],
                                    **kwargs)
        else:
            # Use MVE
            # Create directory structure so makescene is happy...
            if os.path.exists(outputs["dense_mve_dir"]) and self.rerun():
                log.ODM_WARNING("Removing %s" % outputs["dense_mve_dir"])
                shutil.rmtree(outputs["dense_mve_dir"])

            bundler_dir = os.path.join(outputs["dense_workspace_dir"], "bundler")
            bundle_dir = os.path.join(bundler_dir, "bundle")

            # NOTE(review): this re-tests dense_mve_dir (possibly just removed
            # above), so the bundle_dir cleanup rarely fires; it likely should
            # test bundle_dir instead — confirm intent before changing.
            if os.path.exists(outputs["dense_mve_dir"]) and self.rerun():
                log.ODM_WARNING("Removing %s" % bundle_dir)
                shutil.rmtree(bundle_dir)

            # Create dense/pmvs/bundle
            system.mkdir_p(bundle_dir)

            bundle_rd_out_file = os.path.join(outputs["dense_workspace_dir"], "bundle.rd.out")
            bundle_image_list = os.path.join(outputs["dense_workspace_dir"], "bundle.rd.out.list.txt")

            # Copy bundle.rd.out --> bundler/bundle/bundle.out
            shutil.copy(bundle_rd_out_file, os.path.join(bundle_dir, "bundle.out"))

            # Read image list
            # BUGFIX: materialize the filter into a list — a filter object is
            # not subscriptable, so images[i]/images[0] below would raise
            # TypeError on Python 3.
            with open(bundle_image_list, "r") as f:
                images = list(filter(len, map(str.strip, f.read().split("\n"))))

            visualize = os.listdir(os.path.join(outputs["dense_workspace_dir"], "visualize"))
            visualize.sort()
            visualize = [os.path.join(outputs["dense_workspace_dir"], "visualize", v) for v in visualize]

            # Copy each image from visualize/########N{8}.jpg to bundle/images[N]
            # TODO: check tiff extensions?
            for i, src in enumerate(visualize):
                dst = os.path.join(bundler_dir, images[i])
                log.ODM_INFO("Copying %s --> %s" % (os.path.basename(src), os.path.basename(dst)))
                # Could make it faster by moving, but then we mess up the structure...
                shutil.copy(src, dst)

            # Copy image list (bundle.rd.out.list.txt --> bundler/list.txt)
            shutil.copy(bundle_image_list, os.path.join(bundler_dir, "list.txt"))

            # Run makescene
            if os.path.exists(outputs["dense_mve_dir"]):
                log.ODM_WARNING("Removing %s" % outputs["dense_mve_dir"])
                shutil.rmtree(outputs["dense_mve_dir"])

            system.run("makescene \"{}\" \"{}\"".format(bundler_dir, outputs["dense_mve_dir"]))

            # Read image dimension
            # TODO: this can be improved, see below
            width, height = get_image_size(os.path.join(bundler_dir, images[0]))
            log.ODM_INFO("Image dimensions: (%s, %s)" % (width, height))
            size = max(width, height)

            # Scale level: each MVE level halves both dimensions (factor 4 in pixels)
            max_pixels = args.depthmap_resolution * args.depthmap_resolution
            if size * size <= max_pixels:
                mve_output_scale = 0
            else:
                ratio = float(size * size) / float(max_pixels)
                mve_output_scale = int(math.ceil(math.log(ratio) / math.log(4.0)))

            # TODO: we don't have a limit on undistortion dimensions
            # Compute mve output scale based on depthmap_resolution
            #max_pixels = args.depthmap_resolution * args.depthmap_resolution
            # if outputs['undist_image_max_size'] * outputs['undist_image_max_size'] <= max_pixels:
            #     mve_output_scale = 0
            # else:
            #     ratio = float(outputs['undist_image_max_size'] * outputs['undist_image_max_size']) / float(max_pixels)
            #     mve_output_scale = int(math.ceil(math.log(ratio) / math.log(4.0)))

            dmrecon_config = [
                "-s%s" % mve_output_scale,
                "--progress=fancy",
                "--local-neighbors=2",
            ]

            # Run MVE's dmrecon
            log.ODM_INFO("Running dense reconstruction. This might take a while.")

            # TODO: find out why MVE is crashing at random
            # MVE *seems* to have a race condition, triggered randomly, regardless of dataset
            # https://gist.github.com/pierotofy/6c9ce93194ba510b61e42e3698cfbb89
            # Temporary workaround is to retry the reconstruction until we get it right
            # (up to a certain number of retries).
            retry_count = 1
            while retry_count < 10:
                try:
                    system.run('dmrecon %s "%s"' % (' '.join(dmrecon_config), outputs["dense_mve_dir"]))
                    break
                except Exception as e:
                    if str(e) == "Child returned 134" or str(e) == "Child returned 1":
                        retry_count += 1
                        log.ODM_WARNING("Caught error code, retrying attempt #%s" % retry_count)
                    else:
                        raise e

            scene2pset_config = [
                "-F%s" % mve_output_scale
            ]

            system.run('scene2pset %s "%s" "%s"' % (' '.join(scene2pset_config), outputs["dense_mve_dir"], outputs["point_cloud_ply_file"]))

            # run cleanmesh (filter points by MVE confidence threshold)
            if args.mve_confidence > 0:
                mve_filtered_model = io.related_file_path(outputs["point_cloud_ply_file"], postfix=".filtered")
                system.run('meshclean -t%s --no-clean --component-size=0 "%s" "%s"' % (
                    min(1.0, args.mve_confidence),
                    outputs["point_cloud_ply_file"],
                    mve_filtered_model))

                if io.file_exists(mve_filtered_model):
                    os.remove(outputs["point_cloud_ply_file"])
                    os.rename(mve_filtered_model, outputs["point_cloud_ply_file"])
                else:
                    log.ODM_WARNING("Couldn't filter MVE model (%s does not exist)." % mve_filtered_model)
    else:
        log.ODM_WARNING('Found existing dense model in: %s' % outputs["point_cloud_ply_file"])
def process(self, inputs, outputs):
    """Run the OpenSfM structure-from-motion pipeline for this dataset.

    Drives the external ``opensfm`` command-line tools (metadata extraction,
    feature detection/matching, track creation, reconstruction, undistortion
    and exports) against the images listed in the photos array, writing all
    results under ``tree.opensfm``.  Sets ``outputs.reconstruction`` and
    returns ``ecto.OK``, or ``ecto.QUIT`` when there are no photos or the
    pipeline is configured to end at this cell.
    """
    # Benchmarking
    start_time = system.now_raw()

    log.ODM_INFO('Running ODM OpenSfM Cell')

    # get inputs
    tree = inputs.tree
    args = inputs.args
    reconstruction = inputs.reconstruction
    photos = reconstruction.photos

    if not photos:
        log.ODM_ERROR('Not enough photos in photos array to start OpenSfM')
        return ecto.QUIT

    # create working directories
    system.mkdir_p(tree.opensfm)

    # check if we rerun cell or not
    rerun_cell = (args.rerun is not None and
                  args.rerun == 'opensfm') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and
                  'opensfm' in args.rerun_from)

    # The "done" marker differs per mode: a sparse PLY for fast orthophotos,
    # the dense model when using OpenSfM's dense pipeline, otherwise the
    # standard reconstruction JSON.
    if args.fast_orthophoto:
        output_file = io.join_paths(tree.opensfm, 'reconstruction.ply')
    elif args.use_opensfm_dense:
        output_file = tree.opensfm_model
    else:
        output_file = tree.opensfm_reconstruction

    # check if reconstruction was done before
    if not io.file_exists(output_file) or rerun_cell:
        # create file list
        # has_alt tracks whether *every* photo carries altitude EXIF; any
        # missing altitude disables altitude-based alignment below.
        list_path = io.join_paths(tree.opensfm, 'image_list.txt')
        has_alt = True
        with open(list_path, 'w') as fout:
            for photo in photos:
                if not photo.altitude:
                    has_alt = False
                fout.write('%s\n' % photo.path_file)

        # create config file for OpenSfM
        config = [
            "use_exif_size: %s" % ('no' if not self.params.use_exif_size else 'yes'),
            "feature_process_size: %s" % self.params.feature_process_size,
            "feature_min_frames: %s" % self.params.feature_min_frames,
            "processes: %s" % self.params.processes,
            "matching_gps_neighbors: %s" % self.params.matching_gps_neighbors,
            "depthmap_method: %s" % args.opensfm_depthmap_method,
            "depthmap_resolution: %s" % args.depthmap_resolution,
            "depthmap_min_patch_sd: %s" % args.opensfm_depthmap_min_patch_sd,
            "depthmap_min_consistent_views: %s" % args.opensfm_depthmap_min_consistent_views,
            "optimize_camera_parameters: %s" % ('no' if self.params.fixed_camera_params else 'yes')
        ]

        if has_alt:
            log.ODM_DEBUG("Altitude data detected, enabling it for GPS alignment")
            config.append("use_altitude_tag: True")
            config.append("align_method: naive")

        if args.use_hybrid_bundle_adjustment:
            log.ODM_DEBUG("Enabling hybrid bundle adjustment")
            config.append("bundle_interval: 100")  # Bundle after adding 'bundle_interval' cameras
            config.append("bundle_new_points_ratio: 1.2")  # Bundle when (new points) / (bundled points) > bundle_new_points_ratio
            config.append("local_bundle_radius: 1")  # Max image graph distance for images to be included in local bundle adjustment

        if args.matcher_distance > 0:
            config.append("matching_gps_distance: %s" % self.params.matching_gps_distance)

        if tree.odm_georeferencing_gcp:
            config.append("bundle_use_gcp: yes")
            io.copy(tree.odm_georeferencing_gcp, tree.opensfm)

        # write config file
        log.ODM_DEBUG(config)
        config_filename = io.join_paths(tree.opensfm, 'config.yaml')
        with open(config_filename, 'w') as fout:
            fout.write("\n".join(config))

        # run OpenSfM reconstruction
        # The matching stages are guarded by their own progress marker so a
        # crash after matching does not force re-matching on restart.
        matched_done_file = io.join_paths(tree.opensfm, 'matching_done.txt')
        if not io.file_exists(matched_done_file) or rerun_cell:
            system.run('PYTHONPATH=%s %s/bin/opensfm extract_metadata %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
            system.run('PYTHONPATH=%s %s/bin/opensfm detect_features %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
            system.run('PYTHONPATH=%s %s/bin/opensfm match_features %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
            with open(matched_done_file, 'w') as fout:
                fout.write("Matching done!\n")
        else:
            log.ODM_WARNING('Found a feature matching done progress file in: %s' %
                            matched_done_file)

        if not io.file_exists(tree.opensfm_tracks) or rerun_cell:
            system.run('PYTHONPATH=%s %s/bin/opensfm create_tracks %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
        else:
            log.ODM_WARNING('Found a valid OpenSfM tracks file in: %s' %
                            tree.opensfm_tracks)

        if not io.file_exists(tree.opensfm_reconstruction) or rerun_cell:
            system.run('PYTHONPATH=%s %s/bin/opensfm reconstruct %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
        else:
            log.ODM_WARNING('Found a valid OpenSfM reconstruction file in: %s' %
                            tree.opensfm_reconstruction)

        # Always export VisualSFM's reconstruction and undistort images
        # as we'll use these for texturing (after GSD estimation and resizing)
        if not args.ignore_gsd:
            image_scale = gsd.image_scale_factor(args.orthophoto_resolution,
                                                 tree.opensfm_reconstruction)
        else:
            image_scale = 1.0

        if not io.file_exists(tree.opensfm_reconstruction_nvm) or rerun_cell:
            system.run('PYTHONPATH=%s %s/bin/opensfm export_visualsfm --image_extension png --scale_focal %s %s' %
                       (context.pyopencv_path, context.opensfm_path, image_scale, tree.opensfm))
        else:
            log.ODM_WARNING('Found a valid OpenSfM NVM reconstruction file in: %s' %
                            tree.opensfm_reconstruction_nvm)

        # These will be used for texturing
        system.run('PYTHONPATH=%s %s/bin/opensfm undistort --image_format png --image_scale %s %s' %
                   (context.pyopencv_path, context.opensfm_path, image_scale, tree.opensfm))

        # Skip dense reconstruction if necessary and export
        # sparse reconstruction instead
        if args.fast_orthophoto:
            system.run('PYTHONPATH=%s %s/bin/opensfm export_ply --no-cameras %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
        elif args.use_opensfm_dense:
            # Undistort images at full scale in JPG
            # (TODO: we could compare the size of the PNGs if they are < than depthmap_resolution
            # and use those instead of re-exporting full resolution JPGs)
            system.run('PYTHONPATH=%s %s/bin/opensfm undistort %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
            system.run('PYTHONPATH=%s %s/bin/opensfm compute_depthmaps %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
    else:
        log.ODM_WARNING('Found a valid OpenSfM reconstruction file in: %s' %
                        tree.opensfm_reconstruction)

    # check if reconstruction was exported to bundler before
    if not io.file_exists(tree.opensfm_bundle_list) or rerun_cell:
        # convert back to bundler's format
        system.run('PYTHONPATH=%s %s/bin/export_bundler %s' %
                   (context.pyopencv_path, context.opensfm_path, tree.opensfm))
    else:
        # NOTE(review): the message mentions a Bundler file but logs the
        # reconstruction path, not tree.opensfm_bundle_list — confirm intent.
        log.ODM_WARNING('Found a valid Bundler file in: %s' %
                        tree.opensfm_reconstruction)

    # Export geocoordinates (transformation matrix) when georeferencing data
    # is available for this reconstruction.
    if reconstruction.georef:
        system.run('PYTHONPATH=%s %s/bin/opensfm export_geocoords %s --transformation --proj \'%s\'' %
                   (context.pyopencv_path, context.opensfm_path, tree.opensfm,
                    reconstruction.georef.projection.srs))

    outputs.reconstruction = reconstruction

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'OpenSfM')

    log.ODM_INFO('Running ODM OpenSfM Cell - Finished')
    return ecto.OK if args.end_with != 'opensfm' else ecto.QUIT
def process(self, inputs, outputs):
    """Georeference the textured model(s) and point cloud.

    When no GCP file is supplied, first tries to generate a UTM coordinates
    file from image metadata (odm_extract_utm).  Then, for each texturing
    run (3D and optionally 2.5D), invokes the odm_georef binary; finally
    converts the georeferenced PLY to LAS (and optionally a DEM) and writes
    an XYZ CSV of the georeferenced points.
    """

    # find a file in the root directory
    # NOTE(review): only inspects the top-level directory — os.walk yields
    # `dir` itself first and this returns on that first iteration.
    def find(file, dir):
        for root, dirs, files in os.walk(dir):
            return '/'.join((root, file)) if file in files else None

    # Benchmarking
    start_time = system.now_raw()

    log.ODM_INFO('Running ODM Georeferencing Cell')

    # get inputs
    args = self.inputs.args
    tree = self.inputs.tree
    gcpfile = io.join_paths(tree.root_path, self.params.gcp_file) \
        if self.params.gcp_file else find('gcp_list.txt', tree.root_path)
    geocreated = True
    verbose = '-verbose' if self.params.verbose else ''

    # define paths and create working directories
    system.mkdir_p(tree.odm_georeferencing)
    if args.use_25dmesh:
        system.mkdir_p(tree.odm_25dgeoreferencing)

    # in case a gcp file it's not provided, let's try to generate it using
    # images metadata. Internally calls jhead.
    log.ODM_DEBUG(self.params.gcp_file)
    if not self.params.gcp_file:  # and \
        # not io.file_exists(tree.odm_georeferencing_coords):

        log.ODM_WARNING('No coordinates file. '
                        'Generating coordinates file: %s'
                        % tree.odm_georeferencing_coords)

        # odm_georeference definitions
        kwargs = {
            'bin': context.odm_modules_path,
            'imgs': tree.dataset_resize,
            'imgs_list': tree.opensfm_bundle_list,
            'coords': tree.odm_georeferencing_coords,
            'log': tree.odm_georeferencing_utm_log,
            'verbose': verbose
        }

        # run UTM extraction binary
        # run_and_return apparently yields a non-empty string on failure —
        # it is treated as an error message below.
        extract_utm = system.run_and_return('{bin}/odm_extract_utm -imagesPath {imgs}/ '
                                            '-imageListFile {imgs_list} -outputCoordFile {coords} {verbose} '
                                            '-logFile {log}'.format(**kwargs))

        if extract_utm != '':
            log.ODM_WARNING('Could not generate coordinates file. '
                            'Ignore if there is a GCP file. Error: %s'
                            % extract_utm)

    # check if we rerun cell or not
    rerun_cell = (args.rerun is not None and
                  args.rerun == 'odm_georeferencing') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and
                  'odm_georeferencing' in args.rerun_from)

    # One georeferencing pass per texturing output: always the 3D texturing
    # run, plus the 2.5D run when requested.
    runs = [{
        'georeferencing_dir': tree.odm_georeferencing,
        'texturing_dir': tree.odm_texturing,
        'model': os.path.join(tree.odm_texturing, tree.odm_textured_model_obj)
    }]

    if args.use_25dmesh:
        runs += [{
            'georeferencing_dir': tree.odm_25dgeoreferencing,
            'texturing_dir': tree.odm_25dtexturing,
            'model': os.path.join(tree.odm_25dtexturing, tree.odm_textured_model_obj)
        }]

    for r in runs:
        odm_georeferencing_model_obj_geo = os.path.join(r['texturing_dir'],
                                                        tree.odm_georeferencing_model_obj_geo)
        odm_georeferencing_model_ply_geo = os.path.join(r['georeferencing_dir'],
                                                        tree.odm_georeferencing_model_ply_geo)
        odm_georeferencing_log = os.path.join(r['georeferencing_dir'],
                                              tree.odm_georeferencing_log)
        odm_georeferencing_transform_file = os.path.join(r['georeferencing_dir'],
                                                         tree.odm_georeferencing_transform_file)

        if not io.file_exists(odm_georeferencing_model_obj_geo) or \
           not io.file_exists(odm_georeferencing_model_ply_geo) or rerun_cell:

            # odm_georeference definitions
            kwargs = {
                'bin': context.odm_modules_path,
                'bundle': tree.opensfm_bundle,
                'imgs': tree.dataset_resize,
                'imgs_list': tree.opensfm_bundle_list,
                'model': r['model'],
                'log': odm_georeferencing_log,
                'transform_file': odm_georeferencing_transform_file,
                'coords': tree.odm_georeferencing_coords,
                'pc_geo': odm_georeferencing_model_ply_geo,
                'geo_sys': os.path.join(r['georeferencing_dir'],
                                        tree.odm_georeferencing_model_txt_geo),
                'model_geo': odm_georeferencing_model_obj_geo,
                'size': self.params.img_size,
                'gcp': gcpfile,
                'verbose': verbose
            }

            # Point cloud source depends on the dense-matching backend.
            if not args.use_pmvs:
                kwargs['pc'] = tree.opensfm_model
            else:
                kwargs['pc'] = tree.pmvs_model

            # Check to see if the GCP file exists
            if not self.params.use_exif and (self.params.gcp_file or
                                             find('gcp_list.txt', tree.root_path)):
                log.ODM_INFO('Found %s' % gcpfile)
                try:
                    system.run('{bin}/odm_georef -bundleFile {bundle} -imagesPath {imgs} -imagesListPath {imgs_list} '
                               '-bundleResizedTo {size} -inputFile {model} -outputFile {model_geo} '
                               '-inputPointCloudFile {pc} -outputPointCloudFile {pc_geo} {verbose} '
                               '-logFile {log} -outputTransformFile {transform_file} -georefFileOutputPath {geo_sys} -gcpFile {gcp} '
                               '-outputCoordFile {coords}'.format(**kwargs))
                except Exception:
                    log.ODM_EXCEPTION('Georeferencing failed. ')
                    return ecto.QUIT
            elif io.file_exists(tree.odm_georeferencing_coords):
                log.ODM_INFO('Running georeferencing with generated coords file.')
                system.run('{bin}/odm_georef -bundleFile {bundle} -inputCoordFile {coords} '
                           '-inputFile {model} -outputFile {model_geo} '
                           '-inputPointCloudFile {pc} -outputPointCloudFile {pc_geo} {verbose} '
                           '-logFile {log} -outputTransformFile {transform_file} -georefFileOutputPath {geo_sys}'.format(**kwargs))
            else:
                log.ODM_WARNING('Georeferencing failed. Make sure your '
                                'photos have geotags in the EXIF or you have '
                                'provided a GCP file. ')
                geocreated = False  # skip the rest of the georeferencing

    # Post-processing always refers to the 3D georeferencing output.
    odm_georeferencing_model_ply_geo = os.path.join(tree.odm_georeferencing,
                                                    tree.odm_georeferencing_model_ply_geo)
    if geocreated:
        # update images metadata
        geo_ref = types.ODM_GeoRef()
        geo_ref.parse_coordinate_system(tree.odm_georeferencing_coords)

        for idx, photo in enumerate(self.inputs.photos):
            geo_ref.utm_to_latlon(tree.odm_georeferencing_latlon, photo, idx)

        # convert ply model to LAS reference system
        geo_ref.convert_to_las(odm_georeferencing_model_ply_geo,
                               tree.odm_georeferencing_model_las,
                               tree.odm_georeferencing_las_json)

        # If --dem, create a DEM
        if args.dem:
            demcreated = geo_ref.convert_to_dem(tree.odm_georeferencing_model_las,
                                                tree.odm_georeferencing_dem,
                                                tree.odm_georeferencing_dem_json,
                                                self.params.sample_radius,
                                                self.params.gdal_res,
                                                self.params.gdal_radius)
            if not demcreated:
                log.ODM_WARNING('Something went wrong. Check the logs in odm_georeferencing.')
            else:
                log.ODM_INFO('DEM created at {0}'.format(tree.odm_georeferencing_dem))

        # XYZ point cloud output
        # Parses the PLY as text: everything after "end_header" is a point
        # row whose first three whitespace-separated fields are X Y Z; the
        # stored UTM offsets are added back so the CSV carries absolute
        # coordinates.
        log.ODM_INFO("Creating geo-referenced CSV file (XYZ format)")
        with open(tree.odm_georeferencing_xyz_file, "wb") as csvfile:
            csvfile_writer = csv.writer(csvfile, delimiter=",")
            reachedpoints = False
            with open(odm_georeferencing_model_ply_geo) as f:
                for lineNumber, line in enumerate(f):
                    if reachedpoints:
                        tokens = line.split(" ")
                        csv_line = [float(tokens[0])+geo_ref.utm_east_offset,
                                    float(tokens[1])+geo_ref.utm_north_offset,
                                    tokens[2]]
                        csvfile_writer.writerow(csv_line)
                    if line.startswith("end_header"):
                        reachedpoints = True
        # NOTE(review): redundant — the `with` block above already closed it.
        csvfile.close()
    else:
        log.ODM_WARNING('Found a valid georeferenced model in: %s'
                        % odm_georeferencing_model_ply_geo)

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'Georeferencing')

    log.ODM_INFO('Running ODM Georeferencing Cell - Finished')
    return ecto.OK if args.end_with != 'odm_georeferencing' else ecto.QUIT
def process(self, args, outputs):
    """Create the surface meshes used for texturing.

    Builds a full 3D mesh via screened Poisson reconstruction (unless
    --skip-3dmodel is set) and a 2.5D mesh derived from a DSM (unless
    --use-3dmesh is set), both from the filtered point cloud, writing to
    tree.odm_mesh and tree.odm_25dmesh respectively.
    """
    tree = outputs['tree']
    reconstruction = outputs['reconstruction']

    # define paths and create working directories
    system.mkdir_p(tree.odm_meshing)

    # Create full 3D model unless --skip-3dmodel is set
    if not args.skip_3dmodel:
        if not io.file_exists(tree.odm_mesh) or self.rerun():
            log.ODM_DEBUG('Writing ODM Mesh file in: %s' % tree.odm_mesh)

            mesh.screened_poisson_reconstruction(tree.filtered_point_cloud,
                                                 tree.odm_mesh,
                                                 depth=self.params.get('oct_tree'),
                                                 samples=self.params.get('samples'),
                                                 maxVertexCount=self.params.get('max_vertex'),
                                                 pointWeight=self.params.get('point_weight'),
                                                 threads=self.params.get('max_concurrency'),
                                                 verbose=self.params.get('verbose'))
        else:
            log.ODM_WARNING('Found a valid ODM Mesh file in: %s' % tree.odm_mesh)

    self.update_progress(50)

    # Always generate a 2.5D mesh
    # unless --use-3dmesh is set.
    if not args.use_3dmesh:
        if not io.file_exists(tree.odm_25dmesh) or self.rerun():
            log.ODM_DEBUG('Writing ODM 2.5D Mesh file in: %s' % tree.odm_25dmesh)

            # DSM resolution is derived from the (capped) orthophoto
            # resolution scaled by the rounded ground sampling distance.
            ortho_resolution = gsd.cap_resolution(args.orthophoto_resolution,
                                                  tree.opensfm_reconstruction,
                                                  ignore_gsd=args.ignore_gsd) / 100.0
            dsm_multiplier = max(1.0, gsd.rounded_gsd(tree.opensfm_reconstruction,
                                                      default_value=4, ndigits=3,
                                                      ignore_gsd=args.ignore_gsd))

            # A good DSM size depends on the flight altitude.
            # Flights at low altitude need more details (higher resolution)
            # Flights at higher altitude benefit from smoother surfaces (lower resolution)
            dsm_resolution = ortho_resolution * dsm_multiplier

            dsm_radius = dsm_resolution * math.sqrt(2)

            # Sparse point clouds benefits from using
            # a larger radius interolation --> less holes
            if args.fast_orthophoto:
                dsm_radius *= 2

            log.ODM_DEBUG('ODM 2.5D DSM resolution: %s' % dsm_resolution)

            mesh.create_25dmesh(tree.filtered_point_cloud, tree.odm_25dmesh,
                                dsm_radius=dsm_radius,
                                dsm_resolution=dsm_resolution,
                                depth=self.params.get('oct_tree'),
                                maxVertexCount=self.params.get('max_vertex'),
                                samples=self.params.get('samples'),
                                verbose=self.params.get('verbose'),
                                available_cores=args.max_concurrency,
                                method='poisson' if args.fast_orthophoto else 'gridded')
        else:
            log.ODM_WARNING('Found a valid ODM 2.5D Mesh file in: %s' % tree.odm_25dmesh)
def process(self, inputs, outputs):
    """Run MVE dense reconstruction on the OpenSfM output.

    Converts the OpenSfM bundle into an MVE scene (makescene), computes
    depth maps (dmrecon) at a scale derived from --depthmap-resolution,
    and merges them into a dense point set (scene2pset) at tree.mve_model.
    Returns ecto.OK, or ecto.QUIT when there are no photos or the pipeline
    ends at this cell.
    """
    # Benchmarking
    start_time = system.now_raw()

    log.ODM_INFO('Running MVE Cell')

    # get inputs
    tree = inputs.tree
    args = inputs.args
    reconstruction = inputs.reconstruction
    photos = reconstruction.photos

    if not photos:
        log.ODM_ERROR('Not enough photos in photos array to start MVE')
        return ecto.QUIT

    # check if we rerun cell or not
    rerun_cell = (args.rerun is not None and
                  args.rerun == 'mve') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and
                  'mve' in args.rerun_from)

    # check if reconstruction was done before
    if not io.file_exists(tree.mve_model) or rerun_cell:
        # cleanup if a rerun
        if io.dir_exists(tree.mve_path) and rerun_cell:
            shutil.rmtree(tree.mve_path)

        # make bundle directory
        if not io.file_exists(tree.mve_bundle):
            system.mkdir_p(tree.mve_path)
            system.mkdir_p(io.join_paths(tree.mve_path, 'bundle'))
            io.copy(tree.opensfm_image_list, tree.mve_image_list)
            io.copy(tree.opensfm_bundle, tree.mve_bundle)

        # mve makescene wants the output directory
        # to not exists before executing it (otherwise it
        # will prompt the user for confirmation)
        if io.dir_exists(tree.mve):
            shutil.rmtree(tree.mve)

        # run mve makescene
        if not io.dir_exists(tree.mve_views):
            system.run('%s %s %s' % (context.makescene_path, tree.mve_path, tree.mve),
                       env_vars={'OMP_NUM_THREADS': args.max_concurrency})

        # Compute mve output scale based on depthmap_resolution
        # Scale 0 means full resolution; each +1 halves each dimension
        # (hence the log base 4 on the pixel-count ratio).
        max_width = 0
        max_height = 0
        for photo in photos:
            max_width = max(photo.width, max_width)
            max_height = max(photo.height, max_height)

        max_pixels = args.depthmap_resolution * args.depthmap_resolution
        if max_width * max_height <= max_pixels:
            mve_output_scale = 0
        else:
            ratio = float(max_width * max_height) / float(max_pixels)
            mve_output_scale = int(math.ceil(math.log(ratio) / math.log(4.0)))

        dmrecon_config = [
            "-s%s" % mve_output_scale,
            "--progress=silent",
            "--local-neighbors=2",
            "--force",
        ]

        # Run MVE's dmrecon
        log.ODM_INFO('                                                                               ')
        log.ODM_INFO('                                    ,*/**                                      ')
        log.ODM_INFO('                                  ,*@%*/@%*                                    ')
        log.ODM_INFO('                                ,/@%******@&*.                                 ')
        log.ODM_INFO('                              ,*@&*********/@&*                                ')
        log.ODM_INFO('                            ,*@&**************@&*                              ')
        log.ODM_INFO('                          ,/@&******************@&*.                           ')
        log.ODM_INFO('                        ,*@&*********************/@&*                          ')
        log.ODM_INFO('                      ,*@&**************************@&*.                       ')
        log.ODM_INFO('                    ,/@&******************************&&*,                     ')
        log.ODM_INFO('                  ,*&&**********************************@&*.                   ')
        log.ODM_INFO('                ,*@&**************************************@&*.                 ')
        log.ODM_INFO('              ,*@&***************#@@@@@@@@@%****************&&*,               ')
        log.ODM_INFO('            .*&&***************&@@@@@@@@@@@@@@****************@@*.             ')
        log.ODM_INFO('          .*@&***************&@@@@@@@@@@@@@@@@@%****(@@%********@@*.           ')
        log.ODM_INFO('        .*@@***************%@@@@@@@@@@@@@@@@@@@@@#****&@@@@%******&@*,         ')
        log.ODM_INFO('      .*&@****************@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@/*****@@*.      ')
        log.ODM_INFO('    .*@@****************@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@%*************@@*.    ')
        log.ODM_INFO('  .*@@****/***********@@@@@&**(@@@@@@@@@@@@@@@@@@@@@@@#*****************%@*,  ')
        log.ODM_INFO(' */@*******@*******#@@@@%*******/@@@@@@@@@@@@@@@@@@@@********************/@(, ')
        log.ODM_INFO(' ,*@(********&@@@@@@#**************/@@@@@@@#**(@@&/**********************@&*  ')
        log.ODM_INFO('   *#@/*******************************@@@@@***&@&**********************&@*,   ')
        log.ODM_INFO('     *#@#******************************&@@@***@#*********************&@*,     ')
        log.ODM_INFO('       */@#*****************************@@@************************@@*.       ')
        log.ODM_INFO('         *#@/***************************/@@/*********************%@*,         ')
        log.ODM_INFO('           *#@#**************************#@@%******************%@*,           ')
        log.ODM_INFO('             */@#*************************(@@@@@@@&%/********&@*.             ')
        log.ODM_INFO('               *(@(*********************************/%@@%**%@*,               ')
        log.ODM_INFO('                 *(@%************************************%@**                 ')
        log.ODM_INFO('                   **@%********************************&@*,                   ')
        log.ODM_INFO('                     *(@(****************************%@/*                     ')
        log.ODM_INFO('                       ,(@%************************#@/*                       ')
        log.ODM_INFO('                         ,*@%********************&@/,                         ')
        log.ODM_INFO('                           */@#****************#@/*                           ')
        log.ODM_INFO('                             ,/@&************#@/*                             ')
        log.ODM_INFO('                               ,*@&********%@/,                               ')
        log.ODM_INFO('                                 */@#****(@/*                                  ')
        log.ODM_INFO('                                   ,/@@@@(*                                     ')
        log.ODM_INFO('                                     .**,                                      ')
        log.ODM_INFO('')
        log.ODM_INFO("Running dense reconstruction. This might take a while. Please be patient, the process is not dead or hung.")
        log.ODM_INFO("                              Process is running")
        system.run('%s %s %s' % (context.dmrecon_path, ' '.join(dmrecon_config), tree.mve),
                   env_vars={'OMP_NUM_THREADS': args.max_concurrency})

        scene2pset_config = [
            "-F%s" % mve_output_scale
        ]

        # run scene2pset
        system.run('%s %s "%s" "%s"' % (context.scene2pset_path,
                                        ' '.join(scene2pset_config),
                                        tree.mve,
                                        tree.mve_model),
                   env_vars={'OMP_NUM_THREADS': args.max_concurrency})
    else:
        log.ODM_WARNING('Found a valid MVE reconstruction file in: %s' %
                        tree.mve_model)

    outputs.reconstruction = reconstruction

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'MVE')

    log.ODM_INFO('Running ODM MVE Cell - Finished')
    return ecto.OK if args.end_with != 'mve' else ecto.QUIT
def process(self, inputs, outputs):
    """Generate the 3D mesh (and optionally a 2.5D mesh) for this dataset.

    Picks the input point cloud according to the dense-matching backend,
    then invokes the odm_meshing / odm_25dmeshing binaries, skipping any
    stage whose output already exists unless a rerun was requested.
    Forwards the reconstruction object to the next cell.
    """
    # Benchmarking
    started_at = system.now_raw()

    log.ODM_INFO('Running ODM Meshing Cell')

    # Unpack cell inputs.
    args = inputs.args
    tree = inputs.tree
    reconstruction = inputs.reconstruction
    verbose = '-verbose' if self.params.verbose else ''

    # Make sure the working directory exists.
    system.mkdir_p(tree.odm_meshing)

    # Does the user want this cell to run again even if outputs exist?
    rerun_cell = (
        (args.rerun is not None and args.rerun == 'odm_meshing')
        or args.rerun_all
        or (args.rerun_from is not None and 'odm_meshing' in args.rerun_from)
    )

    # Select the input point cloud for meshing.
    if args.use_pmvs:
        infile = tree.pmvs_model
    elif args.fast_orthophoto:
        infile = os.path.join(tree.opensfm, 'reconstruction.ply')
    else:
        infile = tree.opensfm_model

    # Do not create full 3D model with fast_orthophoto
    if not args.fast_orthophoto:
        if io.file_exists(tree.odm_mesh) and not rerun_cell:
            log.ODM_WARNING('Found a valid ODM Mesh file in: %s' %
                            tree.odm_mesh)
        else:
            log.ODM_DEBUG('Writing ODM Mesh file in: %s' % tree.odm_mesh)

            mesh_opts = {
                'bin': context.odm_modules_path,
                'outfile': tree.odm_mesh,
                'infile': infile,
                'log': tree.odm_meshing_log,
                'max_vertex': self.params.max_vertex,
                'oct_tree': self.params.oct_tree,
                'samples': self.params.samples,
                'solver': self.params.solver,
                'verbose': verbose,
            }

            # run meshing binary
            system.run('{bin}/odm_meshing -inputFile {infile} '
                       '-outputFile {outfile} -logFile {log} '
                       '-maxVertexCount {max_vertex} -octreeDepth {oct_tree} {verbose} '
                       '-samplesPerNode {samples} -solverDivide {solver}'.format(**mesh_opts))

    # Do we need to generate a 2.5D mesh also?
    # This is always set if fast_orthophoto is set
    if args.use_25dmesh:
        if io.file_exists(tree.odm_25dmesh) and not rerun_cell:
            log.ODM_WARNING('Found a valid ODM 2.5D Mesh file in: %s' %
                            tree.odm_25dmesh)
        else:
            log.ODM_DEBUG('Writing ODM 2.5D Mesh file in: %s' %
                          tree.odm_25dmesh)

            mesh25d_opts = {
                'bin': context.odm_modules_path,
                'outfile': tree.odm_25dmesh,
                'infile': infile,
                'log': tree.odm_25dmeshing_log,
                'verbose': verbose,
                'max_vertex': self.params.max_vertex,
                'neighbors': args.mesh_neighbors,
                'resolution': args.mesh_resolution,
            }

            # run 2.5D meshing binary
            system.run('{bin}/odm_25dmeshing -inputFile {infile} '
                       '-outputFile {outfile} -logFile {log} '
                       '-maxVertexCount {max_vertex} -neighbors {neighbors} '
                       '-resolution {resolution} {verbose}'.format(**mesh25d_opts))

    outputs.reconstruction = reconstruction

    if args.time:
        system.benchmark(started_at, tree.benchmarking, 'Meshing')

    log.ODM_INFO('Running ODM Meshing Cell - Finished')
    return ecto.OK if args.end_with != 'odm_meshing' else ecto.QUIT
import os
import shutil

from scripts.odm_app import ODMApp

if __name__ == '__main__':
    args = config.config()

    log.ODM_INFO('Initializing OpenDroneMap app - %s' % system.now())

    # Add project dir if doesn't exist
    args.project_path = io.join_paths(args.project_path, args.name)
    if not io.dir_exists(args.project_path):
        log.ODM_WARNING('Directory %s does not exist. Creating it now.' % args.name)
        system.mkdir_p(os.path.abspath(args.project_path))

    # If user asks to rerun everything, delete all of the existing progress directories.
    # TODO: Move this somewhere it's not hard-coded
    if args.rerun_all:
        log.ODM_DEBUG("Rerun all -- Removing old data")
        # Remove each output directory explicitly with shutil.rmtree instead
        # of shelling out to `rm -rf`: this is safe for project paths that
        # contain spaces or shell metacharacters, does not require a POSIX
        # shell, and (via ignore_errors) silently skips directories that do
        # not exist, matching `rm -rf` semantics.
        for subdir in ('images_resize', 'odm_georeferencing', 'odm_meshing',
                       'odm_orthophoto', 'odm_texturing', 'opensfm', 'pmvs'):
            shutil.rmtree(os.path.join(args.project_path, subdir),
                          ignore_errors=True)

    # create an instance of my App BlackBox
    # internally configure all tasks
    app = ODMApp(args=args)
def process(self, args, outputs):
    """Load and validate the input image dataset.

    Scans tree.dataset_raw for supported images, pairs each with an
    optional "<name>_mask" companion file, applies geolocation file /
    GPS-accuracy / camera-lens overrides, and caches the photo list in
    images.json so later runs can skip the scan.  Stores the tree and an
    ODM_Reconstruction object in `outputs`.
    """
    outputs['start_time'] = system.now_raw()

    tree = types.ODM_Tree(args.project_path, args.gcp, args.geo)
    outputs['tree'] = tree

    if args.time and io.file_exists(tree.benchmarking):
        # Delete the previously made file
        os.remove(tree.benchmarking)
        with open(tree.benchmarking, 'a') as b:
            b.write('ODM Benchmarking file created %s\nNumber of Cores: %s\n\n' % (system.now(), context.num_cores))

    # check if the image filename is supported
    # (mask companions are excluded here and collected separately below)
    def valid_image_filename(filename):
        (pathfn, ext) = os.path.splitext(filename)
        return ext.lower() in context.supported_extensions and pathfn[-5:] != "_mask"

    # Get supported images from dir
    # Returns (valid, rejects): non-image entries and masks land in rejects.
    def get_images(in_dir):
        log.ODM_DEBUG(in_dir)
        entries = os.listdir(in_dir)
        valid, rejects = [], []
        for f in entries:
            if valid_image_filename(f):
                valid.append(f)
            else:
                rejects.append(f)
        return valid, rejects

    # Look up the mask for a photo by its "<basename>_mask" key; returns
    # None when there is no usable mask.
    def find_mask(photo_path, masks):
        (pathfn, ext) = os.path.splitext(os.path.basename(photo_path))
        k = "{}_mask".format(pathfn)

        mask = masks.get(k)
        if mask:
            # Spaces are not supported due to OpenSfM's mask_list.txt format reqs
            if not " " in mask:
                return mask
            else:
                log.ODM_WARNING("Image mask {} has a space. Spaces are currently not supported for image masks.".format(mask))

    # get images directory
    images_dir = tree.dataset_raw

    # define paths and create working directories
    system.mkdir_p(tree.odm_georeferencing)

    log.ODM_INFO('Loading dataset from: %s' % images_dir)

    # check if we rerun cell or not
    images_database_file = os.path.join(tree.root_path, 'images.json')
    if not io.file_exists(images_database_file) or self.rerun():
        if not os.path.exists(images_dir):
            raise system.ExitException("There are no images in %s! Make sure that your project path and dataset name is correct. The current is set to: %s" % (images_dir, args.project_path))

        files, rejects = get_images(images_dir)
        if files:
            # create ODMPhoto list
            path_files = [os.path.join(images_dir, f) for f in files]

            # Lookup table for masks
            masks = {}
            for r in rejects:
                (p, ext) = os.path.splitext(r)
                if p[-5:] == "_mask" and ext.lower() in context.supported_extensions:
                    masks[p] = r

            photos = []
            with open(tree.dataset_list, 'w') as dataset_list:
                log.ODM_INFO("Loading %s images" % len(path_files))
                for f in path_files:
                    try:
                        p = types.ODM_Photo(f)
                        p.set_mask(find_mask(f, masks))
                        photos += [p]
                        dataset_list.write(photos[-1].filename + '\n')
                    except PhotoCorruptedException:
                        # Corrupted photos are dropped, not fatal.
                        log.ODM_WARNING("%s seems corrupted and will not be used" % os.path.basename(f))

            # Check if a geo file is available
            if tree.odm_geo_file is not None and os.path.isfile(tree.odm_geo_file):
                log.ODM_INFO("Found image geolocation file")
                gf = GeoFile(tree.odm_geo_file)
                updated = 0
                for p in photos:
                    entry = gf.get_entry(p.filename)
                    if entry:
                        p.update_with_geo_entry(entry)
                        updated += 1
                log.ODM_INFO("Updated %s image positions" % updated)

            # GPSDOP override if we have GPS accuracy information (such as RTK)
            if 'gps_accuracy_is_set' in args:
                log.ODM_INFO("Forcing GPS DOP to %s for all images" % args.gps_accuracy)
                for p in photos:
                    p.override_gps_dop(args.gps_accuracy)

            # Override projection type
            if args.camera_lens != "auto":
                log.ODM_INFO("Setting camera lens to %s for all images" % args.camera_lens)
                for p in photos:
                    p.override_camera_projection(args.camera_lens)

            # Save image database for faster restart
            save_images_database(photos, images_database_file)
        else:
            raise system.ExitException('Not enough supported images in %s' % images_dir)
    else:
        # We have an images database, just load it
        photos = load_images_database(images_database_file)

    log.ODM_INFO('Found %s usable images' % len(photos))
    log.logger.log_json_images(len(photos))

    # Create reconstruction object
    reconstruction = types.ODM_Reconstruction(photos)

    # Georeference from GCPs when available (and EXIF-only mode is off),
    # otherwise from the photos' GPS data.
    if tree.odm_georeferencing_gcp and not args.use_exif:
        reconstruction.georeference_with_gcp(tree.odm_georeferencing_gcp,
                                             tree.odm_georeferencing_coords,
                                             tree.odm_georeferencing_gcp_utm,
                                             tree.odm_georeferencing_model_txt_geo,
                                             rerun=self.rerun())
    else:
        reconstruction.georeference_with_gps(tree.dataset_raw,
                                             tree.odm_georeferencing_coords,
                                             tree.odm_georeferencing_model_txt_geo,
                                             rerun=self.rerun())

    reconstruction.save_proj_srs(os.path.join(tree.odm_georeferencing,
                                              tree.odm_georeferencing_proj))
    outputs['reconstruction'] = reconstruction

    # Try to load boundaries
    if args.boundary:
        if reconstruction.is_georeferenced():
            outputs['boundary'] = boundary.load_boundary(args.boundary, reconstruction.get_proj_srs())
        else:
            args.boundary = None
            log.ODM_WARNING("Reconstruction is not georeferenced, but boundary file provided (will ignore boundary file)")

    # If sfm-algorithm is triangulation, check if photos have OPK
    if args.sfm_algorithm == 'triangulation':
        for p in photos:
            if not p.has_opk():
                log.ODM_WARNING("No omega/phi/kappa angles found in input photos (%s), switching sfm-algorithm to incremental" % p.filename)
                args.sfm_algorithm = 'incremental'
                break
def process(self, inputs, outputs):
    """Generate DSM/DTM rasters from the georeferenced LAS point cloud.

    Optionally classifies the point cloud first (guarded by a marker file
    so classification is not repeated), then builds the requested DEM
    products with progressive gap-filling radii and optionally crops them
    to the model boundary shapefile.
    """
    # Benchmarking
    start_time = system.now_raw()

    log.ODM_INFO('Running ODM DEM Cell')

    # get inputs
    args = self.inputs.args
    tree = self.inputs.tree
    las_model_found = io.file_exists(tree.odm_georeferencing_model_las)

    # check if we rerun cell or not
    rerun_cell = (args.rerun is not None and
                  args.rerun == 'odm_dem') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and
                  'odm_dem' in args.rerun_from)

    log.ODM_INFO('Classify: ' + str(args.pc_classify != "none"))
    log.ODM_INFO('Create DSM: ' + str(args.dsm))
    log.ODM_INFO('Create DTM: ' + str(args.dtm))
    log.ODM_INFO('DEM input file {0} found: {1}'.format(tree.odm_georeferencing_model_las, str(las_model_found)))

    # Setup terrain parameters
    # Maps --dem-terrain-type to (slope, cellsize) used by classification.
    terrain_params_map = {
        'flatnonforest': (1, 3),
        'flatforest': (1, 2),
        'complexnonforest': (5, 2),
        'complexforest': (10, 2)
    }
    terrain_params = terrain_params_map[args.dem_terrain_type.lower()]
    slope, cellsize = terrain_params

    # define paths and create working directories
    odm_dem_root = tree.path('odm_dem')
    if not io.dir_exists(odm_dem_root):
        system.mkdir_p(odm_dem_root)

    if args.pc_classify != "none" and las_model_found:
        # Marker file records the parameters of a completed classification
        # so it is skipped on subsequent runs.
        pc_classify_marker = os.path.join(odm_dem_root, 'pc_classify_done.txt')

        if not io.file_exists(pc_classify_marker) or rerun_cell:
            log.ODM_INFO("Classifying {} using {}".format(tree.odm_georeferencing_model_las, args.pc_classify))
            commands.classify(tree.odm_georeferencing_model_las,
                              args.pc_classify == "smrf",
                              slope,
                              cellsize,
                              approximate=args.dem_approximate,
                              initialDistance=args.dem_initial_distance,
                              verbose=args.verbose)

            with open(pc_classify_marker, 'w') as f:
                f.write('Classify: {}\n'.format(args.pc_classify))
                f.write('Slope: {}\n'.format(slope))
                f.write('Cellsize: {}\n'.format(cellsize))
                f.write('Approximate: {}\n'.format(args.dem_approximate))
                f.write('InitialDistance: {}\n'.format(args.dem_initial_distance))

    # Do we need to process anything here?
    if (args.dsm or args.dtm) and las_model_found:
        dsm_output_filename = os.path.join(odm_dem_root, 'dsm.tif')
        dtm_output_filename = os.path.join(odm_dem_root, 'dtm.tif')

        if (args.dtm and not io.file_exists(dtm_output_filename)) or \
           (args.dsm and not io.file_exists(dsm_output_filename)) or \
           rerun_cell:

            products = []
            if args.dsm: products.append('dsm')
            if args.dtm: products.append('dtm')

            # Each gap-fill step triples the previous search radius.
            radius_steps = [args.dem_resolution]
            for _ in range(args.dem_gapfill_steps - 1):
                radius_steps.append(radius_steps[-1] * 3)  # 3 is arbitrary, maybe there's a better value?

            for product in products:
                # NOTE(review): map() yields a one-shot iterator on Python 3;
                # fine if create_dems consumes it once and the interpreter is
                # Python 2 — confirm before porting.
                commands.create_dems([tree.odm_georeferencing_model_las],
                                     product,
                                     radius=map(str, radius_steps),
                                     gapfill=True,
                                     outdir=odm_dem_root,
                                     resolution=args.dem_resolution,
                                     maxsd=args.dem_maxsd,
                                     maxangle=args.dem_maxangle,
                                     decimation=args.dem_decimation,
                                     verbose=args.verbose)

                if args.crop > 0:
                    bounds_shapefile_path = os.path.join(tree.odm_georeferencing, 'odm_georeferenced_model.bounds.shp')
                    if os.path.exists(bounds_shapefile_path):
                        Cropper.crop(bounds_shapefile_path, os.path.join(odm_dem_root, "{}.tif".format(product)), {
                            'TILED': 'YES',
                            'COMPRESS': 'LZW',
                            'BLOCKXSIZE': 512,
                            'BLOCKYSIZE': 512,
                            'NUM_THREADS': 'ALL_CPUS'
                        })
        else:
            log.ODM_WARNING('Found existing outputs in: %s' % odm_dem_root)
    else:
        log.ODM_WARNING('DEM will not be generated')

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'Dem')

    log.ODM_INFO('Running ODM DEM Cell - Finished')
    return ecto.OK if args.end_with != 'odm_dem' else ecto.QUIT
    def process(self, args, outputs):
        """Render the orthophoto from the textured model(s) and, when the
        reconstruction is georeferenced, convert it into a GeoTIFF with
        optional cutline/feathering post-processing.
        """
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']
        verbose = '-verbose' if args.verbose else ''

        # define paths and create working directories
        system.mkdir_p(tree.odm_orthophoto)

        if not io.file_exists(tree.odm_orthophoto_tif) or self.rerun():
            gsd_error_estimate = 0.1
            ignore_resolution = False
            if not reconstruction.is_georeferenced():
                # Match DEMs
                gsd_error_estimate = -3
                ignore_resolution = True

            # cap_resolution returns cm/pixel; odm_orthophoto takes the
            # inverse (pixels per meter)
            resolution = 1.0 / (gsd.cap_resolution(args.orthophoto_resolution,
                                                   tree.opensfm_reconstruction,
                                                   gsd_error_estimate=gsd_error_estimate,
                                                   ignore_gsd=args.ignore_gsd,
                                                   ignore_resolution=ignore_resolution,
                                                   has_gcp=reconstruction.has_gcp()) / 100.0)

            # odm_orthophoto definitions
            kwargs = {
                'bin': context.odm_modules_path,
                'log': tree.odm_orthophoto_log,
                'ortho': tree.odm_orthophoto_render,
                'corners': tree.odm_orthophoto_corners,
                'res': resolution,
                'bands': '',
                'verbose': verbose
            }

            models = []

            if args.use_3dmesh:
                base_dir = tree.odm_texturing
            else:
                base_dir = tree.odm_25dtexturing

            model_file = tree.odm_textured_model_obj

            if reconstruction.multi_camera:
                # One textured model per band; non-primary bands live in
                # lowercase per-band subdirectories.
                for band in reconstruction.multi_camera:
                    primary = band['name'] == get_primary_band_name(reconstruction.multi_camera, args.primary_band)
                    subdir = ""
                    if not primary:
                        subdir = band['name'].lower()
                    models.append(os.path.join(base_dir, subdir, model_file))
                kwargs['bands'] = '-bands %s' % (','.join([quote(b['name'].lower()) for b in reconstruction.multi_camera]))
            else:
                models.append(os.path.join(base_dir, model_file))

            kwargs['models'] = ','.join(map(quote, models))

            # run odm_orthophoto
            system.run('{bin}/odm_orthophoto -inputFiles {models} '
                       '-logFile {log} -outputFile {ortho} -resolution {res} {verbose} '
                       '-outputCornerFile {corners} {bands}'.format(**kwargs))

            # Create georeferenced GeoTiff
            geotiffcreated = False

            if reconstruction.is_georeferenced():
                ulx = uly = lrx = lry = 0.0
                # First line of the corners file holds four space-separated
                # local coordinates; shift by the UTM offsets to obtain the
                # absolute georeferenced bounds.
                with open(tree.odm_orthophoto_corners) as f:
                    for lineNumber, line in enumerate(f):
                        if lineNumber == 0:
                            tokens = line.split(' ')
                            if len(tokens) == 4:
                                ulx = float(tokens[0]) + \
                                    float(reconstruction.georef.utm_east_offset)
                                lry = float(tokens[1]) + \
                                    float(reconstruction.georef.utm_north_offset)
                                lrx = float(tokens[2]) + \
                                    float(reconstruction.georef.utm_east_offset)
                                uly = float(tokens[3]) + \
                                    float(reconstruction.georef.utm_north_offset)
                log.ODM_INFO('Creating GeoTIFF')

                orthophoto_vars = orthophoto.get_orthophoto_vars(args)

                kwargs = {
                    'ulx': ulx,
                    'uly': uly,
                    'lrx': lrx,
                    'lry': lry,
                    'vars': ' '.join(['-co %s=%s' % (k, orthophoto_vars[k]) for k in orthophoto_vars]),
                    'proj': reconstruction.georef.proj4(),
                    'input': tree.odm_orthophoto_render,
                    'output': tree.odm_orthophoto_tif,
                    'log': tree.odm_orthophoto_tif_log,
                    'max_memory': get_max_memory(),
                }

                system.run('gdal_translate -a_ullr {ulx} {uly} {lrx} {lry} '
                           '{vars} '
                           '-a_srs \"{proj}\" '
                           '--config GDAL_CACHEMAX {max_memory}% '
                           '--config GDAL_TIFF_INTERNAL_MASK YES '
                           '{input} {output} > {log}'.format(**kwargs))

                bounds_file_path = os.path.join(tree.odm_georeferencing, 'odm_georeferenced_model.bounds.gpkg')

                # Cutline computation, before cropping
                # We want to use the full orthophoto, not the cropped one.
                if args.orthophoto_cutline:
                    cutline_file = os.path.join(tree.odm_orthophoto, "cutline.gpkg")
                    compute_cutline(tree.odm_orthophoto_tif,
                                    bounds_file_path,
                                    cutline_file,
                                    args.max_concurrency,
                                    tmpdir=os.path.join(tree.odm_orthophoto, "grass_cutline_tmpdir"),
                                    scale=0.25)
                    orthophoto.compute_mask_raster(tree.odm_orthophoto_tif, cutline_file,
                                                   os.path.join(tree.odm_orthophoto, "odm_orthophoto_cut.tif"),
                                                   blend_distance=20, only_max_coords_feature=True)

                orthophoto.post_orthophoto_steps(args, bounds_file_path, tree.odm_orthophoto_tif, tree.orthophoto_tiles)

                # Generate feathered orthophoto also
                if args.orthophoto_cutline:
                    orthophoto.feather_raster(tree.odm_orthophoto_tif,
                                              os.path.join(tree.odm_orthophoto, "odm_orthophoto_feathered.tif"),
                                              blend_distance=20)

                geotiffcreated = True

            if not geotiffcreated:
                # Fall back to a pseudo-georeferenced .tif so downstream
                # tooling still gets the expected output file.
                if io.file_exists(tree.odm_orthophoto_render):
                    pseudogeo.add_pseudo_georeferencing(tree.odm_orthophoto_render)
                    log.ODM_INFO("Renaming %s --> %s" % (tree.odm_orthophoto_render, tree.odm_orthophoto_tif))
                    os.rename(tree.odm_orthophoto_render, tree.odm_orthophoto_tif)
                else:
                    log.ODM_WARNING("Could not generate an orthophoto (it did not render)")
        else:
            log.ODM_WARNING('Found a valid orthophoto in: %s' % tree.odm_orthophoto_tif)

        if args.optimize_disk_space and io.file_exists(tree.odm_orthophoto_render):
            os.remove(tree.odm_orthophoto_render)
    def process(self, args, outputs):
        """Texture the 3D and/or 2.5D mesh(es) with mvs-texturing.

        Builds a list of texturing "runs" (one per output mesh, and per band
        for multi-camera rigs), then invokes the mvstex binary for each run
        whose output does not yet exist.
        """
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']

        # Mutable holder so the nested helper can accumulate runs
        # (py2-compatible substitute for `nonlocal`).
        class nonloc:
            runs = []

        def add_run(nvm_file, primary=True, band=None):
            # Queue texturing run(s) for one reconstruction (.nvm) file;
            # non-primary bands are textured into a per-band subdirectory.
            subdir = ""
            if not primary and band is not None:
                subdir = band

            if not args.skip_3dmodel and (primary or args.use_3dmesh):
                nonloc.runs += [{
                    'out_dir': os.path.join(tree.odm_texturing, subdir),
                    'model': tree.odm_mesh,
                    'nadir': False,
                    'nvm_file': nvm_file
                }]

            if not args.use_3dmesh:
                # The 2.5D model is textured with nadir mode enabled.
                nonloc.runs += [{
                    'out_dir': os.path.join(tree.odm_25dtexturing, subdir),
                    'model': tree.odm_25dmesh,
                    'nadir': True,
                    'nvm_file': nvm_file
                }]

        if reconstruction.multi_camera:
            for band in reconstruction.multi_camera:
                primary = band == reconstruction.multi_camera[0]
                nvm_file = os.path.join(tree.opensfm, "undistorted", "reconstruction_%s.nvm" % band['name'].lower())
                add_run(nvm_file, primary, band['name'].lower())
        else:
            add_run(tree.opensfm_reconstruction_nvm)

        progress_per_run = 100.0 / len(nonloc.runs)
        progress = 0.0

        for r in nonloc.runs:
            if not io.dir_exists(r['out_dir']):
                system.mkdir_p(r['out_dir'])

            odm_textured_model_obj = os.path.join(r['out_dir'], tree.odm_textured_model_obj)

            if not io.file_exists(odm_textured_model_obj) or self.rerun():
                log.ODM_INFO('Writing MVS Textured file in: %s' % odm_textured_model_obj)

                # Format arguments to fit Mvs-Texturing app
                skipGeometricVisibilityTest = ""
                skipGlobalSeamLeveling = ""
                skipLocalSeamLeveling = ""
                skipHoleFilling = ""
                keepUnseenFaces = ""
                nadir = ""

                if (self.params.get('skip_vis_test')):
                    skipGeometricVisibilityTest = "--skip_geometric_visibility_test"
                if (self.params.get('skip_glob_seam_leveling')):
                    skipGlobalSeamLeveling = "--skip_global_seam_leveling"
                if (self.params.get('skip_loc_seam_leveling')):
                    skipLocalSeamLeveling = "--skip_local_seam_leveling"
                if (self.params.get('skip_hole_fill')):
                    skipHoleFilling = "--skip_hole_filling"
                if (self.params.get('keep_unseen_faces')):
                    keepUnseenFaces = "--keep_unseen_faces"
                if (r['nadir']):
                    nadir = '--nadir_mode'

                # mvstex definitions
                kwargs = {
                    'bin': context.mvstex_path,
                    'out_dir': io.join_paths(r['out_dir'], "odm_textured_model"),
                    'model': r['model'],
                    'dataTerm': self.params.get('data_term'),
                    'outlierRemovalType': self.params.get('outlier_rem_type'),
                    'skipGeometricVisibilityTest': skipGeometricVisibilityTest,
                    'skipGlobalSeamLeveling': skipGlobalSeamLeveling,
                    'skipLocalSeamLeveling': skipLocalSeamLeveling,
                    'skipHoleFilling': skipHoleFilling,
                    'keepUnseenFaces': keepUnseenFaces,
                    'toneMapping': self.params.get('tone_mapping'),
                    'nadirMode': nadir,
                    # exponential weighting of the nadir prior
                    'nadirWeight': 2 ** args.texturing_nadir_weight - 1,
                    'nvm_file': r['nvm_file']
                }

                mvs_tmp_dir = os.path.join(r['out_dir'], 'tmp')

                # Make sure tmp directory is empty
                if io.dir_exists(mvs_tmp_dir):
                    log.ODM_INFO("Removing old tmp directory {}".format(mvs_tmp_dir))
                    shutil.rmtree(mvs_tmp_dir)

                # run texturing binary
                system.run('{bin} {nvm_file} {model} {out_dir} '
                           '-d {dataTerm} -o {outlierRemovalType} '
                           '-t {toneMapping} '
                           '{skipGeometricVisibilityTest} '
                           '{skipGlobalSeamLeveling} '
                           '{skipLocalSeamLeveling} '
                           '{skipHoleFilling} '
                           '{keepUnseenFaces} '
                           '{nadirMode} '
                           '-n {nadirWeight}'.format(**kwargs))

                if args.optimize_disk_space:
                    # Remove mvstex intermediates that are not needed downstream.
                    cleanup_files = [
                        os.path.join(r['out_dir'], "odm_textured_model_data_costs.spt"),
                        os.path.join(r['out_dir'], "odm_textured_model_labeling.vec"),
                    ]
                    for f in cleanup_files:
                        if io.file_exists(f):
                            os.remove(f)

                progress += progress_per_run
                self.update_progress(progress)
            else:
                log.ODM_WARNING('Found a valid ODM Texture file in: %s' % odm_textured_model_obj)

        if args.optimize_disk_space:
            # The meshes and undistorted images are no longer needed once
            # texturing has completed.
            for r in nonloc.runs:
                if io.file_exists(r['model']):
                    os.remove(r['model'])

            undistorted_images_path = os.path.join(tree.opensfm, "undistorted", "images")
            if io.dir_exists(undistorted_images_path):
                shutil.rmtree(undistorted_images_path)
    def process(self, args, outputs):
        """Load the input dataset: discover images (and their masks), build
        the photo list (cached in images.json for restarts), georeference the
        reconstruction and populate outputs['tree']/outputs['reconstruction'].
        """
        outputs['start_time'] = system.now_raw()
        tree = types.ODM_Tree(args.project_path, args.gcp, args.geo)
        outputs['tree'] = tree

        if args.time and io.file_exists(tree.benchmarking):
            # Delete the previously made file
            os.remove(tree.benchmarking)
            with open(tree.benchmarking, 'a') as b:
                b.write('ODM Benchmarking file created %s\nNumber of Cores: %s\n\n' % (system.now(), context.num_cores))

        # check if the image filename is supported
        def valid_image_filename(filename):
            # supported extension and not itself a "<name>_mask" file
            (pathfn, ext) = os.path.splitext(filename)
            return ext.lower() in context.supported_extensions and pathfn[-5:] != "_mask"

        # Get supported images from dir
        def get_images(in_dir):
            # Returns (valid, rejects); rejects include mask files, which are
            # picked back up by the mask lookup below.
            log.ODM_DEBUG(in_dir)
            entries = os.listdir(in_dir)
            valid, rejects = [], []
            for f in entries:
                if valid_image_filename(f):
                    valid.append(f)
                else:
                    rejects.append(f)
            return valid, rejects

        def find_mask(photo_path, masks):
            # Look up "<basename>_mask" for this photo; returns None if absent
            # or unusable.
            (pathfn, ext) = os.path.splitext(os.path.basename(photo_path))
            k = "{}_mask".format(pathfn)

            mask = masks.get(k)
            if mask:
                # Spaces are not supported due to OpenSfM's mask_list.txt format reqs
                if not " " in mask:
                    return mask
                else:
                    log.ODM_WARNING("Image mask {} has a space. Spaces are currently not supported for image masks.".format(mask))

        # get images directory
        images_dir = tree.dataset_raw

        # define paths and create working directories
        system.mkdir_p(tree.odm_georeferencing)

        log.ODM_INFO('Loading dataset from: %s' % images_dir)

        # check if we rerun cell or not
        images_database_file = os.path.join(tree.root_path, 'images.json')
        if not io.file_exists(images_database_file) or self.rerun():
            if not os.path.exists(images_dir):
                raise system.ExitException("There are no images in %s! Make sure that your project path and dataset name is correct. The current is set to: %s" % (images_dir, args.project_path))

            files, rejects = get_images(images_dir)
            if files:
                # create ODMPhoto list
                path_files = [os.path.join(images_dir, f) for f in files]

                # Lookup table for masks
                masks = {}
                for r in rejects:
                    (p, ext) = os.path.splitext(r)
                    if p[-5:] == "_mask" and ext.lower() in context.supported_extensions:
                        masks[p] = r

                photos = []
                with open(tree.dataset_list, 'w') as dataset_list:
                    log.ODM_INFO("Loading %s images" % len(path_files))
                    for f in path_files:
                        p = types.ODM_Photo(f)
                        p.set_mask(find_mask(f, masks))
                        photos += [p]
                        dataset_list.write(photos[-1].filename + '\n')

                # Check if a geo file is available
                if tree.odm_geo_file is not None and os.path.exists(tree.odm_geo_file):
                    log.ODM_INFO("Found image geolocation file")
                    gf = GeoFile(tree.odm_geo_file)
                    updated = 0
                    for p in photos:
                        entry = gf.get_entry(p.filename)
                        if entry:
                            p.update_with_geo_entry(entry)
                            updated += 1
                    log.ODM_INFO("Updated %s image positions" % updated)

                # Save image database for faster restart
                save_images_database(photos, images_database_file)
            else:
                raise system.ExitException('Not enough supported images in %s' % images_dir)
        else:
            # We have an images database, just load it
            photos = load_images_database(images_database_file)

        log.ODM_INFO('Found %s usable images' % len(photos))
        log.logger.log_json_images(len(photos))

        # Create reconstruction object
        reconstruction = types.ODM_Reconstruction(photos)

        # Prefer GCPs over EXIF GPS unless --use-exif was requested.
        if tree.odm_georeferencing_gcp and not args.use_exif:
            reconstruction.georeference_with_gcp(tree.odm_georeferencing_gcp,
                                                 tree.odm_georeferencing_coords,
                                                 tree.odm_georeferencing_gcp_utm,
                                                 tree.odm_georeferencing_model_txt_geo,
                                                 rerun=self.rerun())
        else:
            reconstruction.georeference_with_gps(tree.dataset_raw,
                                                 tree.odm_georeferencing_coords,
                                                 tree.odm_georeferencing_model_txt_geo,
                                                 rerun=self.rerun())

        reconstruction.save_proj_srs(os.path.join(tree.odm_georeferencing, tree.odm_georeferencing_proj))
        outputs['reconstruction'] = reconstruction
    def process(self, inputs, outputs):
        """Run MVE dense reconstruction over the OpenSfM output:
        makescene -> dmrecon (depthmaps) -> scene2pset (point set).
        """
        # Benchmarking
        start_time = system.now_raw()

        log.ODM_INFO('Running MVE Cell')

        # get inputs
        tree = inputs.tree
        args = inputs.args
        reconstruction = inputs.reconstruction
        photos = reconstruction.photos

        if not photos:
            log.ODM_ERROR('Not enough photos in photos array to start MVE')
            return ecto.QUIT

        # check if we rerun cell or not
        rerun_cell = (args.rerun is not None and args.rerun == 'mve') or \
                     (args.rerun_all) or \
                     (args.rerun_from is not None and 'mve' in args.rerun_from)

        # check if reconstruction was done before
        if not io.file_exists(tree.mve_model) or rerun_cell:
            # cleanup if a rerun
            if io.dir_exists(tree.mve_path) and rerun_cell:
                shutil.rmtree(tree.mve_path)

            # make bundle directory
            if not io.file_exists(tree.mve_bundle):
                system.mkdir_p(tree.mve_path)
                system.mkdir_p(io.join_paths(tree.mve_path, 'bundle'))
                io.copy(tree.opensfm_image_list, tree.mve_image_list)
                io.copy(tree.opensfm_bundle, tree.mve_bundle)

            # mve makescene wants the output directory
            # to not exists before executing it (otherwise it
            # will prompt the user for confirmation)
            if io.dir_exists(tree.mve):
                shutil.rmtree(tree.mve)

            # run mve makescene
            if not io.dir_exists(tree.mve_views):
                system.run('%s %s %s' % (context.makescene_path, tree.mve_path, tree.mve),
                           env_vars={'OMP_NUM_THREADS': args.max_concurrency})

            # Compute mve output scale based on depthmap_resolution
            max_width = 0
            max_height = 0
            for photo in photos:
                max_width = max(photo.width, max_width)
                max_height = max(photo.height, max_height)

            max_pixels = args.depthmap_resolution * args.depthmap_resolution
            if max_width * max_height <= max_pixels:
                mve_output_scale = 0
            else:
                # each MVE scale level quarters the pixel count, hence log base 4
                ratio = float(max_width * max_height) / float(max_pixels)
                mve_output_scale = int(math.ceil(math.log(ratio) / math.log(4.0)))

            dmrecon_config = [
                "-s%s" % mve_output_scale,
                "--progress=silent",
                "--local-neighbors=2",
                "--force",
            ]

            # Run MVE's dmrecon
            log.ODM_INFO(' ')
            log.ODM_INFO('                              ,*/**                       ')
            log.ODM_INFO('                            ,*@%*/@%*                     ')
            log.ODM_INFO('                          ,/@%******@&*.                  ')
            log.ODM_INFO('                        ,*@&*********/@&*                 ')
            log.ODM_INFO('                      ,*@&**************@&*               ')
            log.ODM_INFO('                    ,/@&******************@&*.            ')
            log.ODM_INFO('                  ,*@&*********************/@&*           ')
            log.ODM_INFO('                ,*@&**************************@&*.        ')
            log.ODM_INFO('              ,/@&******************************&&*,      ')
            log.ODM_INFO('            ,*&&**********************************@&*.    ')
            log.ODM_INFO('          ,*@&**************************************@&*.  ')
            log.ODM_INFO('        ,*@&***************#@@@@@@@@@%****************&&*,')
            log.ODM_INFO('      .*&&***************&@@@@@@@@@@@@@@****************@@*.')
            log.ODM_INFO('    .*@&***************&@@@@@@@@@@@@@@@@@%****(@@%********@@*.')
            log.ODM_INFO('  .*@@***************%@@@@@@@@@@@@@@@@@@@@@#****&@@@@%******&@*,')
            log.ODM_INFO(' .*&@****************@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@/*****@@*.')
            log.ODM_INFO(' .*@@****************@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@%*************@@*.')
            log.ODM_INFO(' .*@@****/***********@@@@@&**(@@@@@@@@@@@@@@@@@@@@@@@#*****************%@*,')
            log.ODM_INFO(' */@*******@*******#@@@@%*******/@@@@@@@@@@@@@@@@@@@@********************/@(,')
            log.ODM_INFO(' ,*@(********&@@@@@@#**************/@@@@@@@#**(@@&/**********************@&*')
            log.ODM_INFO('   *#@/*******************************@@@@@***&@&**********************&@*,')
            log.ODM_INFO('     *#@#******************************&@@@***@#*********************&@*,')
            log.ODM_INFO('       */@#*****************************@@@************************@@*.')
            log.ODM_INFO('         *#@/***************************/@@/*********************%@*,')
            log.ODM_INFO('           *#@#**************************#@@%******************%@*,')
            log.ODM_INFO('             */@#*************************(@@@@@@@&%/********&@*.')
            log.ODM_INFO('               *(@(*********************************/%@@%**%@*,')
            log.ODM_INFO('                 *(@%************************************%@**')
            log.ODM_INFO('                   **@%********************************&@*,')
            log.ODM_INFO('                     *(@(****************************%@/*')
            log.ODM_INFO('                       ,(@%************************#@/*')
            log.ODM_INFO('                         ,*@%********************&@/,')
            log.ODM_INFO('                           */@#****************#@/*')
            log.ODM_INFO('                             ,/@&************#@/*')
            log.ODM_INFO('                               ,*@&********%@/,')
            log.ODM_INFO('                                 */@#****(@/*')
            log.ODM_INFO('                                   ,/@@@@(*')
            log.ODM_INFO('                                     .**,')
            log.ODM_INFO('')
            log.ODM_INFO("Running dense reconstruction. This might take a while. Please be patient, the process is not dead or hung.")
            log.ODM_INFO("              Process is running")
            system.run('%s %s %s' % (context.dmrecon_path, ' '.join(dmrecon_config), tree.mve),
                       env_vars={'OMP_NUM_THREADS': args.max_concurrency})

            scene2pset_config = [
                "-F%s" % mve_output_scale
            ]

            # run scene2pset
            system.run('%s %s "%s" "%s"' % (context.scene2pset_path, ' '.join(scene2pset_config), tree.mve, tree.mve_model),
                       env_vars={'OMP_NUM_THREADS': args.max_concurrency})
        else:
            log.ODM_WARNING('Found a valid MVE reconstruction file in: %s' % tree.mve_model)

        outputs.reconstruction = reconstruction

        if args.time:
            system.benchmark(start_time, tree.benchmarking, 'MVE')

        log.ODM_INFO('Running ODM MVE Cell - Finished')
        return ecto.OK if args.end_with != 'mve' else ecto.QUIT
    def process(self, inputs, outputs):
        """Texture the ODM mesh with mvs-texturing, using cameras from either
        the OpenSfM reconstruction or (with --use-pmvs) a PMVS-derived .nvm.
        """
        # Benchmarking
        start_time = system.now_raw()

        log.ODM_INFO('Running MVS Texturing Cell')

        # get inputs
        args = self.inputs.args
        tree = self.inputs.tree

        # define paths and create working directories
        system.mkdir_p(tree.odm_texturing)

        # check if we rerun cell or not
        rerun_cell = (args.rerun is not None and args.rerun == 'mvs_texturing') or \
                     (args.rerun_all) or \
                     (args.rerun_from is not None and 'mvs_texturing' in args.rerun_from)

        if not io.file_exists(tree.odm_textured_model_obj) or rerun_cell:
            log.ODM_DEBUG('Writing MVS Textured file in: %s' % tree.odm_textured_model_obj)

            # Format arguments to fit Mvs-Texturing app
            # (each flag becomes the literal CLI option or an empty string)
            skipGeometricVisibilityTest = ""
            skipGlobalSeamLeveling = ""
            skipLocalSeamLeveling = ""
            skipHoleFilling = ""
            keepUnseenFaces = ""

            if (self.params.skip_vis_test):
                skipGeometricVisibilityTest = "--skip_geometric_visibility_test"
            if (self.params.skip_glob_seam_leveling):
                skipGlobalSeamLeveling = "--skip_global_seam_leveling"
            if (self.params.skip_loc_seam_leveling):
                skipLocalSeamLeveling = "--skip_local_seam_leveling"
            if (self.params.skip_hole_fill):
                skipHoleFilling = "--skip_hole_filling"
            if (self.params.keep_unseen_faces):
                keepUnseenFaces = "--keep_unseen_faces"

            # mvstex definitions
            kwargs = {
                'bin': context.mvstex_path,
                'out_dir': io.join_paths(tree.odm_texturing, "odm_textured_model"),
                'pmvs_folder': tree.pmvs_rec_path,
                'nvm_file': io.join_paths(tree.pmvs_rec_path, "nvmCams.nvm"),
                'model': tree.odm_mesh,
                'dataTerm': self.params.data_term,
                'outlierRemovalType': self.params.outlier_rem_type,
                'skipGeometricVisibilityTest': skipGeometricVisibilityTest,
                'skipGlobalSeamLeveling': skipGlobalSeamLeveling,
                'skipLocalSeamLeveling': skipLocalSeamLeveling,
                'skipHoleFilling': skipHoleFilling,
                'keepUnseenFaces': keepUnseenFaces,
                'toneMapping': self.params.tone_mapping
            }

            if not args.use_pmvs:
                # OpenSfM path: cameras come straight from the exported .nvm
                kwargs['nvm_file'] = io.join_paths(tree.opensfm, "reconstruction.nvm")
            else:
                log.ODM_DEBUG('Generating .nvm file from pmvs output: %s' % '{nvm_file}'.format(**kwargs))
                # Create .nvm camera file.
                pmvs2nvmcams.run('{pmvs_folder}'.format(**kwargs), '{nvm_file}'.format(**kwargs))

            # run texturing binary
            system.run('{bin} {nvm_file} {model} {out_dir} '
                       '-d {dataTerm} -o {outlierRemovalType} '
                       '-t {toneMapping} '
                       '{skipGeometricVisibilityTest} '
                       '{skipGlobalSeamLeveling} '
                       '{skipLocalSeamLeveling} '
                       '{skipHoleFilling} '
                       '{keepUnseenFaces}'.format(**kwargs))
        else:
            log.ODM_WARNING('Found a valid ODM Texture file in: %s' % tree.odm_textured_model_obj)

        if args.time:
            system.benchmark(start_time, tree.benchmarking, 'Texturing')

        log.ODM_INFO('Running ODM Texturing Cell - Finished')
        return ecto.OK if args.end_with != 'odm_texturing' else ecto.QUIT
def process(self, inputs, outputs): # Benchmarking start_time = system.now_raw() log.ODM_INFO('Running ODM OpenSfM Cell') # get inputs tree = inputs.tree args = inputs.args reconstruction = inputs.reconstruction photos = reconstruction.photos if not photos: log.ODM_ERROR('Not enough photos in photos array to start OpenSfM') return ecto.QUIT # create working directories system.mkdir_p(tree.opensfm) # check if we rerun cell or not rerun_cell = (args.rerun is not None and args.rerun == 'opensfm') or \ (args.rerun_all) or \ (args.rerun_from is not None and 'opensfm' in args.rerun_from) if args.fast_orthophoto: output_file = io.join_paths(tree.opensfm, 'reconstruction.ply') elif args.use_opensfm_dense: output_file = tree.opensfm_model else: output_file = tree.opensfm_reconstruction # check if reconstruction was done before if not io.file_exists(output_file) or rerun_cell: # create file list list_path = io.join_paths(tree.opensfm, 'image_list.txt') has_alt = True with open(list_path, 'w') as fout: for photo in photos: if not photo.altitude: has_alt = False fout.write('%s\n' % io.join_paths(tree.dataset_raw, photo.filename)) # create config file for OpenSfM config = [ "use_exif_size: %s" % ('no' if not self.params.use_exif_size else 'yes'), "feature_process_size: %s" % self.params.feature_process_size, "feature_min_frames: %s" % self.params.feature_min_frames, "processes: %s" % self.params.processes, "matching_gps_neighbors: %s" % self.params.matching_gps_neighbors, "depthmap_method: %s" % args.opensfm_depthmap_method, "depthmap_resolution: %s" % args.depthmap_resolution, "depthmap_min_patch_sd: %s" % args.opensfm_depthmap_min_patch_sd, "depthmap_min_consistent_views: %s" % args.opensfm_depthmap_min_consistent_views, "optimize_camera_parameters: %s" % ('no' if self.params.fixed_camera_params else 'yes') ] if has_alt: log.ODM_DEBUG("Altitude data detected, enabling it for GPS alignment") config.append("use_altitude_tag: yes") config.append("align_method: naive") else: 
config.append("align_method: orientation_prior") config.append("align_orientation_prior: vertical") if args.use_hybrid_bundle_adjustment: log.ODM_DEBUG("Enabling hybrid bundle adjustment") config.append("bundle_interval: 100") # Bundle after adding 'bundle_interval' cameras config.append("bundle_new_points_ratio: 1.2") # Bundle when (new points) / (bundled points) > bundle_new_points_ratio config.append("local_bundle_radius: 1") # Max image graph distance for images to be included in local bundle adjustment if args.matcher_distance > 0: config.append("matching_gps_distance: %s" % self.params.matching_gps_distance) if tree.odm_georeferencing_gcp: config.append("bundle_use_gcp: yes") io.copy(tree.odm_georeferencing_gcp, tree.opensfm) # write config file log.ODM_DEBUG(config) config_filename = io.join_paths(tree.opensfm, 'config.yaml') with open(config_filename, 'w') as fout: fout.write("\n".join(config)) # run OpenSfM reconstruction matched_done_file = io.join_paths(tree.opensfm, 'matching_done.txt') if not io.file_exists(matched_done_file) or rerun_cell: system.run('PYTHONPATH=%s %s/bin/opensfm extract_metadata %s' % (context.pyopencv_path, context.opensfm_path, tree.opensfm)) system.run('PYTHONPATH=%s %s/bin/opensfm detect_features %s' % (context.pyopencv_path, context.opensfm_path, tree.opensfm)) system.run('PYTHONPATH=%s %s/bin/opensfm match_features %s' % (context.pyopencv_path, context.opensfm_path, tree.opensfm)) with open(matched_done_file, 'w') as fout: fout.write("Matching done!\n") else: log.ODM_WARNING('Found a feature matching done progress file in: %s' % matched_done_file) if not io.file_exists(tree.opensfm_tracks) or rerun_cell: system.run('PYTHONPATH=%s %s/bin/opensfm create_tracks %s' % (context.pyopencv_path, context.opensfm_path, tree.opensfm)) else: log.ODM_WARNING('Found a valid OpenSfM tracks file in: %s' % tree.opensfm_tracks) if not io.file_exists(tree.opensfm_reconstruction) or rerun_cell: system.run('PYTHONPATH=%s %s/bin/opensfm reconstruct 
%s' % (context.pyopencv_path, context.opensfm_path, tree.opensfm)) else: log.ODM_WARNING('Found a valid OpenSfM reconstruction file in: %s' % tree.opensfm_reconstruction) # Check that a reconstruction file has been created if not io.file_exists(tree.opensfm_reconstruction): log.ODM_ERROR("The program could not process this dataset using the current settings. " "Check that the images have enough overlap, " "that there are enough recognizable features " "and that the images are in focus. " "You could also try to increase the --min-num-features parameter." "The program will now exit.") sys.exit(1) # Always export VisualSFM's reconstruction and undistort images # as we'll use these for texturing (after GSD estimation and resizing) if not args.ignore_gsd: image_scale = gsd.image_scale_factor(args.orthophoto_resolution, tree.opensfm_reconstruction) else: image_scale = 1.0 if not io.file_exists(tree.opensfm_reconstruction_nvm) or rerun_cell: system.run('PYTHONPATH=%s %s/bin/opensfm export_visualsfm --image_extension png --scale_focal %s %s' % (context.pyopencv_path, context.opensfm_path, image_scale, tree.opensfm)) else: log.ODM_WARNING('Found a valid OpenSfM NVM reconstruction file in: %s' % tree.opensfm_reconstruction_nvm) # These will be used for texturing system.run('PYTHONPATH=%s %s/bin/opensfm undistort --image_format png --image_scale %s %s' % (context.pyopencv_path, context.opensfm_path, image_scale, tree.opensfm)) # Skip dense reconstruction if necessary and export # sparse reconstruction instead if args.fast_orthophoto: system.run('PYTHONPATH=%s %s/bin/opensfm export_ply --no-cameras %s' % (context.pyopencv_path, context.opensfm_path, tree.opensfm)) elif args.use_opensfm_dense: # Undistort images at full scale in JPG # (TODO: we could compare the size of the PNGs if they are < than depthmap_resolution # and use those instead of re-exporting full resolution JPGs) system.run('PYTHONPATH=%s %s/bin/opensfm undistort %s' % (context.pyopencv_path, 
context.opensfm_path, tree.opensfm)) system.run('PYTHONPATH=%s %s/bin/opensfm compute_depthmaps %s' % (context.pyopencv_path, context.opensfm_path, tree.opensfm)) else: log.ODM_WARNING('Found a valid OpenSfM reconstruction file in: %s' % tree.opensfm_reconstruction) # check if reconstruction was exported to bundler before if not io.file_exists(tree.opensfm_bundle_list) or rerun_cell: # convert back to bundler's format system.run('PYTHONPATH=%s %s/bin/export_bundler %s' % (context.pyopencv_path, context.opensfm_path, tree.opensfm)) else: log.ODM_WARNING('Found a valid Bundler file in: %s' % tree.opensfm_reconstruction) if reconstruction.georef: system.run('PYTHONPATH=%s %s/bin/opensfm export_geocoords %s --transformation --proj \'%s\'' % (context.pyopencv_path, context.opensfm_path, tree.opensfm, reconstruction.georef.projection.srs)) outputs.reconstruction = reconstruction if args.time: system.benchmark(start_time, tree.benchmarking, 'OpenSfM') log.ODM_INFO('Running ODM OpenSfM Cell - Finished') return ecto.OK if args.end_with != 'opensfm' else ecto.QUIT
def process(self, inputs, outputs): # Benchmarking start_time = system.now_raw() log.ODM_INFO('Running ODM Resize Cell') # get inputs args = self.inputs.args tree = self.inputs.tree photos = self.inputs.photos if not photos: log.ODM_ERROR('Not enough photos in photos to resize') return ecto.QUIT if self.params.resize_to <= 0: log.ODM_ERROR('Resize parameter must be greater than 0') return ecto.QUIT # create working directory system.mkdir_p(tree.dataset_resize) log.ODM_DEBUG('Resizing dataset to: %s' % tree.dataset_resize) # check if we rerun cell or not rerun_cell = (args.rerun is not None and args.rerun == 'resize') or \ (args.rerun_all) or \ (args.rerun_from is not None and 'resize' in args.rerun_from) # loop over photos for photo in photos: # define image paths path_file = photo.path_file new_path_file = io.join_paths(tree.dataset_resize, photo.filename) # set raw image path in case we want to rerun cell if io.file_exists(new_path_file) and rerun_cell: path_file = io.join_paths(tree.dataset_raw, photo.filename) if not io.file_exists(new_path_file) or rerun_cell: # open and resize image with opencv img = cv2.imread(path_file) # compute new size max_side = max(img.shape[0], img.shape[1]) if max_side <= self.params.resize_to: log.ODM_WARNING('Resize Parameter is greater than the largest side of the image') ratio = float(self.params.resize_to) / float(max_side) img_r = cv2.resize(img, None, fx=ratio, fy=ratio) # write image with opencv cv2.imwrite(new_path_file, img_r) # read metadata with pyexiv2 old_meta = pyexiv2.ImageMetadata(path_file) new_meta = pyexiv2.ImageMetadata(new_path_file) old_meta.read() new_meta.read() # copy metadata old_meta.copy(new_meta) # update metadata size new_meta['Exif.Photo.PixelXDimension'] = img_r.shape[0] new_meta['Exif.Photo.PixelYDimension'] = img_r.shape[1] new_meta.write() # update photos array with new values photo.path_file = new_path_file photo.width = img_r.shape[0] photo.height = img_r.shape[1] photo.update_focal() # log 
message log.ODM_DEBUG('Resized %s | dimensions: %s' % (photo.filename, img_r.shape)) else: # log message log.ODM_WARNING('Already resized %s | dimensions: %s x %s' % (photo.filename, photo.width, photo.height)) log.ODM_INFO('Resized %s images' % len(photos)) # append photos to cell output self.outputs.photos = photos if args.time: system.benchmark(start_time, tree.benchmarking, 'Resizing') log.ODM_INFO('Running ODM Resize Cell - Finished') return ecto.OK if args.end_with != 'resize' else ecto.QUIT
def process(self, inputs, outputs): # Benchmarking start_time = system.now_raw() log.ODM_INFO('Running ODM Resize Cell') # get inputs args = self.inputs.args tree = self.inputs.tree photos = self.inputs.photos if not photos: log.ODM_ERROR('Not enough photos in photos to resize') return ecto.QUIT if self.params.resize_to <= 0: log.ODM_ERROR('Resize parameter must be greater than 0') return ecto.QUIT # create working directory system.mkdir_p(tree.dataset_resize) log.ODM_DEBUG('Resizing dataset to: %s' % tree.dataset_resize) # check if we rerun cell or not rerun_cell = (args.rerun is not None and args.rerun == 'resize') or \ (args.rerun_all) or \ (args.rerun_from is not None and 'resize' in args.rerun_from) # loop over photos if self.params.skip_resize: photos = Pool().map( partial(no_resize, tree.dataset_raw, tree.dataset_resize, rerun_cell), photos ) log.ODM_INFO('Copied %s images' % len(photos)) else: photos = Pool().map( partial(resize, tree.dataset_raw, tree.dataset_resize, self.params.resize_to, rerun_cell), photos ) log.ODM_INFO('Resized %s images' % len(photos)) # append photos to cell output self.outputs.photos = photos if args.time: system.benchmark(start_time, tree.benchmarking, 'Resizing') log.ODM_INFO('Running ODM Resize Cell - Finished') return ecto.OK if args.end_with != 'resize' else ecto.QUIT
def process(self, inputs, outputs):
    """Texture the reconstructed mesh(es) with the mvs-texturing binary.

    Runs one texturing pass for the full 3D model (unless skipped) and one
    for the 2.5D model (unless a 3D mesh is used exclusively), then forwards
    the reconstruction object on ``outputs.reconstruction``."""
    # Benchmarking
    start_time = system.now_raw()

    log.ODM_INFO('Running MVS Texturing Cell')

    # get inputs
    args = inputs.args
    tree = inputs.tree
    reconstruction = inputs.reconstruction

    # define paths and create working directories
    system.mkdir_p(tree.odm_texturing)
    if not args.use_3dmesh:
        system.mkdir_p(tree.odm_25dtexturing)

    # Should this cell run again even if its outputs already exist?
    rerun_cell = (args.rerun is not None and
                  args.rerun == 'mvs_texturing') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and
                  'mvs_texturing' in args.rerun_from)

    # One entry per texturing pass: output dir, input mesh, nadir flag.
    runs = [] if args.skip_3dmodel else [{
        'out_dir': tree.odm_texturing,
        'model': tree.odm_mesh,
        'nadir': False
    }]

    if not args.use_3dmesh:
        runs.append({
            'out_dir': tree.odm_25dtexturing,
            'model': tree.odm_25dmesh,
            'nadir': True
        })

    for r in runs:
        odm_textured_model_obj = os.path.join(r['out_dir'], tree.odm_textured_model_obj)

        if io.file_exists(odm_textured_model_obj) and not rerun_cell:
            log.ODM_WARNING('Found a valid ODM Texture file in: %s'
                            % odm_textured_model_obj)
            continue

        log.ODM_DEBUG('Writing MVS Textured file in: %s'
                      % odm_textured_model_obj)

        # Translate boolean cell params into mvs-texturing CLI switches
        # (empty string when the switch is off).
        skipGeometricVisibilityTest = "--skip_geometric_visibility_test" if self.params.skip_vis_test else ""
        skipGlobalSeamLeveling = "--skip_global_seam_leveling" if self.params.skip_glob_seam_leveling else ""
        skipLocalSeamLeveling = "--skip_local_seam_leveling" if self.params.skip_loc_seam_leveling else ""
        skipHoleFilling = "--skip_hole_filling" if self.params.skip_hole_fill else ""
        keepUnseenFaces = "--keep_unseen_faces" if self.params.keep_unseen_faces else ""
        nadir = '--nadir_mode' if r['nadir'] else ""

        # mvstex definitions
        kwargs = {
            'bin': context.mvstex_path,
            'out_dir': io.join_paths(r['out_dir'], "odm_textured_model"),
            'model': r['model'],
            'dataTerm': self.params.data_term,
            'outlierRemovalType': self.params.outlier_rem_type,
            'skipGeometricVisibilityTest': skipGeometricVisibilityTest,
            'skipGlobalSeamLeveling': skipGlobalSeamLeveling,
            'skipLocalSeamLeveling': skipLocalSeamLeveling,
            'skipHoleFilling': skipHoleFilling,
            'keepUnseenFaces': keepUnseenFaces,
            'toneMapping': self.params.tone_mapping,
            'nadirMode': nadir,
            'nadirWeight': 2 ** args.texturing_nadir_weight - 1,
            'nvm_file': io.join_paths(tree.opensfm, "reconstruction.nvm")
        }

        # Make sure tmp directory is empty
        mvs_tmp_dir = os.path.join(r['out_dir'], 'tmp')
        if io.dir_exists(mvs_tmp_dir):
            log.ODM_INFO("Removing old tmp directory {}".format(mvs_tmp_dir))
            shutil.rmtree(mvs_tmp_dir)

        # run texturing binary
        system.run('{bin} {nvm_file} {model} {out_dir} '
                   '-d {dataTerm} -o {outlierRemovalType} '
                   '-t {toneMapping} '
                   '{skipGeometricVisibilityTest} '
                   '{skipGlobalSeamLeveling} '
                   '{skipLocalSeamLeveling} '
                   '{skipHoleFilling} '
                   '{keepUnseenFaces} '
                   '{nadirMode} '
                   '-n {nadirWeight}'.format(**kwargs))

    outputs.reconstruction = reconstruction

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'Texturing')

    log.ODM_INFO('Running ODM Texturing Cell - Finished')
    return ecto.OK if args.end_with != 'mvs_texturing' else ecto.QUIT
def process(args, current_path, max_concurrency, reconstruction):
    """Render an orthophoto from the textured model(s) under *current_path*.

    Runs the odm_orthophoto binary over the textured model (one model per
    band for multi-camera reconstructions), then either georeferences the
    render into a GeoTIFF (with cutline/feathering post-processing) or, when
    no georeferencing is available, pseudo-georeferences the raw render.

    Produces ``<current_path>/orthophoto/odm_orthophoto.tif`` as its final
    output; does nothing if that file already exists.
    """
    #args = vars(args)
    # NOTE(review): several of the path variables below (gcp/las/dem/log
    # paths, orthophoto_cutline) are defined but never used in this
    # function — presumably kept parallel to a shared tree layout; verify
    # before removing.
    orthophoto_cutline = True
    odm_orthophoto = io.join_paths(current_path, 'orthophoto')
    odm_orthophoto_path = odm_orthophoto
    odm_orthophoto_render = io.join_paths(odm_orthophoto_path, 'odm_orthophoto_render.tif')
    odm_orthophoto_tif = io.join_paths(odm_orthophoto_path, 'odm_orthophoto.tif')
    odm_orthophoto_corners = io.join_paths(odm_orthophoto_path, 'odm_orthophoto_corners.tif')
    odm_orthophoto_log = io.join_paths(odm_orthophoto_path, 'odm_orthophoto_log.tif')
    odm_orthophoto_tif_log = io.join_paths(odm_orthophoto_path, 'gdal_translate_log.txt')
    # NOTE(review): 2.5D and 3D georeferencing dirs point at the same
    # folder here — confirm this is intentional.
    odm_25dgeoreferencing = io.join_paths(current_path, 'odm_georeferencing')
    odm_georeferencing = io.join_paths(current_path, 'odm_georeferencing')

    odm_georeferencing_coords = io.join_paths(odm_georeferencing, 'coords.txt')
    odm_georeferencing_gcp = io.find('gcp_list.txt', current_path)
    odm_georeferencing_gcp_utm = io.join_paths(odm_georeferencing, 'gcp_list_utm.txt')
    odm_georeferencing_utm_log = io.join_paths(
        odm_georeferencing, 'odm_georeferencing_utm_log.txt')
    odm_georeferencing_log = 'odm_georeferencing_log.txt'
    odm_georeferencing_transform_file = 'odm_georeferencing_transform.txt'
    odm_georeferencing_proj = 'proj.txt'
    odm_georeferencing_model_txt_geo = 'odm_georeferencing_model_geo.txt'
    odm_georeferencing_model_obj_geo = 'odm_textured_model_geo.obj'
    odm_georeferencing_xyz_file = io.join_paths(odm_georeferencing, 'odm_georeferenced_model.csv')
    odm_georeferencing_las_json = io.join_paths(odm_georeferencing, 'las.json')
    odm_georeferencing_model_laz = io.join_paths(
        odm_georeferencing, 'odm_georeferenced_model.laz')
    odm_georeferencing_model_las = io.join_paths(
        odm_georeferencing, 'odm_georeferenced_model.las')
    odm_georeferencing_dem = io.join_paths(odm_georeferencing, 'odm_georeferencing_model_dem.tif')

    opensfm_reconstruction = io.join_paths(current_path, 'reconstruction.json')
    odm_texturing = io.join_paths(current_path, 'mvs')
    odm_textured_model_obj = io.join_paths(odm_texturing, 'odm_textured_model.obj')
    images_dir = io.join_paths(current_path, 'images')

    reconstruction = reconstruction
    verbose = ''  #"-verbose"

    # define paths and create working directories
    system.mkdir_p(odm_orthophoto)

    # Skip everything if the final GeoTIFF already exists.
    if not io.file_exists(odm_orthophoto_tif):
        gsd_error_estimate = 0.1
        ignore_resolution = False
        if not reconstruction.is_georeferenced():
            # Match DEMs
            gsd_error_estimate = -3
            ignore_resolution = True

        # Resolution in px/m derived from a capped GSD (cm/px); the
        # orthophoto resolution is hard-coded to 5 here.
        orthophoto_resolution = 5
        resolution = 1.0 / (
            gsd.cap_resolution(orthophoto_resolution,
                               opensfm_reconstruction,
                               gsd_error_estimate=gsd_error_estimate,
                               ignore_gsd=True,
                               ignore_resolution=ignore_resolution,
                               has_gcp=reconstruction.has_gcp()) / 100.0)

        # odm_orthophoto definitions
        kwargs = {
            'bin': context.odm_modules_path,
            'log': odm_orthophoto_log,
            'ortho': odm_orthophoto_render,
            'corners': odm_orthophoto_corners,
            'res': resolution,
            'bands': '',
            'verbose': verbose
        }

        # Check if the georef object is initialized
        # (during a --rerun this might not be)
        # TODO: this should be moved to a more central location?
        if reconstruction.is_georeferenced() and not reconstruction.georef.valid_utm_offsets():
            georeferencing_dir = odm_georeferencing  #if args.use_3dmesh and not args.skip_3dmodel else odm_25dgeoreferencing
            odm_georeferencing_model_txt_geo_file = os.path.join(
                georeferencing_dir, odm_georeferencing_model_txt_geo)

            if io.file_exists(odm_georeferencing_model_txt_geo_file):
                reconstruction.georef.extract_offsets(
                    odm_georeferencing_model_txt_geo_file)
            else:
                log.ODM_WARNING('Cannot read UTM offset from {}.'.format(
                    odm_georeferencing_model_txt_geo_file))

        # Collect the textured model(s) to render: one per band for
        # multi-camera datasets, otherwise a single model.
        models = []
        base_dir = odm_texturing

        if reconstruction.is_georeferenced():
            model_file = odm_georeferencing_model_obj_geo
        else:
            model_file = odm_textured_model_obj

        if reconstruction.multi_camera:
            for band in reconstruction.multi_camera:
                # The first band is the primary one and lives in base_dir;
                # secondary bands live in a lowercase subdirectory.
                primary = band == reconstruction.multi_camera[0]
                subdir = ""
                if not primary:
                    subdir = band['name'].lower()
                models.append(os.path.join(base_dir, subdir, model_file))
            kwargs['bands'] = '-bands %s' % (','.join([
                quote(b['name'].lower())
                for b in reconstruction.multi_camera
            ]))
        else:
            models.append(os.path.join(base_dir, model_file))

        kwargs['models'] = ','.join(map(quote, models))

        # run odm_orthophoto
        system.run(
            '{bin}/odm_orthophoto -inputFiles {models} '
            '-logFile {log} -outputFile {ortho} -resolution {res} {verbose} '
            '-outputCornerFile {corners} {bands}'.format(**kwargs))

        # Create georeferenced GeoTiff
        geotiffcreated = False

        if reconstruction.is_georeferenced() and reconstruction.georef.valid_utm_offsets():
            ulx = uly = lrx = lry = 0.0
            # First line of the corners file holds 4 local coordinates;
            # translate them back to UTM using the stored offsets.
            with open(odm_orthophoto_corners) as f:
                for lineNumber, line in enumerate(f):
                    if lineNumber == 0:
                        tokens = line.split(' ')
                        if len(tokens) == 4:
                            ulx = float(tokens[0]) + \
                                float(reconstruction.georef.utm_east_offset)
                            lry = float(tokens[1]) + \
                                float(reconstruction.georef.utm_north_offset)
                            lrx = float(tokens[2]) + \
                                float(reconstruction.georef.utm_east_offset)
                            uly = float(tokens[3]) + \
                                float(reconstruction.georef.utm_north_offset)
            log.ODM_INFO('Creating GeoTIFF')

            orthophoto_vars = orthophoto.get_orthophoto_vars(args)

            kwargs = {
                'ulx': ulx,
                'uly': uly,
                'lrx': lrx,
                'lry': lry,
                'vars': ' '.join([
                    '-co %s=%s' % (k, orthophoto_vars[k])
                    for k in orthophoto_vars
                ]),
                'proj': reconstruction.georef.proj4(),
                'input': odm_orthophoto_render,
                'output': odm_orthophoto_tif,
                'log': odm_orthophoto_tif_log,
                'max_memory': get_max_memory(),
            }

            system.run('gdal_translate -a_ullr {ulx} {uly} {lrx} {lry} '
                       '{vars} '
                       '-a_srs \"{proj}\" '
                       '--config GDAL_CACHEMAX {max_memory}% '
                       '--config GDAL_TIFF_INTERNAL_MASK YES '
                       '{input} {output} > {log}'.format(**kwargs))

            bounds_file_path = os.path.join(
                odm_georeferencing, 'odm_georeferenced_model.bounds.gpkg')

            # Cutline computation, before cropping
            # We want to use the full orthophoto, not the cropped one.
            pio = True
            if pio:
                cutline_file = os.path.join(odm_orthophoto, "cutline.gpkg")

                compute_cutline(odm_orthophoto_tif,
                                bounds_file_path,
                                cutline_file,
                                max_concurrency,
                                tmpdir=os.path.join(odm_orthophoto, "grass_cutline_tmpdir"),
                                scale=0.25)

                orthophoto.compute_mask_raster(odm_orthophoto_tif, cutline_file,
                                               os.path.join(odm_orthophoto, "odm_orthophoto_cut.tif"),
                                               blend_distance=20, only_max_coords_feature=True)

            orthophoto.post_orthophoto_steps(args, bounds_file_path, odm_orthophoto_tif)

            # Generate feathered orthophoto also
            if pio:
                orthophoto.feather_raster(odm_orthophoto_tif,
                                          os.path.join(odm_orthophoto, "odm_orthophoto_feathered.tif"),
                                          blend_distance=20)

            geotiffcreated = True
        if not geotiffcreated:
            # No georeferencing available: attach pseudo-georeferencing to
            # the raw render and promote it to the final output name.
            if io.file_exists(odm_orthophoto_render):
                pseudogeo.add_pseudo_georeferencing(odm_orthophoto_render)
                log.ODM_INFO("Renaming %s --> %s" % (odm_orthophoto_render, odm_orthophoto_tif))
                os.rename(odm_orthophoto_render, odm_orthophoto_tif)
            else:
                log.ODM_WARNING(
                    "Could not generate an orthophoto (it did not render)")
    else:
        log.ODM_WARNING('Found a valid orthophoto in: %s' % odm_orthophoto_tif)
def process(self, inputs, outputs):
    """Generate the orthophoto for this project (ecto cell entry point).

    Renders the orthophoto from the textured model via the odm_orthophoto
    binary, then — when georeferencing data is available — converts the
    render into a georeferenced GeoTIFF with gdal_translate, optionally
    cropping it and building overviews.

    Returns ``ecto.OK``, or ``ecto.QUIT`` when the pipeline ends here.
    """
    # Benchmarking
    start_time = system.now_raw()

    log.ODM_INFO('Running ODM Orthophoto Cell')

    # get inputs
    args = self.inputs.args
    tree = self.inputs.tree
    reconstruction = inputs.reconstruction
    verbose = '-verbose' if self.params.verbose else ''

    # define paths and create working directories
    system.mkdir_p(tree.odm_orthophoto)

    # check if we rerun cell or not
    rerun_cell = (args.rerun is not None and
                  args.rerun == 'odm_orthophoto') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and
                  'odm_orthophoto' in args.rerun_from)

    if not io.file_exists(tree.odm_orthophoto_file) or rerun_cell:

        # odm_orthophoto definitions
        kwargs = {
            'bin': context.odm_modules_path,
            'log': tree.odm_orthophoto_log,
            'ortho': tree.odm_orthophoto_file,
            'corners': tree.odm_orthophoto_corners,
            # px/m resolution derived from the capped GSD (cm/px)
            'res': 1.0 / (gsd.cap_resolution(self.params.resolution,
                                             tree.opensfm_reconstruction,
                                             ignore_gsd=args.ignore_gsd) / 100.0),
            'verbose': verbose
        }

        # Have geo coordinates?
        georef = reconstruction.georef

        # Check if the georef object is initialized
        # (during a --rerun this might not be)
        # TODO: we should move this to a more central
        # location (perhaps during the dataset initialization)
        if georef and not georef.utm_east_offset:
            georeferencing_dir = tree.odm_georeferencing if args.use_3dmesh and not args.skip_3dmodel else tree.odm_25dgeoreferencing
            odm_georeferencing_model_txt_geo_file = os.path.join(georeferencing_dir,
                                                                 tree.odm_georeferencing_model_txt_geo)

            if io.file_exists(odm_georeferencing_model_txt_geo_file):
                georef.extract_offsets(odm_georeferencing_model_txt_geo_file)
            else:
                log.ODM_WARNING('Cannot read UTM offset from {}. '
                                'An orthophoto will not be generated.'.format(
                                    odm_georeferencing_model_txt_geo_file))

        # Pick the input model: georeferenced obj when available,
        # plain textured obj otherwise; 3D vs 2.5D texturing dir per args.
        if georef:
            if args.use_3dmesh:
                kwargs['model_geo'] = os.path.join(tree.odm_texturing, tree.odm_georeferencing_model_obj_geo)
            else:
                kwargs['model_geo'] = os.path.join(tree.odm_25dtexturing, tree.odm_georeferencing_model_obj_geo)
        else:
            if args.use_3dmesh:
                kwargs['model_geo'] = os.path.join(tree.odm_texturing, tree.odm_textured_model_obj)
            else:
                kwargs['model_geo'] = os.path.join(tree.odm_25dtexturing, tree.odm_textured_model_obj)

        # run odm_orthophoto
        system.run('{bin}/odm_orthophoto -inputFile {model_geo} '
                   '-logFile {log} -outputFile {ortho} -resolution {res} {verbose} '
                   '-outputCornerFile {corners}'.format(**kwargs))

        # Create georeferenced GeoTiff
        geotiffcreated = False

        if georef and georef.projection and georef.utm_east_offset and georef.utm_north_offset:
            ulx = uly = lrx = lry = 0.0
            # First line of the corners file holds 4 local coordinates;
            # shift them back into UTM with the stored offsets.
            with open(tree.odm_orthophoto_corners) as f:
                for lineNumber, line in enumerate(f):
                    if lineNumber == 0:
                        tokens = line.split(' ')
                        if len(tokens) == 4:
                            ulx = float(tokens[0]) + \
                                float(georef.utm_east_offset)
                            lry = float(tokens[1]) + \
                                float(georef.utm_north_offset)
                            lrx = float(tokens[2]) + \
                                float(georef.utm_east_offset)
                            uly = float(tokens[3]) + \
                                float(georef.utm_north_offset)

            log.ODM_INFO('Creating GeoTIFF')

            kwargs = {
                'ulx': ulx,
                'uly': uly,
                'lrx': lrx,
                'lry': lry,
                'tiled': '' if self.params.no_tiled else '-co TILED=yes ',
                'compress': self.params.compress,
                # PREDICTOR=2 only makes sense for LZW/DEFLATE compression
                'predictor': '-co PREDICTOR=2 ' if self.params.compress in
                             ['LZW', 'DEFLATE'] else '',
                'proj': georef.projection.srs,
                'bigtiff': self.params.bigtiff,
                'png': tree.odm_orthophoto_file,
                'tiff': tree.odm_orthophoto_tif,
                'log': tree.odm_orthophoto_tif_log,
                'max_memory': get_max_memory(),
                'threads': self.params.max_concurrency
            }

            system.run('gdal_translate -a_ullr {ulx} {uly} {lrx} {lry} '
                       '{tiled} '
                       '-co BIGTIFF={bigtiff} '
                       '-co COMPRESS={compress} '
                       '{predictor} '
                       '-co BLOCKXSIZE=512 '
                       '-co BLOCKYSIZE=512 '
                       '-co NUM_THREADS={threads} '
                       '-a_srs \"{proj}\" '
                       '--config GDAL_CACHEMAX {max_memory}% '
                       '{png} {tiff} > {log}'.format(**kwargs))

            if args.crop > 0:
                shapefile_path = os.path.join(tree.odm_georeferencing,
                                              'odm_georeferenced_model.bounds.shp')
                # Re-state the creation options so cropping preserves them.
                Cropper.crop(shapefile_path, tree.odm_orthophoto_tif, {
                    'TILED': 'NO' if self.params.no_tiled else 'YES',
                    'COMPRESS': self.params.compress,
                    'PREDICTOR': '2' if self.params.compress in ['LZW', 'DEFLATE'] else '1',
                    'BIGTIFF': self.params.bigtiff,
                    'BLOCKXSIZE': 512,
                    'BLOCKYSIZE': 512,
                    'NUM_THREADS': self.params.max_concurrency
                })

            if self.params.build_overviews:
                log.ODM_DEBUG("Building Overviews")
                kwargs = {
                    'orthophoto': tree.odm_orthophoto_tif,
                    'log': tree.odm_orthophoto_gdaladdo_log
                }

                # Run gdaladdo
                system.run('gdaladdo -ro -r average '
                           '--config BIGTIFF_OVERVIEW IF_SAFER '
                           '--config COMPRESS_OVERVIEW JPEG '
                           '{orthophoto} 2 4 8 16 > {log}'.format(**kwargs))

            geotiffcreated = True
        if not geotiffcreated:
            log.ODM_WARNING('No geo-referenced orthophoto created due '
                            'to missing geo-referencing or corner coordinates.')
    else:
        log.ODM_WARNING('Found a valid orthophoto in: %s' % tree.odm_orthophoto_file)

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'Orthophoto')

    log.ODM_INFO('Running ODM OrthoPhoto Cell - Finished')
    return ecto.OK if args.end_with != 'odm_orthophoto' else ecto.QUIT
import ecto
import os
import shutil

from scripts.odm_app import ODMApp

# NOTE(review): `config`, `log`, `io` and `system` are used below but are
# not imported in this view of the file — confirm they are imported
# elsewhere (conventionally `from opendm import config, io, log, system`).

if __name__ == '__main__':
    args = config.config()

    log.ODM_INFO('Initializing OpenDroneMap app - %s' % system.now())

    # Add project dir if doesn't exist
    args.project_path = io.join_paths(args.project_path, args.name)
    if not io.dir_exists(args.project_path):
        log.ODM_WARNING('Directory %s does not exist. Creating it now.' % args.name)
        system.mkdir_p(os.path.abspath(args.project_path))

    # If user asks to rerun everything, delete all of the existing progress directories.
    # TODO: Move this somewhere it's not hard-coded
    if args.rerun_all:
        # Bug fix: the previous `os.system("rm -rf " + project_path + "subdir/")`
        # concatenation produced paths like "<project>images_resize" whenever
        # project_path lacked a trailing separator. Join the paths properly
        # and remove each tree; ignore_errors keeps rm -rf's tolerance of
        # missing directories.
        for subdir in ('images_resize', 'odm_georeferencing', 'odm_meshing',
                       'odm_orthophoto', 'odm_texturing', 'opensfm', 'pmvs'):
            shutil.rmtree(os.path.join(args.project_path, subdir),
                          ignore_errors=True)

    # create an instance of my App BlackBox
    # internally configure all tasks
def process(self, args, outputs):
    """Merge stage for split-merge (large) datasets.

    When ``outputs['large']`` is set, merges the per-submodel artifacts —
    point clouds (via Entwine/EPT), crop bounds, orthophotos (gdal_merge +
    cutline-blended gdalwarp) and DEMs — into the top-level project tree,
    then stops the pipeline by clearing ``self.next_stage``. For normal
    datasets it does nothing.
    """
    tree = outputs['tree']
    reconstruction = outputs['reconstruction']

    if outputs['large']:
        if not os.path.exists(tree.submodels_path):
            log.ODM_ERROR(
                "We reached the merge stage, but %s folder does not exist. Something must have gone wrong at an earlier stage. Check the log and fix possible problem before restarting?"
                % tree.submodels_path)
            exit(1)

        # Merge point clouds
        if args.merge in ['all', 'pointcloud']:
            if not io.dir_exists(tree.entwine_pointcloud) or self.rerun():
                all_point_clouds = get_submodel_paths(
                    tree.submodels_path, "odm_georeferencing",
                    "odm_georeferenced_model.laz")

                try:
                    # Build a single EPT octree from all submodel LAZ files.
                    entwine.build(all_point_clouds,
                                  tree.entwine_pointcloud,
                                  max_concurrency=args.max_concurrency,
                                  rerun=self.rerun())
                except Exception as e:
                    # Best-effort: a failed merge should not kill the stage.
                    log.ODM_WARNING(
                        "Could not merge EPT point cloud: %s (skipping)" % str(e))
            else:
                log.ODM_WARNING("Found merged EPT point cloud in %s" %
                                tree.entwine_pointcloud)

            if not io.file_exists(
                    tree.odm_georeferencing_model_laz) or self.rerun():
                if io.dir_exists(tree.entwine_pointcloud):
                    try:
                        # Export the merged EPT dataset back to a single LAZ.
                        system.run('pdal translate "ept://{}" "{}"'.format(
                            tree.entwine_pointcloud,
                            tree.odm_georeferencing_model_laz))
                    except Exception as e:
                        log.ODM_WARNING(
                            "Cannot export EPT dataset to LAZ: %s" % str(e))
                else:
                    log.ODM_WARNING(
                        "No EPT point cloud found (%s), skipping LAZ conversion)"
                        % tree.entwine_pointcloud)
            else:
                log.ODM_WARNING("Found merged point cloud in %s" %
                                tree.odm_georeferencing_model_laz)

        self.update_progress(25)

        # Merge crop bounds
        merged_bounds_file = os.path.join(
            tree.odm_georeferencing, 'odm_georeferenced_model.bounds.gpkg')
        if not io.file_exists(merged_bounds_file) or self.rerun():
            all_bounds = get_submodel_paths(
                tree.submodels_path, 'odm_georeferencing',
                'odm_georeferenced_model.bounds.gpkg')
            log.ODM_INFO("Merging all crop bounds: %s" % all_bounds)
            if len(all_bounds) > 0:
                # Calculate a new crop area
                # based on the convex hull of all crop areas of all submodels
                # (without a buffer, otherwise we are double-cropping)
                Cropper.merge_bounds(all_bounds, merged_bounds_file, 0)
            else:
                log.ODM_WARNING("No bounds found for any submodel.")

        # Merge orthophotos
        if args.merge in ['all', 'orthophoto']:
            if not io.dir_exists(tree.odm_orthophoto):
                system.mkdir_p(tree.odm_orthophoto)

            if not io.file_exists(tree.odm_orthophoto_tif) or self.rerun():
                all_orthos_and_cutlines = get_all_submodel_paths(
                    tree.submodels_path,
                    os.path.join("odm_orthophoto", "odm_orthophoto.tif"),
                    os.path.join("odm_orthophoto", "cutline.gpkg"),
                )

                if len(all_orthos_and_cutlines) > 1:
                    log.ODM_INFO(
                        "Found %s submodels with valid orthophotos and cutlines"
                        % len(all_orthos_and_cutlines))

                    # TODO: histogram matching via rasterio
                    # currently parts have different color tones
                    merged_geotiff = os.path.join(
                        tree.odm_orthophoto, "odm_orthophoto.merged.tif")

                    kwargs = {
                        'orthophoto_merged': merged_geotiff,
                        'input_files': ' '.join(
                            map(lambda i: quote(i[0]), all_orthos_and_cutlines)),
                        'max_memory': get_max_memory(),
                        'threads': args.max_concurrency,
                    }

                    # use bounds as cutlines (blending)
                    if io.file_exists(merged_geotiff):
                        os.remove(merged_geotiff)

                    # First pass: mosaic all submodel orthophotos into one file.
                    system.run('gdal_merge.py -o {orthophoto_merged} '
                               #'-createonly '
                               '-co "BIGTIFF=YES" '
                               '-co "BLOCKXSIZE=512" '
                               '-co "BLOCKYSIZE=512" '
                               '--config GDAL_CACHEMAX {max_memory}% '
                               '{input_files} '.format(**kwargs))

                    # Second pass: re-warp each submodel with its cutline,
                    # blending 20px across the seams.
                    for ortho_cutline in all_orthos_and_cutlines:
                        kwargs['input_file'], kwargs[
                            'cutline'] = ortho_cutline

                        # Note: cblend has a high performance penalty
                        system.run(
                            'gdalwarp -cutline {cutline} '
                            '-cblend 20 '
                            '-r bilinear -multi '
                            '-wo NUM_THREADS={threads} '
                            '--config GDAL_CACHEMAX {max_memory}% '
                            '{input_file} {orthophoto_merged}'.format(
                                **kwargs))

                    # Apply orthophoto settings (compression, tiling, etc.)
                    orthophoto_vars = orthophoto.get_orthophoto_vars(args)

                    if io.file_exists(tree.odm_orthophoto_tif):
                        os.remove(tree.odm_orthophoto_tif)

                    kwargs = {
                        'vars': ' '.join([
                            '-co %s=%s' % (k, orthophoto_vars[k])
                            for k in orthophoto_vars
                        ]),
                        'max_memory': get_max_memory(),
                        'merged': merged_geotiff,
                        'log': tree.odm_orthophoto_tif_log,
                        'orthophoto': tree.odm_orthophoto_tif,
                    }

                    system.run(
                        'gdal_translate '
                        '{vars} '
                        '--config GDAL_CACHEMAX {max_memory}% '
                        '{merged} {orthophoto} > {log}'.format(**kwargs))

                    # The intermediate mosaic is no longer needed.
                    os.remove(merged_geotiff)

                    # Crop
                    if args.crop > 0:
                        Cropper.crop(merged_bounds_file,
                                     tree.odm_orthophoto_tif,
                                     orthophoto_vars)

                    # Overviews
                    if args.build_overviews:
                        orthophoto.build_overviews(tree.odm_orthophoto_tif)

                elif len(all_orthos_and_cutlines) == 1:
                    # Simply copy
                    log.ODM_WARNING(
                        "A single orthophoto/cutline pair was found between all submodels."
                    )
                    shutil.copyfile(all_orthos_and_cutlines[0][0],
                                    tree.odm_orthophoto_tif)
                else:
                    log.ODM_WARNING(
                        "No orthophoto/cutline pairs were found in any of the submodels. No orthophoto will be generated."
                    )
            else:
                log.ODM_WARNING("Found merged orthophoto in %s" %
                                tree.odm_orthophoto_tif)

        self.update_progress(75)

        # Merge DEMs
        def merge_dems(dem_filename, human_name):
            # Merge all submodel DEMs named `dem_filename` into the project
            # odm_dem folder; `human_name` is "DSM" or "DTM" for logging.
            if not io.dir_exists(tree.path('odm_dem')):
                system.mkdir_p(tree.path('odm_dem'))

            dem_file = tree.path("odm_dem", dem_filename)
            if not io.file_exists(dem_file) or self.rerun():
                all_dems = get_submodel_paths(tree.submodels_path, "odm_dem",
                                              dem_filename)
                log.ODM_INFO("Merging %ss" % human_name)

                # Merge
                dem_vars = utils.get_dem_vars(args)
                eu_map_source = None  # Default

                # Use DSM's euclidean map for DTMs
                # (requires the DSM to be computed)
                if human_name == "DTM":
                    eu_map_source = "dsm"

                euclidean_merge_dems(all_dems,
                                     dem_file,
                                     dem_vars,
                                     euclidean_map_source=eu_map_source)

                if io.file_exists(dem_file):
                    # Crop
                    if args.crop > 0:
                        Cropper.crop(merged_bounds_file, dem_file, dem_vars)
                    log.ODM_INFO("Created %s" % dem_file)
                else:
                    log.ODM_WARNING("Cannot merge %s, %s was not created" %
                                    (human_name, dem_file))
            else:
                log.ODM_WARNING("Found merged %s in %s" %
                                (human_name, dem_filename))

        if args.merge in ['all', 'dem'] and args.dsm:
            merge_dems("dsm.tif", "DSM")

        if args.merge in ['all', 'dem'] and args.dtm:
            merge_dems("dtm.tif", "DTM")

        # Stop the pipeline short! We're done.
        self.next_stage = None
    else:
        log.ODM_INFO("Normal dataset, nothing to merge.")
        self.progress = 0.0
def process(self, inputs, outputs):
    """Texture the mesh with the legacy odm_texturing binary.

    Undistorts the images via OpenSfM's undistort_radial script, exports a
    Bundler file, then runs odm_texturing to produce the textured model."""
    # Benchmarking
    start_time = system.now_raw()

    log.ODM_INFO('Running ODM Texturing Cell')

    # get inputs
    args = self.inputs.args
    tree = self.inputs.tree
    verbose = '-verbose' if self.params.verbose else ''

    # define paths and create working directories
    system.mkdir_p(tree.odm_texturing)

    # Should this cell run again even if its outputs already exist?
    rerun_cell = (args.rerun is not None and
                  args.rerun == 'odm_texturing') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and
                  'odm_texturing' in args.rerun_from)

    # Undistort radial distortion
    if not os.path.isdir(tree.odm_texturing_undistorted_image_path) or rerun_cell:
        undistort_script = os.path.join(context.odm_modules_src_path,
                                        'odm_slam/src/undistort_radial.py')
        undistort_cmd = ' '.join([
            'cd {} &&'.format(tree.opensfm),
            'PYTHONPATH={}:{}'.format(context.pyopencv_path,
                                      context.opensfm_path),
            'python',
            undistort_script,
            '--output',
            tree.odm_texturing_undistorted_image_path,
            tree.opensfm,
        ])
        system.run(undistort_cmd)

        # Export the OpenSfM reconstruction to Bundler format.
        export_cmd = 'PYTHONPATH=%s %s/bin/export_bundler %s' % (
            context.pyopencv_path, context.opensfm_path, tree.opensfm)
        system.run(export_cmd)
    else:
        log.ODM_WARNING(
            'Found a valid Bundler file in: %s' %
            (tree.opensfm_reconstruction))

    if not io.file_exists(tree.odm_textured_model_obj) or rerun_cell:
        log.ODM_DEBUG('Writing ODM Textured file in: %s'
                      % tree.odm_textured_model_obj)

        # odm_texturing definitions
        kwargs = {
            'bin': context.odm_modules_path,
            'out_dir': tree.odm_texturing,
            'bundle': tree.opensfm_bundle,
            'imgs_path': tree.odm_texturing_undistorted_image_path,
            'imgs_list': tree.opensfm_bundle_list,
            'model': tree.odm_mesh,
            # NOTE(review): attribute name carries a historical typo
            # ("texuring") — it must match the tree definition, do not fix
            # it only here.
            'log': tree.odm_texuring_log,
            'resize': self.params.resize,
            'resolution': self.params.resolution,
            'size': self.params.size,
            'verbose': verbose
        }

        # run texturing binary
        system.run('{bin}/odm_texturing -bundleFile {bundle} '
                   '-imagesPath {imgs_path} -imagesListPath {imgs_list} '
                   '-inputModelPath {model} -outputFolder {out_dir}/ '
                   '-textureResolution {resolution} -bundleResizedTo {resize} {verbose} '
                   '-textureWithSize {size} -logFile {log}'.format(**kwargs))
    else:
        log.ODM_WARNING('Found a valid ODM Texture file in: %s'
                        % tree.odm_textured_model_obj)

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'Texturing')

    log.ODM_INFO('Running ODM Texturing Cell - Finished')
    return ecto.OK if args.end_with != 'odm_texturing' else ecto.QUIT
def process(self, inputs, outputs):
    """Load the input images, build the photo list and initialize the
    reconstruction (extracting UTM coordinates or applying GCP/proj data).

    Publishes the reconstruction on ``outputs.reconstruction`` and writes
    the projection string to the georeferencing folder for later stages.
    """
    # check if the extension is supported
    def supported_extension(file_name):
        (pathfn, ext) = os.path.splitext(file_name)
        return ext.lower() in context.supported_extensions

    # Get supported images from dir
    def get_images(in_dir):
        # filter images for its extension type
        log.ODM_DEBUG(in_dir)
        return [f for f in io.get_files_list(in_dir) if supported_extension(f)]

    log.ODM_INFO('Running ODM Load Dataset Cell')

    # get inputs
    tree = self.inputs.tree
    args = self.inputs.args

    # get images directory
    input_dir = tree.input_images
    images_dir = tree.dataset_raw

    if not io.dir_exists(images_dir):
        log.ODM_INFO("Project directory %s doesn't exist. Creating it now. " % images_dir)
        system.mkdir_p(images_dir)
        # Copy the supported images into the project directory.
        # (was a side-effect list comprehension bound to an unused local)
        for f in get_images(input_dir):
            copyfile(io.join_paths(input_dir, f),
                     io.join_paths(images_dir, f))

    # define paths and create working directories
    system.mkdir_p(tree.odm_georeferencing)
    if args.use_25dmesh:
        system.mkdir_p(tree.odm_25dgeoreferencing)

    log.ODM_DEBUG('Loading dataset from: %s' % images_dir)

    files = get_images(images_dir)

    if files:
        # create ODMPhoto list
        path_files = [io.join_paths(images_dir, f) for f in files]

        photos = []
        with open(tree.dataset_list, 'w') as dataset_list:
            # Bug fix: the loop variable used to be named `files`,
            # shadowing the image list above — renamed for clarity.
            for path_file in path_files:
                photos.append(make_odm_photo(self.params.force_focal,
                                             self.params.force_ccd,
                                             path_file))
                dataset_list.write(photos[-1].filename + '\n')

        log.ODM_INFO('Found %s usable images' % len(photos))
    else:
        log.ODM_ERROR('Not enough supported images in %s' % images_dir)
        return ecto.QUIT

    # append photos to cell output
    if not self.params.proj:
        if tree.odm_georeferencing_gcp:
            outputs.reconstruction = types.ODM_Reconstruction(
                photos, coords_file=tree.odm_georeferencing_gcp)
        else:
            verbose = '-verbose' if self.params.verbose else ''
            # Generate UTM from images
            # odm_georeference definitions
            kwargs = {
                'bin': context.odm_modules_path,
                'imgs': tree.dataset_raw,
                'imgs_list': tree.dataset_list,
                'coords': tree.odm_georeferencing_coords,
                'log': tree.odm_georeferencing_utm_log,
                'verbose': verbose
            }

            # run UTM extraction binary
            extract_utm = system.run_and_return(
                '{bin}/odm_extract_utm -imagesPath {imgs}/ '
                '-imageListFile {imgs_list} -outputCoordFile {coords} {verbose} '
                '-logFile {log}'.format(**kwargs))

            if extract_utm != '':
                log.ODM_WARNING('Could not generate coordinates file. '
                                'Ignore if there is a GCP file. Error: %s'
                                % extract_utm)

            outputs.reconstruction = types.ODM_Reconstruction(
                photos, coords_file=tree.odm_georeferencing_coords)
    else:
        outputs.reconstruction = types.ODM_Reconstruction(
            photos, projstring=self.params.proj)

    # Save proj to file for future use
    with open(io.join_paths(tree.odm_georeferencing,
                            tree.odm_georeferencing_proj), 'w') as f:
        f.write(outputs.reconstruction.projection.srs)

    log.ODM_INFO('Running ODM Load Dataset Cell - Finished')
    return ecto.OK if args.end_with != 'dataset' else ecto.QUIT