def no_resize(src_dir, target_dir, rerun_cell, photo):
    """Copy a photo into target_dir unchanged and refresh its metadata.

    :param src_dir: directory holding the raw source images
    :param target_dir: directory the image is copied into
    :param rerun_cell: when True, redo the copy even if the target exists
    :param photo: ODM photo object; mutated in place and returned
    """
    destination = io.join_paths(target_dir, photo.filename)
    source = photo.path_file
    # On a rerun, read from the raw image in src_dir rather than the
    # (possibly already copied) current path.
    if io.file_exists(destination) and rerun_cell:
        source = io.join_paths(src_dir, photo.filename)

    # Nothing to do: target already present and no rerun requested.
    if io.file_exists(destination) and not rerun_cell:
        log.ODM_WARNING('Already copied %s | dimensions: %s x %s' %
                        (photo.filename, photo.width, photo.height))
        return photo

    img = cv2.imread(source)
    io.copy(source, destination)
    photo.path_file = destination
    photo.width = img.shape[1]   # cv2 images are (rows, cols, channels)
    photo.height = img.shape[0]
    photo.update_focal()
    log.ODM_DEBUG('Copied %s | dimensions: %s' % (photo.filename, img.shape))
    return photo
def create_lcd(image_dir, image_ext, ccd_width, ccd_height):
    '''
    Create MicMac-LocalChantierDescripteur.xml

    Writes the camera database XML that MicMac reads, using the camera
    model reported by exiftool for the first image and the given CCD size.

    :param image_dir: string path
    :param image_ext: string
    :param ccd_width: float
    :param ccd_height: float
    :return:
    '''
    image_wildcard = '*.{}'.format(image_ext)
    image_files = glob.glob(io.join_paths(image_dir, image_wildcard))
    # Sort numerically by the digits embedded in each filename
    # (Python 2: filter() on a str returns a str, so int() works).
    image_files.sort(key=lambda f: int(filter(str.isdigit, f)))
    # exiftool's output ends with a newline; strip it so the model name
    # does not break the <Name>/<ShortName> tags across lines.
    camera_model = subprocess.check_output(
        ['exiftool', '-Model', '-T', image_files[0]]).strip()
    with open(io.join_paths(image_dir,
                            'MicMac-LocalChantierDescripteur.xml'), 'wb') as f:
        f.write('<Global>\n')
        f.write('\t<ChantierDescripteur>\n')
        f.write('\t\t<LocCamDataBase>\n')
        f.write('\t\t\t<CameraEntry>\n')
        name = '\t\t\t\t<Name> {} </Name>\n'.format(camera_model)
        f.write(name)
        ccd_size = '\t\t\t\t<SzCaptMm> {} {} </SzCaptMm>\n'.format(
            ccd_width, ccd_height)
        f.write(ccd_size)
        short_name = '\t\t\t\t<ShortName> {} </ShortName>\n'.format(
            camera_model)
        f.write(short_name)
        f.write('\t\t\t</CameraEntry>\n')
        f.write('\t\t</LocCamDataBase>\n')
        f.write('\t</ChantierDescripteur>\n')
        f.write('</Global>\n')
def no_resize(src_dir, target_dir, rerun_cell, photo):
    """Copy a photo into target_dir unchanged and refresh its metadata.

    Fix: cv2.imread returns an array shaped (rows, cols, channels), so
    width is shape[1] and height is shape[0]. The previous code had the
    two swapped (and disagreed with the sibling no_resize/resize helpers).

    :param src_dir: directory holding the raw source images
    :param target_dir: directory the image is copied into
    :param rerun_cell: when True, redo the copy even if the target exists
    :param photo: ODM photo object; mutated in place and returned
    """
    # define image paths
    path_file = photo.path_file
    new_path_file = io.join_paths(target_dir, photo.filename)
    # set raw image path in case we want to rerun cell
    if io.file_exists(new_path_file) and rerun_cell:
        path_file = io.join_paths(src_dir, photo.filename)

    if not io.file_exists(new_path_file) or rerun_cell:
        img = cv2.imread(path_file)
        io.copy(path_file, new_path_file)
        photo.path_file = new_path_file
        photo.width = img.shape[1]   # columns
        photo.height = img.shape[0]  # rows
        photo.update_focal()
        # log message
        log.ODM_DEBUG('Copied %s | dimensions: %s' %
                      (photo.filename, img.shape))
    else:
        # log message
        log.ODM_WARNING('Already copied %s | dimensions: %s x %s' %
                        (photo.filename, photo.width, photo.height))
    return photo
def process(self, inputs, outputs):
    """Load the dataset: optionally copy input images into the project,
    then build the photo list in parallel and publish it on outputs."""

    # check if the extension is supported
    def supported_extension(file_name):
        (pathfn, ext) = os.path.splitext(file_name)
        return ext.lower() in context.supported_extensions

    # Get supported images from dir
    def get_images(in_dir):
        # filter images for its extension type
        log.ODM_DEBUG(in_dir)
        return [f for f in io.get_files_list(in_dir) if supported_extension(f)]

    log.ODM_INFO('Running ODM Load Dataset Cell')

    # get inputs
    # NOTE(review): reads self.inputs rather than the 'inputs' parameter —
    # presumably an ecto convention; confirm against the framework.
    tree = self.inputs.tree

    # get images directory
    input_dir = tree.input_images
    images_dir = tree.dataset_raw
    resize_dir = tree.dataset_resize

    # Check first if a project already exists. This is a mediocre way to check, by checking the resize dir
    if io.dir_exists(resize_dir):
        log.ODM_DEBUG("resize dir: %s" % resize_dir)
        images_dir = resize_dir
    # if first time running, create project directory and copy images over to project/images
    else:
        if not io.dir_exists(images_dir):
            log.ODM_INFO("Project directory %s doesn't exist. Creating it now. " % images_dir)
            system.mkdir_p(images_dir)
            # NOTE(review): 'copied' is never read afterwards; the
            # comprehension is used only for its copy side effect.
            copied = [copyfile(io.join_paths(input_dir, f),
                               io.join_paths(images_dir, f))
                      for f in get_images(input_dir)]

    log.ODM_DEBUG('Loading dataset from: %s' % images_dir)

    files = get_images(images_dir)

    if files:
        # create ODMPhoto list
        path_files = [io.join_paths(images_dir, f) for f in files]
        # Build photos in parallel; make_odm_photo is partially applied
        # with the focal/CCD overrides so only the path varies per task.
        photos = Pool().map(
            partial(make_odm_photo, self.params.force_focal,
                    self.params.force_ccd),
            path_files
        )
        log.ODM_INFO('Found %s usable images' % len(photos))
    else:
        log.ODM_ERROR('Not enough supported images in %s' % images_dir)
        return ecto.QUIT

    # append photos to cell output
    outputs.photos = photos

    log.ODM_INFO('Running ODM Load Dataset Cell - Finished')
    return ecto.OK
def process(self, args, outputs):
    """Run the micasense multispectral preprocessing docker image.

    Moves the ODM input images into a micasense 'in' directory, runs the
    uasdm-micasense container over a host-bound work directory, then copies
    the produced thumbnails back into the ODM input directory (and a
    project-level 'micasense' directory). The in/out work directories are
    removed afterwards regardless of success.
    """
    log.ODM_INFO("args.multispectral = " + str(args.multispectral))
    if (args.multispectral):
        tree = outputs['tree']
        # Host-side path corresponding to /opt/micasense inside the container.
        micasense_host_binding = os.environ['MICASENSE_HOST_BINDING']
        micasense_working = os.path.abspath('/opt/micasense/' + args.name)
        # People running this docker image should have bound the
        # micasense_host_binding directory to /opt/micasense.
        micain = io.join_paths(micasense_working, 'in')
        micaout = io.join_paths(micasense_working, 'out')
        micathumb = io.join_paths(micaout, 'thumbnails')
        # This is the path that ODM expects the input images to exist in
        # (which is our micasense output)
        odmImages = tree.input_images
        odmMicasense = io.join_paths(tree.root_path, 'micasense')
        # Start from clean in/out work directories.
        shutil.rmtree(micain, ignore_errors=True)
        shutil.rmtree(micaout, ignore_errors=True)
        system.mkdir_p(micain)
        system.mkdir_p(odmMicasense)
        try:
            log.ODM_INFO("Copying all files in " + odmImages)
            # Move all images from the odm images directory to our
            # micasense/in directory
            for filePath in glob.glob(odmImages + '/*.*'):
                log.ODM_INFO("Moving file " + filePath + " to " + micain + ".")
                shutil.move(filePath, micain)
            # Wait for the OS to actually move the files...
            time.sleep(1)
            cmd = 'docker run --rm --mount type=bind,src=' + micasense_host_binding + ',dst=/opt/micawork -e MICASENSE_OUT=/opt/micawork/' + args.name + '/out -e MICASENSE_IN=/opt/micawork/' + args.name + '/in uasdm-micasense'
            log.ODM_INFO("Running command " + cmd)
            #subprocess.check_call(cmd, shell=True, stderr=subprocess.STDOUT, stdout=subprocess.STDOUT)
            #self.run_command(cmd)
            # NOTE(review): shell=True with concatenated args.name —
            # presumably args.name is trusted/sanitized upstream; verify.
            subprocess.check_call(cmd, shell=True)
            # Move all micasense output images from micasense/out to the
            # odmImages directory
            for filePath in glob.glob(micathumb + '/*.jpg'):
                log.ODM_INFO("Copying file " + filePath + " to " + odmImages + ".")
                shutil.copy(filePath, odmImages)
                shutil.copy(filePath, odmMicasense)
        finally:
            # Always clean up the work directories, even on failure —
            # note the originals were *moved* out of odmImages above.
            log.ODM_INFO("Removing directory " + micain)
            shutil.rmtree(micain, ignore_errors=True)
            log.ODM_INFO("Removing directory " + micaout)
            shutil.rmtree(micaout, ignore_errors=True)
def resize(src_dir, target_dir, resize_to, rerun_cell, photo):
    """Resize a photo so its largest side equals resize_to and copy EXIF.

    Fix: cv2 images are shaped (rows, cols, channels); width is shape[1]
    and height is shape[0]. The previous code wrote the swapped values
    into both the EXIF PixelX/YDimension tags and photo.width/height
    (the sibling resize variant already has them the right way around).

    :param src_dir: directory holding the raw source images
    :param target_dir: directory the resized image is written into
    :param resize_to: target size in pixels for the largest image side
    :param rerun_cell: when True, redo the resize even if the target exists
    :param photo: ODM photo object; mutated in place and returned
    """
    # define image paths
    path_file = photo.path_file
    new_path_file = io.join_paths(target_dir, photo.filename)
    # set raw image path in case we want to rerun cell
    if io.file_exists(new_path_file) and rerun_cell:
        path_file = io.join_paths(src_dir, photo.filename)

    if not io.file_exists(new_path_file) or rerun_cell:
        # open and resize image with opencv
        img = cv2.imread(path_file)
        # compute new size
        max_side = max(img.shape[0], img.shape[1])
        if max_side <= resize_to:
            log.ODM_WARNING(
                'Resize parameter is greater than or equal to the largest side of the image'
            )
        ratio = float(resize_to) / float(max_side)
        img_r = cv2.resize(img, None, fx=ratio, fy=ratio)
        # write image with opencv
        cv2.imwrite(new_path_file, img_r)
        # read metadata with pyexiv2
        old_meta = pyexiv2.ImageMetadata(path_file)
        new_meta = pyexiv2.ImageMetadata(new_path_file)
        old_meta.read()
        new_meta.read()
        # copy metadata
        old_meta.copy(new_meta)
        # update metadata size: X is width (cols), Y is height (rows)
        new_meta['Exif.Photo.PixelXDimension'] = img_r.shape[1]
        new_meta['Exif.Photo.PixelYDimension'] = img_r.shape[0]
        new_meta.write()
        # update photos array with new values
        photo.path_file = new_path_file
        photo.width = img_r.shape[1]
        photo.height = img_r.shape[0]
        photo.update_focal()
        # log message
        log.ODM_DEBUG('Resized %s | dimensions: %s' %
                      (photo.filename, img_r.shape))
    else:
        # log message
        log.ODM_WARNING('Already resized %s | dimensions: %s x %s' %
                        (photo.filename, photo.width, photo.height))
    return photo
def resize(src_dir, target_dir, resize_to, rerun_cell, photo):
    """Scale a photo so its largest side equals resize_to, carrying EXIF over.

    :param src_dir: directory holding the raw source images
    :param target_dir: directory the resized image is written into
    :param resize_to: target size in pixels for the largest image side
    :param rerun_cell: when True, redo the resize even if the target exists
    :param photo: ODM photo object; mutated in place and returned
    """
    destination = io.join_paths(target_dir, photo.filename)
    source = photo.path_file
    # On a rerun, start again from the raw image in src_dir.
    if io.file_exists(destination) and rerun_cell:
        source = io.join_paths(src_dir, photo.filename)

    # Target already produced and no rerun requested: keep it.
    if io.file_exists(destination) and not rerun_cell:
        log.ODM_WARNING('Already resized %s | dimensions: %s x %s' %
                        (photo.filename, photo.width, photo.height))
        return photo

    # Load and scale with OpenCV so the largest side becomes resize_to.
    image = cv2.imread(source)
    largest_side = max(image.shape[0], image.shape[1])
    if largest_side <= resize_to:
        log.ODM_WARNING('Resize parameter is greater than or equal to the largest side of the image')
    scale = float(resize_to) / float(largest_side)
    resized = cv2.resize(image, None, fx=scale, fy=scale)
    cv2.imwrite(destination, resized)

    # Carry the EXIF metadata over, then correct the stored dimensions.
    source_meta = pyexiv2.ImageMetadata(source)
    dest_meta = pyexiv2.ImageMetadata(destination)
    source_meta.read()
    dest_meta.read()
    source_meta.copy(dest_meta)
    dest_meta['Exif.Photo.PixelXDimension'] = resized.shape[1]
    dest_meta['Exif.Photo.PixelYDimension'] = resized.shape[0]
    dest_meta.write()

    # Reflect the new file and size on the photo object.
    photo.path_file = destination
    photo.width = resized.shape[1]
    photo.height = resized.shape[0]
    photo.update_focal()
    log.ODM_DEBUG('Resized %s | dimensions: %s' %
                  (photo.filename, resized.shape))
    return photo
def process(self, inputs, outputs):
    """Locate the dataset directory, build the ODM_Photo list, publish it."""

    # check if the extension is supported
    def supported_extension(file_name):
        return os.path.splitext(file_name)[1].lower() in context.supported_extensions

    log.ODM_INFO('Running ODM Load Dataset Cell')

    # get inputs
    tree = self.inputs.tree

    # Prefer the resized images; fall back to the raw dataset.
    images_dir = tree.dataset_resize
    if not io.dir_exists(images_dir):
        images_dir = tree.dataset_raw
        if not io.dir_exists(images_dir):
            log.ODM_ERROR(
                "You must put your pictures into an <images> directory")
            return ecto.QUIT

    log.ODM_DEBUG('Loading dataset from: %s' % images_dir)

    # Keep only files whose extension ODM supports.
    files = [f for f in io.get_files_list(images_dir)
             if supported_extension(f)]

    if not files:
        log.ODM_ERROR('Not enough supported images in %s' % images_dir)
        return ecto.QUIT

    # create ODMPhoto list
    photos = [types.ODM_Photo(io.join_paths(images_dir, f),
                              self.params.force_focal,
                              self.params.force_ccd)
              for f in files]
    log.ODM_INFO('Found %s usable images' % len(photos))

    # append photos to cell output
    outputs.photos = photos

    log.ODM_INFO('Running ODM Load Dataset Cell - Finished')
    return ecto.OK
def process(self, inputs, outputs):
    """Load the image dataset and emit the photo list on outputs."""

    # check if the extension is supported
    def supported_extension(file_name):
        pathfn, ext = os.path.splitext(file_name)
        return ext.lower() in context.supported_extensions

    log.ODM_INFO('Running ODM Load Dataset Cell')

    # get inputs
    tree = self.inputs.tree

    # Pick the first dataset directory that exists: resized, then raw.
    images_dir = None
    for candidate in (tree.dataset_resize, tree.dataset_raw):
        if io.dir_exists(candidate):
            images_dir = candidate
            break
    if images_dir is None:
        log.ODM_ERROR("You must put your pictures into an <images> directory")
        return ecto.QUIT

    log.ODM_DEBUG('Loading dataset from: %s' % images_dir)

    # find files in the given directory, keeping supported extensions only
    usable = [f for f in io.get_files_list(images_dir)
              if supported_extension(f)]

    if not usable:
        log.ODM_ERROR('Not enough supported images in %s' % images_dir)
        return ecto.QUIT

    # create ODMPhoto list
    photos = []
    for name in usable:
        full_path = io.join_paths(images_dir, name)
        photos.append(types.ODM_Photo(full_path,
                                      self.params.force_focal,
                                      self.params.force_ccd))
    log.ODM_INFO('Found %s usable images' % len(photos))

    # append photos to cell output
    outputs.photos = photos

    log.ODM_INFO('Running ODM Load Dataset Cell - Finished')
    return ecto.OK
def process(self, inputs, outputs):
    """Georeferencing cell: ensure a UTM coordinates file exists.

    Fixes: the Python-2-only `except Exception, e` syntax is replaced by
    `except Exception as e` (valid on Python 2.6+ and Python 3), and a
    missing space is added between the two concatenated error-message
    fragments ("metadata.Consider" -> "metadata. Consider").
    """
    # Benchmarking
    start_time = system.now_raw()
    log.ODM_INFO('Running ODM Georeferencing Cell')

    # get inputs
    args = self.inputs.args
    tree = self.inputs.tree
    gcpfile = io.join_paths(tree.root_path, self.params.gcp_file)

    # define paths and create working directories
    system.mkdir_p(tree.odm_georeferencing)

    # in case a gcp file it's not provided, let's try to generate it using
    # images metadata. Internally calls jhead.
    if not self.params.use_gcp and \
       not io.file_exists(tree.odm_georeferencing_coords):

        log.ODM_WARNING('Warning: No coordinates file. '
                        'Generating coordinates file in: %s'
                        % tree.odm_georeferencing_coords)
        try:
            # odm_georeference definitions
            kwargs = {
                'bin': context.odm_modules_path,
                'imgs': tree.dataset_raw,
                'imgs_list': tree.opensfm_bundle_list,
                'coords': tree.odm_georeferencing_coords,
                'log': tree.odm_georeferencing_utm_log
            }

            # run UTM extraction binary
            system.run(
                '{bin}/odm_extract_utm -imagesPath {imgs}/ '
                '-imageListFile {imgs_list} -outputCoordFile {coords} '
                '-logFile {log}'.format(**kwargs))
        except Exception as e:
            log.ODM_ERROR(
                'Could not generate GCP file from images metadata. '
                'Consider rerunning with argument --odm_georeferencing-useGcp'
                ' and provide a proper GCP file')
            log.ODM_ERROR(e)
            return ecto.QUIT
def configuration():
    """Configure and run the dataset-loading and OpenSfM stages.

    Fixes: removed a debug leftover that overwrote the freshly joined
    args.project_path with a hard-coded, machine-specific directory
    ('/home/j/ODM-master/dataset/images'), and replaced the stray bare
    print with the logger the rest of the function uses.
    """
    args = config.config()
    args_dict = vars(args)
    # Forced split-merge settings for this run.
    args.split = 5
    args.split_overlap = 10
    args.rerun_all = True
    for k in sorted(args_dict.keys()):
        # Skip _is_set keys
        if k.endswith("_is_set"):
            continue
        # Don't leak token
        if k == 'sm_cluster' and args_dict[k] is not None:
            log.ODM_INFO('%s: True' % k)
        else:
            log.ODM_INFO('%s: %s' % (k, args_dict[k]))

    args.project_path = io.join_paths(args.project_path, args.name)
    log.ODM_INFO('Project path: %s' % args.project_path)
    if not io.dir_exists(args.project_path):
        log.ODM_WARNING('Directory %s does not exist. Creating it now.' %
                        args.name)
        system.mkdir_p(os.path.abspath(args.project_path))

    dataset = ODMLoadDatasetStage('dataset', args, progress=5.0,
                                  verbose=args.verbose)
    dataset.run()

    #upload images to server 2
    #blocking call
    #run distance measuremeants
    #exchange images that are required by 2 and images required by 1

    #opensfm in map reduce mode
    opensfm = ODMOpenSfMStage('opensfm', args, progress=25.0)
    opensfm.run()
def process(self, inputs, outputs):
    """Georeferencing cell: ensure a UTM coordinates file exists.

    Fixes: replaced the Python-2-only `except Exception, e` with
    `except Exception as e` (valid on Python 2.6+ and Python 3) and added
    the missing space between the concatenated error-message fragments.
    """
    # Benchmarking
    start_time = system.now_raw()
    log.ODM_INFO('Running ODM Georeferencing Cell')

    # get inputs
    args = self.inputs.args
    tree = self.inputs.tree
    gcpfile = io.join_paths(tree.root_path, self.params.gcp_file)

    # define paths and create working directories
    system.mkdir_p(tree.odm_georeferencing)

    # in case a gcp file it's not provided, let's try to generate it using
    # images metadata. Internally calls jhead.
    if not self.params.use_gcp and \
       not io.file_exists(tree.odm_georeferencing_coords):

        log.ODM_WARNING('Warning: No coordinates file. '
                        'Generating coordinates file in: %s'
                        % tree.odm_georeferencing_coords)
        try:
            # odm_georeference definitions
            kwargs = {
                'bin': context.odm_modules_path,
                'imgs': tree.dataset_raw,
                'imgs_list': tree.opensfm_bundle_list,
                'coords': tree.odm_georeferencing_coords,
                'log': tree.odm_georeferencing_utm_log
            }

            # run UTM extraction binary
            system.run('{bin}/odm_extract_utm -imagesPath {imgs}/ '
                       '-imageListFile {imgs_list} -outputCoordFile {coords} '
                       '-logFile {log}'.format(**kwargs))
        except Exception as e:
            log.ODM_ERROR('Could not generate GCP file from images metadata. '
                          'Consider rerunning with argument --odm_georeferencing-useGcp'
                          ' and provide a proper GCP file')
            log.ODM_ERROR(e)
            return ecto.QUIT
def process_message(self, connector, host, secret_key, resource, parameters):
    """Run OpenDroneMap over the images attached to a Clowder message.

    Collects .jpg inputs and an optional settings file from the resource,
    merges any setting overrides, symlinks images into a project folder,
    runs the stitch, uploads the resulting artifacts, and cleans up.
    Settings and project_path are restored no matter what happens.
    """
    starttime = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
    self.logger.debug("Started computing images at %s" % str(starttime))
    start_time = time.time()

    # We store the settings here in case they're
    # modified by the caller and we restore them when we're all done
    original_settings = self.opendrone_args
    original_project_path = self.opendrone_args.project_path

    paths = list()
    configfilename = ""
    try:
        for localfile in resource['local_paths']:
            # deal with mounted/local files
            if localfile.lower().endswith('.jpg'):
                paths.append(localfile)
            elif localfile.lower().endswith("extractors-opendronemap.txt"):
                configfilename = localfile
            else:
                # deal with downloaded files
                for image in resource['files']:
                    if 'filepath' in image and image['filepath'] == localfile:
                        if image['filename'].lower().endswith('.jpg'):
                            paths.append(image['filename'])
                        elif image['filename'].lower().endswith(
                                "extractors-opendronemap.txt"):
                            configfilename = image['filename']

        # Check for option overrides
        if configfilename != "" and os.stat(configfilename).st_size > 0:
            # NOTE(review): file handle is never explicitly closed.
            configfile = open(configfilename, "r")
            if configfile:
                newsettings = yaml.safe_load(configfile)
                if newsettings:
                    # Use the merged settings for this run
                    self.opendrone_args = self.merge_settings(
                        self.opendrone_args, newsettings)

        # Per-message overrides from the request parameters win last.
        override_settings = json.loads(parameters['parameters'])
        if override_settings:
            self.logger.debug('Overriding settings: ' + str(override_settings))
            self.opendrone_args = self.merge_settings(
                self.opendrone_args, override_settings)

        # creating the folder to place the links to image files. Open Drone Maps wants all
        # the source image files in one folder
        self.opendrone_args.project_path = io.join_paths(
            self.opendrone_args.project_path, self.opendrone_args.name)
        imagesfolder = os.path.join(self.opendrone_args.project_path, "images")
        if not io.dir_exists(imagesfolder):
            self.logger.debug('Directory %s does not exist. Creating it now.'
                              % imagesfolder)
            system.mkdir_p(os.path.abspath(imagesfolder))
            self.logger.debug('[Prepare] create images folder: %s' % imagesfolder)

        # symlink input images files to imagesfolder
        for input in paths:
            source = os.path.join(imagesfolder, os.path.basename(input))
            os.symlink(input, source)
            self.logger.debug("[Prepare] image symlink: %s" % source)

        # perform the drone processing and preserve log output in a file
        self.stitch(connector, resource)

        # Upload the logfile from the stitching operation to the dataset
        logfilepath = os.path.join(self.opendrone_args.project_path,
                                   self.args.logfilename)
        self.upload_file(self.opendrone_args.project_path,
                         self.args.logfilename, self.args.logfilename,
                         connector, host, secret_key, resource, False)

        # Upload the output files to the dataset, optionally compressing the larger files
        filename = self.args.orthophotoname if len(
            self.args.orthophotoname) > 0 else "odm_orthophoto"
        path = os.path.join(self.opendrone_args.project_path, filename)
        if not hasattr(self.opendrone_args, "noorthophoto"):
            self.upload_file(path, "odm_orthophoto.tif", filename + ".tif",
                             connector, host, secret_key, resource, False)

        # Handle uploading two types of files from the georeferencing folder
        path = os.path.join(self.opendrone_args.project_path,
                            "odm_georeferencing")
        filename = self.args.pointcloudname if len(
            self.args.pointcloudname) > 0 else "odm_georeferenced_model"
        if not hasattr(self.opendrone_args, "nolaz"):
            self.upload_file(path, "odm_georeferenced_model.laz",
                             filename + ".laz", connector, host, secret_key,
                             resource, False)

        filename = self.args.shapefilename if len(
            self.args.shapefilename
        ) > 0 else "odm_georeferenced_model.bounds"
        if not hasattr(self.opendrone_args, "noshp"):
            # Upload the full shapefile set plus projection and boundary data.
            self.upload_file(path, "odm_georeferenced_model.bounds.shp",
                             filename + ".shp", connector, host, secret_key,
                             resource, False)
            self.upload_file(path, "odm_georeferenced_model.bounds.dbf",
                             filename + ".dbf", connector, host, secret_key,
                             resource, False)
            self.upload_file(path, "odm_georeferenced_model.bounds.prj",
                             filename + ".prj", connector, host, secret_key,
                             resource, False)
            self.upload_file(path, "odm_georeferenced_model.bounds.shx",
                             filename + ".shx", connector, host, secret_key,
                             resource, False)
            self.upload_file(path, "proj.txt", filename + ".proj.txt",
                             connector, host, secret_key, resource, False)
            self.upload_file(path, "odm_georeferenced_model.bounds.geojson",
                             filename + ".geojson", connector, host,
                             secret_key, resource, False)
            self.upload_file(path, "odm_georeferenced_model.boundary.json",
                             filename + ".json", connector, host, secret_key,
                             resource, False)

        endtime = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
        self.logger.debug("[Finish] complete computing images at %s"
                          % str(endtime))
        self.logger.debug("Elapse time: " +
                          str((time.time() - start_time) / 60) + " minutes")
    except:
        # NOTE(review): bare except — any failure (including KeyboardInterrupt)
        # is logged and swallowed here.
        self.logger.exception("Could not stich image.")
    finally:
        # Restore any settings that might have changed
        self.opendrone_args = original_settings
        try:
            # Clean up the working environment by removing links and created folders
            self.logger.debug("[Cleanup] remove computing folder: %s"
                              % self.opendrone_args.project_path)
            for path in paths:
                inputfile = os.path.basename(path)
                odmfile = os.path.join("/tmp", inputfile + ".jpg")
                if os.path.isfile(odmfile):
                    self.logger.debug("[Cleanup] remove odm .jpg: %s" % odmfile)
                    os.remove(odmfile)
            shutil.rmtree(self.opendrone_args.project_path)
        except OSError:
            pass
        finally:
            self.opendrone_args.project_path = original_project_path
# Fix: `config` and `log` are used below but were never imported,
# which would raise NameError at startup.
from opendm import config
from opendm import log
from opendm import system
from opendm import io

import ecto
import os

from scripts.odm_app import ODMApp

if __name__ == '__main__':
    args = config.config()

    log.ODM_INFO('Initializing OpenDroneMap app - %s' % system.now())

    # Add project dir if doesn't exist
    args.project_path = io.join_paths(args.project_path, args.name)
    if not io.dir_exists(args.project_path):
        log.ODM_WARNING('Directory %s does not exist. Creating it now.' % args.name)
        system.mkdir_p(os.path.abspath(args.project_path))

    # If user asks to rerun everything, delete all of the existing progress directories.
    # TODO: Move this somewhere it's not hard-coded
    if args.rerun_all:
        log.ODM_DEBUG("Rerun all -- Removing old data")
        os.system("rm -rf " +
                  args.project_path + "/images_resize " +
                  args.project_path + "/odm_georeferencing " +
                  args.project_path + "/odm_meshing " +
                  args.project_path + "/odm_orthophoto " +
                  args.project_path + "/odm_texturing " +
                  args.project_path + "/opensfm " +
                  args.project_path + "/mve")
def setup(self, args, images_path, photos, gcp_path=None, append_config = [], rerun=False):
    """
    Setup a OpenSfM project

    Creates the project directory, writes image_list.txt, copies
    image_groups.txt / camera overrides / GCP list when present, and
    writes the OpenSfM config file. Skipped entirely (with a warning)
    if image_list.txt already exists and rerun is False.

    NOTE(review): append_config has a mutable default; it is only read
    (never mutated) here, so this is harmless but worth confirming.
    """
    if rerun and io.dir_exists(self.opensfm_project_path):
        shutil.rmtree(self.opensfm_project_path)

    if not io.dir_exists(self.opensfm_project_path):
        system.mkdir_p(self.opensfm_project_path)

    list_path = io.join_paths(self.opensfm_project_path, 'image_list.txt')
    if not io.file_exists(list_path) or rerun:
        # create file list; track whether every photo carries altitude data
        has_alt = True
        with open(list_path, 'w') as fout:
            for photo in photos:
                if not photo.altitude:
                    has_alt = False
                fout.write('%s\n' % io.join_paths(images_path, photo.filename))

        # check for image_groups.txt (split-merge)
        image_groups_file = os.path.join(args.project_path, "image_groups.txt")
        if io.file_exists(image_groups_file):
            log.ODM_INFO("Copied image_groups.txt to OpenSfM directory")
            io.copy(image_groups_file,
                    os.path.join(self.opensfm_project_path, "image_groups.txt"))

        # check for cameras
        if args.cameras:
            try:
                camera_overrides = camera.get_opensfm_camera_models(args.cameras)
                with open(os.path.join(self.opensfm_project_path,
                                       "camera_models_overrides.json"), 'w') as f:
                    f.write(json.dumps(camera_overrides))
                log.ODM_INFO("Wrote camera_models_overrides.json to OpenSfM directory")
            except Exception as e:
                log.ODM_WARNING("Cannot set camera_models_overrides.json: %s" % str(e))

        # create config file for OpenSfM
        config = [
            "use_exif_size: no",
            "feature_process_size: %s" % args.resize_to,
            "feature_min_frames: %s" % args.min_num_features,
            "processes: %s" % args.max_concurrency,
            "matching_gps_neighbors: %s" % args.matcher_neighbors,
            "matching_gps_distance: %s" % args.matcher_distance,
            "depthmap_method: %s" % args.opensfm_depthmap_method,
            "depthmap_resolution: %s" % args.depthmap_resolution,
            "depthmap_min_patch_sd: %s" % args.opensfm_depthmap_min_patch_sd,
            "depthmap_min_consistent_views: %s" % args.opensfm_depthmap_min_consistent_views,
            "optimize_camera_parameters: %s" % ('no' if args.use_fixed_camera_params or args.cameras else 'yes'),
            "undistorted_image_format: png",  # mvs-texturing exhibits artifacts with JPG
            "bundle_outlier_filtering_type: AUTO",
        ]

        # TODO: add BOW matching when dataset is not georeferenced (no gps)

        if has_alt:
            log.ODM_INFO("Altitude data detected, enabling it for GPS alignment")
            config.append("use_altitude_tag: yes")

        # Choose the alignment strategy: naive when any absolute reference
        # (altitude or GCPs) exists, otherwise a vertical orientation prior.
        if has_alt or gcp_path:
            config.append("align_method: naive")
        else:
            config.append("align_method: orientation_prior")
            config.append("align_orientation_prior: vertical")

        if args.use_hybrid_bundle_adjustment:
            log.ODM_INFO("Enabling hybrid bundle adjustment")
            config.append("bundle_interval: 100")  # Bundle after adding 'bundle_interval' cameras
            config.append("bundle_new_points_ratio: 1.2")  # Bundle when (new points) / (bundled points) > bundle_new_points_ratio
            config.append("local_bundle_radius: 1")  # Max image graph distance for images to be included in local bundle adjustment

        if gcp_path:
            # GCPs take precedence over GPS during bundle adjustment.
            config.append("bundle_use_gcp: yes")
            config.append("bundle_use_gps: no")
            io.copy(gcp_path, self.path("gcp_list.txt"))

        config = config + append_config

        # write config file
        log.ODM_INFO(config)
        config_filename = self.get_config_file_path()
        with open(config_filename, 'w') as fout:
            fout.write("\n".join(config))
    else:
        log.ODM_WARNING("%s already exists, not rerunning OpenSfM setup" % list_path)
def process(self, inputs, outputs):
    """MVS texturing cell: run mvs-texturing over the 3D and/or 2.5D mesh.

    Builds one run per mesh variant, translates the cell parameters into
    mvs-texturing command-line flags, and skips runs whose textured model
    already exists (unless a rerun was requested).
    """
    # Benchmarking
    start_time = system.now_raw()

    log.ODM_INFO('Running MVS Texturing Cell')

    # get inputs
    args = inputs.args
    tree = inputs.tree
    reconstruction = inputs.reconstruction

    # define paths and create working directories
    system.mkdir_p(tree.odm_texturing)
    if not args.use_3dmesh:
        system.mkdir_p(tree.odm_25dtexturing)

    # check if we rerun cell or not
    rerun_cell = (args.rerun is not None and
                  args.rerun == 'mvs_texturing') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and
                  'mvs_texturing' in args.rerun_from)

    # One run per mesh: the full 3D mesh, and/or the 2.5D mesh in nadir mode.
    runs = [{
        'out_dir': tree.odm_texturing,
        'model': tree.odm_mesh,
        'nadir': False
    }]

    if args.skip_3dmodel:
        runs = []

    if not args.use_3dmesh:
        runs += [{
            'out_dir': tree.odm_25dtexturing,
            'model': tree.odm_25dmesh,
            'nadir': True
        }]

    for r in runs:
        odm_textured_model_obj = os.path.join(r['out_dir'],
                                              tree.odm_textured_model_obj)

        if not io.file_exists(odm_textured_model_obj) or rerun_cell:
            log.ODM_DEBUG('Writing MVS Textured file in: %s'
                          % odm_textured_model_obj)

            # Format arguments to fit Mvs-Texturing app
            # (each flag is either the empty string or the literal option).
            skipGeometricVisibilityTest = ""
            skipGlobalSeamLeveling = ""
            skipLocalSeamLeveling = ""
            skipHoleFilling = ""
            keepUnseenFaces = ""
            nadir = ""

            if (self.params.skip_vis_test):
                skipGeometricVisibilityTest = "--skip_geometric_visibility_test"
            if (self.params.skip_glob_seam_leveling):
                skipGlobalSeamLeveling = "--skip_global_seam_leveling"
            if (self.params.skip_loc_seam_leveling):
                skipLocalSeamLeveling = "--skip_local_seam_leveling"
            if (self.params.skip_hole_fill):
                skipHoleFilling = "--skip_hole_filling"
            if (self.params.keep_unseen_faces):
                keepUnseenFaces = "--keep_unseen_faces"
            if (r['nadir']):
                nadir = '--nadir_mode'

            # mvstex definitions
            kwargs = {
                'bin': context.mvstex_path,
                'out_dir': io.join_paths(r['out_dir'], "odm_textured_model"),
                'model': r['model'],
                'dataTerm': self.params.data_term,
                'outlierRemovalType': self.params.outlier_rem_type,
                'skipGeometricVisibilityTest': skipGeometricVisibilityTest,
                'skipGlobalSeamLeveling': skipGlobalSeamLeveling,
                'skipLocalSeamLeveling': skipLocalSeamLeveling,
                'skipHoleFilling': skipHoleFilling,
                'keepUnseenFaces': keepUnseenFaces,
                'toneMapping': self.params.tone_mapping,
                'nadirMode': nadir,
                # Exponential weighting of the nadir preference.
                'nadirWeight': 2 ** args.texturing_nadir_weight - 1,
                'nvm_file': io.join_paths(tree.opensfm, "reconstruction.nvm")
            }

            # Make sure tmp directory is empty
            mvs_tmp_dir = os.path.join(r['out_dir'], 'tmp')
            if io.dir_exists(mvs_tmp_dir):
                log.ODM_INFO("Removing old tmp directory {}".format(mvs_tmp_dir))
                shutil.rmtree(mvs_tmp_dir)

            # run texturing binary
            system.run('{bin} {nvm_file} {model} {out_dir} '
                       '-d {dataTerm} -o {outlierRemovalType} '
                       '-t {toneMapping} '
                       '{skipGeometricVisibilityTest} '
                       '{skipGlobalSeamLeveling} '
                       '{skipLocalSeamLeveling} '
                       '{skipHoleFilling} '
                       '{keepUnseenFaces} '
                       '{nadirMode} '
                       '-n {nadirWeight}'.format(**kwargs))
        else:
            log.ODM_WARNING('Found a valid ODM Texture file in: %s'
                            % odm_textured_model_obj)

    outputs.reconstruction = reconstruction

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'Texturing')

    log.ODM_INFO('Running ODM Texturing Cell - Finished')
    return ecto.OK if args.end_with != 'mvs_texturing' else ecto.QUIT
f.write('\t\t</LocCamDataBase>\n') f.write('\t</ChantierDescripteur>\n') f.write('</Global>\n') # RUN if __name__ == '__main__': args = config.config() log.MM_INFO('Initializing NodeMICMAC app - %s' % system.now()) log.MM_INFO(args) progressbc.set_project_name(args.name) project_dir = io.join_paths(args.project_path, args.name) image_dir = io.join_paths(project_dir, 'images') IN_DOCKER = os.environ.get('DEBIAN_FRONTEND', False) if IN_DOCKER: mm3d = 'mm3d' else: mm3d = '/home/drnmppr-micmac/bin/mm3d' # for dev: locally installed micmac branch try: log.MM_INFO('Starting..') os.chdir(image_dir) # create output directories (match ODM conventions for backward compatibility, even though this is MicMac) odm_dirs = [
def setup(self, args, images_path, photos, reconstruction, append_config=[], rerun=False):
    """
    Setup a OpenSfM project

    Creates the project directory, writes image_list.txt, copies
    image_groups.txt / camera overrides / GCP list when present, builds
    EXIF GPS-accuracy overrides, and writes the OpenSfM config file.
    Skipped entirely (with a warning) if image_list.txt already exists
    and rerun is False.

    NOTE(review): append_config has a mutable default; it is only read
    (never mutated) here, so this is harmless but worth confirming.
    """
    if rerun and io.dir_exists(self.opensfm_project_path):
        shutil.rmtree(self.opensfm_project_path)

    if not io.dir_exists(self.opensfm_project_path):
        system.mkdir_p(self.opensfm_project_path)

    list_path = io.join_paths(self.opensfm_project_path, 'image_list.txt')
    if not io.file_exists(list_path) or rerun:
        # create file list; track whether photos carry altitude / GPS data
        has_alt = True
        has_gps = False
        with open(list_path, 'w') as fout:
            for photo in photos:
                if not photo.altitude:
                    has_alt = False
                if photo.latitude is not None and photo.longitude is not None:
                    has_gps = True
                fout.write('%s\n' % io.join_paths(images_path, photo.filename))

        # check for image_groups.txt (split-merge)
        image_groups_file = os.path.join(args.project_path, "image_groups.txt")
        if io.file_exists(image_groups_file):
            log.ODM_INFO("Copied image_groups.txt to OpenSfM directory")
            io.copy(
                image_groups_file,
                os.path.join(self.opensfm_project_path, "image_groups.txt"))

        # check for cameras
        if args.cameras:
            try:
                camera_overrides = camera.get_opensfm_camera_models(
                    args.cameras)
                with open(
                        os.path.join(self.opensfm_project_path,
                                     "camera_models_overrides.json"),
                        'w') as f:
                    f.write(json.dumps(camera_overrides))
                log.ODM_INFO(
                    "Wrote camera_models_overrides.json to OpenSfM directory"
                )
            except Exception as e:
                log.ODM_WARNING(
                    "Cannot set camera_models_overrides.json: %s" % str(e))

        use_bow = False
        feature_type = "SIFT"

        # Multi-camera rigs need proportionally more matching neighbors.
        matcher_neighbors = args.matcher_neighbors
        if matcher_neighbors != 0 and reconstruction.multi_camera is not None:
            matcher_neighbors *= len(reconstruction.multi_camera)
            log.ODM_INFO(
                "Increasing matcher neighbors to %s to accomodate multi-camera setup"
                % matcher_neighbors)
            log.ODM_INFO("Multi-camera setup, using BOW matching")
            use_bow = True

        # GPSDOP override if we have GPS accuracy information (such as RTK)
        override_gps_dop = 'gps_accuracy_is_set' in args
        for p in photos:
            if p.get_gps_dop() is not None:
                override_gps_dop = True
                break

        if override_gps_dop:
            if 'gps_accuracy_is_set' in args:
                log.ODM_INFO("Forcing GPS DOP to %s for all images"
                             % args.gps_accuracy)
            else:
                log.ODM_INFO(
                    "Looks like we have RTK accuracy info for some photos. Good! We'll use it."
                )

            # Per-photo EXIF overrides: user-set accuracy wins over per-photo DOP.
            exif_overrides = {}
            for p in photos:
                dop = args.gps_accuracy if 'gps_accuracy_is_set' in args else p.get_gps_dop()
                if dop is not None and p.latitude is not None and p.longitude is not None:
                    exif_overrides[p.filename] = {
                        'gps': {
                            'latitude': p.latitude,
                            'longitude': p.longitude,
                            'altitude': p.altitude if p.altitude is not None else 0,
                            'dop': dop,
                        }
                    }

            with open(
                    os.path.join(self.opensfm_project_path,
                                 "exif_overrides.json"), 'w') as f:
                f.write(json.dumps(exif_overrides))

        # create config file for OpenSfM
        config = [
            "use_exif_size: no",
            "flann_algorithm: KDTREE",  # more stable, faster than KMEANS
            "feature_process_size: %s" % args.resize_to,
            "feature_min_frames: %s" % args.min_num_features,
            "processes: %s" % args.max_concurrency,
            "matching_gps_neighbors: %s" % matcher_neighbors,
            "matching_gps_distance: %s" % args.matcher_distance,
            "depthmap_method: %s" % args.opensfm_depthmap_method,
            "depthmap_resolution: %s" % args.depthmap_resolution,
            "depthmap_min_patch_sd: %s" % args.opensfm_depthmap_min_patch_sd,
            "depthmap_min_consistent_views: %s" % args.opensfm_depthmap_min_consistent_views,
            "optimize_camera_parameters: %s" % ('no' if args.use_fixed_camera_params or args.cameras else 'yes'),
            "undistorted_image_format: tif",
            "bundle_outlier_filtering_type: AUTO",
            "align_orientation_prior: vertical",
            "triangulation_type: ROBUST",
            "bundle_common_position_constraints: %s" % ('no' if reconstruction.multi_camera is None else 'yes'),
        ]

        if args.camera_lens != 'auto':
            config.append("camera_projection_type: %s"
                          % args.camera_lens.upper())

        if not has_gps:
            log.ODM_INFO("No GPS information, using BOW matching")
            use_bow = True

        feature_type = args.feature_type.upper()

        if use_bow:
            config.append("matcher_type: WORDS")

            # Cannot use SIFT with BOW
            if feature_type == "SIFT":
                log.ODM_WARNING(
                    "Using BOW matching, will use HAHOG feature type, not SIFT"
                )
                feature_type = "HAHOG"

        config.append("feature_type: %s" % feature_type)

        if has_alt:
            log.ODM_INFO(
                "Altitude data detected, enabling it for GPS alignment")
            config.append("use_altitude_tag: yes")

        # Choose the alignment strategy based on available references.
        gcp_path = reconstruction.gcp.gcp_path
        if has_alt or gcp_path:
            config.append("align_method: auto")
        else:
            config.append("align_method: orientation_prior")

        if args.use_hybrid_bundle_adjustment:
            log.ODM_INFO("Enabling hybrid bundle adjustment")
            config.append(
                "bundle_interval: 100"
            )  # Bundle after adding 'bundle_interval' cameras
            config.append(
                "bundle_new_points_ratio: 1.2"
            )  # Bundle when (new points) / (bundled points) > bundle_new_points_ratio
            config.append(
                "local_bundle_radius: 1"
            )  # Max image graph distance for images to be included in local bundle adjustment
        else:
            config.append("local_bundle_radius: 0")

        if gcp_path:
            # GCPs take precedence over GPS unless --force-gps is set.
            config.append("bundle_use_gcp: yes")
            if not args.force_gps:
                config.append("bundle_use_gps: no")
            io.copy(gcp_path, self.path("gcp_list.txt"))

        config = config + append_config

        # write config file
        log.ODM_INFO(config)
        config_filename = self.get_config_file_path()
        with open(config_filename, 'w') as fout:
            fout.write("\n".join(config))
    else:
        log.ODM_WARNING("%s already exists, not rerunning OpenSfM setup"
                        % list_path)
from opendm import system
from opendm import io
import ecto
import os
from scripts.odm_app import ODMApp

if __name__ == '__main__':
    # NOTE(review): `config` and `log` are used below but not imported in this
    # visible block -- presumably imported earlier in the file; confirm.
    args = config.config()

    log.ODM_INFO('Initializing OpenDroneMap app - %s' % system.now())

    # Add project dir if doesn't exist
    args.project_path = io.join_paths(args.project_path, args.name)
    if not io.dir_exists(args.project_path):
        log.ODM_WARNING('Directory %s does not exist. Creating it now.' % args.name)
        system.mkdir_p(os.path.abspath(args.project_path))

    # If user asks to rerun everything, delete all of the existing progress directories.
    # TODO: Move this somewhere it's not hard-coded
    # NOTE(review): the string concatenation below assumes args.project_path
    # ends with a path separator; if io.join_paths does not guarantee a
    # trailing '/', these globs become "<project>images_resize/" etc. and the
    # rm -rf silently deletes nothing -- verify.
    if args.rerun_all:
        os.system("rm -rf " +
                  args.project_path + "images_resize/ " +
                  args.project_path + "odm_georeferencing/ " +
                  args.project_path + "odm_meshing/ " +
                  args.project_path + "odm_orthophoto/ " +
                  args.project_path + "odm_texturing/ " +
                  args.project_path + "opensfm/ " +
                  args.project_path + "pmvs/")
def process(self, inputs, outputs):
    """Texture the reconstructed mesh(es) with the MVS-Texturing binary.

    Builds one texturing "run" per mesh (the full 3D mesh and, when
    args.use_25dmesh is set, the 2.5D mesh), translates the cell's
    skip/keep parameters into MVS-Texturing command-line flags, and
    invokes the external binary via system.run. Existing outputs are
    reused unless the cell is being rerun.

    Returns ecto.OK, or ecto.QUIT when the pipeline is configured to
    end at this stage.
    """
    # Benchmarking
    start_time = system.now_raw()
    log.ODM_INFO('Running MVS Texturing Cell')

    # get inputs
    args = self.inputs.args
    tree = self.inputs.tree

    # define paths and create working directories
    system.mkdir_p(tree.odm_texturing)
    if args.use_25dmesh:
        system.mkdir_p(tree.odm_25dtexturing)

    # check if we rerun cell or not
    rerun_cell = (args.rerun is not None and
                  args.rerun == 'mvs_texturing') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and
                  'mvs_texturing' in args.rerun_from)

    # Each entry describes one texturing pass: output dir, input mesh,
    # and whether the geometric visibility test must be skipped.
    runs = [{
        'out_dir': tree.odm_texturing,
        'model': tree.odm_mesh,
        'force_skip_vis_test': False
    }]

    # fast_orthophoto skips the full 3D mesh entirely
    if args.fast_orthophoto:
        runs = []

    if args.use_25dmesh:
        runs += [{
            'out_dir': tree.odm_25dtexturing,
            'model': tree.odm_25dmesh,
            # We always skip the visibility test when using the 2.5D mesh
            # because many faces end up being narrow, and almost perpendicular
            # to the ground plane. The visibility test improperly classifies
            # them as "not seen" since the test is done on a single triangle vertex,
            # and while one vertex might be occluded, the other two might not.
            'force_skip_vis_test': True
        }]

    for r in runs:
        odm_textured_model_obj = os.path.join(r['out_dir'],
                                              tree.odm_textured_model_obj)

        if not io.file_exists(odm_textured_model_obj) or rerun_cell:
            log.ODM_DEBUG('Writing MVS Textured file in: %s'
                          % odm_textured_model_obj)

            # Format arguments to fit Mvs-Texturing app
            # (empty string == feature enabled / flag omitted)
            skipGeometricVisibilityTest = ""
            skipGlobalSeamLeveling = ""
            skipLocalSeamLeveling = ""
            skipHoleFilling = ""
            keepUnseenFaces = ""

            if (self.params.skip_vis_test or r['force_skip_vis_test']):
                skipGeometricVisibilityTest = "--skip_geometric_visibility_test"
            if (self.params.skip_glob_seam_leveling):
                skipGlobalSeamLeveling = "--skip_global_seam_leveling"
            if (self.params.skip_loc_seam_leveling):
                skipLocalSeamLeveling = "--skip_local_seam_leveling"
            if (self.params.skip_hole_fill):
                skipHoleFilling = "--skip_hole_filling"
            if (self.params.keep_unseen_faces):
                keepUnseenFaces = "--keep_unseen_faces"

            # mvstex definitions
            kwargs = {
                'bin': context.mvstex_path,
                'out_dir': io.join_paths(r['out_dir'], "odm_textured_model"),
                'pmvs_folder': tree.pmvs_rec_path,
                'nvm_file': io.join_paths(tree.pmvs_rec_path, "nvmCams.nvm"),
                'model': r['model'],
                'dataTerm': self.params.data_term,
                'outlierRemovalType': self.params.outlier_rem_type,
                'skipGeometricVisibilityTest': skipGeometricVisibilityTest,
                'skipGlobalSeamLeveling': skipGlobalSeamLeveling,
                'skipLocalSeamLeveling': skipLocalSeamLeveling,
                'skipHoleFilling': skipHoleFilling,
                'keepUnseenFaces': keepUnseenFaces,
                'toneMapping': self.params.tone_mapping
            }

            # Use OpenSfM's .nvm directly unless the PMVS pipeline produced
            # the dense reconstruction, in which case a .nvm must be built.
            if not args.use_pmvs:
                kwargs['nvm_file'] = io.join_paths(tree.opensfm,
                                                   "reconstruction.nvm")
            else:
                log.ODM_DEBUG('Generating .nvm file from pmvs output: %s'
                              % '{nvm_file}'.format(**kwargs))
                # Create .nvm camera file.
                pmvs2nvmcams.run('{pmvs_folder}'.format(**kwargs),
                                 '{nvm_file}'.format(**kwargs))

            # run texturing binary
            system.run('{bin} {nvm_file} {model} {out_dir} '
                       '-d {dataTerm} -o {outlierRemovalType} '
                       '-t {toneMapping} '
                       '{skipGeometricVisibilityTest} '
                       '{skipGlobalSeamLeveling} '
                       '{skipLocalSeamLeveling} '
                       '{skipHoleFilling} '
                       '{keepUnseenFaces}'.format(**kwargs))
        else:
            log.ODM_WARNING('Found a valid ODM Texture file in: %s'
                            % odm_textured_model_obj)

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'Texturing')

    log.ODM_INFO('Running ODM Texturing Cell - Finished')
    return ecto.OK if args.end_with != 'mvs_texturing' else ecto.QUIT
def process(self, inputs, outputs):
    """Resize every input photo so its largest side equals params.resize_to.

    Writes the resized images (with EXIF metadata copied from the
    originals) into tree.dataset_resize, updates each photo object's
    path/width/height/focal, and publishes the photos list on the cell
    outputs.

    Returns ecto.OK, or ecto.QUIT when inputs are invalid or the
    pipeline is configured to end at this stage.
    """
    # Benchmarking
    start_time = system.now_raw()
    log.ODM_INFO('Running ODM Resize Cell')

    # get inputs
    args = self.inputs.args
    tree = self.inputs.tree
    photos = self.inputs.photos

    if not photos:
        log.ODM_ERROR('Not enough photos in photos to resize')
        return ecto.QUIT

    if self.params.resize_to <= 0:
        log.ODM_ERROR('Resize parameter must be greater than 0')
        return ecto.QUIT

    # create working directory
    system.mkdir_p(tree.dataset_resize)

    log.ODM_DEBUG('Resizing dataset to: %s' % tree.dataset_resize)

    # check if we rerun cell or not
    rerun_cell = (args.rerun is not None and
                  args.rerun == 'resize') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and
                  'resize' in args.rerun_from)

    # loop over photos
    for photo in photos:
        # define image paths
        path_file = photo.path_file
        new_path_file = io.join_paths(tree.dataset_resize, photo.filename)
        # set raw image path in case we want to rerun cell
        if io.file_exists(new_path_file) and rerun_cell:
            path_file = io.join_paths(tree.dataset_raw, photo.filename)

        if not io.file_exists(new_path_file) or rerun_cell:
            # open and resize image with opencv
            img = cv2.imread(path_file)
            # compute new size; cv2 images are numpy arrays indexed
            # (rows, cols, channels) == (height, width, channels)
            max_side = max(img.shape[0], img.shape[1])
            if max_side <= self.params.resize_to:
                log.ODM_WARNING(
                    'Resize Parameter is greater than the largest side of the image'
                )

            ratio = float(self.params.resize_to) / float(max_side)
            img_r = cv2.resize(img, None, fx=ratio, fy=ratio)
            # write image with opencv
            cv2.imwrite(new_path_file, img_r)
            # read metadata with pyexiv2
            old_meta = pyexiv2.ImageMetadata(path_file)
            new_meta = pyexiv2.ImageMetadata(new_path_file)
            old_meta.read()
            new_meta.read()
            # copy metadata
            old_meta.copy(new_meta)
            # update metadata size
            # BUGFIX: img_r.shape is (height, width, channels), so the
            # EXIF X (width) dimension is shape[1] and the Y (height)
            # dimension is shape[0] -- these were previously swapped.
            new_meta['Exif.Photo.PixelXDimension'] = img_r.shape[1]
            new_meta['Exif.Photo.PixelYDimension'] = img_r.shape[0]
            new_meta.write()
            # update photos array with new values
            # (same width/height fix as above; matches the convention
            # used elsewhere in this file: width = shape[1])
            photo.path_file = new_path_file
            photo.width = img_r.shape[1]
            photo.height = img_r.shape[0]
            photo.update_focal()

            # log message
            log.ODM_DEBUG('Resized %s | dimensions: %s' %
                          (photo.filename, img_r.shape))
        else:
            # log message
            log.ODM_WARNING('Already resized %s | dimensions: %s x %s' %
                            (photo.filename, photo.width, photo.height))

    log.ODM_INFO('Resized %s images' % len(photos))

    # append photos to cell output
    self.outputs.photos = photos

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'Resizing')

    log.ODM_INFO('Running ODM Resize Cell - Finished')
    return ecto.OK if args.end_with != 'resize' else ecto.QUIT
import numpy as np import json import pyproj if __name__ == "__main__": parser = argparse.ArgumentParser(description='Align metadaset submodels') parser.add_argument('dataset', help='path to the dataset to be processed') parser.add_argument('--overwrite', '-o', action='store_true', default=False, help='Force overwrite of generated files') args = parser.parse_args() submodels_path = io.join_paths(args.dataset, 'submodels') sfm_path = io.join_paths(args.dataset, 'opensfm') meta_data = metadataset.MetaDataSet(sfm_path) data = metadataset.DataSet(sfm_path) voronoi_file = io.join_paths(meta_data.data_path, 'voronoi.geojson') proj_path = io.join_paths(args.dataset, "odm_georeferencing/proj.txt") out_tif = io.join_paths(args.dataset, "merged.tif") addo_log = io.join_paths(args.dataset, "gdal_addo.log") bounds_files = {} for folder in os.listdir(io.join_paths(args.dataset, 'submodels')): if 'submodel' in folder: folder_number = '0' if folder.split('_')[1] == '0000' else folder.split('_')[1].lstrip('0') bounds_file = io.join_paths(submodels_path, folder + "/odm_georeferencing/odm_georeferenced_model.bounds.geojson") if io.file_exists(bounds_file):
def process(self, inputs, outputs):
    """Run the OpenSfM reconstruction and export it to Bundler and PMVS.

    Writes the image list and an OpenSfM config.yaml, then invokes the
    OpenSfM run_all/export_bundler/export_pmvs scripts. Each step is
    skipped when its output file already exists, unless the cell is
    being rerun.

    Returns ecto.OK, or ecto.QUIT when the pipeline ends at this stage.
    """
    # Benchmarking
    start_time = system.now_raw()
    log.ODM_INFO('Running ODM OpenSfM Cell')

    # get inputs
    tree = self.inputs.tree
    args = self.inputs.args
    photos = self.inputs.photos

    if not photos:
        log.ODM_ERROR('Not enough photos in photos array to start OpenSfM')
        return ecto.QUIT

    # create working directories
    system.mkdir_p(tree.opensfm)
    system.mkdir_p(tree.pmvs)

    # check if we rerun cell or not
    rerun_cell = (args.rerun is not None and
                  args.rerun == 'opensfm') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and
                  'opensfm' in args.rerun_from)

    # check if reconstruction was done before
    if not io.file_exists(tree.opensfm_reconstruction) or rerun_cell:
        # create file list
        list_path = io.join_paths(tree.opensfm, 'image_list.txt')
        with open(list_path, 'w') as fout:
            for photo in photos:
                fout.write('%s\n' % photo.path_file)

        # create config file for OpenSfM
        config = [
            "use_exif_size: %s" % ('no' if not self.params.use_exif_size else 'yes'),
            "feature_process_size: %s" % self.params.feature_process_size,
            "feature_min_frames: %s" % self.params.feature_min_frames,
            "processes: %s" % self.params.processes,
            "matching_gps_neighbors: %s" % self.params.matching_gps_neighbors
        ]

        # a non-positive matcher_distance disables GPS-distance matching
        if args.matcher_distance > 0:
            config.append("matching_gps_distance: %s" % self.params.matching_gps_distance)

        # write config file
        config_filename = io.join_paths(tree.opensfm, 'config.yaml')
        with open(config_filename, 'w') as fout:
            fout.write("\n".join(config))

        # run OpenSfM reconstruction
        system.run('PYTHONPATH=%s %s/bin/run_all %s' %
                   (context.pyopencv_path, context.opensfm_path, tree.opensfm))
    else:
        log.ODM_WARNING('Found a valid OpenSfM file in: %s' %
                        tree.opensfm_reconstruction)

    # check if reconstruction was exported to bundler before
    if not io.file_exists(tree.opensfm_bundle_list) or rerun_cell:
        # convert back to bundler's format
        system.run('PYTHONPATH=%s %s/bin/export_bundler %s' %
                   (context.pyopencv_path, context.opensfm_path, tree.opensfm))
    else:
        # NOTE(review): this message prints the reconstruction path, while
        # the check above was on tree.opensfm_bundle_list -- slightly
        # misleading, though harmless.
        log.ODM_WARNING('Found a valid Bundler file in: %s' %
                        tree.opensfm_reconstruction)

    # check if reconstruction was exported to pmvs before
    if not io.file_exists(tree.pmvs_visdat) or rerun_cell:
        # run PMVS converter
        system.run('PYTHONPATH=%s %s/bin/export_pmvs %s --output %s' %
                   (context.pyopencv_path, context.opensfm_path,
                    tree.opensfm, tree.pmvs))
    else:
        log.ODM_WARNING('Found a valid CMVS file in: %s' % tree.pmvs_visdat)

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'OpenSfM')

    log.ODM_INFO('Running ODM OpenSfM Cell - Finished')
    return ecto.OK if args.end_with != 'opensfm' else ecto.QUIT
def process(self, inputs, outputs):
    """Georeference the textured model and dense point cloud.

    When no GCP file is provided, attempts to generate a UTM coordinates
    file from image EXIF data (odm_extract_utm). Then runs the odm_georef
    binary (with the GCP file or the generated coords), converts the
    georeferenced PLY to LAS, and writes an XYZ CSV export.

    Returns ecto.OK, or ecto.QUIT when the pipeline ends at this stage
    or when GCP-based georeferencing raises.
    """
    # find a file in the root directory
    def find(file, dir):
        for root, dirs, files in os.walk(dir):
            return '/'.join((root, file)) if file in files else None

    # Benchmarking
    start_time = system.now_raw()
    log.ODM_INFO('Running ODM Georeferencing Cell')

    # get inputs
    args = self.inputs.args
    tree = self.inputs.tree
    gcpfile = io.join_paths(tree.root_path, self.params.gcp_file) \
        if self.params.gcp_file else find('gcp_list.txt', tree.root_path)
    geocreated = True
    verbose = '-verbose' if self.params.verbose else ''

    # define paths and create working directories
    system.mkdir_p(tree.odm_georeferencing)

    # in case a gcp file it's not provided, let's try to generate it using
    # images metadata. Internally calls jhead.
    log.ODM_DEBUG(self.params.gcp_file)
    if not self.params.gcp_file: # and \
            # not io.file_exists(tree.odm_georeferencing_coords):

        log.ODM_WARNING('No coordinates file. '
                        'Generating coordinates file: %s'
                        % tree.odm_georeferencing_coords)

        # odm_georeference definitions
        kwargs = {
            'bin': context.odm_modules_path,
            'imgs': tree.dataset_resize,
            'imgs_list': tree.opensfm_bundle_list,
            'coords': tree.odm_georeferencing_coords,
            'log': tree.odm_georeferencing_utm_log,
            'verbose': verbose
        }

        # run UTM extraction binary
        extract_utm = system.run_and_return('{bin}/odm_extract_utm -imagesPath {imgs}/ '
                                            '-imageListFile {imgs_list} -outputCoordFile {coords} {verbose} '
                                            '-logFile {log}'.format(**kwargs))

        # a non-empty return value means the extractor reported an error
        if extract_utm != '':
            log.ODM_WARNING('Could not generate coordinates file. '
                            'Ignore if there is a GCP file. Error: %s'
                            % extract_utm)

    # check if we rerun cell or not
    rerun_cell = (args.rerun is not None and
                  args.rerun == 'odm_georeferencing') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and
                  'odm_georeferencing' in args.rerun_from)

    if not io.file_exists(tree.odm_georeferencing_model_obj_geo) or \
       not io.file_exists(tree.odm_georeferencing_model_ply_geo) or rerun_cell:

        # odm_georeference definitions
        kwargs = {
            'bin': context.odm_modules_path,
            'bundle': tree.opensfm_bundle,
            'imgs': tree.dataset_resize,
            'imgs_list': tree.opensfm_bundle_list,
            'model': tree.odm_textured_model_obj,
            'log': tree.odm_georeferencing_log,
            'coords': tree.odm_georeferencing_coords,
            'pc_geo': tree.odm_georeferencing_model_ply_geo,
            'geo_sys': tree.odm_georeferencing_model_txt_geo,
            'model_geo': tree.odm_georeferencing_model_obj_geo,
            'size': self.params.img_size,
            'gcp': gcpfile,
            'verbose': verbose
        }

        # pick the dense point cloud to georeference (OpenSfM or PMVS)
        if args.use_opensfm_pointcloud:
            kwargs['pc'] = tree.opensfm_model
        else:
            kwargs['pc'] = tree.pmvs_model

        # Check to see if the GCP file exists
        if not self.params.use_exif and (self.params.gcp_file or
                                         find('gcp_list.txt', tree.root_path)):
            log.ODM_INFO('Found %s' % gcpfile)
            try:
                system.run('{bin}/odm_georef -bundleFile {bundle} -imagesPath {imgs} -imagesListPath {imgs_list} '
                           '-bundleResizedTo {size} -inputFile {model} -outputFile {model_geo} '
                           '-inputPointCloudFile {pc} -outputPointCloudFile {pc_geo} {verbose} '
                           '-logFile {log} -georefFileOutputPath {geo_sys} -gcpFile {gcp} '
                           '-outputCoordFile {coords}'.format(**kwargs))
            except Exception:
                log.ODM_EXCEPTION('Georeferencing failed. ')
                return ecto.QUIT
        elif io.file_exists(tree.odm_georeferencing_coords):
            log.ODM_INFO('Running georeferencing with generated coords file.')
            system.run('{bin}/odm_georef -bundleFile {bundle} -inputCoordFile {coords} '
                       '-inputFile {model} -outputFile {model_geo} '
                       '-inputPointCloudFile {pc} -outputPointCloudFile {pc_geo} {verbose} '
                       '-logFile {log} -georefFileOutputPath {geo_sys}'.format(**kwargs))
        else:
            log.ODM_WARNING('Georeferencing failed. Make sure your '
                            'photos have geotags in the EXIF or you have '
                            'provided a GCP file. ')
            geocreated = False # skip the rest of the georeferencing

        if geocreated:
            # update images metadata
            geo_ref = types.ODM_GeoRef()
            geo_ref.parse_coordinate_system(tree.odm_georeferencing_coords)

            for idx, photo in enumerate(self.inputs.photos):
                geo_ref.utm_to_latlon(tree.odm_georeferencing_latlon, photo, idx)

            # convert ply model to LAS reference system
            geo_ref.convert_to_las(tree.odm_georeferencing_model_ply_geo,
                                   tree.odm_georeferencing_pdal)

            # XYZ point cloud output
            log.ODM_INFO("Creating geo-referenced CSV file (XYZ format, can be used with GRASS to create DEM)")
            with open(tree.odm_georeferencing_xyz_file, "wb") as csvfile:
                csvfile_writer = csv.writer(csvfile, delimiter=",")
                reachedpoints = False
                with open(tree.odm_georeferencing_model_ply_geo) as f:
                    for lineNumber, line in enumerate(f):
                        # vertex records follow the PLY "end_header" line
                        if reachedpoints:
                            tokens = line.split(" ")
                            # add back the UTM offsets subtracted by odm_georef
                            csv_line = [float(tokens[0])+geo_ref.utm_east_offset,
                                        float(tokens[1])+geo_ref.utm_north_offset,
                                        tokens[2]]
                            csvfile_writer.writerow(csv_line)
                        if line.startswith("end_header"):
                            reachedpoints = True
            # NOTE(review): redundant -- the with-statement above already
            # closed the file; close() on a closed file is a harmless no-op.
            csvfile.close()
    else:
        log.ODM_WARNING('Found a valid georeferenced model in: %s'
                        % tree.odm_georeferencing_model_ply_geo)

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'Georeferencing')

    log.ODM_INFO('Running ODM Georeferencing Cell - Finished')
    return ecto.OK if args.end_with != 'odm_georeferencing' else ecto.QUIT
def process(self, args, outputs):
    """Run the MVE dense reconstruction pipeline (makescene, dmrecon,
    scene2pset) on the OpenSfM output.

    Skips everything when tree.mve_model already exists and this is not
    a rerun. dmrecon is retried up to 10 times to work around a
    suspected race condition in MVE (see TODO below).
    """
    # get inputs
    tree = outputs['tree']
    reconstruction = outputs['reconstruction']
    photos = reconstruction.photos

    if not photos:
        log.ODM_ERROR('Not enough photos in photos array to start MVE')
        exit(1)

    # check if reconstruction was done before
    if not io.file_exists(tree.mve_model) or self.rerun():
        # cleanup if a rerun
        if io.dir_exists(tree.mve_path) and self.rerun():
            shutil.rmtree(tree.mve_path)

        # make bundle directory
        if not io.file_exists(tree.mve_bundle):
            system.mkdir_p(tree.mve_path)
            system.mkdir_p(io.join_paths(tree.mve_path, 'bundle'))
            octx = OSFMContext(tree.opensfm)
            octx.save_absolute_image_list_to(tree.mve_image_list)
            io.copy(tree.opensfm_bundle, tree.mve_bundle)

        # mve makescene wants the output directory
        # to not exists before executing it (otherwise it
        # will prompt the user for confirmation)
        if io.dir_exists(tree.mve):
            shutil.rmtree(tree.mve)

        # run mve makescene
        if not io.dir_exists(tree.mve_views):
            system.run('%s %s %s' % (context.makescene_path, tree.mve_path, tree.mve),
                       env_vars={'OMP_NUM_THREADS': args.max_concurrency})

        self.update_progress(10)

        # Compute mve output scale based on depthmap_resolution
        # (scale s halves each image dimension s times, i.e. divides the
        # pixel count by 4**s, hence the log base 4 below)
        max_width = 0
        max_height = 0
        for photo in photos:
            max_width = max(photo.width, max_width)
            max_height = max(photo.height, max_height)

        max_pixels = args.depthmap_resolution * args.depthmap_resolution
        if max_width * max_height <= max_pixels:
            mve_output_scale = 0
        else:
            ratio = float(max_width * max_height) / float(max_pixels)
            mve_output_scale = int(math.ceil(math.log(ratio) / math.log(4.0)))

        dmrecon_config = [
            "-s%s" % mve_output_scale,
            "--progress=silent",
            "--local-neighbors=2",
        ]

        # Run MVE's dmrecon
        # (the block below only prints a decorative ASCII banner)
        log.ODM_INFO('                                                                      ')
        log.ODM_INFO('                                    ,*/**                             ')
        log.ODM_INFO('                                  ,*@%*/@%*                           ')
        log.ODM_INFO('                                ,/@%******@&*.                        ')
        log.ODM_INFO('                              ,*@&*********/@&*                       ')
        log.ODM_INFO('                            ,*@&**************@&*                     ')
        log.ODM_INFO('                          ,/@&******************@&*.                  ')
        log.ODM_INFO('                        ,*@&*********************/@&*                 ')
        log.ODM_INFO('                      ,*@&**************************@&*.              ')
        log.ODM_INFO('                    ,/@&******************************&&*,            ')
        log.ODM_INFO('                  ,*&&**********************************@&*.          ')
        log.ODM_INFO('                ,*@&**************************************@&*.        ')
        log.ODM_INFO('              ,*@&***************#@@@@@@@@@%****************&&*,      ')
        log.ODM_INFO('            .*&&***************&@@@@@@@@@@@@@@****************@@*.    ')
        log.ODM_INFO('          .*@&***************&@@@@@@@@@@@@@@@@@%****(@@%********@@*.  ')
        log.ODM_INFO('        .*@@***************%@@@@@@@@@@@@@@@@@@@@@#****&@@@@%******&@*,')
        log.ODM_INFO('      .*&@****************@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@/*****@@*.')
        log.ODM_INFO('    .*@@****************@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@%*************@@*.')
        log.ODM_INFO('  .*@@****/***********@@@@@&**(@@@@@@@@@@@@@@@@@@@@@@@#*****************%@*,')
        log.ODM_INFO(' */@*******@*******#@@@@%*******/@@@@@@@@@@@@@@@@@@@@********************/@(,')
        log.ODM_INFO(' ,*@(********&@@@@@@#**************/@@@@@@@#**(@@&/**********************@&* ')
        log.ODM_INFO('   *#@/*******************************@@@@@***&@&**********************&@*,  ')
        log.ODM_INFO('     *#@#******************************&@@@***@#*********************&@*,    ')
        log.ODM_INFO('       */@#*****************************@@@************************@@*.      ')
        log.ODM_INFO('         *#@/***************************/@@/*********************%@*,        ')
        log.ODM_INFO('           *#@#**************************#@@%******************%@*,          ')
        log.ODM_INFO('             */@#*************************(@@@@@@@&%/********&@*.            ')
        log.ODM_INFO('               *(@(*********************************/%@@%**%@*,              ')
        log.ODM_INFO('                 *(@%************************************%@**                ')
        log.ODM_INFO('                   **@%********************************&@*,                  ')
        log.ODM_INFO('                     *(@(****************************%@/*                    ')
        log.ODM_INFO('                       ,(@%************************#@/*                      ')
        log.ODM_INFO('                         ,*@%********************&@/,                        ')
        log.ODM_INFO('                           */@#****************#@/*                          ')
        log.ODM_INFO('                             ,/@&************#@/*                            ')
        log.ODM_INFO('                               ,*@&********%@/,                              ')
        log.ODM_INFO('                                 */@#****(@/*                                ')
        log.ODM_INFO('                                   ,/@@@@(*                                  ')
        log.ODM_INFO('                                     .**,                                    ')
        log.ODM_INFO('')
        log.ODM_INFO("Running dense reconstruction. This might take a while. Please be patient, the process is not dead or hung.")
        log.ODM_INFO("	  Process is running")

        # TODO: find out why MVE is crashing at random
        # MVE *seems* to have a race condition, triggered randomly, regardless of dataset
        # https://gist.github.com/pierotofy/6c9ce93194ba510b61e42e3698cfbb89
        # Temporary workaround is to retry the reconstruction until we get it right
        # (up to a certain number of retries).
        retry_count = 1
        while retry_count < 10:
            try:
                system.run('%s %s %s' % (context.dmrecon_path,
                                         ' '.join(dmrecon_config),
                                         tree.mve),
                           env_vars={'OMP_NUM_THREADS': args.max_concurrency})
                break
            except Exception as e:
                # 134 == SIGABRT; 1 == generic failure -- both observed from
                # the suspected race, so retry; anything else is fatal.
                if str(e) == "Child returned 134" or str(e) == "Child returned 1":
                    retry_count += 1
                    log.ODM_WARNING("Caught error code, retrying attempt #%s" % retry_count)
                else:
                    raise e

        self.update_progress(90)

        scene2pset_config = ["-F%s" % mve_output_scale]

        # run scene2pset
        system.run('%s %s "%s" "%s"' % (context.scene2pset_path,
                                        ' '.join(scene2pset_config),
                                        tree.mve,
                                        tree.mve_model),
                   env_vars={'OMP_NUM_THREADS': args.max_concurrency})
    else:
        log.ODM_WARNING('Found a valid MVE reconstruction file in: %s' %
                        tree.mve_model)
def process(args, current_path, max_concurrency, reconstruction):
    """Render and georeference the orthophoto for one (sub)model.

    Runs the odm_orthophoto binary over the textured model(s), then --
    when UTM offsets are available -- georeferences the render with
    gdal_translate, computes a cutline, and produces cut/feathered
    variants. Falls back to pseudo-georeferencing otherwise. Always
    finishes by generating a PNG preview.

    :param args: parsed pipeline arguments
    :param current_path: root directory of this (sub)model
    :param max_concurrency: worker count passed to cutline computation
    :param reconstruction: reconstruction object (georef, multi_camera, gcp)
    """
    #args = vars(args)
    orthophoto_cutline = True
    odm_orthophoto = io.join_paths(current_path, 'orthophoto')
    odm_orthophoto_path = odm_orthophoto
    odm_orthophoto_render = io.join_paths(odm_orthophoto_path, 'odm_orthophoto_render.tif')
    odm_orthophoto_tif = io.join_paths(odm_orthophoto_path, 'odm_orthophoto.tif')
    odm_orthophoto_corners = io.join_paths(odm_orthophoto_path, 'odm_orthophoto_corners.tif')
    # NOTE(review): a '.tif' extension for a log file looks like a typo --
    # presumably 'odm_orthophoto_log.txt'; confirm before changing, the
    # binary only receives it via -logFile.
    odm_orthophoto_log = io.join_paths(odm_orthophoto_path, 'odm_orthophoto_log.tif')
    odm_orthophoto_tif_log = io.join_paths(odm_orthophoto_path, 'gdal_translate_log.txt')
    odm_25dgeoreferencing = io.join_paths(current_path, 'odm_georeferencing')
    odm_georeferencing = io.join_paths(current_path, 'odm_georeferencing')
    odm_georeferencing_coords = io.join_paths(odm_georeferencing, 'coords.txt')
    odm_georeferencing_gcp = io.find('gcp_list.txt', current_path)
    odm_georeferencing_gcp_utm = io.join_paths(odm_georeferencing, 'gcp_list_utm.txt')
    odm_georeferencing_utm_log = io.join_paths(odm_georeferencing,
                                               'odm_georeferencing_utm_log.txt')
    odm_georeferencing_log = 'odm_georeferencing_log.txt'
    odm_georeferencing_transform_file = 'odm_georeferencing_transform.txt'
    odm_georeferencing_proj = 'proj.txt'
    odm_georeferencing_model_txt_geo = 'odm_georeferencing_model_geo.txt'
    odm_georeferencing_model_obj_geo = 'odm_textured_model_geo.obj'
    odm_georeferencing_xyz_file = io.join_paths(odm_georeferencing,
                                                'odm_georeferenced_model.csv')
    odm_georeferencing_las_json = io.join_paths(odm_georeferencing, 'las.json')
    odm_georeferencing_model_laz = io.join_paths(odm_georeferencing,
                                                 'odm_georeferenced_model.laz')
    odm_georeferencing_model_las = io.join_paths(odm_georeferencing,
                                                 'odm_georeferenced_model.las')
    odm_georeferencing_dem = io.join_paths(odm_georeferencing,
                                           'odm_georeferencing_model_dem.tif')
    opensfm_reconstruction = io.join_paths(current_path, 'reconstruction.json')
    odm_texturing = io.join_paths(current_path, 'mvs')
    odm_textured_model_obj = io.join_paths(odm_texturing, 'odm_textured_model.obj')
    images_dir = io.join_paths(current_path, 'images')
    reconstruction = reconstruction
    verbose = '' #"-verbose"

    # define paths and create working directories
    system.mkdir_p(odm_orthophoto)

    if not io.file_exists(odm_orthophoto_tif):
        gsd_error_estimate = 0.1
        ignore_resolution = False
        if not reconstruction.is_georeferenced():
            # Match DEMs
            gsd_error_estimate = -3
            ignore_resolution = True

        # resolution is expressed in pixels per meter for odm_orthophoto
        orthophoto_resolution = 5
        resolution = 1.0 / (gsd.cap_resolution(orthophoto_resolution,
                                               opensfm_reconstruction,
                                               gsd_error_estimate=gsd_error_estimate,
                                               ignore_gsd=True,
                                               ignore_resolution=ignore_resolution,
                                               has_gcp=reconstruction.has_gcp()) / 100.0)

        # odm_orthophoto definitions
        kwargs = {
            'bin': context.odm_modules_path,
            'log': odm_orthophoto_log,
            'ortho': odm_orthophoto_render,
            'corners': odm_orthophoto_corners,
            'res': resolution,
            'bands': '',
            'verbose': verbose
        }

        # Check if the georef object is initialized
        # (during a --rerun this might not be)
        # TODO: this should be moved to a more central location?
        if reconstruction.is_georeferenced() and not reconstruction.georef.valid_utm_offsets():
            georeferencing_dir = odm_georeferencing #if args.use_3dmesh and not args.skip_3dmodel else odm_25dgeoreferencing
            odm_georeferencing_model_txt_geo_file = os.path.join(georeferencing_dir,
                                                                 odm_georeferencing_model_txt_geo)

            if io.file_exists(odm_georeferencing_model_txt_geo_file):
                reconstruction.georef.extract_offsets(odm_georeferencing_model_txt_geo_file)
            else:
                log.ODM_WARNING('Cannot read UTM offset from {}.'.format(
                    odm_georeferencing_model_txt_geo_file))

        # collect the textured model(s) to render (one per band when
        # a multi-camera rig is present)
        models = []
        base_dir = odm_texturing

        if reconstruction.is_georeferenced():
            model_file = odm_georeferencing_model_obj_geo
        else:
            model_file = odm_textured_model_obj

        if reconstruction.multi_camera:
            for band in reconstruction.multi_camera:
                primary = band == reconstruction.multi_camera[0]
                subdir = ""
                if not primary:
                    subdir = band['name'].lower()
                models.append(os.path.join(base_dir, subdir, model_file))
            kwargs['bands'] = '-bands %s' % (','.join([
                quote(b['name'].lower())
                for b in reconstruction.multi_camera
            ]))
        else:
            models.append(os.path.join(base_dir, model_file))

        kwargs['models'] = ','.join(map(quote, models))

        # run odm_orthophoto
        system.run('{bin}/odm_orthophoto -inputFiles {models} '
                   '-logFile {log} -outputFile {ortho} -resolution {res} {verbose} '
                   '-outputCornerFile {corners} {bands}'.format(**kwargs))

        # Create georeferenced GeoTiff
        geotiffcreated = False

        if reconstruction.is_georeferenced() and reconstruction.georef.valid_utm_offsets():
            ulx = uly = lrx = lry = 0.0
            # corner coordinates are on the first line: ulx lry lrx uly,
            # relative to the UTM offsets (added back below)
            with open(odm_orthophoto_corners) as f:
                for lineNumber, line in enumerate(f):
                    if lineNumber == 0:
                        tokens = line.split(' ')
                        if len(tokens) == 4:
                            ulx = float(tokens[0]) + \
                                float(reconstruction.georef.utm_east_offset)
                            lry = float(tokens[1]) + \
                                float(reconstruction.georef.utm_north_offset)
                            lrx = float(tokens[2]) + \
                                float(reconstruction.georef.utm_east_offset)
                            uly = float(tokens[3]) + \
                                float(reconstruction.georef.utm_north_offset)

            log.ODM_INFO('Creating GeoTIFF')

            orthophoto_vars = orthophoto.get_orthophoto_vars(args)

            kwargs = {
                'ulx': ulx,
                'uly': uly,
                'lrx': lrx,
                'lry': lry,
                'vars': ' '.join(['-co %s=%s' % (k, orthophoto_vars[k])
                                  for k in orthophoto_vars]),
                'proj': reconstruction.georef.proj4(),
                'input': odm_orthophoto_render,
                'output': odm_orthophoto_tif,
                'log': odm_orthophoto_tif_log,
                'max_memory': get_max_memory(),
            }

            system.run('gdal_translate -a_ullr {ulx} {uly} {lrx} {lry} '
                       '{vars} '
                       '-a_srs \"{proj}\" '
                       '--config GDAL_CACHEMAX {max_memory}% '
                       '--config GDAL_TIFF_INTERNAL_MASK YES '
                       '{input} {output} > {log}'.format(**kwargs))

            bounds_file_path = os.path.join(odm_georeferencing,
                                            'odm_georeferenced_model.bounds.gpkg')

            # Cutline computation, before cropping
            # We want to use the full orthophoto, not the cropped one.
            pio = True
            if pio:
                cutline_file = os.path.join(odm_orthophoto, "cutline.gpkg")

                compute_cutline(odm_orthophoto_tif,
                                bounds_file_path,
                                cutline_file,
                                max_concurrency,
                                tmpdir=os.path.join(odm_orthophoto,
                                                    "grass_cutline_tmpdir"),
                                scale=0.25)

                orthophoto.compute_mask_raster(odm_orthophoto_tif, cutline_file,
                                               os.path.join(odm_orthophoto,
                                                            "odm_orthophoto_cut.tif"),
                                               blend_distance=20,
                                               only_max_coords_feature=True)

            orthophoto.post_orthophoto_steps(args, bounds_file_path, odm_orthophoto_tif)

            # Generate feathered orthophoto also
            if pio:
                orthophoto.feather_raster(odm_orthophoto_tif,
                                          os.path.join(odm_orthophoto,
                                                       "odm_orthophoto_feathered.tif"),
                                          blend_distance=20)

            geotiffcreated = True
        if not geotiffcreated:
            # no UTM info: attach a pseudo-georeference so downstream
            # tooling can still open the file
            if io.file_exists(odm_orthophoto_render):
                pseudogeo.add_pseudo_georeferencing(odm_orthophoto_render)
                log.ODM_INFO("Renaming %s --> %s" % (odm_orthophoto_render,
                                                     odm_orthophoto_tif))
                os.rename(odm_orthophoto_render, odm_orthophoto_tif)
            else:
                log.ODM_WARNING("Could not generate an orthophoto (it did not render)")
    else:
        log.ODM_WARNING('Found a valid orthophoto in: %s' % odm_orthophoto_tif)

    #generate png
    orthophoto.generate_png(odm_orthophoto_tif)
def convert_gcp(gcp_dir, utm_zone, hemisphere):
    '''
    Convert MicMac GCP TXT files to MicMac GCP XML format

    :param gcp_dir: string path to the directory containing the GCP text
        files; the generated images.xml and ground.xml are written here too
    :param utm_zone: UTM zone number used to build the datum string
    :param hemisphere: hemisphere letter appended to the UTM zone
    :return:

    Expects files to be named, DroneMapperGCP_2D.txt and DroneMapperGCP_3D.txt
    or ODM format: gcp_list.txt

    DroneMapperGCP_2D.txt format (single space delimiter):
    GCP IMAGENAME PIXELX PIXELY

    DroneMapperGCP_3D.txt format (single space delimiter):
    GCP UTMX UTMY Z PRECISION X/Y PRECISIONZ
    '''
    from opendm import gcp

    log.MM_INFO('Converting GCP.')

    # Locate the 2D/3D GCP text files. An ODM-format gcp_list.txt is
    # first converted into MicMac 2d/3d text files via make_micmac_copy.
    # (avoid `file` as a name -- it shadows the Python 2 builtin)
    gcp_files = os.listdir(gcp_dir)
    for fname in gcp_files:
        if '3d' in fname.lower():
            gcp_3d_file = fname
        if '2d' in fname.lower():
            gcp_2d_file = fname
        if 'gcp_list' in fname.lower():
            gcp_file = gcp.GCPFile(os.path.join(gcp_dir, fname))
            gcp_file.make_micmac_copy(gcp_dir,
                                      utm_zone='WGS84 UTM {}{}'.format(utm_zone, hemisphere))
            gcp_2d_file = '2d_gcp.txt'
            gcp_3d_file = '3d_gcp.txt'

    # MicMac GCP 2D - target locations in images
    # GCPNAME IMAGE PIXELX PIXELY
    MM2D = namedtuple('MM2D', ['gcp', 'img', 'px', 'py'])
    with open(io.join_paths(gcp_dir, gcp_2d_file), 'r') as f2d_txt:
        lines = (l.split() for l in f2d_txt.readlines())
        images = [MM2D(gcp=l[0].strip(), img=l[1].strip(),
                       px=l[2].strip(), py=l[3].strip()) for l in lines]

    # BUGFIX: was io.join_paths(image_dir, ...), but `image_dir` is not
    # defined in this function (NameError); the XML belongs next to the
    # GCP files in gcp_dir.
    with open(io.join_paths(gcp_dir, 'images.xml'), 'wb') as images_xml:
        images_xml.write('<?xml version="1.0"?>\n')
        images_xml.write('<SetOfMesureAppuisFlottants>\n')
        for image in images:
            log.MM_INFO('GCP in image {}'.format(image))
            # BUGFIX: renamed the loop-local from `gcp` to `gcp_name` so it
            # no longer shadows the `opendm.gcp` module imported above.
            gcp_name = image[0]
            img = image[1]
            px = image[2]
            py = image[3]
            images_xml.write('\t<MesureAppuiFlottant1Im>\n')
            name_im = '\t\t<NameIm> {} </NameIm>\n'.format(img)
            images_xml.write(name_im)
            images_xml.write('\t\t<OneMesureAF1I>\n')
            name_pt = '\t\t\t<NamePt> {} </NamePt>\n'.format(gcp_name)
            images_xml.write(name_pt)
            pt_im = '\t\t\t<PtIm> {} {} </PtIm>\n'.format(px, py)
            images_xml.write(pt_im)
            images_xml.write('\t\t</OneMesureAF1I>\n')
            images_xml.write('\t</MesureAppuiFlottant1Im>\n')
        images_xml.write('</SetOfMesureAppuisFlottants>\n')

    # MicMac GCP 3D - real world target position on ground (UTM)
    # GCPNAME UTMX UTMY Z PRECISIONXY PRECISIONZ
    MM3D = namedtuple('MM3D', ['gcp', 'x', 'y', 'z', 'pxy', 'pz'])
    with open(io.join_paths(gcp_dir, gcp_3d_file), 'r') as f3d_txt:
        lines = (l.split() for l in f3d_txt.readlines())
        coords = [MM3D(gcp=l[0].strip(), x=l[1].strip(), y=l[2].strip(),
                       z=l[3].strip(), pxy=l[4].strip(), pz=l[5].strip())
                  for l in lines]

    # BUGFIX: same image_dir -> gcp_dir fix as above.
    with open(io.join_paths(gcp_dir, 'ground.xml'), 'wb') as ground_xml:
        ground_xml.write('<?xml version="1.0"?>\n')
        ground_xml.write('<Global>\n')
        ground_xml.write('\t<DicoAppuisFlottant>\n')
        for c in coords:
            log.MM_INFO('GCP on ground {}'.format(c))
            gcp_name = c[0]
            x = c[1]
            y = c[2]
            z = c[3]
            pxy = c[4]
            pz = c[5]
            ground_xml.write('\t\t<OneAppuisDAF>\n')
            pt = '\t\t\t<Pt> {} {} {} </Pt>\n'.format(x, y, z)
            ground_xml.write(pt)
            name_pt = '\t\t\t<NamePt> {} </NamePt>\n'.format(gcp_name)
            ground_xml.write(name_pt)
            precision = '\t\t\t<Incertitude> {} {} {} </Incertitude>\n'.format(
                pxy, pxy, pz)
            ground_xml.write(precision)
            ground_xml.write('\t\t</OneAppuisDAF>\n')
        ground_xml.write('\t</DicoAppuisFlottant>\n')
        ground_xml.write('</Global>\n')
def process(self, inputs, outputs):
    """Georeference the textured model and point cloud (ecto cell).

    Generates a UTM coordinates file from image EXIF when no GCP file is
    provided, runs the odm_georef binary (with GCPs or with the generated
    coordinates), converts the georeferenced PLY to LAS, and dumps an XYZ
    CSV of the point cloud. Returns ecto.OK, or ecto.QUIT on failure or
    when this is the final requested stage.
    """
    # find a file in the root directory (only checks the first os.walk level)
    def find(file, dir):
        for root, dirs, files in os.walk(dir):
            return '/'.join((root, file)) if file in files else None

    # Benchmarking
    start_time = system.now_raw()
    log.ODM_INFO('Running ODM Georeferencing Cell')

    # get inputs
    args = self.inputs.args
    tree = self.inputs.tree
    gcpfile = io.join_paths(tree.root_path, self.params.gcp_file) \
        if self.params.gcp_file else find('gcp_list.txt', tree.root_path)
    geocreated = True
    verbose = '-verbose' if self.params.verbose else ''

    # define paths and create working directories
    system.mkdir_p(tree.odm_georeferencing)

    # in case a gcp file it's not provided, let's try to generate it using
    # images metadata. Internally calls jhead.
    log.ODM_DEBUG(self.params.gcp_file)
    if not self.params.gcp_file:  # and \
        # not io.file_exists(tree.odm_georeferencing_coords):

        log.ODM_WARNING('No coordinates file. '
                        'Generating coordinates file: %s'
                        % tree.odm_georeferencing_coords)

        # odm_georeference definitions
        kwargs = {
            'bin': context.odm_modules_path,
            'imgs': tree.dataset_resize,
            'imgs_list': tree.opensfm_bundle_list,
            'coords': tree.odm_georeferencing_coords,
            'log': tree.odm_georeferencing_utm_log,
            'verbose': verbose
        }

        # run UTM extraction binary; a non-empty return string means failure
        extract_utm = system.run_and_return(
            '{bin}/odm_extract_utm -imagesPath {imgs}/ '
            '-imageListFile {imgs_list} -outputCoordFile {coords} {verbose} '
            '-logFile {log}'.format(**kwargs))

        if extract_utm != '':
            log.ODM_WARNING('Could not generate coordinates file. '
                            'Ignore if there is a GCP file. Error: %s'
                            % extract_utm)

    # check if we rerun cell or not
    rerun_cell = (args.rerun is not None and
                  args.rerun == 'odm_georeferencing') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and
                  'odm_georeferencing' in args.rerun_from)

    if not io.file_exists(tree.odm_georeferencing_model_obj_geo) or \
       not io.file_exists(tree.odm_georeferencing_model_ply_geo) or rerun_cell:

        # odm_georeference definitions
        kwargs = {
            'bin': context.odm_modules_path,
            'bundle': tree.opensfm_bundle,
            'imgs': tree.dataset_resize,
            'imgs_list': tree.opensfm_bundle_list,
            'model': tree.odm_textured_model_obj,
            'log': tree.odm_georeferencing_log,
            'coords': tree.odm_georeferencing_coords,
            'pc_geo': tree.odm_georeferencing_model_ply_geo,
            'geo_sys': tree.odm_georeferencing_model_txt_geo,
            'model_geo': tree.odm_georeferencing_model_obj_geo,
            'size': self.params.img_size,
            'gcp': gcpfile,
            'verbose': verbose
        }

        # dense point cloud source depends on the selected MVS backend
        if not args.use_pmvs:
            kwargs['pc'] = tree.opensfm_model
        else:
            kwargs['pc'] = tree.pmvs_model

        # Check to see if the GCP file exists
        if not self.params.use_exif and (self.params.gcp_file or
                                         find('gcp_list.txt', tree.root_path)):
            log.ODM_INFO('Found %s' % gcpfile)
            try:
                system.run('{bin}/odm_georef -bundleFile {bundle} '
                           '-imagesPath {imgs} -imagesListPath {imgs_list} '
                           '-bundleResizedTo {size} -inputFile {model} '
                           '-outputFile {model_geo} '
                           '-inputPointCloudFile {pc} '
                           '-outputPointCloudFile {pc_geo} {verbose} '
                           '-logFile {log} -georefFileOutputPath {geo_sys} '
                           '-gcpFile {gcp} '
                           '-outputCoordFile {coords}'.format(**kwargs))
            except Exception:
                log.ODM_EXCEPTION('Georeferencing failed. ')
                return ecto.QUIT
        elif io.file_exists(tree.odm_georeferencing_coords):
            log.ODM_INFO('Running georeferencing with generated coords file.')
            system.run('{bin}/odm_georef -bundleFile {bundle} '
                       '-inputCoordFile {coords} '
                       '-inputFile {model} -outputFile {model_geo} '
                       '-inputPointCloudFile {pc} '
                       '-outputPointCloudFile {pc_geo} {verbose} '
                       '-logFile {log} '
                       '-georefFileOutputPath {geo_sys}'.format(**kwargs))
        else:
            log.ODM_WARNING('Georeferencing failed. Make sure your '
                            'photos have geotags in the EXIF or you have '
                            'provided a GCP file. ')
            geocreated = False  # skip the rest of the georeferencing

        if geocreated:
            # update images metadata
            geo_ref = types.ODM_GeoRef()
            geo_ref.parse_coordinate_system(tree.odm_georeferencing_coords)

            for idx, photo in enumerate(self.inputs.photos):
                geo_ref.utm_to_latlon(tree.odm_georeferencing_latlon,
                                      photo, idx)

            # convert ply model to LAS reference system
            geo_ref.convert_to_las(tree.odm_georeferencing_model_ply_geo,
                                   tree.odm_georeferencing_pdal)

            # XYZ point cloud output
            log.ODM_INFO("Creating geo-referenced CSV file (XYZ format, "
                         "can be used with GRASS to create DEM)")
            with open(tree.odm_georeferencing_xyz_file, "wb") as csvfile:
                csvfile_writer = csv.writer(csvfile, delimiter=",")
                reachedpoints = False
                with open(tree.odm_georeferencing_model_ply_geo) as f:
                    # stream the ASCII PLY: vertex rows start after the
                    # "end_header" line; offsets re-add the UTM origin
                    for lineNumber, line in enumerate(f):
                        if reachedpoints:
                            tokens = line.split(" ")
                            csv_line = [
                                float(tokens[0]) + geo_ref.utm_east_offset,
                                float(tokens[1]) + geo_ref.utm_north_offset,
                                tokens[2]
                            ]
                            csvfile_writer.writerow(csv_line)
                        if line.startswith("end_header"):
                            reachedpoints = True
            # NOTE(review): redundant — the with-block already closed the file
            csvfile.close()

    else:
        log.ODM_WARNING('Found a valid georeferenced model in: %s'
                        % tree.odm_georeferencing_model_ply_geo)

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'Georeferencing')

    log.ODM_INFO('Running ODM Georeferencing Cell - Finished')
    return ecto.OK if args.end_with != 'odm_georeferencing' else ecto.QUIT
def process(self, args, outputs):
    """Texture the reconstructed mesh(es) with the MVS-Texturing binary.

    Builds one texturing run for the 3D mesh and (unless --use_3dmesh)
    one for the 2.5D mesh, then invokes mvstex for each run whose output
    OBJ does not already exist (or when rerunning this stage).
    """
    tree = outputs['tree']
    reconstruction = outputs['reconstruction']

    # define paths and create working directories
    system.mkdir_p(tree.odm_texturing)
    if not args.use_3dmesh:
        system.mkdir_p(tree.odm_25dtexturing)

    runs = [{
        'out_dir': tree.odm_texturing,
        'model': tree.odm_mesh,
        'nadir': False
    }]

    if args.skip_3dmodel:
        runs = []

    if not args.use_3dmesh:
        runs += [{
            'out_dir': tree.odm_25dtexturing,
            'model': tree.odm_25dmesh,
            # 2.5D mesh is textured in nadir mode
            'nadir': True
        }]

    for r in runs:
        odm_textured_model_obj = os.path.join(r['out_dir'],
                                              tree.odm_textured_model_obj)

        if not io.file_exists(odm_textured_model_obj) or self.rerun():
            log.ODM_DEBUG('Writing MVS Textured file in: %s'
                          % odm_textured_model_obj)

            # Format arguments to fit Mvs-Texturing app
            # (each flag is passed as an empty string when disabled)
            skipGeometricVisibilityTest = ""
            skipGlobalSeamLeveling = ""
            skipLocalSeamLeveling = ""
            skipHoleFilling = ""
            keepUnseenFaces = ""
            nadir = ""

            if (self.params.get('skip_vis_test')):
                skipGeometricVisibilityTest = "--skip_geometric_visibility_test"
            if (self.params.get('skip_glob_seam_leveling')):
                skipGlobalSeamLeveling = "--skip_global_seam_leveling"
            if (self.params.get('skip_loc_seam_leveling')):
                skipLocalSeamLeveling = "--skip_local_seam_leveling"
            if (self.params.get('skip_hole_fill')):
                skipHoleFilling = "--skip_hole_filling"
            if (self.params.get('keep_unseen_faces')):
                keepUnseenFaces = "--keep_unseen_faces"
            if (r['nadir']):
                nadir = '--nadir_mode'

            # mvstex definitions
            kwargs = {
                'bin': context.mvstex_path,
                'out_dir': io.join_paths(r['out_dir'], "odm_textured_model"),
                'model': r['model'],
                'dataTerm': self.params.get('data_term'),
                'outlierRemovalType': self.params.get('outlier_rem_type'),
                'skipGeometricVisibilityTest': skipGeometricVisibilityTest,
                'skipGlobalSeamLeveling': skipGlobalSeamLeveling,
                'skipLocalSeamLeveling': skipLocalSeamLeveling,
                'skipHoleFilling': skipHoleFilling,
                'keepUnseenFaces': keepUnseenFaces,
                'toneMapping': self.params.get('tone_mapping'),
                'nadirMode': nadir,
                # mvstex expects 2^w - 1 for the nadir weighting term
                'nadirWeight': 2**args.texturing_nadir_weight - 1,
                'nvm_file': io.join_paths(tree.opensfm, "reconstruction.nvm")
            }

            # Make sure tmp directory is empty
            mvs_tmp_dir = os.path.join(r['out_dir'], 'tmp')
            if io.dir_exists(mvs_tmp_dir):
                log.ODM_INFO(
                    "Removing old tmp directory {}".format(mvs_tmp_dir))
                shutil.rmtree(mvs_tmp_dir)

            # run texturing binary
            system.run('{bin} {nvm_file} {model} {out_dir} '
                       '-d {dataTerm} -o {outlierRemovalType} '
                       '-t {toneMapping} '
                       '{skipGeometricVisibilityTest} '
                       '{skipGlobalSeamLeveling} '
                       '{skipLocalSeamLeveling} '
                       '{skipHoleFilling} '
                       '{keepUnseenFaces} '
                       '{nadirMode} '
                       '-n {nadirWeight}'.format(**kwargs))

            self.update_progress(50)
        else:
            log.ODM_WARNING('Found a valid ODM Texture file in: %s'
                            % odm_textured_model_obj)
def process(self, inputs, outputs):
    """Resize all input photos so the largest side equals resize_to (ecto cell).

    Resizes with OpenCV, copies EXIF metadata to the resized file with
    pyexiv2 (updating the pixel-dimension tags), and updates each photo
    object's path, width, height and focal. Returns ecto.OK, or ecto.QUIT
    on bad inputs or when this is the final requested stage.
    """
    # Benchmarking
    start_time = system.now_raw()
    log.ODM_INFO('Running ODM Resize Cell')

    # get inputs
    args = self.inputs.args
    tree = self.inputs.tree
    photos = self.inputs.photos

    if not photos:
        log.ODM_ERROR('Not enough photos in photos to resize')
        return ecto.QUIT

    if self.params.resize_to <= 0:
        log.ODM_ERROR('Resize parameter must be greater than 0')
        return ecto.QUIT

    # create working directory
    system.mkdir_p(tree.dataset_resize)

    log.ODM_DEBUG('Resizing dataset to: %s' % tree.dataset_resize)

    # check if we rerun cell or not
    rerun_cell = (args.rerun is not None and
                  args.rerun == 'resize') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and
                  'resize' in args.rerun_from)

    # loop over photos
    for photo in photos:
        # define image paths
        path_file = photo.path_file
        new_path_file = io.join_paths(tree.dataset_resize, photo.filename)

        # set raw image path in case we want to rerun cell
        if io.file_exists(new_path_file) and rerun_cell:
            path_file = io.join_paths(tree.dataset_raw, photo.filename)

        if not io.file_exists(new_path_file) or rerun_cell:
            # open and resize image with opencv
            img = cv2.imread(path_file)
            # compute new size
            max_side = max(img.shape[0], img.shape[1])
            if max_side <= self.params.resize_to:
                log.ODM_WARNING('Resize Parameter is greater than the '
                                'largest side of the image')
            ratio = float(self.params.resize_to) / float(max_side)
            img_r = cv2.resize(img, None, fx=ratio, fy=ratio)

            # write image with opencv
            cv2.imwrite(new_path_file, img_r)

            # read metadata with pyexiv2
            old_meta = pyexiv2.ImageMetadata(path_file)
            new_meta = pyexiv2.ImageMetadata(new_path_file)
            old_meta.read()
            new_meta.read()

            # copy metadata
            old_meta.copy(new_meta)

            # update metadata size
            # BUG FIX: cv2 ndarray shape is (rows, cols) == (height, width);
            # PixelXDimension is the image width -> shape[1]
            new_meta['Exif.Photo.PixelXDimension'] = img_r.shape[1]
            new_meta['Exif.Photo.PixelYDimension'] = img_r.shape[0]
            new_meta.write()

            # update photos array with new values
            # BUG FIX: width/height were swapped (shape[0] is the row count)
            photo.path_file = new_path_file
            photo.width = img_r.shape[1]
            photo.height = img_r.shape[0]
            photo.update_focal()

            # log message
            log.ODM_DEBUG('Resized %s | dimensions: %s' %
                          (photo.filename, img_r.shape))
        else:
            # log message
            log.ODM_WARNING('Already resized %s | dimensions: %s x %s' %
                            (photo.filename, photo.width, photo.height))

    log.ODM_INFO('Resized %s images' % len(photos))

    # append photos to cell output
    self.outputs.photos = photos

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'Resizing')

    log.ODM_INFO('Running ODM Resize Cell - Finished')
    return ecto.OK if args.end_with != 'resize' else ecto.QUIT
def process(self, inputs, outputs):
    """Run SMVS dense reconstruction on the sparse OpenSfM output (ecto cell).

    Prepares an MVE scene from the OpenSfM bundle, runs makescene and the
    smvs binary, renames the newest smvs-*.ply output to the canonical
    model path and filters it. Returns ecto.OK, or ecto.QUIT when photos
    are missing or this is the final requested stage.
    """
    # Benchmarking
    start_time = system.now_raw()
    log.ODM_INFO('Running SMVS Cell')

    # get inputs
    tree = inputs.tree
    args = inputs.args
    reconstruction = inputs.reconstruction
    photos = reconstruction.photos

    if not photos:
        log.ODM_ERROR('Not enough photos in photos array to start SMVS')
        return ecto.QUIT

    # check if we rerun cell or not
    rerun_cell = (args.rerun is not None and
                  args.rerun == 'smvs') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and
                  'smvs' in args.rerun_from)

    # check if reconstruction was done before
    if not io.file_exists(tree.smvs_model) or rerun_cell:
        # cleanup if a rerun
        if io.dir_exists(tree.mve_path) and rerun_cell:
            shutil.rmtree(tree.mve_path)

        # make bundle directory
        if not io.file_exists(tree.mve_bundle):
            system.mkdir_p(tree.mve_path)
            system.mkdir_p(io.join_paths(tree.mve_path, 'bundle'))
            io.copy(tree.opensfm_image_list, tree.mve_image_list)
            io.copy(tree.opensfm_bundle, tree.mve_bundle)

        # mve makescene wants the output directory
        # to not exists before executing it (otherwise it
        # will prompt the user for confirmation)
        if io.dir_exists(tree.smvs):
            shutil.rmtree(tree.smvs)

        # run mve makescene
        if not io.dir_exists(tree.mve_views):
            system.run('%s %s %s' % (context.makescene_path,
                                     tree.mve_path, tree.smvs))

        # config: command line flags for the smvs binary; disabled
        # options contribute an empty string
        config = [
            "-t%s" % self.params.threads,
            "-a%s" % self.params.alpha,
            "--max-pixels=%s" % int(self.params.max_pixels),
            "-o%s" % self.params.output_scale,
            "--debug-lvl=%s" % ('1' if self.params.verbose else '0'),
            "%s" % '-S' if self.params.shading else '',
            "%s" % '-g' if self.params.gamma_srgb and self.params.shading else '',
            "--force" if rerun_cell else ''
        ]

        # run smvs
        system.run('%s %s %s' % (context.smvs_path,
                                 ' '.join(config), tree.smvs))

        # find and rename the output file for simplicity
        smvs_files = glob.glob(os.path.join(tree.smvs, 'smvs-*'))
        smvs_files.sort(key=os.path.getmtime)  # sort by last modified date
        if len(smvs_files) > 0:
            # take the most recently written smvs output
            old_file = smvs_files[-1]
            if not (io.rename_file(old_file, tree.smvs_model)):
                log.ODM_WARNING("File %s does not exist, cannot be renamed. "
                                % old_file)

            # Filter
            point_cloud.filter(tree.smvs_model,
                               standard_deviation=args.pc_filter,
                               verbose=args.verbose)
        else:
            log.ODM_WARNING("Cannot find a valid point cloud (smvs-XX.ply) "
                            "in %s. Check the console output for errors."
                            % tree.smvs)
    else:
        log.ODM_WARNING('Found a valid SMVS reconstruction file in: %s'
                        % tree.smvs_model)

    outputs.reconstruction = reconstruction

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'SMVS')

    log.ODM_INFO('Running ODM SMVS Cell - Finished')
    return ecto.OK if args.end_with != 'smvs' else ecto.QUIT
def process(self, inputs, outputs):
    """Run the OpenSfM structure-from-motion pipeline (ecto cell).

    Writes the image list and config.yaml, runs metadata extraction,
    feature detection/matching, track creation and reconstruction, then
    exports a VisualSFM NVM file and undistorted images for texturing.
    Depending on flags, also exports a sparse PLY (--fast-orthophoto) or
    computes OpenSfM depthmaps (--use-opensfm-dense), converts the result
    to Bundler format, and exports geocoordinates when georeferenced.
    Returns ecto.OK, or ecto.QUIT on missing photos / final stage.
    """
    # Benchmarking
    start_time = system.now_raw()
    log.ODM_INFO('Running ODM OpenSfM Cell')

    # get inputs
    tree = inputs.tree
    args = inputs.args
    reconstruction = inputs.reconstruction
    photos = reconstruction.photos

    if not photos:
        log.ODM_ERROR('Not enough photos in photos array to start OpenSfM')
        return ecto.QUIT

    # create working directories
    system.mkdir_p(tree.opensfm)

    # check if we rerun cell or not
    rerun_cell = (args.rerun is not None and
                  args.rerun == 'opensfm') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and
                  'opensfm' in args.rerun_from)

    # the sentinel output file differs by pipeline mode
    if args.fast_orthophoto:
        output_file = io.join_paths(tree.opensfm, 'reconstruction.ply')
    elif args.use_opensfm_dense:
        output_file = tree.opensfm_model
    else:
        output_file = tree.opensfm_reconstruction

    # check if reconstruction was done before
    if not io.file_exists(output_file) or rerun_cell:
        # create file list; altitude must be present on every photo
        # to enable altitude-based GPS alignment below
        list_path = io.join_paths(tree.opensfm, 'image_list.txt')
        has_alt = True
        with open(list_path, 'w') as fout:
            for photo in photos:
                if not photo.altitude:
                    has_alt = False
                fout.write('%s\n' % io.join_paths(tree.dataset_raw,
                                                  photo.filename))

        # create config file for OpenSfM
        config = [
            "use_exif_size: %s" % ('no' if not self.params.use_exif_size else 'yes'),
            "feature_process_size: %s" % self.params.feature_process_size,
            "feature_min_frames: %s" % self.params.feature_min_frames,
            "processes: %s" % self.params.processes,
            "matching_gps_neighbors: %s" % self.params.matching_gps_neighbors,
            "depthmap_method: %s" % args.opensfm_depthmap_method,
            "depthmap_resolution: %s" % args.depthmap_resolution,
            "depthmap_min_patch_sd: %s" % args.opensfm_depthmap_min_patch_sd,
            "depthmap_min_consistent_views: %s" % args.opensfm_depthmap_min_consistent_views,
            "optimize_camera_parameters: %s" % ('no' if self.params.fixed_camera_params else 'yes')
        ]

        if has_alt:
            log.ODM_DEBUG("Altitude data detected, enabling it for GPS alignment")
            config.append("use_altitude_tag: yes")
            config.append("align_method: naive")
        else:
            config.append("align_method: orientation_prior")
            config.append("align_orientation_prior: vertical")

        if args.use_hybrid_bundle_adjustment:
            log.ODM_DEBUG("Enabling hybrid bundle adjustment")
            config.append("bundle_interval: 100")  # Bundle after adding 'bundle_interval' cameras
            config.append("bundle_new_points_ratio: 1.2")  # Bundle when (new points) / (bundled points) > bundle_new_points_ratio
            config.append("local_bundle_radius: 1")  # Max image graph distance for images to be included in local bundle adjustment

        if args.matcher_distance > 0:
            config.append("matching_gps_distance: %s" % self.params.matching_gps_distance)

        if tree.odm_georeferencing_gcp:
            config.append("bundle_use_gcp: yes")
            io.copy(tree.odm_georeferencing_gcp, tree.opensfm)

        # write config file
        log.ODM_DEBUG(config)
        config_filename = io.join_paths(tree.opensfm, 'config.yaml')
        with open(config_filename, 'w') as fout:
            fout.write("\n".join(config))

        # run OpenSfM reconstruction
        matched_done_file = io.join_paths(tree.opensfm, 'matching_done.txt')
        if not io.file_exists(matched_done_file) or rerun_cell:
            system.run('PYTHONPATH=%s %s/bin/opensfm extract_metadata %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
            system.run('PYTHONPATH=%s %s/bin/opensfm detect_features %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
            system.run('PYTHONPATH=%s %s/bin/opensfm match_features %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))

            # marker file so matching is skipped on reruns
            with open(matched_done_file, 'w') as fout:
                fout.write("Matching done!\n")
        else:
            log.ODM_WARNING('Found a feature matching done progress file in: %s' %
                            matched_done_file)

        if not io.file_exists(tree.opensfm_tracks) or rerun_cell:
            system.run('PYTHONPATH=%s %s/bin/opensfm create_tracks %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
        else:
            log.ODM_WARNING('Found a valid OpenSfM tracks file in: %s' %
                            tree.opensfm_tracks)

        if not io.file_exists(tree.opensfm_reconstruction) or rerun_cell:
            system.run('PYTHONPATH=%s %s/bin/opensfm reconstruct %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
        else:
            log.ODM_WARNING('Found a valid OpenSfM reconstruction file in: %s' %
                            tree.opensfm_reconstruction)

        # Check that a reconstruction file has been created
        # BUG FIX: a space was missing between "parameter." and "The program",
        # producing a garbled user-facing error message.
        if not io.file_exists(tree.opensfm_reconstruction):
            log.ODM_ERROR("The program could not process this dataset using the current settings. "
                          "Check that the images have enough overlap, "
                          "that there are enough recognizable features "
                          "and that the images are in focus. "
                          "You could also try to increase the --min-num-features parameter. "
                          "The program will now exit.")
            sys.exit(1)

        # Always export VisualSFM's reconstruction and undistort images
        # as we'll use these for texturing (after GSD estimation and resizing)
        if not args.ignore_gsd:
            image_scale = gsd.image_scale_factor(args.orthophoto_resolution,
                                                 tree.opensfm_reconstruction)
        else:
            image_scale = 1.0

        if not io.file_exists(tree.opensfm_reconstruction_nvm) or rerun_cell:
            system.run('PYTHONPATH=%s %s/bin/opensfm export_visualsfm --image_extension png --scale_focal %s %s' %
                       (context.pyopencv_path, context.opensfm_path, image_scale, tree.opensfm))
        else:
            log.ODM_WARNING('Found a valid OpenSfM NVM reconstruction file in: %s' %
                            tree.opensfm_reconstruction_nvm)

        # These will be used for texturing
        system.run('PYTHONPATH=%s %s/bin/opensfm undistort --image_format png --image_scale %s %s' %
                   (context.pyopencv_path, context.opensfm_path, image_scale, tree.opensfm))

        # Skip dense reconstruction if necessary and export
        # sparse reconstruction instead
        if args.fast_orthophoto:
            system.run('PYTHONPATH=%s %s/bin/opensfm export_ply --no-cameras %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
        elif args.use_opensfm_dense:
            # Undistort images at full scale in JPG
            # (TODO: we could compare the size of the PNGs if they are < than depthmap_resolution
            # and use those instead of re-exporting full resolution JPGs)
            system.run('PYTHONPATH=%s %s/bin/opensfm undistort %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
            system.run('PYTHONPATH=%s %s/bin/opensfm compute_depthmaps %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
    else:
        log.ODM_WARNING('Found a valid OpenSfM reconstruction file in: %s' %
                        tree.opensfm_reconstruction)

    # check if reconstruction was exported to bundler before
    if not io.file_exists(tree.opensfm_bundle_list) or rerun_cell:
        # convert back to bundler's format
        system.run('PYTHONPATH=%s %s/bin/export_bundler %s' %
                   (context.pyopencv_path, context.opensfm_path, tree.opensfm))
    else:
        log.ODM_WARNING('Found a valid Bundler file in: %s' %
                        tree.opensfm_reconstruction)

    if reconstruction.georef:
        system.run('PYTHONPATH=%s %s/bin/opensfm export_geocoords %s --transformation --proj \'%s\'' %
                   (context.pyopencv_path, context.opensfm_path, tree.opensfm,
                    reconstruction.georef.projection.srs))

    outputs.reconstruction = reconstruction

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'OpenSfM')

    log.ODM_INFO('Running ODM OpenSfM Cell - Finished')
    return ecto.OK if args.end_with != 'opensfm' else ecto.QUIT
import argparse from opendm import context from opendm import io from opendm import log from appsettings import SettingsParser import sys # parse arguments processopts = ['dataset', 'opensfm', 'slam', 'mve', 'odm_filterpoints', 'odm_meshing', 'odm_25dmeshing', 'mvs_texturing', 'odm_georeferencing', 'odm_dem', 'odm_orthophoto'] with open(io.join_paths(context.root_path, 'VERSION')) as version_file: __version__ = version_file.read().strip() def alphanumeric_string(string): import re if re.match('^[a-zA-Z0-9_-]+$', string) is None: msg = '{0} is not a valid name. Must use alphanumeric characters.'.format(string) raise argparse.ArgumentTypeError(msg) return string class RerunFrom(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, processopts[processopts.index(values):]) parser = SettingsParser(description='OpenDroneMap',
def get_config_file_path(self):
    """Return the path of this OpenSfM project's config.yaml file."""
    project_dir = self.opensfm_project_path
    return io.join_paths(project_dir, 'config.yaml')
def process(self, args, outputs):
    """Load the dataset: build the ODM tree, collect input photos and
    construct the reconstruction object.

    Copies supported images into the project's raw dataset directory on
    first run, caches the photo list in images.json for faster restarts,
    derives coordinates from GCP file / image UTM / --proj string, and
    stores 'tree' and 'reconstruction' in outputs.
    """
    # Load tree
    tree = types.ODM_Tree(args.project_path, args.images, args.gcp)
    outputs['tree'] = tree

    if args.time and io.file_exists(tree.benchmarking):
        # Delete the previously made file
        os.remove(tree.benchmarking)
        with open(tree.benchmarking, 'a') as b:
            b.write('ODM Benchmarking file created %s\nNumber of Cores: %s\n\n'
                    % (system.now(), context.num_cores))

    # check if the extension is supported
    def supported_extension(file_name):
        (pathfn, ext) = os.path.splitext(file_name)
        return ext.lower() in context.supported_extensions

    # Get supported images from dir
    def get_images(in_dir):
        # filter images for its extension type
        log.ODM_DEBUG(in_dir)
        return [
            f for f in io.get_files_list(in_dir) if supported_extension(f)
        ]

    # get images directory
    input_dir = tree.input_images
    images_dir = tree.dataset_raw

    if not io.dir_exists(images_dir):
        log.ODM_INFO(
            "Project directory %s doesn't exist. Creating it now. "
            % images_dir)
        system.mkdir_p(images_dir)
        # copy every supported image from the input dir into the project
        copied = [
            copyfile(io.join_paths(input_dir, f),
                     io.join_paths(images_dir, f))
            for f in get_images(input_dir)
        ]

    # define paths and create working directories
    system.mkdir_p(tree.odm_georeferencing)
    if not args.use_3dmesh:
        system.mkdir_p(tree.odm_25dgeoreferencing)

    log.ODM_DEBUG('Loading dataset from: %s' % images_dir)

    # check if we rerun cell or not
    images_database_file = io.join_paths(tree.root_path, 'images.json')
    if not io.file_exists(images_database_file) or self.rerun():
        files = get_images(images_dir)
        if files:
            # create ODMPhoto list
            path_files = [io.join_paths(images_dir, f) for f in files]

            photos = []
            with open(tree.dataset_list, 'w') as dataset_list:
                for f in path_files:
                    photos += [types.ODM_Photo(f)]
                    dataset_list.write(photos[-1].filename + '\n')

            # Save image database for faster restart
            save_images_database(photos, images_database_file)
        else:
            log.ODM_ERROR('Not enough supported images in %s' % images_dir)
            exit(1)
    else:
        # We have an images database, just load it
        photos = load_images_database(images_database_file)

    log.ODM_INFO('Found %s usable images' % len(photos))

    # append photos to cell output
    if not self.params.get('proj'):
        if tree.odm_georeferencing_gcp:
            outputs['reconstruction'] = types.ODM_Reconstruction(
                photos, coords_file=tree.odm_georeferencing_gcp)
        else:
            # Generate UTM from images
            # NOTE(review): bare except deliberately treats any failure as
            # best-effort — a GCP file may make coordinates unnecessary
            try:
                if not io.file_exists(
                        tree.odm_georeferencing_coords) or self.rerun():
                    location.extract_utm_coords(
                        photos, tree.dataset_raw,
                        tree.odm_georeferencing_coords)
                else:
                    log.ODM_INFO("Coordinates file already exist: %s"
                                 % tree.odm_georeferencing_coords)
            except:
                log.ODM_WARNING('Could not generate coordinates file. '
                                'Ignore if there is a GCP file')

            outputs['reconstruction'] = types.ODM_Reconstruction(
                photos, coords_file=tree.odm_georeferencing_coords)
    else:
        outputs['reconstruction'] = types.ODM_Reconstruction(
            photos, projstring=self.params.get('proj'))

    # Save proj to file for future use (unless this
    # dataset is not georeferenced)
    if outputs['reconstruction'].projection:
        with open(
                io.join_paths(tree.odm_georeferencing,
                              tree.odm_georeferencing_proj), 'w') as f:
            f.write(outputs['reconstruction'].projection.srs)
def process(self):
    """Texture the reconstructed mesh with the MVS-Texturing binary.

    Queues one texturing run for the 3D mesh using the OpenSfM NVM file,
    then invokes mvstex for each run whose output OBJ does not already
    exist. (Leftover debug prints and commented-out dead code removed.)
    """
    def add_run(nvm_file, primary=True, band=None):
        # queue one texturing run; subdir is currently always the
        # texturing root (primary/band are reserved for multi-band use)
        subdir = ""
        self.runs += [{
            'out_dir': os.path.join(self.odm_texturing_path, subdir),
            'model': self.odm_mesh,
            'nadir': False,
            'nvm_file': nvm_file
        }]

    add_run(self.opensfm_reconstruction_nvm)

    for r in self.runs:
        if not io.dir_exists(r['out_dir']):
            system.mkdir_p(r['out_dir'])

        odm_textured_model_obj = os.path.join(r['out_dir'],
                                              self.odm_textured_model_obj)

        if not io.file_exists(odm_textured_model_obj):
            log.ODM_INFO('Writing MVS Textured file in: %s'
                         % odm_textured_model_obj)

            # Format arguments to fit Mvs-Texturing app
            # (each flag is passed as an empty string when disabled)
            skipGeometricVisibilityTest = ""
            skipGlobalSeamLeveling = ""
            skipLocalSeamLeveling = ""
            skipHoleFilling = ""
            keepUnseenFaces = ""
            nadir = ""

            if (self.params.get('skip_vis_test')):
                skipGeometricVisibilityTest = "--skip_geometric_visibility_test"
            if (self.params.get('skip_glob_seam_leveling')):
                skipGlobalSeamLeveling = "--skip_global_seam_leveling"
            if (self.params.get('skip_loc_seam_leveling')):
                skipLocalSeamLeveling = "--skip_local_seam_leveling"
            if (self.params.get('skip_hole_fill')):
                skipHoleFilling = "--skip_hole_filling"
            if (self.params.get('keep_unseen_faces')):
                keepUnseenFaces = "--keep_unseen_faces"
            if (r['nadir']):
                nadir = '--nadir_mode'

            # mvstex definitions
            kwargs = {
                'bin': context.mvstex_path,
                'out_dir': io.join_paths(r['out_dir'], "odm_textured_model"),
                'model': r['model'],
                'dataTerm': 'gmi',
                'outlierRemovalType': "gauss_clamping",
                'skipGeometricVisibilityTest': skipGeometricVisibilityTest,
                'skipGlobalSeamLeveling': skipGlobalSeamLeveling,
                'skipLocalSeamLeveling': skipLocalSeamLeveling,
                'skipHoleFilling': skipHoleFilling,
                'keepUnseenFaces': keepUnseenFaces,
                'toneMapping': 'none',
                'nadirMode': nadir,
                # mvstex expects 2^w - 1 for the nadir weighting term
                'nadirWeight': 2**self.texturing_nadir_weight - 1,
                'nvm_file': r['nvm_file']
            }

            # Make sure tmp directory is empty
            mvs_tmp_dir = os.path.join(r['out_dir'], 'tmp')
            if io.dir_exists(mvs_tmp_dir):
                log.ODM_INFO(
                    "Removing old tmp directory {}".format(mvs_tmp_dir))
                shutil.rmtree(mvs_tmp_dir)

            # run texturing binary
            system.run('{bin} {nvm_file} {model} {out_dir} '
                       '-d {dataTerm} -o {outlierRemovalType} '
                       '-t {toneMapping} '
                       '{skipGeometricVisibilityTest} '
                       '{skipGlobalSeamLeveling} '
                       '{skipLocalSeamLeveling} '
                       '{skipHoleFilling} '
                       '{keepUnseenFaces} '
                       '{nadirMode} '
                       '-n {nadirWeight}'.format(**kwargs))
        else:
            log.ODM_WARNING('Found a valid ODM Texture file in: %s'
                            % odm_textured_model_obj)
def process(self, inputs, outputs):
    """Run the (legacy) OpenSfM pipeline as an ecto cell.

    Writes the image list and config.yaml, runs metadata extraction,
    feature detection/matching, track creation, reconstruction and
    meshing; exports NVM + depthmaps (non-PMVS path) or a PMVS workspace
    (--use-pmvs), and converts the result back to Bundler format.
    Returns ecto.OK, or ecto.QUIT on missing photos / final stage.
    """
    # Benchmarking
    start_time = system.now_raw()
    log.ODM_INFO('Running ODM OpenSfM Cell')

    # get inputs
    tree = inputs.tree
    args = inputs.args
    reconstruction = inputs.reconstruction
    photos = reconstruction.photos

    if not photos:
        log.ODM_ERROR('Not enough photos in photos array to start OpenSfM')
        return ecto.QUIT

    # create working directories
    system.mkdir_p(tree.opensfm)
    system.mkdir_p(tree.pmvs)

    # check if we rerun cell or not
    rerun_cell = (args.rerun is not None and
                  args.rerun == 'opensfm') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and
                  'opensfm' in args.rerun_from)

    # sentinel output differs by MVS backend
    if not args.use_pmvs:
        output_file = tree.opensfm_model
    else:
        output_file = tree.opensfm_reconstruction

    # check if reconstruction was done before
    if not io.file_exists(output_file) or rerun_cell:
        # create file list; altitude must be present on every photo
        # to enable altitude-based GPS alignment below
        list_path = io.join_paths(tree.opensfm, 'image_list.txt')
        has_alt = True
        with open(list_path, 'w') as fout:
            for photo in photos:
                if not photo.altitude:
                    has_alt = False
                fout.write('%s\n' % photo.path_file)

        # create config file for OpenSfM
        config = [
            "use_exif_size: %s" % ('no' if not self.params.use_exif_size else 'yes'),
            "feature_process_size: %s" % self.params.feature_process_size,
            "feature_min_frames: %s" % self.params.feature_min_frames,
            "processes: %s" % self.params.processes,
            "matching_gps_neighbors: %s" % self.params.matching_gps_neighbors,
            "optimize_camera_parameters: %s" % ('no' if self.params.fixed_camera_params else 'yes')
        ]

        if has_alt:
            log.ODM_DEBUG(
                "Altitude data detected, enabling it for GPS alignment")
            # NOTE(review): other variants of this cell write "yes" here;
            # YAML parses both as boolean true, but they are inconsistent
            config.append("use_altitude_tag: True")
            config.append("align_method: naive")

        if args.matcher_distance > 0:
            config.append("matching_gps_distance: %s"
                          % self.params.matching_gps_distance)

        # write config file
        config_filename = io.join_paths(tree.opensfm, 'config.yaml')
        with open(config_filename, 'w') as fout:
            fout.write("\n".join(config))

        # run OpenSfM reconstruction
        matched_done_file = io.join_paths(tree.opensfm, 'matching_done.txt')
        if not io.file_exists(matched_done_file) or rerun_cell:
            system.run('PYTHONPATH=%s %s/bin/opensfm extract_metadata %s' %
                       (context.pyopencv_path, context.opensfm_path,
                        tree.opensfm))
            system.run('PYTHONPATH=%s %s/bin/opensfm detect_features %s' %
                       (context.pyopencv_path, context.opensfm_path,
                        tree.opensfm))
            system.run('PYTHONPATH=%s %s/bin/opensfm match_features %s' %
                       (context.pyopencv_path, context.opensfm_path,
                        tree.opensfm))

            # marker file so matching is skipped on reruns
            with open(matched_done_file, 'w') as fout:
                fout.write("Matching done!\n")
        else:
            log.ODM_WARNING(
                'Found a feature matching done progress file in: %s' %
                matched_done_file)

        if not io.file_exists(tree.opensfm_tracks) or rerun_cell:
            system.run('PYTHONPATH=%s %s/bin/opensfm create_tracks %s' %
                       (context.pyopencv_path, context.opensfm_path,
                        tree.opensfm))
        else:
            log.ODM_WARNING('Found a valid OpenSfM tracks file in: %s' %
                            tree.opensfm_tracks)

        if not io.file_exists(tree.opensfm_reconstruction) or rerun_cell:
            system.run('PYTHONPATH=%s %s/bin/opensfm reconstruct %s' %
                       (context.pyopencv_path, context.opensfm_path,
                        tree.opensfm))
        else:
            log.ODM_WARNING(
                'Found a valid OpenSfM reconstruction file in: %s' %
                tree.opensfm_reconstruction)

        if not io.file_exists(
                tree.opensfm_reconstruction_meshed) or rerun_cell:
            system.run('PYTHONPATH=%s %s/bin/opensfm mesh %s' %
                       (context.pyopencv_path, context.opensfm_path,
                        tree.opensfm))
        else:
            log.ODM_WARNING(
                'Found a valid OpenSfM meshed reconstruction file in: %s' %
                tree.opensfm_reconstruction_meshed)

        if not args.use_pmvs:
            # export NVM + run undistortion and depthmaps in OpenSfM itself
            if not io.file_exists(
                    tree.opensfm_reconstruction_nvm) or rerun_cell:
                system.run(
                    'PYTHONPATH=%s %s/bin/opensfm export_visualsfm %s' %
                    (context.pyopencv_path, context.opensfm_path,
                     tree.opensfm))
            else:
                log.ODM_WARNING(
                    'Found a valid OpenSfM NVM reconstruction file in: %s' %
                    tree.opensfm_reconstruction_nvm)

            system.run('PYTHONPATH=%s %s/bin/opensfm undistort %s' %
                       (context.pyopencv_path, context.opensfm_path,
                        tree.opensfm))
            system.run(
                'PYTHONPATH=%s %s/bin/opensfm compute_depthmaps %s' %
                (context.pyopencv_path, context.opensfm_path, tree.opensfm))
    else:
        log.ODM_WARNING(
            'Found a valid OpenSfM reconstruction file in: %s' %
            tree.opensfm_reconstruction)

    # check if reconstruction was exported to bundler before
    if not io.file_exists(tree.opensfm_bundle_list) or rerun_cell:
        # convert back to bundler's format
        system.run(
            'PYTHONPATH=%s %s/bin/export_bundler %s' %
            (context.pyopencv_path, context.opensfm_path, tree.opensfm))
    else:
        log.ODM_WARNING('Found a valid Bundler file in: %s' %
                        tree.opensfm_reconstruction)

    if args.use_pmvs:
        # check if reconstruction was exported to pmvs before
        if not io.file_exists(tree.pmvs_visdat) or rerun_cell:
            # run PMVS converter
            system.run('PYTHONPATH=%s %s/bin/export_pmvs %s --output %s' %
                       (context.pyopencv_path, context.opensfm_path,
                        tree.opensfm, tree.pmvs))
        else:
            log.ODM_WARNING('Found a valid CMVS file in: %s' %
                            tree.pmvs_visdat)

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'OpenSfM')

    log.ODM_INFO('Running ODM OpenSfM Cell - Finished')
    return ecto.OK if args.end_with != 'opensfm' else ecto.QUIT
def setup(self, args, images_path, photos, gcp_path=None, append_config=None, rerun=False):
    """
    Setup an OpenSfM project inside self.opensfm_project_path.

    Writes the image list and the config.yaml consumed by OpenSfM, and
    copies optional GCP / image-group files into the project directory.
    If the image list already exists and rerun is False, does nothing.

    :param args: parsed arguments (resize_to, min_num_features, max_concurrency,
                 matcher_neighbors, matcher_distance, depthmap/camera options,
                 use_hybrid_bundle_adjustment, project_path)
    :param images_path: directory containing the source images
    :param photos: iterable of photo objects exposing .filename and .altitude
    :param gcp_path: optional path to a ground control points file
    :param append_config: optional list of extra config lines appended verbatim
    :param rerun: when True, wipe any existing project and rebuild it
    """
    # FIX: the default was the mutable literal [] (shared across calls —
    # the mutable-default-argument pitfall). A None sentinel is
    # backward-compatible: omitting the argument still yields [].
    if append_config is None:
        append_config = []

    if rerun and io.dir_exists(self.opensfm_project_path):
        shutil.rmtree(self.opensfm_project_path)
    if not io.dir_exists(self.opensfm_project_path):
        system.mkdir_p(self.opensfm_project_path)

    list_path = io.join_paths(self.opensfm_project_path, 'image_list.txt')
    if not io.file_exists(list_path) or rerun:
        # create file list; track whether every photo carries altitude data
        has_alt = True
        with open(list_path, 'w') as fout:
            for photo in photos:
                if not photo.altitude:
                    has_alt = False
                fout.write('%s\n' % io.join_paths(images_path, photo.filename))

        # create config file for OpenSfM
        config = [
            "use_exif_size: no",
            "feature_process_size: %s" % args.resize_to,
            "feature_min_frames: %s" % args.min_num_features,
            "processes: %s" % args.max_concurrency,
            "matching_gps_neighbors: %s" % args.matcher_neighbors,
            "matching_gps_distance: %s" % args.matcher_distance,
            "depthmap_method: %s" % args.opensfm_depthmap_method,
            "depthmap_resolution: %s" % args.depthmap_resolution,
            "depthmap_min_patch_sd: %s" % args.opensfm_depthmap_min_patch_sd,
            "depthmap_min_consistent_views: %s" % args.opensfm_depthmap_min_consistent_views,
            "optimize_camera_parameters: %s" % ('no' if args.use_fixed_camera_params else 'yes'),
        ]

        if has_alt:
            log.ODM_DEBUG("Altitude data detected, enabling it for GPS alignment")
            config.append("use_altitude_tag: yes")
            config.append("align_method: naive")
        else:
            # no altitude available: fall back to an orientation prior
            config.append("align_method: orientation_prior")
            config.append("align_orientation_prior: vertical")

        if args.use_hybrid_bundle_adjustment:
            log.ODM_DEBUG("Enabling hybrid bundle adjustment")
            config.append("bundle_interval: 100")  # Bundle after adding 'bundle_interval' cameras
            config.append("bundle_new_points_ratio: 1.2")  # Bundle when (new points) / (bundled points) > bundle_new_points_ratio
            config.append("local_bundle_radius: 1")  # Max image graph distance for images to be included in local bundle adjustment

        if gcp_path:
            config.append("bundle_use_gcp: yes")
            io.copy(gcp_path, self.path("gcp_list.txt"))

        config = config + append_config

        # write config file
        log.ODM_DEBUG(config)
        config_filename = io.join_paths(self.opensfm_project_path, 'config.yaml')
        with open(config_filename, 'w') as fout:
            fout.write("\n".join(config))

        # check for image_groups.txt (split-merge)
        image_groups_file = os.path.join(args.project_path, "image_groups.txt")
        if io.file_exists(image_groups_file):
            log.ODM_DEBUG("Copied image_groups.txt to OpenSfM directory")
            io.copy(image_groups_file, os.path.join(self.opensfm_project_path, "image_groups.txt"))
    else:
        log.ODM_WARNING("%s already exists, not rerunning OpenSfM setup" % list_path)
def process(self, inputs, outputs):
    """
    Run the OpenSfM cell: write an image list and config.yaml, then drive the
    OpenSfM pipeline through external commands (extract_metadata,
    detect_features, match_features, create_tracks, reconstruct), export the
    reconstruction (NVM / PLY / dense depthmaps depending on args) and convert
    it to Bundler format. Sets outputs.reconstruction for downstream cells.

    Each external step is skipped when its output file already exists, unless
    the cell is being rerun.
    """
    # Benchmarking
    start_time = system.now_raw()
    log.ODM_INFO('Running ODM OpenSfM Cell')

    # get inputs
    tree = inputs.tree
    args = inputs.args
    reconstruction = inputs.reconstruction
    photos = reconstruction.photos

    if not photos:
        log.ODM_ERROR('Not enough photos in photos array to start OpenSfM')
        return ecto.QUIT

    # create working directories
    system.mkdir_p(tree.opensfm)

    # check if we rerun cell or not
    rerun_cell = (args.rerun is not None and
                  args.rerun == 'opensfm') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and
                  'opensfm' in args.rerun_from)

    # pick the file whose existence marks this cell as already done
    if args.fast_orthophoto:
        output_file = io.join_paths(tree.opensfm, 'reconstruction.ply')
    elif args.use_opensfm_dense:
        output_file = tree.opensfm_model
    else:
        output_file = tree.opensfm_reconstruction

    # check if reconstruction was done before
    if not io.file_exists(output_file) or rerun_cell:
        # create file list (has_alt becomes False if any photo lacks altitude)
        list_path = io.join_paths(tree.opensfm, 'image_list.txt')
        has_alt = True
        with open(list_path, 'w') as fout:
            for photo in photos:
                if not photo.altitude:
                    has_alt = False
                fout.write('%s\n' % io.join_paths(tree.dataset_raw, photo.filename))

        # create config file for OpenSfM
        config = [
            "use_exif_size: %s" % ('no' if not self.params.use_exif_size else 'yes'),
            "feature_process_size: %s" % self.params.feature_process_size,
            "feature_min_frames: %s" % self.params.feature_min_frames,
            "processes: %s" % self.params.processes,
            "matching_gps_neighbors: %s" % self.params.matching_gps_neighbors,
            "depthmap_method: %s" % args.opensfm_depthmap_method,
            "depthmap_resolution: %s" % args.depthmap_resolution,
            "depthmap_min_patch_sd: %s" % args.opensfm_depthmap_min_patch_sd,
            "depthmap_min_consistent_views: %s" % args.opensfm_depthmap_min_consistent_views,
            "optimize_camera_parameters: %s" % ('no' if self.params.fixed_camera_params else 'yes')
        ]

        if has_alt:
            log.ODM_DEBUG("Altitude data detected, enabling it for GPS alignment")
            config.append("use_altitude_tag: yes")
            config.append("align_method: naive")
        else:
            config.append("align_method: orientation_prior")
            config.append("align_orientation_prior: vertical")

        if args.use_hybrid_bundle_adjustment:
            log.ODM_DEBUG("Enabling hybrid bundle adjustment")
            config.append("bundle_interval: 100")  # Bundle after adding 'bundle_interval' cameras
            config.append("bundle_new_points_ratio: 1.2")  # Bundle when (new points) / (bundled points) > bundle_new_points_ratio
            config.append("local_bundle_radius: 1")  # Max image graph distance for images to be included in local bundle adjustment

        if args.matcher_distance > 0:
            config.append("matching_gps_distance: %s" % self.params.matching_gps_distance)

        if tree.odm_georeferencing_gcp:
            config.append("bundle_use_gcp: yes")
            io.copy(tree.odm_georeferencing_gcp, tree.opensfm)

        # write config file
        log.ODM_DEBUG(config)
        config_filename = io.join_paths(tree.opensfm, 'config.yaml')
        with open(config_filename, 'w') as fout:
            fout.write("\n".join(config))

        # run OpenSfM reconstruction; a marker file records that matching finished
        matched_done_file = io.join_paths(tree.opensfm, 'matching_done.txt')
        if not io.file_exists(matched_done_file) or rerun_cell:
            system.run('PYTHONPATH=%s %s/bin/opensfm extract_metadata %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
            system.run('PYTHONPATH=%s %s/bin/opensfm detect_features %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
            system.run('PYTHONPATH=%s %s/bin/opensfm match_features %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
            with open(matched_done_file, 'w') as fout:
                fout.write("Matching done!\n")
        else:
            log.ODM_WARNING('Found a feature matching done progress file in: %s' %
                            matched_done_file)

        if not io.file_exists(tree.opensfm_tracks) or rerun_cell:
            system.run('PYTHONPATH=%s %s/bin/opensfm create_tracks %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
        else:
            log.ODM_WARNING('Found a valid OpenSfM tracks file in: %s' %
                            tree.opensfm_tracks)

        if not io.file_exists(tree.opensfm_reconstruction) or rerun_cell:
            system.run('PYTHONPATH=%s %s/bin/opensfm reconstruct %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
        else:
            log.ODM_WARNING('Found a valid OpenSfM reconstruction file in: %s' %
                            tree.opensfm_reconstruction)

        # Check that a reconstruction file has been created
        if not io.file_exists(tree.opensfm_reconstruction):
            log.ODM_ERROR("The program could not process this dataset using the current settings. "
                          "Check that the images have enough overlap, "
                          "that there are enough recognizable features "
                          "and that the images are in focus. "
                          "You could also try to increase the --min-num-features parameter."
                          "The program will now exit.")
            sys.exit(1)

        # Always export VisualSFM's reconstruction and undistort images
        # as we'll use these for texturing (after GSD estimation and resizing)
        if not args.ignore_gsd:
            image_scale = gsd.image_scale_factor(args.orthophoto_resolution, tree.opensfm_reconstruction)
        else:
            image_scale = 1.0

        if not io.file_exists(tree.opensfm_reconstruction_nvm) or rerun_cell:
            system.run('PYTHONPATH=%s %s/bin/opensfm export_visualsfm --image_extension png --scale_focal %s %s' %
                       (context.pyopencv_path, context.opensfm_path, image_scale, tree.opensfm))
        else:
            log.ODM_WARNING('Found a valid OpenSfM NVM reconstruction file in: %s' %
                            tree.opensfm_reconstruction_nvm)

        # These will be used for texturing
        system.run('PYTHONPATH=%s %s/bin/opensfm undistort --image_format png --image_scale %s %s' %
                   (context.pyopencv_path, context.opensfm_path, image_scale, tree.opensfm))

        # Skip dense reconstruction if necessary and export
        # sparse reconstruction instead
        if args.fast_orthophoto:
            system.run('PYTHONPATH=%s %s/bin/opensfm export_ply --no-cameras %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
        elif args.use_opensfm_dense:
            # Undistort images at full scale in JPG
            # (TODO: we could compare the size of the PNGs if they are < than depthmap_resolution
            # and use those instead of re-exporting full resolution JPGs)
            system.run('PYTHONPATH=%s %s/bin/opensfm undistort %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
            system.run('PYTHONPATH=%s %s/bin/opensfm compute_depthmaps %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
    else:
        log.ODM_WARNING('Found a valid OpenSfM reconstruction file in: %s' %
                        tree.opensfm_reconstruction)

    # check if reconstruction was exported to bundler before
    if not io.file_exists(tree.opensfm_bundle_list) or rerun_cell:
        # convert back to bundler's format
        system.run('PYTHONPATH=%s %s/bin/export_bundler %s' %
                   (context.pyopencv_path, context.opensfm_path, tree.opensfm))
    else:
        log.ODM_WARNING('Found a valid Bundler file in: %s' %
                        tree.opensfm_reconstruction)

    # export geocoords when the reconstruction is georeferenced
    if reconstruction.georef:
        system.run('PYTHONPATH=%s %s/bin/opensfm export_geocoords %s --transformation --proj \'%s\'' %
                   (context.pyopencv_path, context.opensfm_path, tree.opensfm,
                    reconstruction.georef.projection.srs))

    outputs.reconstruction = reconstruction

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'OpenSfM')

    log.ODM_INFO('Running ODM OpenSfM Cell - Finished')
    return ecto.OK if args.end_with != 'opensfm' else ecto.QUIT
def process(self, inputs, outputs):
    """
    Run the MVS texturing cell: assemble the Mvs-Texturing command-line
    options from self.params, ensure a .nvm camera file is available
    (generating one from the PMVS output when PMVS was used), and invoke
    the texturing binary. Skipped when a textured model already exists
    and the cell is not being rerun.
    """
    # Benchmarking
    start_time = system.now_raw()
    log.ODM_INFO('Running MVS Texturing Cell')

    # get inputs
    args = self.inputs.args
    tree = self.inputs.tree

    # define paths and create working directories
    system.mkdir_p(tree.odm_texturing)

    # check if we rerun cell or not
    rerun_cell = (args.rerun is not None and
                  args.rerun == 'mvs_texturing') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and
                  'mvs_texturing' in args.rerun_from)

    if not io.file_exists(tree.odm_textured_model_obj) or rerun_cell:
        log.ODM_DEBUG('Writing MVS Textured file in: %s' %
                      tree.odm_textured_model_obj)

        # Format arguments to fit Mvs-Texturing app: each enabled option
        # contributes its flag, otherwise an empty string
        params = self.params
        skipGeometricVisibilityTest = "--skip_geometric_visibility_test" if params.skip_vis_test else ""
        skipGlobalSeamLeveling = "--skip_global_seam_leveling" if params.skip_glob_seam_leveling else ""
        skipLocalSeamLeveling = "--skip_local_seam_leveling" if params.skip_loc_seam_leveling else ""
        skipHoleFilling = "--skip_hole_filling" if params.skip_hole_fill else ""
        keepUnseenFaces = "--keep_unseen_faces" if params.keep_unseen_faces else ""

        # mvstex definitions
        kwargs = {
            'bin': context.mvstex_path,
            'out_dir': io.join_paths(tree.odm_texturing, "odm_textured_model"),
            'pmvs_folder': tree.pmvs_rec_path,
            'nvm_file': io.join_paths(tree.pmvs_rec_path, "nvmCams.nvm"),
            'model': tree.odm_mesh,
            'dataTerm': params.data_term,
            'outlierRemovalType': params.outlier_rem_type,
            'skipGeometricVisibilityTest': skipGeometricVisibilityTest,
            'skipGlobalSeamLeveling': skipGlobalSeamLeveling,
            'skipLocalSeamLeveling': skipLocalSeamLeveling,
            'skipHoleFilling': skipHoleFilling,
            'keepUnseenFaces': keepUnseenFaces,
            'toneMapping': params.tone_mapping
        }

        if not args.use_pmvs:
            # OpenSfM already produced an .nvm file; point at it
            kwargs['nvm_file'] = io.join_paths(tree.opensfm, "reconstruction.nvm")
        else:
            log.ODM_DEBUG('Generating .nvm file from pmvs output: %s' %
                          '{nvm_file}'.format(**kwargs))
            # Create .nvm camera file.
            pmvs2nvmcams.run('{pmvs_folder}'.format(**kwargs),
                             '{nvm_file}'.format(**kwargs))

        # run texturing binary
        system.run('{bin} {nvm_file} {model} {out_dir} '
                   '-d {dataTerm} -o {outlierRemovalType} '
                   '-t {toneMapping} '
                   '{skipGeometricVisibilityTest} '
                   '{skipGlobalSeamLeveling} '
                   '{skipLocalSeamLeveling} '
                   '{skipHoleFilling} '
                   '{keepUnseenFaces}'.format(**kwargs))
    else:
        log.ODM_WARNING('Found a valid ODM Texture file in: %s' %
                        tree.odm_textured_model_obj)

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'Texturing')

    log.ODM_INFO('Running ODM Texturing Cell - Finished')
    return ecto.OK if args.end_with != 'odm_texturing' else ecto.QUIT
def process(self, inputs, outputs):
    """
    Run the MVE dense-reconstruction cell: copy the OpenSfM bundle into an
    MVE scene (makescene), compute depthmaps (dmrecon) at a scale derived
    from the largest photo and args.depthmap_resolution, and merge them
    into a point set (scene2pset). Sets outputs.reconstruction.
    """
    # Benchmarking
    start_time = system.now_raw()
    log.ODM_INFO('Running MVE Cell')

    # get inputs
    tree = inputs.tree
    args = inputs.args
    reconstruction = inputs.reconstruction
    photos = reconstruction.photos

    if not photos:
        log.ODM_ERROR('Not enough photos in photos array to start MVE')
        return ecto.QUIT

    # check if we rerun cell or not
    rerun_cell = (args.rerun is not None and
                  args.rerun == 'mve') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and
                  'mve' in args.rerun_from)

    # check if reconstruction was done before
    if not io.file_exists(tree.mve_model) or rerun_cell:
        # cleanup if a rerun
        if io.dir_exists(tree.mve_path) and rerun_cell:
            shutil.rmtree(tree.mve_path)

        # make bundle directory
        if not io.file_exists(tree.mve_bundle):
            system.mkdir_p(tree.mve_path)
            system.mkdir_p(io.join_paths(tree.mve_path, 'bundle'))
            io.copy(tree.opensfm_image_list, tree.mve_image_list)
            io.copy(tree.opensfm_bundle, tree.mve_bundle)

        # mve makescene wants the output directory
        # to not exists before executing it (otherwise it
        # will prompt the user for confirmation)
        if io.dir_exists(tree.mve):
            shutil.rmtree(tree.mve)

        # run mve makescene
        if not io.dir_exists(tree.mve_views):
            system.run('%s %s %s' % (context.makescene_path, tree.mve_path, tree.mve),
                       env_vars={'OMP_NUM_THREADS': args.max_concurrency})

        # Compute mve output scale based on depthmap_resolution
        max_width = 0
        max_height = 0
        for photo in photos:
            max_width = max(photo.width, max_width)
            max_height = max(photo.height, max_height)

        max_pixels = args.depthmap_resolution * args.depthmap_resolution
        if max_width * max_height <= max_pixels:
            mve_output_scale = 0
        else:
            # each scale level halves both dimensions, i.e. quarters the
            # pixel count — hence log base 4 of the ratio
            ratio = float(max_width * max_height) / float(max_pixels)
            mve_output_scale = int(math.ceil(math.log(ratio) / math.log(4.0)))

        dmrecon_config = [
            "-s%s" % mve_output_scale,
            "--progress=silent",
            "--local-neighbors=2",
            "--force",
        ]

        # Run MVE's dmrecon
        # (ASCII-art banner logged before the long-running dense step)
        log.ODM_INFO(' ')
        log.ODM_INFO(' ,*/** ')
        log.ODM_INFO(' ,*@%*/@%* ')
        log.ODM_INFO(' ,/@%******@&*. ')
        log.ODM_INFO(' ,*@&*********/@&* ')
        log.ODM_INFO(' ,*@&**************@&* ')
        log.ODM_INFO(' ,/@&******************@&*. ')
        log.ODM_INFO(' ,*@&*********************/@&* ')
        log.ODM_INFO(' ,*@&**************************@&*. ')
        log.ODM_INFO(' ,/@&******************************&&*, ')
        log.ODM_INFO(' ,*&&**********************************@&*. ')
        log.ODM_INFO(' ,*@&**************************************@&*. ')
        log.ODM_INFO(' ,*@&***************#@@@@@@@@@%****************&&*, ')
        log.ODM_INFO(' .*&&***************&@@@@@@@@@@@@@@****************@@*. ')
        log.ODM_INFO(' .*@&***************&@@@@@@@@@@@@@@@@@%****(@@%********@@*. ')
        log.ODM_INFO(' .*@@***************%@@@@@@@@@@@@@@@@@@@@@#****&@@@@%******&@*, ')
        log.ODM_INFO(' .*&@****************@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@/*****@@*. ')
        log.ODM_INFO(' .*@@****************@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@%*************@@*. ')
        log.ODM_INFO(' .*@@****/***********@@@@@&**(@@@@@@@@@@@@@@@@@@@@@@@#*****************%@*, ')
        log.ODM_INFO(' */@*******@*******#@@@@%*******/@@@@@@@@@@@@@@@@@@@@********************/@(, ')
        log.ODM_INFO(' ,*@(********&@@@@@@#**************/@@@@@@@#**(@@&/**********************@&* ')
        log.ODM_INFO(' *#@/*******************************@@@@@***&@&**********************&@*, ')
        log.ODM_INFO(' *#@#******************************&@@@***@#*********************&@*, ')
        log.ODM_INFO(' */@#*****************************@@@************************@@*. ')
        log.ODM_INFO(' *#@/***************************/@@/*********************%@*, ')
        log.ODM_INFO(' *#@#**************************#@@%******************%@*, ')
        log.ODM_INFO(' */@#*************************(@@@@@@@&%/********&@*. ')
        log.ODM_INFO(' *(@(*********************************/%@@%**%@*, ')
        log.ODM_INFO(' *(@%************************************%@** ')
        log.ODM_INFO(' **@%********************************&@*, ')
        log.ODM_INFO(' *(@(****************************%@/* ')
        log.ODM_INFO(' ,(@%************************#@/* ')
        log.ODM_INFO(' ,*@%********************&@/, ')
        log.ODM_INFO(' */@#****************#@/* ')
        log.ODM_INFO(' ,/@&************#@/* ')
        log.ODM_INFO(' ,*@&********%@/, ')
        log.ODM_INFO(' */@#****(@/* ')
        log.ODM_INFO(' ,/@@@@(* ')
        log.ODM_INFO(' .**, ')
        log.ODM_INFO('')
        log.ODM_INFO("Running dense reconstruction. This might take a while. Please be patient, the process is not dead or hung.")
        log.ODM_INFO(" Process is running")
        system.run('%s %s %s' % (context.dmrecon_path, ' '.join(dmrecon_config), tree.mve),
                   env_vars={'OMP_NUM_THREADS': args.max_concurrency})

        scene2pset_config = [
            "-F%s" % mve_output_scale
        ]

        # run scene2pset
        system.run('%s %s "%s" "%s"' % (context.scene2pset_path, ' '.join(scene2pset_config), tree.mve, tree.mve_model),
                   env_vars={'OMP_NUM_THREADS': args.max_concurrency})
    else:
        log.ODM_WARNING('Found a valid MVE reconstruction file in: %s' %
                        tree.mve_model)

    outputs.reconstruction = reconstruction

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'MVE')

    log.ODM_INFO('Running ODM MVE Cell - Finished')
    return ecto.OK if args.end_with != 'mve' else ecto.QUIT
def process(self, inputs, outputs):
    """
    Run the OpenSfM cell (PMVS-capable variant): write an image list and
    config.yaml, drive the OpenSfM pipeline through external commands
    (extract_metadata, detect_features, match_features, create_tracks,
    reconstruct, mesh), export to NVM / PLY / depthmaps or PMVS depending
    on args, and convert the result to Bundler format. Sets
    outputs.reconstruction for downstream cells.
    """
    # Benchmarking
    start_time = system.now_raw()
    log.ODM_INFO('Running ODM OpenSfM Cell')

    # get inputs
    tree = inputs.tree
    args = inputs.args
    reconstruction = inputs.reconstruction
    photos = reconstruction.photos

    if not photos:
        log.ODM_ERROR('Not enough photos in photos array to start OpenSfM')
        return ecto.QUIT

    # create working directories
    system.mkdir_p(tree.opensfm)
    system.mkdir_p(tree.pmvs)

    # check if we rerun cell or not
    rerun_cell = (args.rerun is not None and
                  args.rerun == 'opensfm') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and
                  'opensfm' in args.rerun_from)

    # pick the file whose existence marks this cell as already done
    if not args.use_pmvs:
        output_file = tree.opensfm_model
        if args.fast_orthophoto:
            output_file = io.join_paths(tree.opensfm, 'reconstruction.ply')
    else:
        output_file = tree.opensfm_reconstruction

    # check if reconstruction was done before
    if not io.file_exists(output_file) or rerun_cell:
        # create file list (has_alt becomes False if any photo lacks altitude)
        list_path = io.join_paths(tree.opensfm, 'image_list.txt')
        has_alt = True
        with open(list_path, 'w') as fout:
            for photo in photos:
                if not photo.altitude:
                    has_alt = False
                fout.write('%s\n' % photo.path_file)

        # create config file for OpenSfM
        config = [
            "use_exif_size: %s" % ('no' if not self.params.use_exif_size else 'yes'),
            "feature_process_size: %s" % self.params.feature_process_size,
            "feature_min_frames: %s" % self.params.feature_min_frames,
            "processes: %s" % self.params.processes,
            "matching_gps_neighbors: %s" % self.params.matching_gps_neighbors,
            "depthmap_method: %s" % args.opensfm_depthmap_method,
            "depthmap_resolution: %s" % args.opensfm_depthmap_resolution,
            "depthmap_min_patch_sd: %s" % args.opensfm_depthmap_min_patch_sd,
            "depthmap_min_consistent_views: %s" % args.opensfm_depthmap_min_consistent_views,
            "optimize_camera_parameters: %s" % ('no' if self.params.fixed_camera_params else 'yes')
        ]

        # NOTE: unlike other variants in this file there is no else-branch
        # setting an orientation prior when altitude is missing
        if has_alt:
            log.ODM_DEBUG("Altitude data detected, enabling it for GPS alignment")
            config.append("use_altitude_tag: True")
            config.append("align_method: naive")

        if args.use_hybrid_bundle_adjustment:
            log.ODM_DEBUG("Enabling hybrid bundle adjustment")
            config.append("bundle_interval: 100")  # Bundle after adding 'bundle_interval' cameras
            config.append("bundle_new_points_ratio: 1.2")  # Bundle when (new points) / (bundled points) > bundle_new_points_ratio
            config.append("local_bundle_radius: 1")  # Max image graph distance for images to be included in local bundle adjustment

        if args.matcher_distance > 0:
            config.append("matching_gps_distance: %s" % self.params.matching_gps_distance)

        if tree.odm_georeferencing_gcp:
            config.append("bundle_use_gcp: yes")
            io.copy(tree.odm_georeferencing_gcp, tree.opensfm)

        # write config file
        log.ODM_DEBUG(config)
        config_filename = io.join_paths(tree.opensfm, 'config.yaml')
        with open(config_filename, 'w') as fout:
            fout.write("\n".join(config))

        # run OpenSfM reconstruction; a marker file records that matching finished
        matched_done_file = io.join_paths(tree.opensfm, 'matching_done.txt')
        if not io.file_exists(matched_done_file) or rerun_cell:
            system.run('PYTHONPATH=%s %s/bin/opensfm extract_metadata %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
            system.run('PYTHONPATH=%s %s/bin/opensfm detect_features %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
            system.run('PYTHONPATH=%s %s/bin/opensfm match_features %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
            with open(matched_done_file, 'w') as fout:
                fout.write("Matching done!\n")
        else:
            log.ODM_WARNING('Found a feature matching done progress file in: %s' %
                            matched_done_file)

        if not io.file_exists(tree.opensfm_tracks) or rerun_cell:
            system.run('PYTHONPATH=%s %s/bin/opensfm create_tracks %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
        else:
            log.ODM_WARNING('Found a valid OpenSfM tracks file in: %s' %
                            tree.opensfm_tracks)

        if not io.file_exists(tree.opensfm_reconstruction) or rerun_cell:
            system.run('PYTHONPATH=%s %s/bin/opensfm reconstruct %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
        else:
            log.ODM_WARNING('Found a valid OpenSfM reconstruction file in: %s' %
                            tree.opensfm_reconstruction)

        if not io.file_exists(tree.opensfm_reconstruction_meshed) or rerun_cell:
            system.run('PYTHONPATH=%s %s/bin/opensfm mesh %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
        else:
            log.ODM_WARNING('Found a valid OpenSfM meshed reconstruction file in: %s' %
                            tree.opensfm_reconstruction_meshed)

        if not args.use_pmvs:
            if not io.file_exists(tree.opensfm_reconstruction_nvm) or rerun_cell:
                system.run('PYTHONPATH=%s %s/bin/opensfm export_visualsfm %s' %
                           (context.pyopencv_path, context.opensfm_path, tree.opensfm))
            else:
                log.ODM_WARNING('Found a valid OpenSfM NVM reconstruction file in: %s' %
                                tree.opensfm_reconstruction_nvm)

            system.run('PYTHONPATH=%s %s/bin/opensfm undistort %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))

            # Skip dense reconstruction if necessary and export
            # sparse reconstruction instead
            if args.fast_orthophoto:
                system.run('PYTHONPATH=%s %s/bin/opensfm export_ply --no-cameras %s' %
                           (context.pyopencv_path, context.opensfm_path, tree.opensfm))
            else:
                system.run('PYTHONPATH=%s %s/bin/opensfm compute_depthmaps %s' %
                           (context.pyopencv_path, context.opensfm_path, tree.opensfm))
    else:
        log.ODM_WARNING('Found a valid OpenSfM reconstruction file in: %s' %
                        tree.opensfm_reconstruction)

    # check if reconstruction was exported to bundler before
    if not io.file_exists(tree.opensfm_bundle_list) or rerun_cell:
        # convert back to bundler's format
        system.run('PYTHONPATH=%s %s/bin/export_bundler %s' %
                   (context.pyopencv_path, context.opensfm_path, tree.opensfm))
    else:
        log.ODM_WARNING('Found a valid Bundler file in: %s' %
                        tree.opensfm_reconstruction)

    if args.use_pmvs:
        # check if reconstruction was exported to pmvs before
        if not io.file_exists(tree.pmvs_visdat) or rerun_cell:
            # run PMVS converter
            system.run('PYTHONPATH=%s %s/bin/export_pmvs %s --output %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm, tree.pmvs))
        else:
            log.ODM_WARNING('Found a valid CMVS file in: %s' % tree.pmvs_visdat)

    # export geocoords when the reconstruction is georeferenced
    if reconstruction.georef:
        system.run('PYTHONPATH=%s %s/bin/opensfm export_geocoords %s --transformation --proj \'%s\'' %
                   (context.pyopencv_path, context.opensfm_path, tree.opensfm,
                    reconstruction.georef.projection.srs))

    outputs.reconstruction = reconstruction

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'OpenSfM')

    log.ODM_INFO('Running ODM OpenSfM Cell - Finished')
    return ecto.OK if args.end_with != 'opensfm' else ecto.QUIT
# Script fragment: times the mve_cleanmesh and odm_filterpoints steps, then
# rebuilds the image database (images.json) from a photo list when missing.
# NOTE(review): `start`, `current_path`, `max_concurrency`, `photo_list` and
# `file_path` are defined earlier, outside this fragment — confirm in context.
lib.mve_cleanmesh_function(current_path, max_concurrency)
end = timer()
mve_mve_cleanmesh_time = end - start

start = timer()
lib.odm_filterpoints_function(current_path, max_concurrency)
end = timer()
odm_filterpoint_time = end - start

start = timer()
from opendm import io
images_database_file = io.join_paths(current_path, 'images.json')
if not io.file_exists(images_database_file):
    files = photo_list
    images_dir = io.join_paths(file_path, 'images')
    if files:
        # create ODMPhoto list
        path_files = [io.join_paths(images_dir, f) for f in files]
        photos = []
        dataset_list = io.join_paths(file_path, 'img_list')
        # NOTE(review): the `with` target rebinds `dataset_list` from the
        # path string to the open file handle
        with open(dataset_list, 'w') as dataset_list:
            log.ODM_INFO("Loading %s images" % len(path_files))
            for f in path_files:
                photos += [types.ODM_Photo(f)]
                dataset_list.write(photos[-1].filename + '\n')
import argparse
from opendm import context
from opendm import io
from opendm import log
from appsettings import SettingsParser

import sys

# parse arguments
# Ordered list of pipeline stages; RerunFrom relies on this ordering to
# compute the suffix of stages to rerun.
processopts = ['dataset', 'opensfm', 'slam', 'smvs',
               'odm_meshing', 'odm_25dmeshing', 'mvs_texturing',
               'odm_georeferencing', 'odm_dem', 'odm_orthophoto']

# Read the package version from the VERSION file at the project root.
with open(io.join_paths(context.root_path, 'VERSION')) as version_file:
    __version__ = version_file.read().strip()


def alphanumeric_string(string):
    """argparse type-checker: accept only [a-zA-Z0-9_-]+ names.

    Raises argparse.ArgumentTypeError on anything else; returns the
    string unchanged when valid.
    """
    import re
    if re.match('^[a-zA-Z0-9_-]+$', string) is None:
        msg = '{0} is not a valid name. Must use alphanumeric characters.'.format(string)
        raise argparse.ArgumentTypeError(msg)
    return string


class RerunFrom(argparse.Action):
    """argparse action storing the list of stages from `values` onward.

    Stores the tail of `processopts` beginning at the given stage, so the
    pipeline reruns that stage and everything after it.
    """
    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, processopts[processopts.index(values):])
def process(self, inputs, outputs):
    """
    Run the OpenSfM cell (run_all driver variant): write an image list and
    config.yaml, run the whole OpenSfM pipeline through the external
    `run_all` command, export to VisualSFM / depthmaps or PMVS depending on
    args, and convert the result to Bundler format.
    """
    # Benchmarking
    start_time = system.now_raw()
    log.ODM_INFO('Running ODM OpenSfM Cell')

    # get inputs
    tree = self.inputs.tree
    args = self.inputs.args
    photos = self.inputs.photos

    if not photos:
        log.ODM_ERROR('Not enough photos in photos array to start OpenSfM')
        return ecto.QUIT

    # create working directories
    system.mkdir_p(tree.opensfm)
    system.mkdir_p(tree.pmvs)

    # check if we rerun cell or not
    rerun_cell = (args.rerun is not None and
                  args.rerun == 'opensfm') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and
                  'opensfm' in args.rerun_from)

    # pick the file whose existence marks this cell as already done
    if not args.use_pmvs:
        output_file = tree.opensfm_model
    else:
        output_file = tree.opensfm_reconstruction

    # check if reconstruction was done before
    if not io.file_exists(output_file) or rerun_cell:
        # create file list
        list_path = io.join_paths(tree.opensfm, 'image_list.txt')
        with open(list_path, 'w') as fout:
            for photo in photos:
                fout.write('%s\n' % photo.path_file)

        # create config file for OpenSfM
        config = [
            "use_exif_size: %s" % ('no' if not self.params.use_exif_size else 'yes'),
            "feature_process_size: %s" % self.params.feature_process_size,
            "feature_min_frames: %s" % self.params.feature_min_frames,
            "processes: %s" % self.params.processes,
            "matching_gps_neighbors: %s" % self.params.matching_gps_neighbors
        ]

        if args.matcher_distance > 0:
            config.append("matching_gps_distance: %s" % self.params.matching_gps_distance)

        # write config file
        config_filename = io.join_paths(tree.opensfm, 'config.yaml')
        with open(config_filename, 'w') as fout:
            fout.write("\n".join(config))

        # run OpenSfM reconstruction
        system.run('PYTHONPATH=%s %s/bin/run_all %s' %
                   (context.pyopencv_path, context.opensfm_path, tree.opensfm))

        if not args.use_pmvs:
            system.run('PYTHONPATH=%s %s/bin/opensfm export_visualsfm %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
            system.run('PYTHONPATH=%s %s/bin/opensfm undistort %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
            system.run('PYTHONPATH=%s %s/bin/opensfm compute_depthmaps %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm))
    else:
        log.ODM_WARNING('Found a valid OpenSfM file in: %s' %
                        tree.opensfm_reconstruction)

    # check if reconstruction was exported to bundler before
    if not io.file_exists(tree.opensfm_bundle_list) or rerun_cell:
        # convert back to bundler's format
        system.run('PYTHONPATH=%s %s/bin/export_bundler %s' %
                   (context.pyopencv_path, context.opensfm_path, tree.opensfm))
    else:
        log.ODM_WARNING('Found a valid Bundler file in: %s' %
                        tree.opensfm_reconstruction)

    if args.use_pmvs:
        # check if reconstruction was exported to pmvs before
        if not io.file_exists(tree.pmvs_visdat) or rerun_cell:
            # run PMVS converter
            system.run('PYTHONPATH=%s %s/bin/export_pmvs %s --output %s' %
                       (context.pyopencv_path, context.opensfm_path, tree.opensfm, tree.pmvs))
        else:
            log.ODM_WARNING('Found a valid CMVS file in: %s' % tree.pmvs_visdat)

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'OpenSfM')

    log.ODM_INFO('Running ODM OpenSfM Cell - Finished')
    return ecto.OK if args.end_with != 'opensfm' else ecto.QUIT