def test_reconstruction(self):
    """Exercise ODM_Reconstruction's multi-camera band detection.

    Covers three scenarios: a complete 5-band MicaSense RedEdge set,
    the same set with one band file missing (must raise RuntimeError),
    and a plain single-camera (RGB) set.
    """
    # Multi camera setup: 3 shots x 5 bands, as (filename, band name, band index)
    band_defs = [('Red', 1), ('Green', 2), ('Blue', 3), ('NIR', 4), ('Rededge', 5)]
    micasa_redsense_files = [('IMG_%04d_%d.tif' % (shot, idx), name, idx)
                             for shot in (298, 299, 300)
                             for name, idx in band_defs]
    photos = [ODMPhotoMock(f, b, i) for f, b, i in micasa_redsense_files]
    recon = types.ODM_Reconstruction(photos)
    self.assertIsNotNone(recon.multi_camera)

    # Found all 5 bands, in band-index order
    for i, band in enumerate(["Red", "Green", "Blue", "NIR", "Rededge"]):
        self.assertEqual(band, recon.multi_camera[i]['name'])

    self.assertEqual([p.filename for p in recon.multi_camera[0]['photos']],
                     ['IMG_0298_1.tif', 'IMG_0299_1.tif', 'IMG_0300_1.tif'])

    # Missing a file (IMG_0299_1.tif): unbalanced band sets must raise
    incomplete = [t for t in micasa_redsense_files if t[0] != 'IMG_0299_1.tif']
    photos = [ODMPhotoMock(f, b, i) for f, b, i in incomplete]
    self.assertRaises(RuntimeError, types.ODM_Reconstruction, photos)

    # Single camera: no multi-camera structure should be detected
    dji_files = ['DJI_0018.JPG', 'DJI_0019.JPG', 'DJI_0020.JPG',
                 'DJI_0021.JPG', 'DJI_0022.JPG', 'DJI_0023.JPG']
    photos = [ODMPhotoMock(f, 'RGB', 0) for f in dji_files]
    recon = types.ODM_Reconstruction(photos)
    self.assertIsNone(recon.multi_camera)
def process(self, args, outputs):
    """Dataset loading stage: build the photo list (with optional masks and
    per-image geolocation overrides), then create and georeference the
    reconstruction object."""
    outputs['start_time'] = system.now_raw()
    tree = types.ODM_Tree(args.project_path, args.gcp, args.geo)
    outputs['tree'] = tree

    if args.time and io.file_exists(tree.benchmarking):
        # Start a fresh benchmarking file for this run
        os.remove(tree.benchmarking)
        with open(tree.benchmarking, 'a') as b:
            b.write('ODM Benchmarking file created %s\nNumber of Cores: %s\n\n' % (system.now(), context.num_cores))

    def valid_image_filename(filename):
        # A usable image has a supported extension and is not itself a mask file
        stem, extension = os.path.splitext(filename)
        return extension.lower() in context.supported_extensions and stem[-5:] != "_mask"

    def get_images(in_dir):
        # Partition directory entries into usable images and everything else
        log.ODM_DEBUG(in_dir)
        valid, rejects = [], []
        for entry in os.listdir(in_dir):
            (valid if valid_image_filename(entry) else rejects).append(entry)
        return valid, rejects

    def find_mask(photo_path, masks):
        # Return the mask filename registered for this photo, or None
        stem, _ = os.path.splitext(os.path.basename(photo_path))
        mask = masks.get("{}_mask".format(stem))
        if not mask:
            return None
        if " " in mask:
            # Spaces are not supported due to OpenSfM's mask_list.txt format reqs
            log.ODM_WARNING("Image mask {} has a space. Spaces are currently not supported for image masks.".format(mask))
            return None
        return mask

    # get images directory
    images_dir = tree.dataset_raw

    # define paths and create working directories
    system.mkdir_p(tree.odm_georeferencing)
    log.ODM_INFO('Loading dataset from: %s' % images_dir)

    # Only rescan the image directory when no cached database exists (or on rerun)
    images_database_file = os.path.join(tree.root_path, 'images.json')
    if not io.file_exists(images_database_file) or self.rerun():
        if not os.path.exists(images_dir):
            log.ODM_ERROR("There are no images in %s! Make sure that your project path and dataset name is correct. The current is set to: %s" % (images_dir, args.project_path))
            exit(1)

        files, rejects = get_images(images_dir)
        if files:
            path_files = [os.path.join(images_dir, f) for f in files]

            # Lookup table stem -> mask filename, built from the rejected entries
            masks = {}
            for reject in rejects:
                stem, extension = os.path.splitext(reject)
                if stem[-5:] == "_mask" and extension.lower() in context.supported_extensions:
                    masks[stem] = reject

            photos = []
            with open(tree.dataset_list, 'w') as dataset_list:
                log.ODM_INFO("Loading %s images" % len(path_files))
                for path in path_files:
                    photo = types.ODM_Photo(path)
                    photo.set_mask(find_mask(path, masks))
                    photos.append(photo)
                    dataset_list.write(photo.filename + '\n')

            # Apply per-image geolocation overrides when a geo file is present
            if tree.odm_geo_file is not None and os.path.exists(tree.odm_geo_file):
                log.ODM_INFO("Found image geolocation file")
                gf = GeoFile(tree.odm_geo_file)
                updated = 0
                for photo in photos:
                    entry = gf.get_entry(photo.filename)
                    if entry:
                        photo.update_with_geo_entry(entry)
                        updated += 1
                log.ODM_INFO("Updated %s image positions" % updated)

            # Save image database for faster restart
            save_images_database(photos, images_database_file)
        else:
            log.ODM_ERROR('Not enough supported images in %s' % images_dir)
            exit(1)
    else:
        # We have an images database, just load it
        photos = load_images_database(images_database_file)

    log.ODM_INFO('Found %s usable images' % len(photos))

    # Create reconstruction object; GCP georeferencing wins unless EXIF is forced
    reconstruction = types.ODM_Reconstruction(photos)
    if tree.odm_georeferencing_gcp and not args.use_exif:
        reconstruction.georeference_with_gcp(tree.odm_georeferencing_gcp,
                                             tree.odm_georeferencing_coords,
                                             tree.odm_georeferencing_gcp_utm,
                                             tree.odm_georeferencing_model_txt_geo,
                                             rerun=self.rerun())
    else:
        reconstruction.georeference_with_gps(tree.dataset_raw,
                                             tree.odm_georeferencing_coords,
                                             tree.odm_georeferencing_model_txt_geo,
                                             rerun=self.rerun())

    reconstruction.save_proj_srs(os.path.join(tree.odm_georeferencing, tree.odm_georeferencing_proj))
    outputs['reconstruction'] = reconstruction
def process(self, args, outputs):
    """Dataset loading stage: builds the photo list (masks, geo overrides,
    GPS accuracy / lens overrides, optional automatic sky masks), then creates
    and georeferences the reconstruction object and loads boundaries.

    Fix: the sky-mask warning paths referenced an undefined name ``img``
    (raising NameError instead of logging); they now use ``item['file']``.
    """
    outputs['start_time'] = system.now_raw()
    tree = types.ODM_Tree(args.project_path, args.gcp, args.geo)
    outputs['tree'] = tree

    if args.time and io.file_exists(tree.benchmarking):
        # Delete the previously made file
        os.remove(tree.benchmarking)
        with open(tree.benchmarking, 'a') as b:
            b.write('ODM Benchmarking file created %s\nNumber of Cores: %s\n\n' % (system.now(), context.num_cores))

    # check if the image filename is supported (and is not itself a mask file)
    def valid_image_filename(filename):
        (pathfn, ext) = os.path.splitext(filename)
        return ext.lower() in context.supported_extensions and pathfn[-5:] != "_mask"

    # Get supported images from dir; returns (valid, rejects) so rejects can
    # later be scanned for "_mask" companion files
    def get_images(in_dir):
        log.ODM_DEBUG(in_dir)
        entries = os.listdir(in_dir)
        valid, rejects = [], []
        for f in entries:
            if valid_image_filename(f):
                valid.append(f)
            else:
                rejects.append(f)
        return valid, rejects

    # Look up the mask registered for a photo; returns None when absent/invalid
    def find_mask(photo_path, masks):
        (pathfn, ext) = os.path.splitext(os.path.basename(photo_path))
        k = "{}_mask".format(pathfn)
        mask = masks.get(k)
        if mask:
            # Spaces are not supported due to OpenSfM's mask_list.txt format reqs
            if not " " in mask:
                return mask
            else:
                log.ODM_WARNING("Image mask {} has a space. Spaces are currently not supported for image masks.".format(mask))

    # get images directory
    images_dir = tree.dataset_raw

    # define paths and create working directories
    system.mkdir_p(tree.odm_georeferencing)
    log.ODM_INFO('Loading dataset from: %s' % images_dir)

    # check if we rerun cell or not
    images_database_file = os.path.join(tree.root_path, 'images.json')
    if not io.file_exists(images_database_file) or self.rerun():
        if not os.path.exists(images_dir):
            raise system.ExitException("There are no images in %s! Make sure that your project path and dataset name is correct. The current is set to: %s" % (images_dir, args.project_path))

        files, rejects = get_images(images_dir)
        if files:
            # create ODMPhoto list
            path_files = [os.path.join(images_dir, f) for f in files]

            # Lookup table for masks (stem -> mask filename)
            masks = {}
            for r in rejects:
                (p, ext) = os.path.splitext(r)
                if p[-5:] == "_mask" and ext.lower() in context.supported_extensions:
                    masks[p] = r

            photos = []
            with open(tree.dataset_list, 'w') as dataset_list:
                log.ODM_INFO("Loading %s images" % len(path_files))
                for f in path_files:
                    try:
                        p = types.ODM_Photo(f)
                        p.set_mask(find_mask(f, masks))
                        photos.append(p)
                        dataset_list.write(photos[-1].filename + '\n')
                    except PhotoCorruptedException:
                        # Skip unreadable images rather than aborting the run
                        log.ODM_WARNING("%s seems corrupted and will not be used" % os.path.basename(f))

            # Check if a geo file is available
            if tree.odm_geo_file is not None and os.path.isfile(tree.odm_geo_file):
                log.ODM_INFO("Found image geolocation file")
                gf = GeoFile(tree.odm_geo_file)
                updated = 0
                for p in photos:
                    entry = gf.get_entry(p.filename)
                    if entry:
                        p.update_with_geo_entry(entry)
                        p.compute_opk()
                        updated += 1
                log.ODM_INFO("Updated %s image positions" % updated)

            # GPSDOP override if we have GPS accuracy information (such as RTK)
            if 'gps_accuracy_is_set' in args:
                log.ODM_INFO("Forcing GPS DOP to %s for all images" % args.gps_accuracy)
                for p in photos:
                    p.override_gps_dop(args.gps_accuracy)

            # Override projection type
            if args.camera_lens != "auto":
                log.ODM_INFO("Setting camera lens to %s for all images" % args.camera_lens)
                for p in photos:
                    p.override_camera_projection(args.camera_lens)

            # Automatic sky removal
            if args.sky_removal:
                # For each image that:
                # - Doesn't already have a mask, AND
                # - Is not nadir (or if orientation info is missing), AND
                # - There are no spaces in the image filename (OpenSfM requirement)
                # Automatically generate a sky mask

                # Generate list of sky images
                sky_images = []
                for p in photos:
                    if p.mask is None and (p.pitch is None or (abs(p.pitch) > 20)) and (not " " in p.filename):
                        sky_images.append({'file': os.path.join(images_dir, p.filename), 'p': p})

                if len(sky_images) > 0:
                    log.ODM_INFO("Automatically generating sky masks for %s images" % len(sky_images))
                    model = ai.get_model("skyremoval", "https://github.com/OpenDroneMap/SkyRemoval/releases/download/v1.0.5/model.zip", "v1.0.5")
                    if model is not None:
                        sf = SkyFilter(model=model)

                        def parallel_sky_filter(item):
                            try:
                                mask_file = sf.run_img(item['file'], images_dir)

                                # Check and set
                                if mask_file is not None and os.path.isfile(mask_file):
                                    item['p'].set_mask(os.path.basename(mask_file))
                                    log.ODM_INFO("Wrote %s" % os.path.basename(mask_file))
                                else:
                                    # FIX: was undefined name `img` (NameError)
                                    log.ODM_WARNING("Cannot generate mask for %s" % item['file'])
                            except Exception as e:
                                # FIX: was undefined name `img` (NameError)
                                log.ODM_WARNING("Cannot generate mask for %s: %s" % (item['file'], str(e)))

                        parallel_map(parallel_sky_filter, sky_images, max_workers=args.max_concurrency)
                        log.ODM_INFO("Sky masks generation completed!")
                    else:
                        log.ODM_WARNING("Cannot load AI model (you might need to be connected to the internet?)")
                else:
                    log.ODM_INFO("No sky masks will be generated (masks already provided, or images are nadir)")

            # End sky removal

            # Save image database for faster restart
            save_images_database(photos, images_database_file)
        else:
            raise system.ExitException('Not enough supported images in %s' % images_dir)
    else:
        # We have an images database, just load it
        photos = load_images_database(images_database_file)

    log.ODM_INFO('Found %s usable images' % len(photos))
    log.logger.log_json_images(len(photos))

    # Create reconstruction object; GCP georeferencing wins unless EXIF is forced
    reconstruction = types.ODM_Reconstruction(photos)
    if tree.odm_georeferencing_gcp and not args.use_exif:
        reconstruction.georeference_with_gcp(tree.odm_georeferencing_gcp,
                                             tree.odm_georeferencing_coords,
                                             tree.odm_georeferencing_gcp_utm,
                                             tree.odm_georeferencing_model_txt_geo,
                                             rerun=self.rerun())
    else:
        reconstruction.georeference_with_gps(tree.dataset_raw,
                                             tree.odm_georeferencing_coords,
                                             tree.odm_georeferencing_model_txt_geo,
                                             rerun=self.rerun())

    reconstruction.save_proj_srs(os.path.join(tree.odm_georeferencing, tree.odm_georeferencing_proj))
    outputs['reconstruction'] = reconstruction

    # Try to load boundaries (only meaningful for georeferenced reconstructions)
    if args.boundary:
        if reconstruction.is_georeferenced():
            outputs['boundary'] = boundary.load_boundary(args.boundary, reconstruction.get_proj_srs())
        else:
            args.boundary = None
            log.ODM_WARNING("Reconstruction is not georeferenced, but boundary file provided (will ignore boundary file)")

    # If sfm-algorithm is triangulation, check if photos have OPK angles;
    # fall back to incremental as soon as one photo lacks them
    if args.sfm_algorithm == 'triangulation':
        for p in photos:
            if not p.has_opk():
                log.ODM_WARNING("No omega/phi/kappa angles found in input photos (%s), switching sfm-algorithm to incremental" % p.filename)
                args.sfm_algorithm = 'incremental'
                break
def process(self, args, outputs):
    """Dataset loading stage (legacy variant): copy input images into the
    project, build the photo list, and expose a reconstruction in outputs.

    Fix: the bare ``except:`` around UTM coordinate extraction also swallowed
    SystemExit/KeyboardInterrupt; narrowed to ``except Exception``.
    """
    # Load tree
    tree = types.ODM_Tree(args.project_path, args.images, args.gcp)
    outputs['tree'] = tree

    if args.time and io.file_exists(tree.benchmarking):
        # Delete the previously made file
        os.remove(tree.benchmarking)
        with open(tree.benchmarking, 'a') as b:
            b.write('ODM Benchmarking file created %s\nNumber of Cores: %s\n\n' % (system.now(), context.num_cores))

    # check if the extension is supported
    def supported_extension(file_name):
        (pathfn, ext) = os.path.splitext(file_name)
        return ext.lower() in context.supported_extensions

    # Get supported images from dir
    def get_images(in_dir):
        # filter images for its extension type
        log.ODM_DEBUG(in_dir)
        return [f for f in io.get_files_list(in_dir) if supported_extension(f)]

    # get images directory
    input_dir = tree.input_images
    images_dir = tree.dataset_raw

    if not io.dir_exists(images_dir):
        log.ODM_INFO("Project directory %s doesn't exist. Creating it now. " % images_dir)
        system.mkdir_p(images_dir)
        copied = [copyfile(io.join_paths(input_dir, f), io.join_paths(images_dir, f))
                  for f in get_images(input_dir)]

    # define paths and create working directories
    system.mkdir_p(tree.odm_georeferencing)
    if not args.use_3dmesh:
        system.mkdir_p(tree.odm_25dgeoreferencing)

    log.ODM_DEBUG('Loading dataset from: %s' % images_dir)

    # check if we rerun cell or not
    images_database_file = io.join_paths(tree.root_path, 'images.json')
    if not io.file_exists(images_database_file) or self.rerun():
        files = get_images(images_dir)
        if files:
            # create ODMPhoto list
            path_files = [io.join_paths(images_dir, f) for f in files]
            photos = []
            with open(tree.dataset_list, 'w') as dataset_list:
                for f in path_files:
                    photos.append(types.ODM_Photo(f))
                    dataset_list.write(photos[-1].filename + '\n')

            # Save image database for faster restart
            save_images_database(photos, images_database_file)
        else:
            log.ODM_ERROR('Not enough supported images in %s' % images_dir)
            exit(1)
    else:
        # We have an images database, just load it
        photos = load_images_database(images_database_file)

    log.ODM_INFO('Found %s usable images' % len(photos))

    # append photos to cell output: GCP file wins, then extracted UTM coords,
    # then an explicit projection string
    if not self.params.get('proj'):
        if tree.odm_georeferencing_gcp:
            outputs['reconstruction'] = types.ODM_Reconstruction(photos, coords_file=tree.odm_georeferencing_gcp)
        else:
            # Generate UTM from images
            try:
                if not io.file_exists(tree.odm_georeferencing_coords) or self.rerun():
                    location.extract_utm_coords(photos, tree.dataset_raw, tree.odm_georeferencing_coords)
                else:
                    log.ODM_INFO("Coordinates file already exist: %s" % tree.odm_georeferencing_coords)
            except Exception:
                # Best-effort: a missing coordinates file is tolerable when a GCP file exists
                log.ODM_WARNING('Could not generate coordinates file. Ignore if there is a GCP file')

            outputs['reconstruction'] = types.ODM_Reconstruction(photos, coords_file=tree.odm_georeferencing_coords)
    else:
        outputs['reconstruction'] = types.ODM_Reconstruction(photos, projstring=self.params.get('proj'))

    # Save proj to file for future use (unless this dataset is not georeferenced)
    if outputs['reconstruction'].projection:
        with open(io.join_paths(tree.odm_georeferencing, tree.odm_georeferencing_proj), 'w') as f:
            f.write(outputs['reconstruction'].projection.srs)
photos += [types.ODM_Photo(f)] dataset_list.write(photos[-1].filename + '\n') # Save image database for faster restart save_images_database(photos, images_database_file) else: log.ODM_ERROR('Not enough supported images in %s' % images_dir) exit(1) else: # We have an images database, just load it photos = load_images_database(images_database_file) log.ODM_INFO('Found %s usable images' % len(photos)) # Create reconstruction object reconstruction = types.ODM_Reconstruction(photos) lib.odm_mesh_function(opensfm_config, current_path, max_concurrency, reconstruction) end = timer() odm_mesh_time = end - start start = timer() lib.odm_texturing_function(current_path) end = timer() odm_texturing_time = end - start import orthophoto
def process(self, args, outputs):
    """Dataset loading stage (legacy variant): copy input images into the
    project directory, build the photo list, then create and georeference
    the reconstruction object."""
    # Load tree
    tree = types.ODM_Tree(args.project_path, args.gcp)
    outputs['tree'] = tree

    if args.time and io.file_exists(tree.benchmarking):
        # Start a fresh benchmarking file for this run
        os.remove(tree.benchmarking)
        with open(tree.benchmarking, 'a') as b:
            b.write('ODM Benchmarking file created %s\nNumber of Cores: %s\n\n' % (system.now(), context.num_cores))

    def supported_extension(file_name):
        # True when the file extension is one of the supported image types
        return os.path.splitext(file_name)[1].lower() in context.supported_extensions

    def get_images(in_dir):
        # Filter a directory listing down to supported image files
        log.ODM_DEBUG(in_dir)
        return [entry for entry in io.get_files_list(in_dir) if supported_extension(entry)]

    # get images directory
    input_dir = tree.input_images
    images_dir = tree.dataset_raw

    if not io.dir_exists(images_dir):
        log.ODM_INFO("Project directory %s doesn't exist. Creating it now. " % images_dir)
        system.mkdir_p(images_dir)
        copied = [copyfile(io.join_paths(input_dir, name), io.join_paths(images_dir, name))
                  for name in get_images(input_dir)]

    # define paths and create working directories
    system.mkdir_p(tree.odm_georeferencing)
    if not args.use_3dmesh:
        system.mkdir_p(tree.odm_25dgeoreferencing)

    log.ODM_INFO('Loading dataset from: %s' % images_dir)

    # Only rescan the image directory when no cached database exists (or on rerun)
    images_database_file = io.join_paths(tree.root_path, 'images.json')
    if not io.file_exists(images_database_file) or self.rerun():
        files = get_images(images_dir)
        if files:
            path_files = [io.join_paths(images_dir, name) for name in files]
            photos = []
            with open(tree.dataset_list, 'w') as dataset_list:
                log.ODM_INFO("Loading %s images" % len(path_files))
                for path in path_files:
                    photos.append(types.ODM_Photo(path))
                    dataset_list.write(photos[-1].filename + '\n')

            # Save image database for faster restart
            save_images_database(photos, images_database_file)
        else:
            log.ODM_ERROR('Not enough supported images in %s' % images_dir)
            exit(1)
    else:
        # We have an images database, just load it
        photos = load_images_database(images_database_file)

    log.ODM_INFO('Found %s usable images' % len(photos))

    # Create reconstruction object; GCP georeferencing wins unless EXIF is forced
    reconstruction = types.ODM_Reconstruction(photos)
    if tree.odm_georeferencing_gcp and not args.use_exif:
        reconstruction.georeference_with_gcp(tree.odm_georeferencing_gcp,
                                             tree.odm_georeferencing_coords,
                                             tree.odm_georeferencing_gcp_utm,
                                             rerun=self.rerun())
    else:
        reconstruction.georeference_with_gps(tree.dataset_raw,
                                             tree.odm_georeferencing_coords,
                                             rerun=self.rerun())

    reconstruction.save_proj_srs(io.join_paths(tree.odm_georeferencing, tree.odm_georeferencing_proj))
    outputs['reconstruction'] = reconstruction
def process(self, args, outputs):
    """Dataset loading stage: builds the photo list (with masks, geo-file
    overrides, GPS-accuracy and lens overrides), then creates and
    georeferences the reconstruction, loads boundaries, and validates the
    sfm-algorithm choice against available OPK angles."""
    outputs['start_time'] = system.now_raw()
    tree = types.ODM_Tree(args.project_path, args.gcp, args.geo)
    outputs['tree'] = tree

    if args.time and io.file_exists(tree.benchmarking):
        # Delete the previously made file
        os.remove(tree.benchmarking)
        with open(tree.benchmarking, 'a') as b:
            b.write('ODM Benchmarking file created %s\nNumber of Cores: %s\n\n' % (system.now(), context.num_cores))

    # check if the image filename is supported (and is not itself a mask file)
    def valid_image_filename(filename):
        (pathfn, ext) = os.path.splitext(filename)
        return ext.lower() in context.supported_extensions and pathfn[-5:] != "_mask"

    # Get supported images from dir; returns (valid, rejects) so the rejects
    # can later be scanned for "_mask" companion files
    def get_images(in_dir):
        log.ODM_DEBUG(in_dir)
        entries = os.listdir(in_dir)
        valid, rejects = [], []
        for f in entries:
            if valid_image_filename(f):
                valid.append(f)
            else:
                rejects.append(f)
        return valid, rejects

    # Look up the mask registered for a photo; returns None when absent/invalid
    def find_mask(photo_path, masks):
        (pathfn, ext) = os.path.splitext(os.path.basename(photo_path))
        k = "{}_mask".format(pathfn)
        mask = masks.get(k)
        if mask:
            # Spaces are not supported due to OpenSfM's mask_list.txt format reqs
            if not " " in mask:
                return mask
            else:
                log.ODM_WARNING("Image mask {} has a space. Spaces are currently not supported for image masks.".format(mask))

    # get images directory
    images_dir = tree.dataset_raw

    # define paths and create working directories
    system.mkdir_p(tree.odm_georeferencing)
    log.ODM_INFO('Loading dataset from: %s' % images_dir)

    # check if we rerun cell or not
    images_database_file = os.path.join(tree.root_path, 'images.json')
    if not io.file_exists(images_database_file) or self.rerun():
        if not os.path.exists(images_dir):
            raise system.ExitException("There are no images in %s! Make sure that your project path and dataset name is correct. The current is set to: %s" % (images_dir, args.project_path))

        files, rejects = get_images(images_dir)
        if files:
            # create ODMPhoto list
            path_files = [os.path.join(images_dir, f) for f in files]

            # Lookup table for masks (stem -> mask filename)
            masks = {}
            for r in rejects:
                (p, ext) = os.path.splitext(r)
                if p[-5:] == "_mask" and ext.lower() in context.supported_extensions:
                    masks[p] = r

            photos = []
            with open(tree.dataset_list, 'w') as dataset_list:
                log.ODM_INFO("Loading %s images" % len(path_files))
                for f in path_files:
                    try:
                        p = types.ODM_Photo(f)
                        p.set_mask(find_mask(f, masks))
                        photos += [p]
                        dataset_list.write(photos[-1].filename + '\n')
                    except PhotoCorruptedException:
                        # Skip unreadable images rather than aborting the run
                        log.ODM_WARNING("%s seems corrupted and will not be used" % os.path.basename(f))

            # Check if a geo file is available
            if tree.odm_geo_file is not None and os.path.isfile(tree.odm_geo_file):
                log.ODM_INFO("Found image geolocation file")
                gf = GeoFile(tree.odm_geo_file)
                updated = 0
                for p in photos:
                    entry = gf.get_entry(p.filename)
                    if entry:
                        p.update_with_geo_entry(entry)
                        p.compute_opk()
                        updated += 1
                log.ODM_INFO("Updated %s image positions" % updated)

            # GPSDOP override if we have GPS accuracy information (such as RTK)
            if 'gps_accuracy_is_set' in args:
                log.ODM_INFO("Forcing GPS DOP to %s for all images" % args.gps_accuracy)
                for p in photos:
                    p.override_gps_dop(args.gps_accuracy)

            # Override projection type
            if args.camera_lens != "auto":
                log.ODM_INFO("Setting camera lens to %s for all images" % args.camera_lens)
                for p in photos:
                    p.override_camera_projection(args.camera_lens)

            # Save image database for faster restart
            save_images_database(photos, images_database_file)
        else:
            raise system.ExitException('Not enough supported images in %s' % images_dir)
    else:
        # We have an images database, just load it
        photos = load_images_database(images_database_file)

    log.ODM_INFO('Found %s usable images' % len(photos))
    log.logger.log_json_images(len(photos))

    # Create reconstruction object; GCP georeferencing wins unless EXIF is forced
    reconstruction = types.ODM_Reconstruction(photos)
    if tree.odm_georeferencing_gcp and not args.use_exif:
        reconstruction.georeference_with_gcp(tree.odm_georeferencing_gcp,
                                             tree.odm_georeferencing_coords,
                                             tree.odm_georeferencing_gcp_utm,
                                             tree.odm_georeferencing_model_txt_geo,
                                             rerun=self.rerun())
    else:
        reconstruction.georeference_with_gps(tree.dataset_raw,
                                             tree.odm_georeferencing_coords,
                                             tree.odm_georeferencing_model_txt_geo,
                                             rerun=self.rerun())

    reconstruction.save_proj_srs(os.path.join(tree.odm_georeferencing, tree.odm_georeferencing_proj))
    outputs['reconstruction'] = reconstruction

    # Try to load boundaries (only meaningful for georeferenced reconstructions)
    if args.boundary:
        if reconstruction.is_georeferenced():
            outputs['boundary'] = boundary.load_boundary(args.boundary, reconstruction.get_proj_srs())
        else:
            args.boundary = None
            log.ODM_WARNING("Reconstruction is not georeferenced, but boundary file provided (will ignore boundary file)")

    # If sfm-algorithm is triangulation, check if photos have OPK angles;
    # fall back to incremental as soon as one photo lacks them
    if args.sfm_algorithm == 'triangulation':
        for p in photos:
            if not p.has_opk():
                log.ODM_WARNING("No omega/phi/kappa angles found in input photos (%s), switching sfm-algorithm to incremental" % p.filename)
                args.sfm_algorithm = 'incremental'
                break
def process(self, inputs, outputs):
    """Dataset loading ecto cell: copy input images into the project,
    build the photo list, and expose a reconstruction on outputs.

    Fix: the photo loop used ``for files in path_files``, shadowing and
    clobbering the ``files`` list built just above; renamed to ``f``.
    """
    # check if the extension is supported
    def supported_extension(file_name):
        (pathfn, ext) = os.path.splitext(file_name)
        return ext.lower() in context.supported_extensions

    # Get supported images from dir
    def get_images(in_dir):
        # filter images for its extension type
        log.ODM_DEBUG(in_dir)
        return [f for f in io.get_files_list(in_dir) if supported_extension(f)]

    log.ODM_INFO('Running ODM Load Dataset Cell')

    # get inputs
    tree = self.inputs.tree
    args = self.inputs.args

    # get images directory
    input_dir = tree.input_images
    images_dir = tree.dataset_raw

    if not io.dir_exists(images_dir):
        log.ODM_INFO("Project directory %s doesn't exist. Creating it now. " % images_dir)
        system.mkdir_p(images_dir)
        copied = [copyfile(io.join_paths(input_dir, f), io.join_paths(images_dir, f))
                  for f in get_images(input_dir)]

    # define paths and create working directories
    system.mkdir_p(tree.odm_georeferencing)
    if args.use_25dmesh:
        system.mkdir_p(tree.odm_25dgeoreferencing)

    log.ODM_DEBUG('Loading dataset from: %s' % images_dir)

    files = get_images(images_dir)
    if files:
        # create ODMPhoto list
        path_files = [io.join_paths(images_dir, f) for f in files]
        photos = []
        with open(tree.dataset_list, 'w') as dataset_list:
            # FIX: loop variable was named `files`, shadowing the list above
            for f in path_files:
                photos.append(make_odm_photo(self.params.force_focal, self.params.force_ccd, f))
                dataset_list.write(photos[-1].filename + '\n')

        log.ODM_INFO('Found %s usable images' % len(photos))
    else:
        log.ODM_ERROR('Not enough supported images in %s' % images_dir)
        return ecto.QUIT

    # append photos to cell output: GCP file wins, then extracted UTM coords,
    # then an explicit projection string
    if not self.params.proj:
        if tree.odm_georeferencing_gcp:
            outputs.reconstruction = types.ODM_Reconstruction(photos, coords_file=tree.odm_georeferencing_gcp)
        else:
            verbose = '-verbose' if self.params.verbose else ''

            # Generate UTM from images
            # odm_georeference definitions
            kwargs = {
                'bin': context.odm_modules_path,
                'imgs': tree.dataset_raw,
                'imgs_list': tree.dataset_list,
                'coords': tree.odm_georeferencing_coords,
                'log': tree.odm_georeferencing_utm_log,
                'verbose': verbose
            }

            # run UTM extraction binary; a non-empty return value signals failure
            extract_utm = system.run_and_return('{bin}/odm_extract_utm -imagesPath {imgs}/ '
                                                '-imageListFile {imgs_list} -outputCoordFile {coords} {verbose} '
                                                '-logFile {log}'.format(**kwargs))
            if extract_utm != '':
                log.ODM_WARNING('Could not generate coordinates file. '
                                'Ignore if there is a GCP file. Error: %s' % extract_utm)

            outputs.reconstruction = types.ODM_Reconstruction(photos, coords_file=tree.odm_georeferencing_coords)
    else:
        outputs.reconstruction = types.ODM_Reconstruction(photos, projstring=self.params.proj)

    # Save proj to file for future use
    with open(io.join_paths(tree.odm_georeferencing, tree.odm_georeferencing_proj), 'w') as f:
        f.write(outputs.reconstruction.projection.srs)

    log.ODM_INFO('Running ODM Load Dataset Cell - Finished')
    return ecto.OK if args.end_with != 'dataset' else ecto.QUIT
def process(self, inputs, outputs):
    """Dataset loading ecto cell (cached-database variant): copy input images,
    build or reload the photo list, and expose a reconstruction on outputs.

    Fix: the bare ``except:`` around UTM coordinate extraction also swallowed
    SystemExit/KeyboardInterrupt; narrowed to ``except Exception``.
    """
    # check if the extension is supported
    def supported_extension(file_name):
        (pathfn, ext) = os.path.splitext(file_name)
        return ext.lower() in context.supported_extensions

    # Get supported images from dir
    def get_images(in_dir):
        # filter images for its extension type
        log.ODM_DEBUG(in_dir)
        return [f for f in io.get_files_list(in_dir) if supported_extension(f)]

    log.ODM_INFO('Running ODM Load Dataset Cell')

    # get inputs
    tree = self.inputs.tree
    args = self.inputs.args

    # get images directory
    input_dir = tree.input_images
    images_dir = tree.dataset_raw

    if not io.dir_exists(images_dir):
        log.ODM_INFO("Project directory %s doesn't exist. Creating it now. " % images_dir)
        system.mkdir_p(images_dir)
        copied = [copyfile(io.join_paths(input_dir, f), io.join_paths(images_dir, f))
                  for f in get_images(input_dir)]

    # define paths and create working directories
    system.mkdir_p(tree.odm_georeferencing)
    if not args.use_3dmesh:
        system.mkdir_p(tree.odm_25dgeoreferencing)

    log.ODM_DEBUG('Loading dataset from: %s' % images_dir)

    # check if we rerun cell or not
    rerun_cell = (args.rerun is not None and args.rerun == 'dataset') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and 'dataset' in args.rerun_from)

    images_database_file = io.join_paths(tree.root_path, 'images.json')
    if not io.file_exists(images_database_file) or rerun_cell:
        files = get_images(images_dir)
        if files:
            # create ODMPhoto list
            path_files = [io.join_paths(images_dir, f) for f in files]
            photos = []
            with open(tree.dataset_list, 'w') as dataset_list:
                for f in path_files:
                    photos.append(types.ODM_Photo(f))
                    dataset_list.write(photos[-1].filename + '\n')

            # Save image database for faster restart
            save_images_database(photos, images_database_file)
        else:
            log.ODM_ERROR('Not enough supported images in %s' % images_dir)
            return ecto.QUIT
    else:
        # We have an images database, just load it
        photos = load_images_database(images_database_file)

    log.ODM_INFO('Found %s usable images' % len(photos))

    # append photos to cell output: GCP file wins, then extracted UTM coords,
    # then an explicit projection string
    if not self.params.proj:
        if tree.odm_georeferencing_gcp:
            outputs.reconstruction = types.ODM_Reconstruction(photos, coords_file=tree.odm_georeferencing_gcp)
        else:
            # Generate UTM from images
            try:
                if not io.file_exists(tree.odm_georeferencing_coords) or rerun_cell:
                    location.extract_utm_coords(photos, tree.dataset_raw, tree.odm_georeferencing_coords)
                else:
                    log.ODM_INFO("Coordinates file already exist: %s" % tree.odm_georeferencing_coords)
            except Exception:
                # Best-effort: a missing coordinates file is tolerable when a GCP file exists
                log.ODM_WARNING('Could not generate coordinates file. Ignore if there is a GCP file')

            outputs.reconstruction = types.ODM_Reconstruction(photos, coords_file=tree.odm_georeferencing_coords)
    else:
        outputs.reconstruction = types.ODM_Reconstruction(photos, projstring=self.params.proj)

    # Save proj to file for future use (unless this dataset is not georeferenced)
    if outputs.reconstruction.projection:
        with open(io.join_paths(tree.odm_georeferencing, tree.odm_georeferencing_proj), 'w') as f:
            f.write(outputs.reconstruction.projection.srs)

    log.ODM_INFO('Running ODM Load Dataset Cell - Finished')
    return ecto.OK if args.end_with != 'dataset' else ecto.QUIT