def FromCoordsFile(coords_file):
    # check for coordinate file existence
    if not io.file_exists(coords_file):
        log.ODM_WARNING('Could not find file %s' % coords_file)
        return

    srs = None
    with open(coords_file) as f:
        # extract reference system and utm zone from first line.
        # We will assume the following format:
        # 'WGS84 UTM 17N' or 'WGS84 UTM 17N \n'
        line = f.readline().rstrip()
        srs = location.parse_srs_header(line)

    return ODM_GeoRef(srs)
def read(self):
    if self.exists():
        with open(self.gcp_path, 'r') as f:
            contents = f.read().strip()

        # Wrap in list() so the result can be indexed and truth-tested on Python 3
        lines = list(map(str.strip, contents.split('\n')))
        if lines:
            self.srs = lines[0]  # SRS

            for line in lines[1:]:
                if line != "" and line[0] != "#":
                    parts = line.split()
                    if len(parts) >= 6:
                        self.entries.append(line)
                    else:
                        log.ODM_WARNING("Malformed GCP line: %s" % line)
def convert_and_undistort(self, rerun=False, imageFilter=None, image_list=None, runId="nominal"):
    log.ODM_INFO("Undistorting %s ..." % self.opensfm_project_path)
    done_flag_file = self.path("undistorted", "%s_done.txt" % runId)

    if not io.file_exists(done_flag_file) or rerun:
        ds = DataSet(self.opensfm_project_path)

        if image_list is not None:
            ds._set_image_list(image_list)

        undistort.run_dataset(ds, "reconstruction.json", 0, None, "undistorted", imageFilter)

        self.touch(done_flag_file)
    else:
        log.ODM_WARNING("Already undistorted (%s)" % runId)
def status_callback(info):
    # If a task switches from RUNNING to QUEUED, then we need to
    # stop the process and re-add the task to the queue.
    if info.status == TaskStatus.QUEUED:
        log.ODM_WARNING("LRE: %s (%s) turned from RUNNING to QUEUED. Re-adding to back of the queue." % (self, task.uuid))
        raise NodeTaskLimitReachedException("Delayed task limit reached")
    elif info.status == TaskStatus.RUNNING:
        # Print a status message once in a while
        nonloc.status_callback_calls += 1
        if nonloc.status_callback_calls > 30:
            log.ODM_INFO("LRE: %s (%s) is still running" % (self, task.uuid))
            nonloc.status_callback_calls = 0
def merge_ply(input_point_cloud_files, output_file, dims=None):
    num_files = len(input_point_cloud_files)
    if num_files == 0:
        log.ODM_WARNING("No input point cloud files to process")
        return

    cmd = [
        'pdal',
        'merge',
        '--writers.ply.sized_types=false',
        '--writers.ply.storage_mode="little endian"',
        ('--writers.ply.dims="%s"' % dims) if dims is not None else '',
        ' '.join(map(double_quote, input_point_cloud_files + [output_file])),
    ]

    system.run(' '.join(cmd))
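# A rough sketch of the command line the function above assembles, with
# hypothetical filenames and a hypothetical dims string (assumes double_quote
# wraps each path in double quotes). pdal merge treats the last positional
# filename as the output; all preceding ones are inputs:
#
#   pdal merge --writers.ply.sized_types=false \
#        --writers.ply.storage_mode="little endian" \
#        --writers.ply.dims="X=float,Y=float,Z=float" \
#        "part_0001.ply" "part_0002.ply" "merged.ply"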
def get_primary_band_name(multi_camera, user_band_name):
    if len(multi_camera) < 1:
        raise Exception("Invalid multi_camera list")

    # multi_camera is already sorted by band_index
    if user_band_name == "auto":
        return multi_camera[0]['name']

    for band in multi_camera:
        if band['name'].lower() == user_band_name.lower():
            return band['name']

    band_name_fallback = multi_camera[0]['name']
    log.ODM_WARNING("Cannot find band name \"%s\", will use \"%s\" instead" % (user_band_name, band_name_fallback))
    return band_name_fallback
def create_bounds_gpkg(self, pointcloud_path, buffer_distance=0, decimation_step=40):
    """
    Compute a buffered polygon around the data extents (not just a bounding box)
    of the given point cloud.

    @return filename to Geopackage containing the polygon
    """
    if not os.path.exists(pointcloud_path):
        log.ODM_WARNING('Point cloud does not exist, cannot generate GPKG bounds {}'.format(pointcloud_path))
        return ''

    bounds_geojson_path = self.create_bounds_geojson(pointcloud_path, buffer_distance, decimation_step)

    summary_file_path = os.path.join(self.storage_dir, '{}.summary.json'.format(self.files_prefix))
    run('pdal info --summary {0} > {1}'.format(pointcloud_path, summary_file_path))

    pc_proj4 = None
    with open(summary_file_path, 'r') as f:
        json_f = json.loads(f.read())
        pc_proj4 = json_f['summary']['srs']['proj4']

    if pc_proj4 is None:
        raise RuntimeError("Could not determine point cloud proj4 declaration")

    bounds_gpkg_path = os.path.join(self.storage_dir, '{}.bounds.gpkg'.format(self.files_prefix))

    # Convert bounds to GPKG
    kwargs = {
        'input': bounds_geojson_path,
        'output': bounds_gpkg_path,
        'proj4': pc_proj4
    }

    run('ogr2ogr -overwrite -f GPKG -a_srs "{proj4}" {output} {input}'.format(**kwargs))

    return bounds_gpkg_path
def filter(input_point_cloud, output_point_cloud, standard_deviation=2.5, meank=16, sample_radius=0, boundary=None, verbose=False, max_concurrency=1):
    """
    Filters a point cloud
    """
    if not os.path.exists(input_point_cloud):
        log.ODM_ERROR("{} does not exist. The program will now exit.".format(input_point_cloud))
        sys.exit(1)

    args = [
        '--input "%s"' % input_point_cloud,
        '--output "%s"' % output_point_cloud,
        '--concurrency %s' % max_concurrency,
        '--verbose' if verbose else '',
    ]

    if sample_radius > 0:
        log.ODM_INFO("Sampling points around a %sm radius" % sample_radius)
        args.append('--radius %s' % sample_radius)

    if standard_deviation > 0 and meank > 0:
        log.ODM_INFO("Filtering {} (statistical, meanK {}, standard deviation {})".format(input_point_cloud, meank, standard_deviation))
        args.append('--meank %s' % meank)
        args.append('--std %s' % standard_deviation)

    if boundary is not None:
        log.ODM_INFO("Boundary {}".format(boundary))
        fd, boundary_json_file = tempfile.mkstemp(suffix='.boundary.json')
        os.close(fd)
        with open(boundary_json_file, 'w') as f:
            f.write(as_geojson(boundary))
        args.append('--boundary "%s"' % boundary_json_file)

    system.run('"%s" %s' % (context.fpcfilter_path, " ".join(args)))

    if not os.path.exists(output_point_cloud):
        log.ODM_WARNING("{} not found, filtering has failed.".format(output_point_cloud))
def __init__(self, geo_path):
    self.geo_path = geo_path
    self.entries = {}
    self.srs = None

    with open(self.geo_path, 'r') as f:
        contents = f.read().strip()

    lines = list(map(str.strip, contents.split('\n')))
    if lines:
        self.raw_srs = lines[0]  # SRS
        self.srs = location.parse_srs_header(self.raw_srs)
        longlat = CRS.from_epsg("4326")

        for line in lines[1:]:
            if line != "" and line[0] != "#":
                parts = line.split()
                if len(parts) >= 3:
                    i = 3
                    filename = parts[0]
                    x, y = [float(p) for p in parts[1:3]]
                    z = float(parts[3]) if len(parts) >= 4 else None

                    # Always convert coordinates to WGS84
                    if z is not None:
                        x, y, z = location.transform3(self.srs, longlat, x, y, z)
                    else:
                        x, y = location.transform2(self.srs, longlat, x, y)

                    omega = phi = kappa = None
                    if len(parts) >= 7:
                        omega, phi, kappa = [float(p) for p in parts[4:7]]
                        i = 7

                    horizontal_accuracy = vertical_accuracy = None
                    if len(parts) >= 9:
                        horizontal_accuracy, vertical_accuracy = [float(p) for p in parts[7:9]]
                        i = 9

                    extras = " ".join(parts[i:])
                    self.entries[filename] = GeoEntry(filename, x, y, z,
                                                      omega, phi, kappa,
                                                      horizontal_accuracy, vertical_accuracy,
                                                      extras)
                else:
                    log.ODM_WARNING("Malformed geo line: %s" % line)
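# A minimal sketch of the geolocation file layout the parser above accepts
# (hypothetical values; the first line is the SRS header, then one image per
# line with at least name, x and y; remaining columns are optional and read
# positionally: [z], [omega phi kappa], [horizontal/vertical accuracy], extras):
#
#   EPSG:4326
#   DJI_0001.JPG 16.339152 48.238284 161.2
#   DJI_0002.JPG 16.339772 48.238523 162.0 -5.0 2.5 87.0 1.2 2.0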
def read(self):
    if self.exists():
        with open(self.gcp_path, 'r') as f:
            contents = f.read().decode('utf-8-sig').encode('utf-8').strip()

        lines = map(str.strip, contents.split('\n'))
        if lines:
            self.raw_srs = lines[0]  # SRS
            self.srs = location.parse_srs_header(self.raw_srs)

            for line in lines[1:]:
                if line != "" and line[0] != "#":
                    parts = line.split()
                    if len(parts) >= 6:
                        self.entries.append(line)
                    else:
                        log.ODM_WARNING("Malformed GCP line: %s" % line)
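# A minimal sketch of the GCP file layout read() above expects (hypothetical
# values). The first line is the SRS header; each following line needs at
# least 6 whitespace-separated fields, typically:
#   geo_x geo_y geo_z image_x image_y image_name [extras...]
#
#   WGS84 UTM 17N
#   544256.7 5320919.9 5.0 3044 2622 IMG_0525.jpg
#   544157.7 5320899.2 5.0 4193 1552 IMG_0585.jpg
#
# Lines starting with '#' are treated as comments and skipped.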
def has_gpus():
    # TODO: python3 use shutil.which
    if not os.path.exists("/usr/bin/nvidia-smi"):
        return False

    try:
        # Decode so the output can be split line by line as text
        out = subprocess.check_output(["nvidia-smi", "-q"]).decode('utf-8')
        for line in out.split("\n"):
            line = line.strip()
            if "Attached GPUs" in line:
                _, numGpus = map(lambda i: i.strip(), line.split(":"))
                numGpus = int(numGpus)
                return numGpus > 0
    except Exception as e:
        log.ODM_WARNING("Cannot call nvidia-smi: %s" % str(e))

    return False
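# A possible Python 3 variant of the check above, following the TODO's
# suggestion to use shutil.which instead of hardcoding /usr/bin/nvidia-smi.
# This is a sketch, not the project's implementation:
import shutil
import subprocess

def has_gpus_sketch():
    # Locate nvidia-smi anywhere on PATH
    smi = shutil.which("nvidia-smi")
    if smi is None:
        return False
    try:
        out = subprocess.check_output([smi, "-q"]).decode('utf-8')
        for line in out.split("\n"):
            line = line.strip()
            if "Attached GPUs" in line:
                _, num_gpus = map(str.strip, line.split(":"))
                return int(num_gpus) > 0
    except Exception as e:
        log.ODM_WARNING("Cannot call nvidia-smi: %s" % str(e))
    return False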
def post_process(geotiff_path, output_path, smoothing_iterations=1):
    """ Apply median smoothing """
    start = datetime.now()

    if not os.path.exists(geotiff_path):
        raise Exception('File %s does not exist!' % geotiff_path)

    log.ODM_INFO('Starting post processing (smoothing)...')

    with rasterio.open(geotiff_path) as img:
        nodata = img.nodatavals[0]
        dtype = img.dtypes[0]
        arr = img.read()[0]

        # Median filter (careful, changing the value 5 might require tweaking
        # the lines below; there's another numpy function that takes care of
        # these edge cases, but it's slower)
        for i in range(smoothing_iterations):
            log.ODM_INFO("Smoothing iteration %s" % str(i + 1))
            try:
                arr = signal.medfilt(arr, 5)
            except MemoryError:
                log.ODM_WARNING("medfilt ran out of memory, switching to slower median_filter")
                arr = ndimage.median_filter(arr, size=5)

        # Fill corner points with nearest value
        if arr.shape >= (4, 4):
            arr[0][:2] = arr[1][0] = arr[1][1]
            arr[0][-2:] = arr[1][-1] = arr[2][-1]
            arr[-1][:2] = arr[-2][0] = arr[-2][1]
            arr[-1][-2:] = arr[-2][-1] = arr[-2][-2]

        # Median filter leaves a bunch of zeros in nodata areas
        locs = numpy.where(arr == 0.0)
        arr[locs] = nodata

        # write output
        with rasterio.open(output_path, 'w', **img.profile) as imgout:
            imgout.write(arr.astype(dtype), 1)

    log.ODM_INFO('Completed post processing to create %s in %s' % (os.path.relpath(output_path), datetime.now() - start))
    return output_path
def get_image_size(file_path, fallback_on_error=True):
    """
    Return (width, height) for a given img file
    """
    try:
        with Image.open(file_path) as img:
            width, height = img.size
    except Exception as e:
        if fallback_on_error:
            log.ODM_WARNING("Cannot read %s with PIL, fallback to cv2: %s" % (file_path, str(e)))
            img = cv2.imread(file_path)
            width = img.shape[1]
            height = img.shape[0]
        else:
            raise e

    return (width, height)
def get_submodel_paths(submodels_path, *paths):
    """
    :return Existing paths for all submodels
    """
    result = []
    if not os.path.exists(submodels_path):
        return result

    for f in os.listdir(submodels_path):
        if f.startswith('submodel'):
            p = os.path.join(submodels_path, f, *paths)
            if os.path.exists(p):
                result.append(p)
            else:
                log.ODM_WARNING("Missing %s from submodel %s" % (p, f))

    return result
def align_to_primary_band(shot_id, image):
    photo = reconstruction.get_photo(shot_id)

    # No need to align if requested by user
    if args.skip_band_alignment:
        return image

    # No need to align primary
    if photo.band_name == primary_band_name:
        return image

    ainfo = alignment_info.get(photo.band_name)
    if ainfo is not None:
        return multispectral.align_image(image, ainfo['warp_matrix'], ainfo['dimension'])
    else:
        log.ODM_WARNING("Cannot align %s, no alignment matrix could be computed. Band alignment quality might be affected." % (shot_id))
        return image
def get_opensfm_camera_models(cameras):
    """
    Convert cameras to a format OpenSfM can understand
    (opposite of get_cameras_from_opensfm)
    """
    if isinstance(cameras, dict):
        result = {}
        for camera_id in cameras:
            # Quick check on IDs
            if len(camera_id.split(" ")) < 6:
                raise RuntimeError("Invalid cameraID: %s" % camera_id)

            # Add "v2" to camera ID
            if not camera_id.startswith("v2 "):
                osfm_camera_id = "v2 " + camera_id
            else:
                osfm_camera_id = camera_id

            # Add "_prior" keys
            camera = cameras[camera_id]
            prior_fields = ["focal", "focal_x", "focal_y", "c_x", "c_y", "k1", "k2", "p1", "p2", "k3"]
            valid_fields = ["id", "width", "height", "projection_type"] + prior_fields + [f + "_prior" for f in prior_fields]

            keys = list(camera.keys())
            for param in keys:
                param_prior = param + "_prior"
                if param in prior_fields and not param_prior in camera:
                    camera[param_prior] = camera[param]

            # Remove invalid keys
            keys = list(camera.keys())
            for k in keys:
                if not k in valid_fields:
                    camera.pop(k)
                    log.ODM_WARNING("Invalid camera key ignored: %s" % k)

            result[osfm_camera_id] = camera

        return result
    else:
        raise RuntimeError("Invalid cameras format: %s. Expected dict." % str(cameras))
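# A small illustration of what get_opensfm_camera_models() does to one entry
# (hypothetical values): the ID gains a "v2 " prefix, prior fields are copied
# to their "_prior" counterparts, and unknown keys are dropped with a warning.
#
#   {"hero4 black 4000 3000 perspective 0.4166":
#       {"projection_type": "perspective", "width": 4000, "height": 3000,
#        "focal": 0.4166, "k1": -0.28, "k2": 0.07}}
#
# becomes
#
#   {"v2 hero4 black 4000 3000 perspective 0.4166":
#       {"projection_type": "perspective", "width": 4000, "height": 3000,
#        "focal": 0.4166, "focal_prior": 0.4166,
#        "k1": -0.28, "k1_prior": -0.28, "k2": 0.07, "k2_prior": 0.07}}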
def process(self, inputs, outputs):
    # Benchmarking
    start_time = system.now_raw()

    log.ODM_INFO('Running ODM CMVS Cell')

    # get inputs
    args = self.inputs.args
    tree = self.inputs.tree

    # check if we rerun cell or not
    rerun_cell = (args.rerun is not None and args.rerun == 'cmvs') or \
                 (args.rerun_all) or \
                 (args.rerun_from is not None and 'cmvs' in args.rerun_from)

    if not io.file_exists(tree.pmvs_bundle) or rerun_cell:
        log.ODM_DEBUG('Writing CMVS vis in: %s' % tree.pmvs_bundle)

        # copy bundle file to pmvs dir
        from shutil import copyfile
        copyfile(tree.opensfm_bundle, tree.pmvs_bundle)

        kwargs = {
            'bin': context.cmvs_path,
            'prefix': self.inputs.tree.pmvs_rec_path,
            'max_images': self.params.max_images,
            'cores': self.params.cores
        }

        # run cmvs
        system.run('{bin} {prefix}/ {max_images} {cores}'.format(**kwargs))
    else:
        log.ODM_WARNING('Found a valid CMVS file in: %s' % tree.pmvs_bundle)

    outputs.reconstruction = inputs.reconstruction

    if args.time:
        system.benchmark(start_time, tree.benchmarking, 'CMVS')

    log.ODM_INFO('Running ODM CMVS Cell - Finished')
    return ecto.OK if args.end_with != 'cmvs' else ecto.QUIT
def process(self, inputs, outputs):
    # Benchmarking
    start_time = system.now_raw()

    log.ODM_INFO('Running ODM Georeferencing Cell')

    # get inputs
    args = self.inputs.args
    tree = self.inputs.tree
    gcpfile = io.join_paths(tree.root_path, self.params.gcp_file)

    # define paths and create working directories
    system.mkdir_p(tree.odm_georeferencing)

    # in case a gcp file is not provided, let's try to generate it using
    # images metadata. Internally calls jhead.
    if not self.params.use_gcp and \
       not io.file_exists(tree.odm_georeferencing_coords):

        log.ODM_WARNING('Warning: No coordinates file. '
                        'Generating coordinates file in: %s'
                        % tree.odm_georeferencing_coords)
        try:
            # odm_georeference definitions
            kwargs = {
                'bin': context.odm_modules_path,
                'imgs': tree.dataset_raw,
                'imgs_list': tree.opensfm_bundle_list,
                'coords': tree.odm_georeferencing_coords,
                'log': tree.odm_georeferencing_utm_log
            }

            # run UTM extraction binary
            system.run('{bin}/odm_extract_utm -imagesPath {imgs}/ '
                       '-imageListFile {imgs_list} -outputCoordFile {coords} '
                       '-logFile {log}'.format(**kwargs))
        except Exception as e:
            log.ODM_ERROR('Could not generate GCP file from images metadata. '
                          'Consider rerunning with argument --odm_georeferencing-useGcp '
                          'and provide a proper GCP file')
            log.ODM_ERROR(e)
            return ecto.QUIT
def compute_irradiance(photo, use_sun_sensor=True):
    # Thermal?
    if photo.band_name == "LWIR":
        return 1.0

    # Some cameras (Micasense) store the value (nice! just return)
    hirradiance = photo.get_horizontal_irradiance()
    if hirradiance is not None:
        return hirradiance

    # TODO: support for calibration panels
    if use_sun_sensor and photo.get_sun_sensor():
        # Estimate it
        dls_orientation_vector = np.array([0, 0, -1])
        sun_vector_ned, sensor_vector_ned, sun_sensor_angle, \
        solar_elevation, solar_azimuth = dls.compute_sun_angle([photo.latitude, photo.longitude],
                                                               photo.get_dls_pose(),
                                                               photo.get_utc_time(),
                                                               dls_orientation_vector)

        angular_correction = dls.fresnel(sun_sensor_angle)

        # TODO: support for direct and scattered irradiance
        direct_to_diffuse_ratio = 6.0  # Assumption, clear skies
        spectral_irradiance = photo.get_sun_sensor()

        percent_diffuse = 1.0 / direct_to_diffuse_ratio
        sensor_irradiance = spectral_irradiance / angular_correction

        # Find direct irradiance in the plane normal to the sun
        untilted_direct_irr = sensor_irradiance / (percent_diffuse + np.cos(sun_sensor_angle))
        direct_irradiance = untilted_direct_irr
        scattered_irradiance = untilted_direct_irr * percent_diffuse

        # compute irradiance on the ground using the solar altitude angle
        horizontal_irradiance = direct_irradiance * np.sin(solar_elevation) + scattered_irradiance
        return horizontal_irradiance
    elif use_sun_sensor:
        log.ODM_WARNING("No sun sensor values found for %s" % photo.filename)

    return 1.0
def opensfm_reconstruction_average_gsd(reconstruction_json):
    """
    Computes the average Ground Sampling Distance of an OpenSfM reconstruction.
    :param reconstruction_json path to OpenSfM's reconstruction.json
    :return Ground Sampling Distance value (cm / pixel) or None if a GSD estimate cannot be computed
    """
    if not os.path.isfile(reconstruction_json):
        raise IOError(reconstruction_json + " does not exist.")

    with open(reconstruction_json) as f:
        data = json.load(f)

    # Calculate median height from sparse reconstruction
    reconstruction = data[0]
    point_heights = []

    for pointId in reconstruction['points']:
        point = reconstruction['points'][pointId]
        point_heights.append(point['coordinates'][2])

    ground_height = np.median(point_heights)

    gsds = []
    for shotImage in reconstruction['shots']:
        shot = reconstruction['shots'][shotImage]
        if shot['gps_dop'] < 999999:
            camera = reconstruction['cameras'][shot['camera']]
            shot_height = shot['translation'][2]
            focal_ratio = camera.get('focal', camera.get('focal_x'))
            if not focal_ratio:
                log.ODM_WARNING("Cannot parse focal values from %s. This is likely an unsupported camera model." % reconstruction_json)
                return None

            gsds.append(calculate_gsd_from_focal_ratio(focal_ratio,
                                                       shot_height - ground_height,
                                                       camera['width']))

    if len(gsds) > 0:
        mean = np.mean(gsds)
        if mean > 0:
            return mean

    return None
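# calculate_gsd_from_focal_ratio() is not shown above. A plausible sketch
# consistent with how it is called here (focal ratio = focal length / sensor
# width, flight height above ground in meters, image width in pixels,
# returning cm/pixel) could look like this -- an assumption, not the actual
# helper:
def calculate_gsd_from_focal_ratio_sketch(focal_ratio, flight_height, image_width):
    if focal_ratio <= 0 or image_width <= 0:
        return None
    # GSD (cm/px) = (flight height in cm) / (image width in px * focal ratio)
    return ((flight_height * 100) / image_width) / focal_ratio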
def monitor():
    class nonloc:
        status_callback_calls = 0
        last_update = 0

    def status_callback(info):
        # If a task switches from RUNNING to QUEUED, then we need to
        # stop the process and re-add the task to the queue.
        if info.status == TaskStatus.QUEUED:
            log.ODM_WARNING("LRE: %s (%s) turned from RUNNING to QUEUED. Re-adding to back of the queue." % (self, task.uuid))
            raise NodeTaskLimitReachedException("Delayed task limit reached")
        elif info.status == TaskStatus.RUNNING:
            # Print a status message once in a while
            nonloc.status_callback_calls += 1
            if nonloc.status_callback_calls > 30:
                log.ODM_INFO("LRE: %s (%s) is still running" % (self, task.uuid))
                nonloc.status_callback_calls = 0

    try:
        def print_progress(percentage):
            if (time.time() - nonloc.last_update >= 2) or int(percentage) == 100:
                log.ODM_INFO("LRE: Download of %s at [%s%%]" % (self, int(percentage)))
                nonloc.last_update = time.time()

        task.wait_for_completion(status_callback=status_callback)
        log.ODM_INFO("LRE: Downloading assets for %s" % self)
        task.download_assets(self.project_path, progress_callback=print_progress)
        log.ODM_INFO("LRE: Downloaded and extracted assets for %s" % self)
        done()
    except exceptions.TaskFailedError as e:
        # Try to get output
        try:
            output_lines = task.output()

            # Save to file
            error_log_path = self.path("error.log")
            with open(error_log_path, 'w') as f:
                f.write('\n'.join(output_lines) + '\n')

            msg = "(%s) failed with task output: %s\nFull log saved at %s" % (task.uuid, "\n".join(output_lines[-10:]), error_log_path)
            done(exceptions.TaskFailedError(msg))
        except:
            log.ODM_WARNING("LRE: Could not retrieve task output for %s (%s)" % (self, task.uuid))
            done(e)
    except Exception as e:
        done(e)
def FromCoordsFile(coords_file):
    # check for coordinate file existence
    if not io.file_exists(coords_file):
        log.ODM_WARNING('Could not find file %s' % coords_file)
        return

    with open(coords_file) as f:
        # extract reference system and utm zone from first line.
        # We will assume the following format:
        # 'WGS84 UTM 17N' or 'WGS84 UTM 17N \n'
        line = f.readline().rstrip()
        srs = location.parse_srs_header(line)

        # second line is an easting/northing offset
        line = f.readline().rstrip()
        utm_east_offset, utm_north_offset = map(float, line.split(" "))

    return ODM_GeoRef(srs, utm_east_offset, utm_north_offset)
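# A minimal sketch of the coordinates file this variant reads (hypothetical
# values): the first line is the SRS header, the second line holds the UTM
# easting and northing offsets separated by a single space.
#
#   WGS84 UTM 17N
#   544256.0 5320919.0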
def __init__(self, nodeUrl):
    self.node = Node.from_url(nodeUrl)
    self.params = {
        'tasks': [],
        'threads': []
    }
    self.node_online = True

    log.ODM_INFO("LRE: Initializing using cluster node %s:%s" % (self.node.host, self.node.port))
    try:
        odm_version = self.node.info().odm_version
        log.ODM_INFO("LRE: Node is online and running ODM version: %s" % odm_version)
    except exceptions.NodeConnectionError:
        log.ODM_WARNING("LRE: The node seems to be offline! We'll still process the dataset, but it's going to run entirely locally.")
        self.node_online = False
    except Exception as e:
        log.ODM_ERROR("LRE: An unexpected problem happened while opening the node connection: %s" % str(e))
        exit(1)
def __init__(self, nodeUrl, rerun=False):
    self.node = Node.from_url(nodeUrl)
    self.params = {
        'tasks': [],
        'threads': [],
        'rerun': rerun
    }
    self.node_online = True

    log.ODM_INFO("LRE: Initializing using cluster node %s:%s" % (self.node.host, self.node.port))
    try:
        info = self.node.info()
        log.ODM_INFO("LRE: Node is online and running %s version %s" % (info.engine, info.engine_version))
    except exceptions.NodeConnectionError:
        log.ODM_WARNING("LRE: The node seems to be offline! We'll still process the dataset, but it's going to run entirely locally.")
        self.node_online = False
    except Exception as e:
        raise system.ExitException("LRE: An unexpected problem happened while opening the node connection: %s" % str(e))
def configuration():
    args = config.config()
    args_dict = vars(args)
    args.split = 5
    args.split_overlap = 10
    args.rerun_all = True

    for k in sorted(args_dict.keys()):
        # Skip _is_set keys
        if k.endswith("_is_set"):
            continue

        # Don't leak token
        if k == 'sm_cluster' and args_dict[k] is not None:
            log.ODM_INFO('%s: True' % k)
        else:
            log.ODM_INFO('%s: %s' % (k, args_dict[k]))

    args.project_path = io.join_paths(args.project_path, args.name)
    print(args.project_path)
    args.project_path = '/home/j/ODM-master/dataset/images'

    if not io.dir_exists(args.project_path):
        log.ODM_WARNING('Directory %s does not exist. Creating it now.' % args.name)
        system.mkdir_p(os.path.abspath(args.project_path))

    dataset = ODMLoadDatasetStage('dataset', args, progress=5.0, verbose=args.verbose)
    dataset.run()

    # upload images to server 2
    # blocking call
    # run distance measurements
    # exchange images that are required by 2 and images required by 1
    # opensfm in map reduce mode
    opensfm = ODMOpenSfMStage('opensfm', args, progress=25.0)
    opensfm.run()
def photos_to_metadata(self, photos, rolling_shutter, rolling_shutter_readout, rerun=False):
    metadata_dir = self.path("exif")

    if io.dir_exists(metadata_dir) and not rerun:
        log.ODM_WARNING("%s already exists, not rerunning photo to metadata" % metadata_dir)
        return

    if io.dir_exists(metadata_dir):
        shutil.rmtree(metadata_dir)

    os.makedirs(metadata_dir, exist_ok=True)

    camera_models = {}
    data = DataSet(self.opensfm_project_path)

    for p in photos:
        d = p.to_opensfm_exif(rolling_shutter, rolling_shutter_readout)
        with open(os.path.join(metadata_dir, "%s.exif" % p.filename), 'w') as f:
            f.write(json.dumps(d, indent=4))

        camera_id = p.camera_id()
        if camera_id not in camera_models:
            camera = exif.camera_from_exif_metadata(d, data)
            camera_models[camera_id] = camera

    # Override any camera specified in the camera models overrides file.
    if data.camera_models_overrides_exists():
        overrides = data.load_camera_models_overrides()
        if "all" in overrides:
            for key in camera_models:
                camera_models[key] = copy.copy(overrides["all"])
                camera_models[key].id = key
        else:
            for key, value in overrides.items():
                camera_models[key] = value

    data.save_camera_models(camera_models)
def generate_dem_tiles(geotiff, output_dir, max_concurrency):
    relief_file = os.path.join(os.path.dirname(__file__), "color_relief.txt")
    hsv_merge_script = os.path.join(os.path.dirname(__file__), "hsv_merge.py")
    colored_dem = io.related_file_path(geotiff, postfix="color")
    hillshade_dem = io.related_file_path(geotiff, postfix="hillshade")
    colored_hillshade_dem = io.related_file_path(geotiff, postfix="colored_hillshade")

    try:
        system.run('gdaldem color-relief "%s" "%s" "%s" -alpha -co ALPHA=YES' % (geotiff, relief_file, colored_dem))
        system.run('gdaldem hillshade "%s" "%s" -z 1.0 -s 1.0 -az 315.0 -alt 45.0' % (geotiff, hillshade_dem))
        system.run('python3 "%s" "%s" "%s" "%s"' % (hsv_merge_script, colored_dem, hillshade_dem, colored_hillshade_dem))
        generate_tiles(colored_hillshade_dem, output_dir, max_concurrency)

        # Cleanup
        for f in [colored_dem, hillshade_dem, colored_hillshade_dem]:
            if os.path.isfile(f):
                os.remove(f)
    except Exception as e:
        log.ODM_WARNING("Cannot generate DEM tiles: %s" % str(e))
def replace_nvm_images(src_nvm_file, img_map, dst_nvm_file):
    """
    Create a new NVM file from an existing NVM file
    replacing the image references based on img_map
    where img_map is a dict { "old_image" --> "new_image" } (filename only).
    The function does not write the points information
    (they are discarded)
    """
    with open(src_nvm_file) as f:
        lines = list(map(str.strip, f.read().split("\n")))

    # Quick check
    if len(lines) < 3 or lines[0] != "NVM_V3" or lines[1].strip() != "":
        raise Exception("%s does not seem to be a valid NVM file" % src_nvm_file)

    num_images = int(lines[2])
    entries = []

    for l in lines[3:3 + num_images]:
        image_path, *p = l.split(" ")

        dir_name = os.path.dirname(image_path)
        file_name = os.path.basename(image_path)

        new_filename = img_map.get(file_name)
        if new_filename is not None:
            entries.append("%s %s" % (os.path.join(dir_name, new_filename), " ".join(p)))
        else:
            log.ODM_WARNING("Cannot find %s in image map for %s" % (file_name, dst_nvm_file))

    if num_images != len(entries):
        raise Exception("Cannot write %s, not all band images have been matched" % dst_nvm_file)

    with open(dst_nvm_file, "w") as f:
        f.write("NVM_V3\n\n%s\n" % len(entries))
        f.write("\n".join(entries))
        f.write("\n\n0\n0\n\n0")
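# The writer above produces a minimal NVM_V3 file with no point data,
# e.g. (hypothetical image entries; only the leading path token is rewritten,
# the remaining per-camera values are carried over from the source file):
#
#   NVM_V3
#
#   2
#   images/band1_0001.tif 2000 0.99 0.01 0.02 0.03 10.0 20.0 30.0 0 0
#   images/band1_0002.tif 2000 0.98 0.02 0.01 0.04 11.0 21.0 31.0 0 0
#
#   0
#   0
#
#   0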
def georeference_with_gps(self, images_path, output_coords_file, rerun=False):
    try:
        if not io.file_exists(output_coords_file) or rerun:
            location.extract_utm_coords(self.photos, images_path, output_coords_file)
        else:
            log.ODM_INFO("Coordinates file already exist: %s" % output_coords_file)

        self.georef = ODM_GeoRef.FromCoordsFile(output_coords_file)
    except:
        log.ODM_WARNING('Could not generate coordinates file. The orthophoto will not be georeferenced.')

    self.gcp = GCPFile(None)
    return self.georef
def add_shots_to_reconstruction(self, p2s):
    with open(self.recon_file()) as f:
        reconstruction = json.loads(f.read())

    # Augment reconstruction.json
    for recon in reconstruction:
        shots = recon['shots']
        sids = list(shots)

        for shot_id in sids:
            secondary_photos = p2s.get(shot_id)
            if secondary_photos is None:
                log.ODM_WARNING("Cannot find secondary photos for %s" % shot_id)
                continue

            for p in secondary_photos:
                shots[p.filename] = shots[shot_id]

    with open(self.recon_file(), 'w') as f:
        f.write(json.dumps(reconstruction))