def memmap_loader(
    image_list,
    memmap_handle,
    idx,
    loader=default.loader,
    new_width=None,
    new_height=None,
):
    """Load the image at ``image_list[idx]`` and store it in ``memmap_handle[idx]``.

    Parameters
    ----------
    image_list : list
        List of image paths; a None entry aborts the program via Console.
    memmap_handle : np.memmap
        Pre-allocated memmap written at position ``idx``.
    idx : int
        Index of the image to load.
    loader : callable
        Function that reads an image path into a numpy array.
    new_width, new_height : int, optional
        If both are given and differ from the loaded image size, the image
        is resized with cubic interpolation before being stored.
    """
    if image_list[idx] is None:
        Console.error("Image at idx", idx, "is None")
        Console.error(
            "Please check your navigation CSV for any missing values")
        Console.quit("Image list is malformed")
    np_im = loader(image_list[idx]).astype(np.float32)
    dimensions = np_im.shape
    if new_height is not None and new_width is not None:
        same_dimensions = (new_width == dimensions[1]) and (new_height
                                                            == dimensions[0])
        if not same_dimensions:
            # BUG FIX: cv2.resize's third positional argument is `dst`, not
            # the interpolation flag; it must be passed by keyword, otherwise
            # the INTER_CUBIC request is misinterpreted.
            im2 = cv2.resize(np_im, (new_width, new_height),
                             interpolation=cv2.INTER_CUBIC)
            memmap_handle[idx, ...] = im2
        else:
            memmap_handle[idx, ...] = np_im
    else:
        memmap_handle[idx, ...] = np_im
def call_rescale(args):
    """Entry point for the 'rescale' subcommand.

    Sets up a timestamped log file, loads the correction configuration and
    camera system, and rescales every camera listed in the configuration.
    Quits via Console if no rescale section is present.
    """
    path = Path(args.path).resolve()
    stamp = time.strftime("%Y%m%d_%H%M%S", time.localtime())
    log_file = get_processed_folder(path) / (
        "log/" + stamp + "_correct_images_rescale.log")
    Console.set_logging_file(log_file)
    correct_config, camerasystem = load_configuration_and_camera_system(
        path, args.suffix)
    # install freeimage plugins if not installed
    imageio.plugins.freeimage.download()
    if correct_config.camerarescale is None:
        Console.error("Camera rescale configuration not found")
        Console.error(
            "Please populate the correct_images.yaml file with a rescale configuration"
        )
        Console.quit("Malformed correct_images.yaml file")
    # obtain parameters for rescale from correct_config
    for camera in correct_config.camerarescale.rescale_cameras:
        corrections.rescale_camera(path, camerasystem, camera)
    Console.info("Rescaling completed for all cameras ...")
def __init__(self, node):
    """Construct the distance-metric settings from a correct_images.yaml entry.

    Parameters
    -----------
    node : cdict
        dictionary object for an entry in correct_images.yaml file
    """
    known_metrics = ["uniform", "altitude", "depth_map"]
    self.distance_metric = node["distance_metric"]
    # "none" is a deprecated alias for "uniform"
    if self.distance_metric == "none":
        self.distance_metric = "uniform"
        Console.warn("Distance metric is set to none. This is deprecated.",
                     "Please use uniform instead.")
    if self.distance_metric not in known_metrics:
        Console.error("Distance metric not valid. Please use one of the following:",
                      known_metrics)
        Console.quit("Invalid distance metric: {}".format(self.distance_metric))
    altitude_filter = node["altitude_filter"]
    self.metric_path = node["metric_path"]
    self.altitude_max = altitude_filter["max_m"]
    self.altitude_min = altitude_filter["min_m"]
    self.smoothing = node["smoothing"]
    self.window_size = node["window_size"]
    self.outlier_reject = node["curve_fitting_outlier_rejection"]
def update_camera_list(
    camera_list: List[Camera],
    ekf_list: List[SyncedOrientationBodyVelocity],
    origin_offsets: List[float],
    camera1_offsets: List[float],
    latlon_reference: List[float],
) -> List[Camera]:
    """Merge EKF states into camera entries by matching epoch timestamps.

    Walks both lists (assumed time-ordered) in lockstep; on an exact
    timestamp match the camera entry is updated from the EKF state.
    Cameras left behind without an update are reported via Console.
    Returns the (mutated) camera list.
    """
    cam_i = 0
    ekf_i = 0
    n_cam = len(camera_list)
    n_ekf = len(ekf_list)
    while cam_i < n_cam and ekf_i < n_ekf:
        cam = camera_list[cam_i]
        ekf = ekf_list[ekf_i]
        if cam.epoch_timestamp < ekf.epoch_timestamp:
            # Camera is behind the EKF stream: it can never be matched now.
            if not cam.updated:
                Console.error(
                    "There is a camera entry with index",
                    cam_i,
                    "that is not updated to EKF...",
                )
            cam_i += 1
        elif cam.epoch_timestamp > ekf.epoch_timestamp:
            ekf_i += 1
        else:
            cam.fromSyncedBodyVelocity(
                ekf,
                origin_offsets,
                camera1_offsets,
                latlon_reference,
            )
            cam_i += 1
            ekf_i += 1
    return camera_list
def add(self, measurement):
    """Convert *measurement* to ACFR format and append it to the output file.

    RDI body-velocity records need the most recent altitude and orientation
    readings, so Altitude and Orientation measurements are cached on self
    and consumed (reset to None) when a BodyVelocity arrives.
    Measurement types with no ACFR equivalent are silently skipped.
    """
    data = None
    # Exact type dispatch (not isinstance): each branch handles one
    # concrete measurement class.
    if type(measurement) is BodyVelocity:
        if self.rdi_ready():
            # Combine velocity with the cached altitude/orientation context,
            # then clear both caches.
            data = measurement.to_acfr(self.rdi_altitude, self.rdi_orientation)
            self.rdi_orientation = None
            self.rdi_altitude = None
    elif type(measurement) is InertialVelocity:
        pass  # not exported to ACFR
    elif type(measurement) is Altitude:
        self.rdi_altitude = measurement  # cache for the next BodyVelocity
    elif type(measurement) is Depth:
        data = measurement.to_acfr()
    elif type(measurement) is Usbl:
        data = measurement.to_acfr()
    elif type(measurement) is Orientation:
        # Orientation is both written out and cached for the next BodyVelocity
        data = measurement.to_acfr()
        self.rdi_orientation = measurement
    elif type(measurement) is Other:
        pass  # not exported to ACFR
    elif type(measurement) is Camera:
        # Get rid of laser images
        if "xxx" in measurement.filename:
            pass
        else:
            data = measurement.to_acfr()
    else:
        Console.error("AcfrConverter type {} not supported".format(
            type(measurement)))
    if data is not None:
        self.f.write(data)
def parse(self, line):
    """Parses a line of the ACFR vehicle pose data file.

    NOTE(review): the original docstring said "stereo pose", but this
    11-field format (no image names) matches the vehicle pose file; the
    15-field stereo pose format is handled by a separate parser.

    Parameters
    ----------
    line : a string that contains a line of the document
        The string should contain 11 items separated by spaces.
        According to ACFR format, the items should be:
        1) Pose identifier - integer value
        2) Timestamp - in seconds
        3) Latitude - in degrees
        4) Longitude - in degrees
        5) X position (North) - in meters, relative to local nav frame
        6) Y position (East) - in meters, relative to local nav frame
        7) Z position (Depth) - in meters, relative to local nav frame
        8) X-axis Euler angle - in radians, relative to local nav frame
        9) Y-axis Euler angle - in radians, relative to local nav frame
        10) Z-axis Euler angle - in radians, relative to local nav frame
        11) Vehicle altitude - in meters
    """
    parts = line.split()
    # NOTE(review): a malformed line is only reported; parsing continues
    # below and will raise IndexError if fewer than 11 fields are present.
    if len(parts) != 11:
        Console.error(
            "The line passed to ACFR stereo pose parser is malformed.")
    self.id = int(parts[0])
    self.stamp = float(parts[1])
    self.latitude = float(parts[2])
    self.longitude = float(parts[3])
    self.x_north = float(parts[4])
    self.y_east = float(parts[5])
    self.z_depth = float(parts[6])
    # Euler angles are stored in degrees (file carries radians)
    self.x_euler_angle = math.degrees(float(parts[7]))
    self.y_euler_angle = math.degrees(float(parts[8]))
    self.z_euler_angle = math.degrees(float(parts[9]))
    self.altitude = float(parts[10])
def calibrate_laser(
    laser_cam_name,
    laser_cam_filepath,
    laser_cam_extension,
    non_laser_cam_name,
    non_laser_cam_filepath,
    non_laser_cam_extension,
    stereo_calibration_file,
    config,
    output_file,
    output_file_b,
    skip_first=0,
    fo=False,
    foa=False,
):
    """Calibrate the laser sheet(s) against a calibrated stereo pair.

    Collects image lists for the laser and non-laser cameras, runs the
    LaserCalibrator on them (skipping the first `skip_first` frames) and
    writes the resulting YAML calibration(s) to `output_file` (and
    `output_file_b` when two lasers are configured).
    """
    Console.info(
        "Looking for calibration images in {}".format(laser_cam_filepath))
    laser_cam_image_list = collect_image_files(laser_cam_filepath,
                                               laser_cam_extension)
    # BUG FIX: messages previously said "left"/"right" images, which is
    # misleading for a laser / non-laser camera pair.
    Console.info("Found " + str(len(laser_cam_image_list)) +
                 " laser camera images.")
    Console.info(
        "Looking for calibration images in {}".format(non_laser_cam_filepath))
    non_laser_cam_image_list = collect_image_files(non_laser_cam_filepath,
                                                   non_laser_cam_extension)
    Console.info("Found " + str(len(non_laser_cam_image_list)) +
                 " non-laser camera images.")
    if len(laser_cam_image_list) < 8 or len(non_laser_cam_image_list) < 8:
        Console.error("Too few images. Try to get more.")
        return
    try:
        model = StereoCamera(stereo_calibration_file)
        # NOTE: the unused nested helper `check_prop` was removed (dead code).
        lc = LaserCalibrator(stereo_camera_model=model,
                             config=config,
                             overwrite=foa)
        lc.cal(
            laser_cam_image_list[skip_first:],
            non_laser_cam_image_list[skip_first:],
            output_file.parent,
        )
        Console.info("Writing calibration to " "'{}'" "".format(output_file))
        with output_file.open("w") as f:
            f.write(lc.yaml())
        if "two_lasers" not in config["detection"]:
            return
        if config["detection"]["two_lasers"]:
            Console.info("Writing calibration to "
                         "'{}'"
                         "".format(output_file_b))
            with output_file_b.open("w") as f:
                f.write(lc.yaml_b())
    except CalibrationException:
        Console.error("The calibration pattern was not found in the images.")
def write_csv(
    csv_filepath: Path,
    data_list: Union[List[BodyVelocity], List[Orientation], List[Depth],
                     List[Altitude], List[Usbl], List[Tide], List[Other],
                     List[Camera], List[SyncedOrientationBodyVelocity], ],
    csv_filename: str,
    csv_flag: Optional[bool] = True,
    mutex: Optional[Lock] = None,
):
    """Write a list of measurements to `csv_filepath / csv_filename + '.csv'`.

    A covariance companion file `<name>_cov.csv` is written when the first
    entry carries a covariance. `mutex` (if given) guards directory creation
    when several writers run concurrently.

    NOTE(review): the outer guard requires len(data_list) > 1, so the inner
    `len(data_list) > 0` check is always true here and its else branch is
    unreachable; single-element lists are silently skipped — TODO confirm
    whether `> 1` was intended.
    """
    if csv_flag is True and len(data_list) > 1:
        csv_file = Path(csv_filepath)
        if not csv_file.exists():
            # Directory creation guarded by mutex for concurrent writers
            if mutex is not None:
                mutex.acquire()
            csv_file.mkdir(parents=True, exist_ok=True)
            if mutex is not None:
                mutex.release()
        Console.info("Writing outputs to {}.csv ...".format(csv_filename))
        file = csv_file / "{}.csv".format(csv_filename)
        covariance_file = csv_file / "{}_cov.csv".format(csv_filename)
        fileout = None
        fileout_cov = None
        if len(data_list) > 0:
            fileout = file.open("w")
            # write headers
            str_to_write = data_list[0].get_csv_header()
            fileout.write(str_to_write)
            # Covariance file is only opened when the first entry both has
            # a covariance attribute and a header helper for it.
            if hasattr(data_list[0], "covariance") and hasattr(
                    data_list[0], "get_csv_header_cov"):
                if data_list[0].covariance is not None:
                    fileout_cov = covariance_file.open("w")
                    str_to_write_cov = data_list[0].get_csv_header_cov()
                    fileout_cov.write(str_to_write_cov)
            # Loop for each line in csv
            for i in range(len(data_list)):
                try:
                    str_to_write = data_list[i].to_csv_row()
                    if fileout_cov is not None:
                        str_to_write_cov = data_list[i].to_csv_cov_row()
                        fileout_cov.write(str_to_write_cov)
                    fileout.write(str_to_write)
                except IndexError:
                    Console.error(
                        "There is something wrong with camera filenames and \
indexing for the file",
                        csv_filename,
                    )
                    Console.quit("Check write_csv function.")
            fileout.close()
            if fileout_cov is not None:
                fileout_cov.close()
            Console.info("... done writing {}.csv.".format(csv_filename))
        else:
            Console.warn("Empty data list {}".format(str(csv_filename)))
def test_console(self):
    """Smoke-test every Console helper with get_version patched out."""
    with patch.object(Console, "get_version", return_value="testing"):
        # Exercise each message level once
        for emit, text in (
            (Console.warn, "This is a warning"),
            (Console.error, "This is an error message"),
            (Console.info, "This is an informative message"),
        ):
            emit(text)
        Console.banner()
        Console.get_username()
        Console.get_hostname()
        Console.get_date()
        Console.get_version()
        for step in range(1, 10):
            Console.progress(step, 10)
def merge_json_files(json_file_list):
    """Merge several oplab nav_standard.json datasets into a single list.

    All datasets must share the same origin (first entry of each file);
    otherwise the program quits via Console. Every filename (camera1/2/3
    and laser entries) is prefixed with its dive folder name so entries
    stay unambiguous after the merge. The shared origin entry is kept once,
    from the first file.

    Parameters
    ----------
    json_file_list : iterable of str/Path
        Paths of the form .../DIVE_NAME/nav/nav_standard.json

    Returns
    -------
    list
        The merged entries (origin first).
    """
    origin_lat = None
    origin_lon = None
    data_list = []
    for fn in json_file_list:
        filepath = Path(fn)
        with filepath.open("r") as json_file:
            data = json.load(json_file)
        # Origins are by default at the top of the json file
        lat = data[0]["data"][0]["latitude"]
        lon = data[0]["data"][0]["longitude"]
        if origin_lat is None:
            origin_lat = lat
            origin_lon = lon
            data_list.append(data[0])
        elif origin_lat != lat or origin_lon != lon:
            Console.error(
                "The datasets you want to merge do not belong to the same",
                "origin.",
            )
            Console.error(
                "Change the origins to be identical and parse them again.")
            Console.quit("Invalid origins for merging datasets.")
        # Get dive name
        # json_file_list = .../DIVE_NAME/nav/nav_standard.json
        dive_prefix = filepath.parents[1].name + "/"
        # Preceed all filenames with the dive name
        # And skip the origin
        for item in data[1:]:
            # The three camera keys share identical handling (was triplicated)
            for cam_key in ("camera1", "camera2", "camera3"):
                if cam_key in item:
                    item[cam_key][0]["filename"] = (
                        dive_prefix + item[cam_key][0]["filename"])
            if item["category"] == "laser":
                item["filename"] = dive_prefix + item["filename"]
            data_list.append(item)
    return data_list
def calibrate_mono(
    name,
    filepaths,
    extension,
    config,
    output_file,
    fo,
    foa,
    target_width=None,
    target_height=None,
):
    """Run monocular calibration for one camera and write the YAML result.

    Corner detections are cached in a JSON sidecar next to `output_file`;
    with `foa` (force overwrite all) or a missing cache the images are
    re-processed, otherwise calibration is recomputed from the cache.
    """
    if not check_dirs_exist(filepaths):
        filepaths = get_raw_folders(filepaths)
    Console.info("Looking for {} calibration images in {}".format(
        extension, str(filepaths)))
    image_list = collect_image_files(filepaths, extension)
    Console.info("Found " + str(len(image_list)) + " images.")
    if len(image_list) < 8:
        Console.quit("Too few images. Try to get more.")
    board = ChessboardInfo(config["rows"], config["cols"], config["size"])
    calibrator = MonoCalibrator(
        boards=[board],
        pattern=check_pattern(config),
        invert=config["invert"],
        name=name,
        target_width=target_width,
        target_height=target_height,
    )
    try:
        image_list_file = output_file.with_suffix(".json")
        if foa or not image_list_file.exists():
            calibrator.cal(image_list)
            with image_list_file.open("w") as f:
                Console.info("Writing JSON to "
                             "'{}'"
                             "".format(image_list_file))
                json.dump(calibrator.json, f)
        else:
            calibrator.cal_from_json(image_list_file, image_list)
        calibrator.report()
        Console.info("Writing calibration to " "'{}'" "".format(output_file))
        with output_file.open("w") as f:
            f.write(calibrator.yaml())
    except CalibrationException:
        Console.error("The calibration pattern was not found in the images.")
def parse(self, line):
    """Parses a line of the ACFR stereo pose data file

    Parameters
    ----------
    line : a string that contains a line of the document
        The string should contain 15 items separated by spaces. According to ACFR format, # noqa
        the items should be:
        1) Pose identifier - integer value
        2) Timestamp - in seconds
        3) Latitude - in degrees
        4) Longitude - in degrees
        5) X position (North) - in meters, relative to local nav frame # noqa
        6) Y position (East) - in meters, relative to local nav frame # noqa
        7) Z position (Depth) - in meters, relative to local nav frame # noqa
        8) X-axis Euler angle - in radians, relative to local nav frame # noqa
        9) Y-axis Euler angle - in radians, relative to local nav frame # noqa
        10) Z-axis Euler angle - in radians, relative to local nav frame # noqa
        11) Left image name
        12) Right image name
        13) Vehicle altitude - in meters
        14) Approx. bounding image radius - in meters
        15) Likely trajectory cross-over point - 1 for true, 0 for false
    """
    parts = line.split()
    # NOTE(review): a malformed line is only reported; parsing continues
    # below and will raise IndexError if fewer than 15 fields are present.
    if len(parts) != 15:
        Console.error(
            "The line passed to ACFR stereo pose parser is malformed.")
    self.id = int(parts[0])
    self.stamp = float(parts[1])
    self.latitude = float(parts[2])
    self.longitude = float(parts[3])
    self.x_north = float(parts[4])
    self.y_east = float(parts[5])
    self.z_depth = float(parts[6])
    # Euler angles are stored in degrees (file carries radians)
    self.x_euler_angle = math.degrees(float(parts[7]))
    self.y_euler_angle = math.degrees(float(parts[8]))
    self.z_euler_angle = math.degrees(float(parts[9]))
    self.left_image_name = str(parts[10])
    self.right_image_name = str(parts[11])
    self.altitude = float(parts[12])
    self.bounding_image_radius = float(parts[13])
    self.crossover_likelihood = int(parts[14])
def mono(self):
    """Run monocular calibration for every camera in the configuration.

    Skips cameras whose calibration YAML already exists unless the force
    flag (self.fo) is set; quits if a camera lacks a glob_pattern entry.
    """
    for cam_cfg in self.calibration_config["cameras"]:
        cam_name = cam_cfg["name"]
        # Find if the calibration file exists
        calibration_file = self.output_path / str("mono_" + cam_name +
                                                  ".yaml")
        Console.info("Looking for a calibration file at " +
                     str(calibration_file))
        if calibration_file.exists() and not self.fo:
            Console.warn(
                "The camera " + cam_cfg["name"] +
                " has already been calibrated. If you want to overwrite " +
                "the JSON, use the -F flag.")
            continue
        Console.info(
            "The camera is not calibrated, running mono calibration..."
        )
        filepaths = build_filepath(
            get_processed_folder(self.filepath),
            cam_cfg["camera_calibration"]["path"],
        )
        if "glob_pattern" not in cam_cfg["camera_calibration"]:
            Console.error(
                "Could not find the key glob_pattern for the camera ",
                cam_cfg["name"],
            )
            Console.quit("glob_pattern expected in calibration.yaml")
        calibrate_mono(
            cam_name,
            filepaths,
            str(cam_cfg["camera_calibration"]["glob_pattern"]),
            self.calibration_config["camera_calibration"],
            calibration_file,
            self.fo,
            self.foa,
        )
def __init__(self, filename=None):
    """Load a correct_images.yaml configuration file.

    Parameters
    -----------
    filename : Path
        path to the correct_images.yaml file
    """
    # Allow constructing an empty configuration object
    if filename is None:
        return
    with filename.open("r") as stream:
        data = yaml.safe_load(stream)
    self.color_correction = None
    self.camerarescale = None
    # Old-format files have no "version" key and must be regenerated
    if "version" not in data:
        Console.error(
            "It seems you are using an old correct_images.yaml.",
            "You will have to delete it and run this software again.",
        )
        Console.error("Delete the file with:")
        Console.error(" rm ", filename)
        Console.quit("Wrong correct_images.yaml format")
    self.version = data["version"]
    valid_methods = ["manual_balance", "colour_correction"]
    self.method = data["method"]
    if self.method not in valid_methods:
        Console.quit(
            "The method requested (",
            self.method,
            ") is not supported or implemented. The valid methods",
            "are:",
            " ".join(valid_methods),
        )
    if "colour_correction" in data:
        self.color_correction = ColourCorrection(data["colour_correction"])
    self.configs = CameraConfigs(data["cameras"])
    self.output_settings = OutputSettings(data["output_settings"])
    if "rescale" in data:
        self.camerarescale = CameraRescale(data["rescale"])
def calibrate_stereo(
    left_name,
    left_filepaths,
    left_extension,
    left_calib,
    right_name,
    right_filepaths,
    right_extension,
    right_calib,
    config,
    output_file,
    fo,
    foa,
):
    """Calibrate a stereo pair and write the combined YAML calibration.

    Uses the corner detections cached in the mono JSON sidecars of both
    cameras. If the two cameras differ in resolution or aspect ratio, the
    right camera is re-calibrated against the left camera's resolution
    (into a "resized_" calibration file) before the stereo step.
    """
    if not check_dirs_exist(left_filepaths) or not check_dirs_exist(
            right_filepaths):
        left_filepaths = get_raw_folders(left_filepaths)
        right_filepaths = get_raw_folders(right_filepaths)
    Console.info("Looking for calibration images in {}".format(
        str(left_filepaths)))
    left_image_list = collect_image_files(left_filepaths, left_extension)
    Console.info("Found " + str(len(left_image_list)) + " left images.")
    Console.info("Looking for calibration images in {}".format(
        str(right_filepaths)))
    right_image_list = collect_image_files(right_filepaths, right_extension)
    Console.info("Found " + str(len(right_image_list)) + " right images.")
    # The image lists are only used for this sanity check; the calibration
    # itself runs from the cached JSON detections below.
    if len(left_image_list) < 8 or len(right_image_list) < 8:
        Console.quit("Too few images. Try to get more.")
    try:
        with left_calib.with_suffix(".json").open("r") as f:
            left_json = json.load(f)
        with right_calib.with_suffix(".json").open("r") as f:
            right_json = json.load(f)
        model = StereoCamera(left=left_calib, right=right_calib)
        if model.different_aspect_ratio or model.different_resolution:
            Console.warn(
                "Stereo calibration: Calibrating two cameras with different",
                "resolution.",
            )
            Console.info("  Camera:", left_name, "is", model.left.size)
            Console.info("  Camera:", right_name, "is", model.right.size)
            # Re-calibrate the right camera at the left camera's resolution
            right_calib = right_calib.parent / ("resized_" + right_calib.name)
            if not right_calib.with_suffix(".json").exists():
                calibrate_mono(
                    right_name,
                    right_filepaths,
                    right_extension,
                    config,
                    right_calib,
                    fo,
                    foa,
                    target_width=model.left.image_width,
                    target_height=model.left.image_height,
                )
            # Rebuild the stereo model from the resized right calibration
            model = StereoCamera(left=left_calib, right=right_calib)
            right_calib = right_calib.with_suffix(".json")
            with right_calib.open("r") as f:
                right_json = json.load(f)
        sc = StereoCalibrator(
            name=left_name + "-" + right_name,
            stereo_camera_model=model,
            boards=[
                ChessboardInfo(config["rows"], config["cols"],
                               config["size"])
            ],
            pattern=check_pattern(config),
            invert=config["invert"],
        )
        # sc.cal(left_image_list, right_image_list)
        sc.cal_from_json(
            left_json=left_json,
            right_json=right_json,
        )
        sc.report()
        Console.info("Writing calibration to " "'{}'" "".format(output_file))
        with output_file.open("w") as f:
            f.write(sc.yaml())
    except CalibrationException:
        Console.error("The calibration pattern was not found in the images.")
def stereo(self):
    """Calibrate the configured stereo pair(s).

    Handles camera0+camera1 first, then (if present) camera0+camera2.
    For each pair: skips if the stereo YAML exists (unless forced), makes
    sure both monocular calibrations exist (running self.mono() if not),
    then runs calibrate_stereo. The second pair is additionally calibrated
    in the reversed camera order.
    """
    if len(self.calibration_config["cameras"]) > 1:
        c0 = self.calibration_config["cameras"][0]
        c1 = self.calibration_config["cameras"][1]
        calibration_file = self.output_path / str("stereo_" + c0["name"] +
                                                  "_" + c1["name"] + ".yaml")
        Console.info("Looking for a calibration file at " +
                     str(calibration_file))
        if calibration_file.exists() and not self.fo:
            Console.quit(
                "The stereo pair " + c0["name"] + "_" + c1["name"] +
                " has already been calibrated. If you want to overwrite " +
                "the calibration, use the -F flag.")
        Console.info(
            "The stereo camera is not calibrated, running stereo",
            "calibration...",
        )
        left_filepaths = build_filepath(
            get_processed_folder(self.filepath),
            c0["camera_calibration"]["path"],
        )
        right_filepaths = build_filepath(
            get_processed_folder(self.filepath),
            c1["camera_calibration"]["path"],
        )
        left_name = c0["name"]
        if "glob_pattern" not in c0["camera_calibration"]:
            Console.error(
                "Could not find the key glob_pattern for the camera ",
                c0["name"],
            )
            Console.quit("glob_pattern expected in calibration.yaml")
        left_extension = str(c0["camera_calibration"]["glob_pattern"])
        right_name = c1["name"]
        if "glob_pattern" not in c1["camera_calibration"]:
            Console.error(
                "Could not find the key glob_pattern for the camera ",
                c1["name"],
            )
            Console.quit("glob_pattern expected in calibration.yaml")
        right_extension = str(c1["camera_calibration"]["glob_pattern"])
        left_calibration_file = self.output_path / str("mono_" + left_name +
                                                       ".yaml")
        right_calibration_file = self.output_path / str("mono_" +
                                                        right_name + ".yaml")
        # Ensure both monocular calibrations exist before the stereo step
        if (not left_calibration_file.exists()
                or not right_calibration_file.exists()):
            if not left_calibration_file.exists():
                Console.warn(
                    "Could not find a monocular calibration file " +
                    str(left_calibration_file) + "...")
            if not right_calibration_file.exists():
                Console.warn(
                    "Could not find a monocular calibration file " +
                    str(right_calibration_file) + "...")
            self.mono()
        if left_calibration_file.exists(
        ) and right_calibration_file.exists():
            Console.info("Loading previous monocular calibrations at \
\n\t * {}\n\t * {}".format(
                str(left_calibration_file), str(right_calibration_file)))
            calibrate_stereo(
                left_name,
                left_filepaths,
                left_extension,
                left_calibration_file,
                right_name,
                right_filepaths,
                right_extension,
                right_calibration_file,
                self.calibration_config["camera_calibration"],
                calibration_file,
                self.fo,
                self.foa,
            )
    # Check for a second stereo pair
    if len(self.calibration_config["cameras"]) > 2:
        c0 = self.calibration_config["cameras"][0]
        c2 = self.calibration_config["cameras"][2]
        calibration_file = self.output_path / str("stereo_" + c0["name"] +
                                                  "_" + c2["name"] + ".yaml")
        Console.info("Looking for a calibration file at " +
                     str(calibration_file))
        if calibration_file.exists() and not self.fo:
            Console.quit(
                "The stereo pair " + c0["name"] + "_" + c2["name"] +
                " has already been calibrated. If you want to overwrite " +
                "the calibration, use the -F flag.")
        Console.info(
            "The stereo camera is not calibrated, running stereo",
            "calibration...",
        )
        left_name = c0["name"]
        left_filepaths = build_filepath(
            get_processed_folder(self.filepath),
            c0["camera_calibration"]["path"],
        )
        left_extension = str(c0["camera_calibration"]["glob_pattern"])
        right_name = c2["name"]
        # NOTE(review): unlike every other path above, this one is built from
        # self.filepath directly instead of get_processed_folder(self.filepath)
        # — looks inconsistent; confirm whether this is intentional.
        right_filepaths = build_filepath(self.filepath,
                                         c2["camera_calibration"]["path"])
        right_extension = str(c2["camera_calibration"]["glob_pattern"])
        left_calibration_file = self.output_path / str("mono_" + left_name +
                                                       ".yaml")
        right_calibration_file = self.output_path / str("mono_" +
                                                        right_name + ".yaml")
        if (not left_calibration_file.exists()
                or not right_calibration_file.exists()):
            if not left_calibration_file.exists():
                Console.warn(
                    "Could not find a monocular calibration file " +
                    str(left_calibration_file) + "...")
            if not right_calibration_file.exists():
                Console.warn(
                    "Could not find a monocular calibration file " +
                    str(right_calibration_file) + "...")
            self.mono()
        if left_calibration_file.exists(
        ) and right_calibration_file.exists():
            Console.info("Loading previous monocular calibrations at \
\n\t * {}\n\t * {}".format(
                str(left_calibration_file), str(right_calibration_file)))
            calibrate_stereo(
                left_name,
                left_filepaths,
                left_extension,
                left_calibration_file,
                right_name,
                right_filepaths,
                right_extension,
                right_calibration_file,
                self.calibration_config["camera_calibration"],
                calibration_file,
                self.fo,
                self.foa,
            )
            # Also calibrate the reversed pair (c2 as left, c0 as right)
            calibration_file = self.output_path / str("stereo_" + c2["name"] +
                                                      "_" + c0["name"] +
                                                      ".yaml")
            calibrate_stereo(
                right_name,
                right_filepaths,
                right_extension,
                right_calibration_file,
                left_name,
                left_filepaths,
                left_extension,
                left_calibration_file,
                self.calibration_config["camera_calibration"],
                calibration_file,
                self.fo,
                self.foa,
            )
def hybis_to_oplab(args):
    """Convert HYBIS navigation and images to an oplab dead-reckoning CSV.

    Validates the required command-line arguments, parses the HYBIS
    navigation file, creates the processed json_renav output folder
    (refusing to overwrite a previous run unless -F was given) and writes
    the dead-reckoning CSV for the HYBIS camera.
    """
    if args.navigation_file is None:
        Console.error("Please provide a navigation file")
        Console.quit("Missing comandline arguments")
    if args.image_path is None:
        Console.error("Please provide an image path")
        Console.quit("Missing comandline arguments")
    if args.reference_lat is None:
        Console.error("Please provide a reference_lat")
        Console.quit("Missing comandline arguments")
    if args.reference_lon is None:
        Console.error("Please provide a reference_lon")
        Console.quit("Missing comandline arguments")
    if args.output_folder is None:
        Console.error("Please provide an output path")
        Console.quit("Missing comandline arguments")
    parser = HybisParser(
        args.navigation_file,
        args.image_path,
        args.reference_lat,
        args.reference_lon,
    )
    force_overwrite = args.force
    filepath = get_processed_folder(args.output_folder)
    start_datetime = epoch_to_datetime(parser.start_epoch)
    finish_datetime = epoch_to_datetime(parser.finish_epoch)
    # make path for processed outputs
    json_filename = ("json_renav_" + start_datetime[0:8] + "_" +
                     start_datetime[8:14] + "_" + finish_datetime[0:8] + "_" +
                     finish_datetime[8:14])
    renavpath = filepath / json_filename
    if not renavpath.is_dir():
        try:
            renavpath.mkdir()
        except Exception as e:
            print("Warning:", e)
    elif renavpath.is_dir() and not force_overwrite:
        # Check if dataset has already been processed
        Console.error(
            "It looks like this dataset has already been processed for the",
            "specified time span.",
        )
        Console.error(
            "The following directory already exist: {}".format(renavpath))
        Console.error(
            "To overwrite the contents of this directory rerun auv_nav with",
            "the flag -F.",
        )
        Console.error("Example: auv_nav process -F PATH")
        Console.quit("auv_nav process would overwrite json_renav files")
    output_dr_centre_path = renavpath / "csv" / "dead_reckoning"
    if not output_dr_centre_path.exists():
        output_dr_centre_path.mkdir(parents=True)
    camera_name = "hybis_camera"
    output_dr_centre_path = output_dr_centre_path / ("auv_dr_" + camera_name +
                                                     ".csv")
    # BUG FIX: the output CSV was previously written twice in a row
    # (duplicated parser.write call).
    parser.write(output_dr_centre_path)
    Console.info("Output written to", output_dr_centre_path)
def acfr_to_oplab(args):
    """Convert ACFR vehicle/stereo pose estimate files to oplab CSVs.

    Writes a dead-reckoning CSV from the vehicle pose file and per-camera
    CSVs from the stereo pose file. If a dive folder is also provided, the
    ACFR navigation is interpolated to the laser timestamps found in
    nav_standard.json and written to auv_acfr_laser.csv.
    """
    csv_filepath = Path(args.output_folder)
    force_overwrite = args.force
    if not args.vehicle_pose and not args.stereo_pose:
        Console.error("Please provide at least one file")
        Console.quit("Missing comandline arguments")
    if args.vehicle_pose:
        Console.info("Vehicle pose provided! Converting it to DR CSV...")
        vpp = AcfrVehiclePoseParser(args.vehicle_pose)
        dr = vpp.get_dead_reckoning()
        # BUG FIX: write_csv appends ".csv" itself; passing
        # "auv_acfr_centre.csv" produced "auv_acfr_centre.csv.csv".
        csv_filename = "auv_acfr_centre"
        if (csv_filepath /
            (csv_filename + ".csv")).exists() and not force_overwrite:
            Console.error(
                "The DR file already exists. Use -F to force overwrite it")
            Console.quit("Default behaviour: not to overwrite results")
        else:
            write_csv(csv_filepath, dr, csv_filename, csv_flag=True)
            Console.info("Output vehicle pose written to",
                         csv_filepath / (csv_filename + ".csv"))
    if args.stereo_pose:
        Console.info("Stereo pose provided! Converting it to camera CSVs...")
        spp = AcfrStereoPoseParser(args.stereo_pose)
        cam1, cam2 = spp.get_cameras()
        if (csv_filepath / "auv_dr_LC.csv").exists() and not force_overwrite:
            Console.error(
                "The camera files already exists. Use -F to force overwrite it"
            )
            Console.quit("Default behaviour: not to overwrite results")
        else:
            write_csv(csv_filepath, cam1, "auv_acfr_LC", csv_flag=True)
            write_csv(csv_filepath, cam2, "auv_acfr_RC", csv_flag=True)
            Console.info("Output camera files written at", csv_filepath)
    if args.dive_folder is None:
        return
    # If the user provides a dive path, we can interpolate the ACFR navigation
    # to the laser timestamps
    # BUG FIX: this section reads cam1, which only exists when a stereo pose
    # file was given; previously this crashed with a NameError.
    if not args.stereo_pose:
        Console.quit(
            "Laser interpolation requires the stereo pose file as well")
    path_processed = get_processed_folder(args.dive_folder)
    nav_standard_file = path_processed / "nav" / "nav_standard.json"
    nav_standard_file = get_processed_folder(nav_standard_file)
    Console.info("Loading json file {}".format(nav_standard_file))
    start_datetime = ""
    finish_datetime = ""
    file3 = csv_filepath / "auv_acfr_laser.csv"
    if file3.exists() and not force_overwrite:
        Console.error(
            "The laser file already exists. Use -F to force overwrite it")
        Console.quit("Default behaviour: not to overwrite results")
    with nav_standard_file.open("r") as nav_standard:
        parsed_json_data = json.load(nav_standard)
    # start/finish datetimes are currently always empty, so the time span
    # defaults to the full json file
    if start_datetime == "":
        epoch_start_time = epoch_from_json(parsed_json_data[1])
        start_datetime = epoch_to_datetime(epoch_start_time)
    else:
        epoch_start_time = string_to_epoch(start_datetime)
    if finish_datetime == "":
        epoch_finish_time = epoch_from_json(parsed_json_data[-1])
        finish_datetime = epoch_to_datetime(epoch_finish_time)
    else:
        epoch_finish_time = string_to_epoch(finish_datetime)
    Console.info("Interpolating laser to ACFR stereo pose data...")
    # BUG FIX: use a context manager so the laser CSV is flushed and closed
    # (the handle was previously never closed).
    with file3.open("w") as fileout3:
        fileout3.write(cam1[0].get_csv_header())
        for i in range(len(parsed_json_data)):
            Console.progress(i, len(parsed_json_data))
            epoch_timestamp = parsed_json_data[i]["epoch_timestamp"]
            if (epoch_timestamp >= epoch_start_time
                    and epoch_timestamp <= epoch_finish_time):
                if "laser" in parsed_json_data[i]["category"]:
                    filename = parsed_json_data[i]["filename"]
                    c3_interp = interpolate_camera(epoch_timestamp, cam1,
                                                   filename)
                    fileout3.write(c3_interp.to_csv_row())
    Console.info("Done! Laser file available at", str(file3))
def main(args=None):
    """Command-line entry point for correct_images.

    Builds the argparse tree (correct / parse / process / rescale), then
    dispatches to the matching call_* handler. With a single non-flag
    argument, it is treated as a path for the "correct" subcommand.
    """
    # enable VT100 Escape Sequence for WINDOWS 10 for Console outputs
    # https://stackoverflow.com/questions/16755142/how-to-make-win32-console-recognize-ansi-vt100-escape-sequences # noqa
    os.system("")
    Console.banner()
    Console.info("Running correct_images version " +
                 str(Console.get_version()))
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()
    # subparser correct
    subparser_correct = subparsers.add_parser(
        "correct",
        help=
        "Correct images for attenuation / distortion / gamma and debayering",  # noqa
    )
    subparser_correct.add_argument("path",
                                   help="Path to raw directory till dive.")
    subparser_correct.add_argument(
        "-F",
        "--Force",
        dest="force",
        action="store_true",
        help="Force overwrite if correction parameters already exist.",
    )
    subparser_correct.add_argument(
        "--suffix",
        dest="suffix",
        default="",
        help=
        "Expected suffix for correct_images configuration and output folders.",
    )
    subparser_correct.set_defaults(func=call_correct)
    # subparser parse
    subparser_parse = subparsers.add_parser(
        "parse", help="Compute the correction parameters")
    # subparser_parse.add_argument("path", help="Path to raw directory till dive.")
    subparser_parse.add_argument(
        "path",
        # default=".",
        nargs="+",
        help="Folderpath where the (raw) input data is. Needs to be a \
subfolder of 'raw' and contain the mission.yaml configuration file.",
    )
    subparser_parse.add_argument(
        "-F",
        "--Force",
        dest="force",
        action="store_true",
        help="Force overwrite if correction parameters already exist.",
    )
    subparser_parse.add_argument(
        "--suffix",
        dest="suffix",
        default="",
        help=
        "Expected suffix for correct_images configuration and output folders.",
    )
    subparser_parse.set_defaults(func=call_parse)
    # subparser process
    subparser_process = subparsers.add_parser("process",
                                              help="Process image correction")
    subparser_process.add_argument("path",
                                   help="Path to raw directory till dive.")
    subparser_process.add_argument(
        "-F",
        "--Force",
        dest="force",
        action="store_true",
        help="Force overwrite if correction parameters already exist.",
    )
    subparser_process.add_argument(
        "--suffix",
        dest="suffix",
        default="",
        help=
        "Expected suffix for correct_images configuration and output folders.",
    )
    subparser_process.set_defaults(func=call_process)
    # subparser rescale image
    subparser_rescale = subparsers.add_parser("rescale",
                                              help="Rescale processed images")
    subparser_rescale.add_argument("path", help="Path to raw folder")
    subparser_rescale.add_argument(
        "--suffix",
        dest="suffix",
        default="",
        help=
        "Expected suffix for correct_images configuration and output folders.",
    )
    subparser_rescale.set_defaults(func=call_rescale)
    if len(sys.argv) == 1 and args is None:
        # Show help if no args provided
        parser.print_help(sys.stderr)
    elif len(sys.argv) == 2 and not sys.argv[1] == "-h":
        # Single non-help argument: treat it as a path for "correct".
        args = parser.parse_args(["correct", sys.argv[1]])
        # NOTE(review): looks like leftover debug output — confirm whether
        # this print is intentional.
        print(args)
        if hasattr(args, "func"):
            args.func(args)
    else:
        args = parser.parse_args()
        # Check suffix is only text, digits, dash and underscores
        # NOTE(review): this suffix validation only runs in this branch, not
        # in the single-argument shortcut above — TODO confirm intended.
        allowed_chars = string.ascii_letters + "-" + "_" + string.digits
        if all([c in allowed_chars for c in args.suffix]):
            args.func(args)
        else:
            Console.error(
                "Suffix must only contain letters, digits, dash and underscores"
            )
def oplab_to_acfr(args):
    """Export an oplab (auv_nav) processed dive to ACFR format.

    Writes ``vehicle_pose_est.data`` and ``stereo_pose_est.data`` from the
    EKF navigation CSVs, then generates a combined RAW export from
    ``nav_standard.json``.

    Parameters
    ----------
    args : argparse.Namespace
        Expects ``dive_folder``, ``output_folder`` and ``force`` attributes.
    """
    # Validate mandatory CLI arguments before touching the filesystem.
    if not args.dive_folder:
        Console.error("No dive folder provided. Exiting...")
        Console.quit("Missing comandline arguments")
    if not args.output_folder:
        Console.error("No output folder provided. Exiting...")
        Console.quit("Missing comandline arguments")

    output_folder = get_processed_folder(args.output_folder)
    vf = output_folder / "vehicle_pose_est.data"
    sf = output_folder / "stereo_pose_est.data"
    # Refuse to clobber previous results unless -F/--Force was given.
    if (vf.exists() or sf.exists()) and not args.force:
        Console.error(
            "Output folder already exists. Use -F to overwrite. Exiting...")
        Console.quit("Default behaviour: not to overwrite results")

    Console.info("Loading mission.yaml")
    path_processed = get_processed_folder(args.dive_folder)
    mission_file = path_processed / "mission.yaml"
    mission = Mission(mission_file)

    vehicle_file = path_processed / "vehicle.yaml"
    vehicle = Vehicle(vehicle_file)

    # NED origin for the pose writers.
    origin_lat = mission.origin.latitude
    origin_lon = mission.origin.longitude

    json_list = list(path_processed.glob("json_*"))
    if len(json_list) == 0:
        Console.quit(
            "No navigation solution could be found. Please run ",
            "auv_nav parse and process first",
        )
    navigation_path = path_processed / json_list[0]
    # Try if ekf exists:
    # NOTE(review): existence of the ekf folder is not actually checked here;
    # pd.read_csv below will fail if it is missing — TODO confirm intended.
    full_navigation_path = navigation_path / "csv" / "ekf"

    # Vehicle poses: one SyncedOrientationBodyVelocity per CSV row.
    vpw = AcfrVehiclePoseWriter(vf, origin_lat, origin_lon)
    vehicle_navigation_file = full_navigation_path / "auv_ekf_centre.csv"
    dataframe = pd.read_csv(vehicle_navigation_file)
    dr_list = []
    for _, row in dataframe.iterrows():
        sb = SyncedOrientationBodyVelocity()
        sb.from_df(row)
        dr_list.append(sb)
    vpw.write(dr_list)
    Console.info("Vehicle pose written to", vf)

    # Stereo poses require exactly two cameras in the mission file.
    if not mission.image.cameras:
        Console.warn("No cameras found in the mission file.")
        return
    if len(mission.image.cameras) == 1:
        Console.error("Only one camera found in the mission file. Exiting...")
        Console.quit("ACFR stereo pose est requires two cameras.")

    camera0_name = mission.image.cameras[0].name
    camera1_name = mission.image.cameras[1].name
    camera0_nav_file = full_navigation_path / ("auv_ekf_" + camera0_name + ".csv")
    dataframe = pd.read_csv(camera0_nav_file)
    cam1_list = []
    for _, row in dataframe.iterrows():
        sb = Camera()
        sb.from_df(row)
        cam1_list.append(sb)

    camera1_nav_file = full_navigation_path / ("auv_ekf_" + camera1_name + ".csv")
    dataframe = pd.read_csv(camera1_nav_file)
    cam2_list = []
    for _, row in dataframe.iterrows():
        sb = Camera()
        sb.from_df(row)
        cam2_list.append(sb)

    spw = AcfrStereoPoseWriter(sf, origin_lat, origin_lon)
    spw.write(cam1_list, cam2_list)
    Console.info("Stereo pose written to", sf)

    Console.info("Generating combined.RAW")
    nav_standard_file = path_processed / "nav" / "nav_standard.json"
    nav_standard_file = get_processed_folder(nav_standard_file)
    Console.info("Loading json file {}".format(nav_standard_file))
    with nav_standard_file.open("r") as nav_standard:
        parsed_json_data = json.load(nav_standard)

    start_datetime = ""
    finish_datetime = ""
    # setup start and finish date time
    # NOTE(review): start/finish are always "" here, so the else branches
    # (string_to_epoch) are currently dead — kept as scaffolding for a
    # future time-window option.
    if start_datetime == "":
        epoch_start_time = epoch_from_json(parsed_json_data[1])
        start_datetime = epoch_to_datetime(epoch_start_time)
    else:
        epoch_start_time = string_to_epoch(start_datetime)
    if finish_datetime == "":
        epoch_finish_time = epoch_from_json(parsed_json_data[-1])
        finish_datetime = epoch_to_datetime(epoch_finish_time)
    else:
        epoch_finish_time = string_to_epoch(finish_datetime)

    # Placeholder sensor noise models passed to the from_json parsers.
    sensors_std = {
        "usbl": {"model": "sensor"},
        "dvl": {"model": "sensor"},
        "depth": {"model": "sensor"},
        "orientation": {"model": "sensor"},
    }

    exporter = AcfrCombinedRawWriter(mission, vehicle, output_folder)

    # read in data from json file
    # i here is the number of the data packet
    for i in range(len(parsed_json_data)):
        Console.progress(i, len(parsed_json_data))
        epoch_timestamp = parsed_json_data[i]["epoch_timestamp"]
        if epoch_timestamp >= epoch_start_time and epoch_timestamp <= epoch_finish_time:
            if "velocity" in parsed_json_data[i]["category"]:
                if "body" in parsed_json_data[i]["frame"]:
                    # to check for corrupted data point which have inertial
                    # frame data values
                    if "epoch_timestamp_dvl" in parsed_json_data[i]:
                        # confirm time stamps of dvl are aligned with main
                        # clock (within a second)
                        if (abs(parsed_json_data[i]["epoch_timestamp"]
                                - parsed_json_data[i]["epoch_timestamp_dvl"])
                            ) < 1.0:
                            velocity_body = BodyVelocity()
                            velocity_body.from_json(parsed_json_data[i],
                                                    sensors_std["dvl"])
                            exporter.add(velocity_body)
                if "inertial" in parsed_json_data[i]["frame"]:
                    velocity_inertial = InertialVelocity()
                    velocity_inertial.from_json(parsed_json_data[i])
                    exporter.add(velocity_inertial)
            if "orientation" in parsed_json_data[i]["category"]:
                orientation = Orientation()
                orientation.from_json(parsed_json_data[i],
                                      sensors_std["orientation"])
                exporter.add(orientation)
            if "depth" in parsed_json_data[i]["category"]:
                depth = Depth()
                depth.from_json(parsed_json_data[i], sensors_std["depth"])
                exporter.add(depth)
            if "altitude" in parsed_json_data[i]["category"]:
                altitude = Altitude()
                altitude.from_json(parsed_json_data[i])
                exporter.add(altitude)
            if "usbl" in parsed_json_data[i]["category"]:
                usbl = Usbl()
                usbl.from_json(parsed_json_data[i], sensors_std["usbl"])
                exporter.add(usbl)
            if "image" in parsed_json_data[i]["category"]:
                camera1 = Camera()
                # LC
                camera1.from_json(parsed_json_data[i], "camera1")
                exporter.add(camera1)
                camera2 = Camera()
                camera2.from_json(parsed_json_data[i], "camera2")
                exporter.add(camera2)
def parse_rosbag(mission, vehicle, category, output_format, outpath):
    """Parse rosbag files

    Parameters
    ----------
    mission : Mission
        Mission object
    vehicle : Vehicle
        Vehicle object
    category : str
        Measurement category
    output_format : str
        Output format
    outpath : str
        Output path

    Returns
    -------
    list
        Measurement data list
    """
    # Bail out early if the optional rosbag dependency is missing.
    if not ROSBAG_IS_AVAILABLE:
        Console.error("rosbag is not available")
        Console.error("install it with:")
        Console.error(
            "pip install --extra-index-url",
            "https://rospypi.github.io/simple/ rosbag",
        )
        Console.quit(
            "rosbag is not available and required to parse ROS bagfiles.")

    # Get your data from a file using mission paths, for example
    depth_std_factor = mission.depth.std_factor
    velocity_std_factor = mission.velocity.std_factor
    velocity_std_offset = mission.velocity.std_offset
    altitude_std_factor = mission.altitude.std_factor
    usbl_std_factor = mission.usbl.std_factor
    usbl_std_offset = mission.usbl.std_offset

    # NED origin
    origin_latitude = mission.origin.latitude
    origin_longitude = mission.origin.longitude

    # Mounting offsets from the vehicle configuration.
    ins_heading_offset = vehicle.ins.yaw
    dvl_heading_offset = vehicle.dvl.yaw

    # One pre-configured parser object per measurement category; only the
    # one matching `category` is actually used below.
    body_velocity = BodyVelocity(
        velocity_std_factor,
        velocity_std_offset,
        dvl_heading_offset,
    )
    orientation = Orientation(ins_heading_offset)
    depth = Depth(depth_std_factor)
    altitude = Altitude(altitude_std_factor)
    usbl = Usbl(
        usbl_std_factor,
        usbl_std_offset,
        latitude_reference=origin_latitude,
        longitude_reference=origin_longitude,
    )
    camera = Camera()
    camera.sensor_string = mission.image.cameras[0].name

    body_velocity.sensor_string = "rosbag"
    orientation.sensor_string = "rosbag"
    depth.sensor_string = "rosbag"
    altitude.sensor_string = "rosbag"
    usbl.sensor_string = "rosbag"

    # Adjust timezone offsets
    # NOTE(review): read_timezone(...) * 60 + timeoffset suggests the
    # timezone is expressed in minutes and the result in seconds — TODO
    # confirm against read_timezone's definition.
    body_velocity.tz_offset_s = (
        read_timezone(mission.velocity.timezone) * 60
        + mission.velocity.timeoffset)
    orientation.tz_offset_s = (
        read_timezone(mission.orientation.timezone) * 60
        + mission.orientation.timeoffset)
    depth.tz_offset_s = (read_timezone(mission.depth.timezone) * 60
                         + mission.depth.timeoffset)
    altitude.tz_offset_s = (read_timezone(mission.altitude.timezone) * 60
                            + mission.altitude.timeoffset)
    usbl.tz_offset_s = (read_timezone(mission.usbl.timezone) * 60
                        + mission.usbl.timeoffset)
    camera.tz_offset_s = (read_timezone(mission.image.timezone) * 60
                          + mission.image.timeoffset)

    data_list = []

    # Select bagfile glob, ROS topic and parser object for the requested
    # category.
    bagfile = None
    wanted_topic = None
    data_object = None
    filepath = None

    if category == Category.ORIENTATION:
        Console.info("... parsing orientation")
        filepath = get_raw_folder(mission.orientation.filepath)
        bagfile = mission.orientation.filename
        wanted_topic = mission.orientation.topic
        data_object = orientation
    elif category == Category.VELOCITY:
        Console.info("... parsing velocity")
        filepath = get_raw_folder(mission.velocity.filepath)
        bagfile = mission.velocity.filename
        wanted_topic = mission.velocity.topic
        data_object = body_velocity
    elif category == Category.DEPTH:
        Console.info("... parsing depth")
        filepath = get_raw_folder(mission.depth.filepath)
        bagfile = mission.depth.filename
        wanted_topic = mission.depth.topic
        data_object = depth
    elif category == Category.ALTITUDE:
        Console.info("... parsing altitude")
        filepath = get_raw_folder(mission.altitude.filepath)
        bagfile = mission.altitude.filename
        wanted_topic = mission.altitude.topic
        data_object = altitude
    elif category == Category.USBL:
        Console.info("... parsing position")
        filepath = get_raw_folder(mission.usbl.filepath)
        bagfile = mission.usbl.filename
        wanted_topic = mission.usbl.topic
        data_object = usbl
    elif category == Category.IMAGES:
        Console.info("... parsing images")
        filepath = get_raw_folder(mission.image.cameras[0].path)
        bagfile = "*.bag"  # images may span several bagfiles
        wanted_topic = mission.image.cameras[0].topic
        data_object = camera
    else:
        Console.quit("Unknown category for ROS parser", category)

    bagfile_list = list(filepath.glob(bagfile))
    outpath = Path(outpath).parent
    data_list = rosbag_topic_worker(bagfile_list, wanted_topic, data_object,
                                    data_list, output_format, outpath)
    Console.info("... parsed " + str(len(data_list)) + " " + category
                 + " entries")
    return data_list
def call_parse(args):
    """Perform parsing of configuration yaml files and generate image
    correction parameters

    Parameters
    -----------
    args : parse_args object
        User provided arguments for path of source images
    """
    # Now args.path is a list of paths (str / os.PathLike objects)
    path_list = [Path(path).resolve() for path in args.path]
    if len(path_list) == 1:
        Console.info("Single path provided, normal single dive mode...")
    else:
        Console.warn(
            "Multiple paths provided [{}]. Checking each path...".format(
                len(path_list)))
    for path in path_list:
        # check if path is valid
        if not path.exists():
            Console.error("Path", path, "does not exist! Exiting...")
            # quit
            sys.exit(1)
        else:
            Console.info("\t", path, " [OK]")

    # Populating the configuration and camerasystem lists for each dive path
    # The camera system is pulled first from <config> folder if available,
    # if not from <raw> folder
    correct_config_list, camerasystem_list = zip(*[
        load_configuration_and_camera_system(path, args.suffix)
        for path in path_list
    ])
    # correct_config <--- from correct_images.yaml (<config> folder)
    # camerasystem  <--- from camera.yaml (<config> folder or <raw> folder)

    # Both lists must have the same, non-zero length (one entry per dive).
    if len(correct_config_list) != len(camerasystem_list):
        Console.error("Number of [camerasystem] and [configuration] differ!")
        sys.exit(1)
    if len(correct_config_list) == 0:
        Console.error("No valid camerasystem/configuration found!")
        sys.exit(1)

    # When in multidive mode, check if all camerasystems are the same.
    # For this we test camera_system.camera_system
    if len(camerasystem_list) > 1:
        camera_system = camerasystem_list[0]
        for cs in camerasystem_list:
            # the first entry will be repeated, no problem with that
            # TODO: Extend is_equivalent() method allowing checking cameras
            # in different orders
            # WARNING: We decide not to use equivalent() here, because it is
            # not robust enough. Enforce same camera order
            if not camera_system.camera_system == cs.camera_system:
                Console.error("Camera systems differ!")
                Console.error("\tFirst camera system (reference) ",
                              camera_system.camera_system)
                Console.error("\tWrong camera system (current) ",
                              cs.camera_system)
                sys.exit(1)
        Console.warn("Camera systems are the same for all dives.")

    # BUGFIX: correct_config was previously only bound inside the multidive
    # branch below, raising NameError in single-dive mode when used for the
    # per-camera lookup further down. Bind it unconditionally.
    correct_config = correct_config_list[0]
    # Check if the correct_config_list elements are the same (equivalent)
    if len(correct_config_list) > 1:
        for cc in correct_config_list:
            # Check if the relevant fields of the configuration are the same
            # (including per-dive camera setup)
            if not correct_config.is_equivalent(cc):
                Console.error("Configurations [correct_config] do not match!")
                sys.exit(1)
        Console.warn("Configurations are equivalent for all dives.")

    camerasystem = camerasystem_list[0]
    for camera in camerasystem.cameras:
        # check if the camera also exists in the configuration
        if camera.name not in [
            c.camera_name for c in correct_config.configs.camera_configs
        ]:
            # ignore if not present
            Console.warn(
                "Camera [", camera.name,
                "] defined in <camera.yaml> but not found in configuration. Skipping..."
            )
        else:
            Console.info("Parsing for camera", camera.name)
            # Create a Corrector object for each camera with empty
            # configuration. The configuration and the paths will be
            # populated later on a per-dive basis
            corrector = Corrector(args.force, args.suffix, camera,
                                  correct_config=None)
            # call new list-compatible implementation of parse()
            corrector.parse(path_list, correct_config_list)

    Console.info(
        "Parse completed for all cameras. Please run process to develop ",
        "corrected images...",
    )
def rescale(
    image_array: np.ndarray,
    interpolate_method: str,
    target_pixel_size_m: float,
    altitude: float,
    f_x: float,
    f_y: float,
    maintain_pixels: bool,
) -> np.ndarray:
    """Rescale an RGB image so each pixel covers ``target_pixel_size_m``.

    The per-pixel ground footprint is estimated with a pinhole camera model
    (altitude divided by focal length), giving a rescale factor per axis.

    Parameters
    ----------
    image_array : np.ndarray
        Input RGB image (H x W x 3, uint8-compatible).
    interpolate_method : str
        One of "bicubic", "bilinear", "nearest_neighbour", "lanczos".
    target_pixel_size_m : float
        Desired ground size of one pixel in metres.
    altitude : float
        Camera altitude above the scene in metres.
    f_x, f_y : float
        Focal lengths in pixels.
    maintain_pixels : bool
        If True the output keeps the original pixel dimensions (downscale
        then upscale, or centre-crop then upscale); otherwise the output is
        resized to the rescaled dimensions.

    Returns
    -------
    np.ndarray
        Rescaled image as uint8.
    """
    image_shape = image_array.shape
    image_height = image_shape[0]
    image_width = image_shape[1]
    # Ground footprint of a single pixel (pinhole model).
    pixel_height = altitude / f_y
    pixel_width = altitude / f_x
    vertical_rescale = pixel_height / target_pixel_size_m
    horizontal_rescale = pixel_width / target_pixel_size_m

    # Map method name to the PIL resampling filter.
    resample_filters = {
        "bicubic": Image.BICUBIC,
        "bilinear": Image.BILINEAR,
        "nearest_neighbour": Image.NEAREST,
        "lanczos": Image.LANCZOS,
    }
    method = resample_filters.get(interpolate_method)
    if method is None:
        Console.error("The requested rescaling method is not implemented.")
        Console.error("Valid methods are: ")
        Console.error(" * bicubic")
        Console.error(" * bilinear")
        Console.error(" * nearest_neighbour")
        Console.error(" * lanczos")
        Console.quit("Rescaling method not implemented.")

    image_rgb = Image.fromarray(image_array, "RGB")

    if maintain_pixels:
        if vertical_rescale < 1 or horizontal_rescale < 1:
            # Downscale to the target resolution, then upscale back to the
            # original pixel count.
            # BUGFIX: clamp to >= 1 px so extreme downscales cannot request
            # a zero-sized resize (which raises inside PIL).
            size = (
                max(1, int(image_width * horizontal_rescale)),
                max(1, int(image_height * vertical_rescale)),
            )
            image_rgb = image_rgb.resize(size, resample=method)
            size = (image_width, image_height)
            image_rgb = image_rgb.resize(size, resample=method)
        else:
            # Upscaling: crop the centre region that maps to the original
            # extent at the target resolution, then resize it back up.
            crop_width = max(1, int((1 / horizontal_rescale) * image_width))
            crop_height = max(1, int((1 / vertical_rescale) * image_height))

            # find crop box dimensions
            box_left = int((image_width - crop_width) / 2)
            box_upper = int((image_height - crop_height) / 2)
            box_right = image_width - box_left
            box_lower = image_height - box_upper

            # crop the image to the center
            box = (box_left, box_upper, box_right, box_lower)
            cropped_image = image_rgb.crop(box)

            # resize the cropped image to the size of original image
            size = (image_width, image_height)
            image_rgb = cropped_image.resize(size, resample=method)
    else:
        # BUGFIX: clamp to >= 1 px (see above).
        size = (
            max(1, int(image_width * horizontal_rescale)),
            max(1, int(image_height * vertical_rescale)),
        )
        image_rgb = image_rgb.resize(size, resample=method)

    image = np.array(image_rgb, dtype=np.uint8)
    return image
def get_imagelist(self):
    """Generate list of source images

    Populates ``self.camera_image_list`` and ``self.altitude_list`` for the
    current dive, then appends any previously accumulated entries (multidive
    support). For colour_correction, images are filtered by the altitude
    range read from the auv_nav navigation CSV.
    """
    # Store a copy of the currently stored image list in the Corrector object
    # (previous dives' entries, re-appended at the end).
    _original_image_list = self.camera_image_list
    _original_altitude_list = self.altitude_list

    # Replaces Corrector object's image_list with the camera image list
    # OLD: self.camera_image_list = self.camera.image_list
    # <---- Now this is done at the end (else condition)
    self.camera_image_list = []
    self.altitude_list = []

    # If using colour_correction, we need to read in the navigation
    if self.correction_method == "colour_correction":
        if self.distance_path == "json_renav_*":
            Console.info(
                "Picking first JSON folder as the default path to auv_nav",
                "csv files...",
            )
            dir_ = self.path_processed
            json_list = list(dir_.glob("json_*"))
            if len(json_list) == 0:
                Console.quit(
                    "No navigation solution could be found. Please run ",
                    "auv_nav parse and process first",
                )
            self.distance_path = json_list[0]
        metric_path = self.path_processed / self.distance_path
        # Try if ekf exists, otherwise fall back to dead reckoning:
        full_metric_path = metric_path / "csv" / "ekf"
        metric_file = "auv_ekf_" + self.camera_name + ".csv"
        if not full_metric_path.exists():
            full_metric_path = metric_path / "csv" / "dead_reckoning"
            metric_file = "auv_dr_" + self.camera_name + ".csv"
        self.altitude_csv_path = full_metric_path / metric_file

        # Check if file exists
        if not self.altitude_csv_path.exists():
            Console.quit(
                "The navigation CSV file is not present. Run auv_nav first."
            )

        # read dataframe for corresponding distance csv path
        dataframe = pd.read_csv(self.altitude_csv_path)

        # Deal with bagfiles:
        # if the extension is bag, the list is a list of bagfiles
        if self.camera.extension == "bag":
            self.camera.bagfile_list = copy.deepcopy(self.camera.image_list)
            self.loader.set_bagfile_list_and_topic(
                self.camera.bagfile_list, self.camera.topic
            )

        # get imagelist for given camera object
        if self.user_specified_image_list != "none":
            path_file_list = Path(self.path_config) / self.user_specified_image_list
            trimmed_csv_file = "trimmed_csv_" + self.camera_name + ".csv"
            self.trimmed_csv_path = Path(self.path_config) / trimmed_csv_file
            if not self.altitude_csv_path.exists():
                message = "Path to " + metric_file + " does not exist..."
                Console.quit(message)
            else:
                # create trimmed csv based on user's list of images
                dataframe = trim_csv_files(
                    path_file_list,
                    self.altitude_csv_path,
                    self.trimmed_csv_path,
                )

        # Check images exist:
        valid_idx = []
        for idx, entry in enumerate(dataframe["relative_path"]):
            im_path = self.path_raw / entry
            if im_path.exists() or self.camera.extension == "bag":
                valid_idx.append(idx)
        # BUGFIX: take a copy so the column assignment below does not write
        # through a view (SettingWithCopy), and assign the result of
        # reset_index — previously its return value was discarded, making
        # it a no-op.
        filtered_dataframe = dataframe.iloc[valid_idx].copy()
        filtered_dataframe = filtered_dataframe.reset_index(drop=True)
        # WARNING: if the column does not contain any 'None' entry, it will
        # be parsed as float, and the .str accessor would fail — hence the
        # explicit cast to string first.
        filtered_dataframe["altitude [m]"] = filtered_dataframe[
            "altitude [m]"
        ].astype("string")
        filtered_dataframe = filtered_dataframe[
            ~filtered_dataframe["altitude [m]"].str.contains("None")
        ]  # drop rows with None altitude
        distance_list = filtered_dataframe["altitude [m]"].tolist()
        for _, row in filtered_dataframe.iterrows():
            alt = float(row["altitude [m]"])
            # Keep only images within the configured altitude window.
            if alt > self.altitude_min and alt < self.altitude_max:
                if self.camera.extension == "bag":
                    # Bagfile entries are identified by timestamp (filename
                    # stem), not by path on disk.
                    timestamp_from_filename = Path(row["relative_path"]).stem
                    if "\\" in timestamp_from_filename:
                        # Handle Windows-style separators in relative_path.
                        timestamp_from_filename = timestamp_from_filename.split(
                            "\\"
                        )[-1]
                    self.camera_image_list.append(timestamp_from_filename)
                else:
                    self.camera_image_list.append(
                        self.path_raw / row["relative_path"]
                    )
                self.altitude_list.append(alt)
        if len(distance_list) == 0:
            Console.error("No images exist / can be found!")
            Console.error(
                "Check the file",
                self.altitude_csv_path,
                "and make sure that the 'relative_path' column points to",
                "existing images relative to the raw mission folder (e.g.",
                self.path_raw,
                ")",
            )
            Console.error("You may need to reprocess the dive with auv_nav")
            Console.quit("No images were found.")
        # WARNING: what happens in a multidive setup when the current dive
        # has no images (but the rest of the dives do)?
        Console.info(
            len(self.altitude_list),
            "/",
            len(distance_list),
            "Images filtered as per altitude range...",
        )
        if len(self.altitude_list) < 3:
            Console.quit(
                "Insufficient number of images to compute attenuation ",
                "parameters...",
            )
    else:
        # Copy the images list from the camera
        # BUGFIX: use a shallow copy — previously this aliased
        # self.camera.image_list, so the extend() below silently grew the
        # camera's own list with stale entries from previous dives.
        self.camera_image_list = list(self.camera.image_list)

    # Join the current image list with the original image list (copy)
    self.camera_image_list.extend(_original_image_list)
    # Show size of the extended image list
    Console.warn(
        ">> The camera image list is now", len(self.camera_image_list)
    )  # JC: I'm leaving this as it is informative for multidive
    # Join the current image list with the original image list (copy)
    self.altitude_list.extend(_original_altitude_list)
def load_states(path: Path, start_image_identifier: str,
                end_image_identifier: str) -> List[Camera]:
    """Loads states from csv between 2 timestamps

    Args:
        path (Union[str, Path]): Path of states csv file
        start_image_identifier (str): Identifier (path) from which onwards states are loaded
        end_image_identifier (str): Identifier (path) up to which states are loaded

    Returns:
        List[Camera]
    """
    poses = pd.read_csv(path)
    # The covariance file lives next to the states file, suffixed "_cov"
    # (e.g. states.csv -> states_cov.csv). Rows are assumed to correspond
    # 1:1 by position; mismatches are reported per-row below.
    path_covriance = path.parent / (path.stem + "_cov" + path.suffix)
    covariances = pd.read_csv(path_covriance)
    covariances.columns = covariances.columns.str.replace(" ", "")
    headers = list(covariances.columns.values)
    headers = [
        s.strip() for s in headers
    ]  # Strip whitespace in comma-space (instead of just comma)-separtated columns
    # The first 37 columns must be exactly these, in this order, so that the
    # "cov_x_x":"cov_yaw_yaw" row slice below yields a full 6x6 matrix.
    expected_headers = [
        "relative_path",
        "cov_x_x",
        "cov_x_y",
        "cov_x_z",
        "cov_x_roll",
        "cov_x_pitch",
        "cov_x_yaw",
        "cov_y_x",
        "cov_y_y",
        "cov_y_z",
        "cov_y_roll",
        "cov_y_pitch",
        "cov_y_yaw",
        "cov_z_x",
        "cov_z_y",
        "cov_z_z",
        "cov_z_roll",
        "cov_z_pitch",
        "cov_z_yaw",
        "cov_roll_x",
        "cov_roll_y",
        "cov_roll_z",
        "cov_roll_roll",
        "cov_roll_pitch",
        "cov_roll_yaw",
        "cov_pitch_x",
        "cov_pitch_y",
        "cov_pitch_z",
        "cov_pitch_roll",
        "cov_pitch_pitch",
        "cov_pitch_yaw",
        "cov_yaw_x",
        "cov_yaw_y",
        "cov_yaw_z",
        "cov_yaw_roll",
        "cov_yaw_pitch",
        "cov_yaw_yaw",
    ]
    if len(headers) < 37 or headers[0:37] != expected_headers:
        Console.quit("Unexpected headers in covariance file", path)
    covariances.rename(columns={"relative_path": "relative_path_cov"},
                       inplace=True)  # prevent same names after merging
    # Positional (side-by-side) merge of poses and covariances.
    pd_states = pd.concat([poses, covariances], axis=1)

    use = False  # becomes True once the start identifier is seen
    found_end_imge_identifier = False
    states = []
    try:
        for index, row in pd_states.iterrows():
            if row["relative_path"] == start_image_identifier:
                use = True
            if use:
                # Row misalignment between the two CSVs is reported but the
                # row is still used (non-fatal by design).
                if row["relative_path"] != row["relative_path_cov"]:
                    Console.error(
                        "Different relative paths ("
                        + row["relative_path"]
                        + " and "
                        + row["relative_path_cov"]
                        + ") in corresponding lines in states and covariance csv"
                    )
                s = Camera()
                s.filename = row["relative_path"]
                s.northings = row["northing [m]"]
                s.eastings = row["easting [m]"]
                s.depth = row["depth [m]"]
                s.roll = row["roll [deg]"]
                s.pitch = row["pitch [deg]"]
                s.yaw = row["heading [deg]"]
                s.x_velocity = row["x_velocity [m/s]"]
                s.y_velocity = row["y_velocity [m/s]"]
                s.z_velocity = row["z_velocity [m/s]"]
                s.epoch_timestamp = row["timestamp [s]"]
                # 36 covariance columns reshaped into a 6x6 matrix.
                s.covariance = np.asarray(
                    row["cov_x_x":"cov_yaw_yaw"]).reshape((6, 6))
                states.append(s)
                # End identifier row is included before stopping.
                if row["relative_path"] == end_image_identifier:
                    found_end_imge_identifier = True
                    break
    except KeyError as err:
        Console.error(
            "In",
            __file__,
            "line",
            sys._getframe().f_lineno,
            ": KeyError when parsing ",
            path,
            ": Key",
            err,
            "not found.",
        )
    if use is False:
        # Start identifier never matched: states is empty.
        Console.error("Did not find start image identifier ("
                      + start_image_identifier + "). Check your input.")
    elif found_end_imge_identifier is False:
        Console.warn("Did not find end image identifier ("
                     + end_image_identifier
                     + "). Returning all states until end of file.")
    return states