def debayer(image: np.ndarray, pattern: str) -> np.ndarray:
    """Perform debayering of input image

    Parameters
    ----------
    image : numpy.ndarray
        image data to be debayered
    pattern : string
        Bayer pattern

    Returns
    -------
    numpy.ndarray
        Debayered image
    """
    # Scale up to make use of 16 bit debayering
    image16_float = image.astype(np.float32) * (2**16 - 1)
    image16 = image16_float.clip(0, 2**16 - 1).astype(np.uint16)
    corrected_rgb_img = None
    if pattern in ("rggb", "RGGB"):
        corrected_rgb_img = cv2.cvtColor(image16, cv2.COLOR_BAYER_BG2RGB_EA)
    elif pattern in ("grbg", "GRBG"):
        corrected_rgb_img = cv2.cvtColor(image16, cv2.COLOR_BAYER_GB2RGB_EA)
    elif pattern in ("bggr", "BGGR"):
        corrected_rgb_img = cv2.cvtColor(image16, cv2.COLOR_BAYER_RG2RGB_EA)
    elif pattern in ("gbrg", "GBRG"):
        corrected_rgb_img = cv2.cvtColor(image16, cv2.COLOR_BAYER_GR2RGB_EA)
    else:
        Console.quit("Bayer pattern not supported (", pattern, ")")
    # Scale back down to unitary range
    corrected_rgb_img = corrected_rgb_img.astype(np.float32) * (2**(-16))
    return corrected_rgb_img
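# Example usage (a hedged sketch; `raw_mosaic` is hypothetical data, and the
# function expects a unit-scaled float mosaic as its input):
#
#     raw_mosaic = np.random.rand(1024, 1280).astype(np.float32)
#     rgb = debayer(raw_mosaic, "RGGB")   # float32 RGB image, values in [0, 1]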
def save_cloud(filename, cloud):
    """Write list of 3D points to ply file

    Returns
    -------
    None
    """
    cloud_size = len(cloud)
    header_msg = (
        "ply\n"
        "format ascii 1.0\n"
        "element vertex {0}\n"
        "property float x\n"
        "property float y\n"
        "property float z\n"
        "end_header\n".format(cloud_size)
    )
    Console.info("Saving cloud to " + str(filename))
    with filename.open("w") as f:
        f.write(header_msg)
        for p in cloud:
            f.write("{0:.5f} {1:.5f} {2:.5f}\n".format(p[0][0], p[1][0], p[2][0]))
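# Example usage (a hedged sketch; the p[i][0] indexing above implies each
# point is a 3x1 column vector, e.g. np.array([[x], [y], [z]])):
#
#     cloud = [np.array([[1.0], [2.0], [3.0]]),
#              np.array([[4.0], [5.0], [6.0]])]
#     save_cloud(Path("cloud.ply"), cloud)   # filename must support .open()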
def call_rescale(args):
    path = Path(args.path).resolve()
    time_string = time.strftime("%Y%m%d_%H%M%S", time.localtime())
    Console.set_logging_file(
        get_processed_folder(path)
        / ("log/" + time_string + "_correct_images_rescale.log")
    )
    correct_config, camerasystem = load_configuration_and_camera_system(
        path, args.suffix
    )

    # Install freeimage plugins if not installed
    imageio.plugins.freeimage.download()

    if correct_config.camerarescale is None:
        Console.error("Camera rescale configuration not found")
        Console.error(
            "Please populate the correct_images.yaml file with a rescale "
            "configuration"
        )
        Console.quit("Malformed correct_images.yaml file")

    # Obtain parameters for rescale from correct_config
    rescale_cameras = correct_config.camerarescale.rescale_cameras
    for camera in rescale_cameras:
        corrections.rescale_camera(path, camerasystem, camera)
    Console.info("Rescaling completed for all cameras ...")
def get_altitude_and_depth_maps(self):
    """Generate distance matrix numpy files and save them"""
    # Read altitude / depth map depending on distance_metric
    if self.distance_metric in ("uniform", "altitude"):
        Console.info("Null distance matrix created")
        self.depth_map_list = []
        return
    elif self.distance_metric == "depth_map":
        path_depth = self.path_processed / "depth_map"
        if not path_depth.exists():
            Console.quit("Depth maps not found...")
        Console.info("Path to depth maps found...")
        depth_map_list = list(path_depth.glob("*.npy"))
        # Keep only the depth maps that match an image in the camera list
        self.depth_map_list = [
            Path(item)
            for item in depth_map_list
            for image_path in self.camera_image_list
            if Path(image_path).stem in Path(item).stem
        ]
        if len(self.camera_image_list) != len(self.depth_map_list):
            Console.quit(
                "The number of images does not coincide with the ",
                "number of depth maps.",
            )
def add(self, measurement):
    data = None
    if type(measurement) is BodyVelocity:
        if self.rdi_ready():
            data = measurement.to_acfr(self.rdi_altitude, self.rdi_orientation)
            self.rdi_orientation = None
            self.rdi_altitude = None
    elif type(measurement) is InertialVelocity:
        pass
    elif type(measurement) is Altitude:
        self.rdi_altitude = measurement
    elif type(measurement) is Depth:
        data = measurement.to_acfr()
    elif type(measurement) is Usbl:
        data = measurement.to_acfr()
    elif type(measurement) is Orientation:
        data = measurement.to_acfr()
        self.rdi_orientation = measurement
    elif type(measurement) is Other:
        pass
    elif type(measurement) is Camera:
        # Discard laser images
        if "xxx" not in measurement.filename:
            data = measurement.to_acfr()
    else:
        Console.error(
            "AcfrConverter type {} not supported".format(type(measurement))
        )
    if data is not None:
        self.f.write(data)
def parse_interlacer(outpath, filename):
    data = []
    value = []
    data_ordered = []
    filepath = outpath / filename

    try:
        with filepath.open("r") as json_file:
            data = json.load(json_file)
            for i, data_packet in enumerate(data):
                value.append(str(float(data_packet["epoch_timestamp"])))
    except ValueError:
        Console.quit("Error: no data in JSON file")

    # Sort data in order of epoch_timestamp
    sorted_index, sorted_items = sort_values(value)

    # Store interlaced data in order of time
    for i in range(len(data)):
        data_ordered.append(data[sorted_index[i]])

    # Write out interlaced json file; the with block closes the file itself
    with filepath.open("w") as fileout:
        json.dump(data_ordered, fileout, indent=2)
def update_camera_list(
    camera_list: List[Camera],
    ekf_list: List[SyncedOrientationBodyVelocity],
    origin_offsets: List[float],
    camera1_offsets: List[float],
    latlon_reference: List[float],
) -> List[Camera]:
    ekf_idx = 0
    c_idx = 0
    while c_idx < len(camera_list) and ekf_idx < len(ekf_list):
        cam_ts = camera_list[c_idx].epoch_timestamp
        ekf_ts = ekf_list[ekf_idx].epoch_timestamp
        if cam_ts < ekf_ts:
            if not camera_list[c_idx].updated:
                Console.error(
                    "There is a camera entry with index",
                    c_idx,
                    "that is not updated to EKF...",
                )
            c_idx += 1
        elif cam_ts > ekf_ts:
            ekf_idx += 1
        elif cam_ts == ekf_ts:
            camera_list[c_idx].fromSyncedBodyVelocity(
                ekf_list[ekf_idx],
                origin_offsets,
                camera1_offsets,
                latlon_reference,
            )
            c_idx += 1
            ekf_idx += 1
    return camera_list
def check_mahalanobis_distance(
    self, innovation, innovation_cov, measurement_time, indices
):
    # np.asscalar was removed in NumPy 1.23; .item() is the equivalent
    mahalanobis_distance2 = np.dot(
        innovation.T, innovation_cov @ innovation
    ).item()
    if mahalanobis_distance2 >= self.mahalanobis_threshold**2:
        self.nb_exceeded_mahalanobis += 1
        ici = innovation_cov @ innovation
        summands = []
        for i in range(len(innovation)):
            summands.append((innovation[i] * ici[i]).item())
        Console.warn_verbose(
            "Mahalanobis dist > threshold ({} time(s) so far in this "
            "dataset) for measurement at t={} of variable(s) with "
            "index(es): {}\nInnovation:\n{}\nMahalanobis distance: {} "
            "(squared: {})\nsummands:\n{}".format(
                self.nb_exceeded_mahalanobis,
                measurement_time,
                indices,
                str(innovation.T).replace("\n", ""),
                math.sqrt(mahalanobis_distance2),
                mahalanobis_distance2,
                str(summands).replace("\n", ""),
            )
        )
        return False
    return True
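# A minimal numeric sketch of the gating test (hedged: it assumes, as the
# nu^T S^(-1) nu form of the squared Mahalanobis distance requires, that the
# innovation_cov argument already holds the *inverse* innovation covariance):
#
#     nu = np.array([[0.5], [0.1]])                  # hypothetical innovation
#     S_inv = np.linalg.inv(np.diag([0.04, 0.01]))   # inverse innovation cov.
#     d2 = np.dot(nu.T, S_inv @ nu).item()           # 6.25 + 1.0 = 7.25
#     accept = d2 < 3.0 ** 2                         # True for threshold 3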
def smooth(self, enable=True):
    """Rauch-Tung-Striebel (RTS) backward smoothing pass"""
    if len(self.states_vector) < 2:
        return
    ns = len(self.states_vector)
    self.smoothed_states_vector = copy.deepcopy(self.states_vector)
    if enable:
        # The backward pass runs over the ns - 1 consecutive state pairs
        for i in range(ns - 1):
            Console.progress(ns + i, 2 * ns)
            sf = self.smoothed_states_vector[ns - 1 - i]
            s = self.states_vector[ns - 2 - i]
            x_prior, p_prior = s.get()
            x_smoothed, p_smoothed = sf.get()
            delta = sf.get_time() - s.get_time()
            f = self.compute_transfer_function(delta, x_prior)
            A = self.compute_transfer_function_jacobian(delta, x_prior, f)
            p_prior_pred = (
                A @ p_prior @ A.T + self.process_noise_covariance * delta
            )
            # Smoother gain J = P A^T P_pred^(-1): these must be matrix
            # products (@), not element-wise products (*)
            J = p_prior @ A.T @ np.linalg.inv(p_prior_pred)
            innovation = x_smoothed - f @ x_prior
            # Wrap angles of the innovation
            for idx in range(Index.DIM):
                if idx == Index.ROLL or idx == Index.PITCH or idx == Index.YAW:
                    innovation[idx] = np.arctan2(
                        np.sin(innovation[idx]), np.cos(innovation[idx])
                    )
            x_prior_smoothed = x_prior + J @ innovation
            p_prior_smoothed = p_prior + J @ (p_smoothed - p_prior_pred) @ J.T
            self.smoothed_states_vector[ns - 2 - i].set(
                x_prior_smoothed, p_prior_smoothed
            )
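# For reference, a hedged summary of the RTS backward recursion implemented
# above (standard textbook form, with Q the process noise covariance and dt
# the time step between consecutive states):
#
#     P_pred = A P A^T + Q dt
#     J      = P A^T P_pred^(-1)
#     x_s    = x + J (x_next_smoothed - f(x))
#     P_s    = P + J (P_next_smoothed - P_pred) J^T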
def __init__(self, stereo_camera_model, config, overwrite=False):
    self.data = []
    self.sc = stereo_camera_model
    self.camera_name = self.sc.left.name
    self.config = config
    self.overwrite = overwrite

    if config.get("filter") is not None:
        Console.quit(
            "The node 'filter' is no longer used in calibration.yaml. "
            "'stratify' is used instead."
        )

    detection = config.get("detection", {})
    stratify = config.get("stratify", {})
    ransac = config.get("ransac", {})
    # uncertainty_generation = config.get("uncertainty_generation", {})

    self.k = detection.get("window_size", 5)
    self.min_greenness_ratio = detection.get("min_greenness_ratio", 0.01)
    self.num_columns = detection.get("num_columns", 1024)
    self.remap = detection.get("remap", True)
    self.start_row = detection.get("start_row", 0)
    self.end_row = detection.get("end_row", -1)
    self.start_row_b = detection.get("start_row_b", 0)
    self.end_row_b = detection.get("end_row_b", -1)
    self.two_lasers = detection.get("two_lasers", False)

    self.min_z_m = stratify.get("min_z_m", 1)
    self.max_z_m = stratify.get("max_z_m", 20)
    self.number_of_bins = stratify.get("number_of_bins", 5)
    self.max_points_per_bin = stratify.get("max_bin_elements", 300)

    self.max_point_cloud_size = ransac.get("max_cloud_size", 10000)
    self.mdt = ransac.get("min_distance_threshold", 0.002)
    self.ssp = ransac.get("sample_size_ratio", 0.8)
    self.gip = ransac.get("goal_inliers_ratio", 0.999)
    self.max_iterations = ransac.get("max_iterations", 5000)

    self.left_maps = cv2.initUndistortRectifyMap(
        self.sc.left.K,
        self.sc.left.d,
        self.sc.left.R,
        self.sc.left.P,
        (self.sc.left.image_width, self.sc.left.image_height),
        cv2.CV_32FC1,
    )
    self.right_maps = cv2.initUndistortRectifyMap(
        self.sc.right.K,
        self.sc.right.d,
        self.sc.right.R,
        self.sc.right.P,
        (self.sc.right.image_width, self.sc.right.image_height),
        cv2.CV_32FC1,
    )

    self.inliers_1 = None
    self.triples = []
    self.uncertainty_planes = []
    self.in_front_or_behind = []
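# For reference, a hedged sketch of the configuration node this constructor
# expects (key names taken directly from the get() calls above; the values
# shown are just the defaults, not a recommendation):
#
#     detection:
#         window_size: 5
#         min_greenness_ratio: 0.01
#         num_columns: 1024
#         remap: true
#         start_row: 0
#         end_row: -1
#         start_row_b: 0
#         end_row_b: -1
#         two_lasers: false
#     stratify:
#         min_z_m: 1
#         max_z_m: 20
#         number_of_bins: 5
#         max_bin_elements: 300
#     ransac:
#         max_cloud_size: 10000
#         min_distance_threshold: 0.002
#         sample_size_ratio: 0.8
#         goal_inliers_ratio: 0.999
#         max_iterations: 5000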
def parse(self, line):
    """Parses a line of the ACFR vehicle pose data file

    Parameters
    ----------
    line : a string that contains a line of the document
        The string should contain 11 items separated by spaces.
        According to ACFR format, the items should be:
        1) Pose identifier - integer value
        2) Timestamp - in seconds
        3) Latitude - in degrees
        4) Longitude - in degrees
        5) X position (North) - in meters, relative to local nav frame
        6) Y position (East) - in meters, relative to local nav frame
        7) Z position (Depth) - in meters, relative to local nav frame
        8) X-axis Euler angle - in radians, relative to local nav frame
        9) Y-axis Euler angle - in radians, relative to local nav frame
        10) Z-axis Euler angle - in radians, relative to local nav frame
        11) Vehicle altitude - in meters
    """
    parts = line.split()
    if len(parts) != 11:
        Console.error("The line passed to ACFR vehicle pose parser is malformed.")
    self.id = int(parts[0])
    self.stamp = float(parts[1])
    self.latitude = float(parts[2])
    self.longitude = float(parts[3])
    self.x_north = float(parts[4])
    self.y_east = float(parts[5])
    self.z_depth = float(parts[6])
    self.x_euler_angle = math.degrees(float(parts[7]))
    self.y_euler_angle = math.degrees(float(parts[8]))
    self.z_euler_angle = math.degrees(float(parts[9]))
    self.altitude = float(parts[10])
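# A hedged illustration with a hypothetical (made-up) 11-field line; note the
# Euler angles are given in radians on input but stored in degrees:
#
#     pose.parse("7 1568246400.0 59.85 -7.10 12.3 -4.5 1450.2 0.01 -0.02 1.57 3.2")
#     # pose.z_depth -> 1450.2, pose.altitude -> 3.2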
def write_csv(
    csv_filepath: Path,
    data_list: Union[
        List[BodyVelocity],
        List[Orientation],
        List[Depth],
        List[Altitude],
        List[Usbl],
        List[Tide],
        List[Other],
        List[Camera],
        List[SyncedOrientationBodyVelocity],
    ],
    csv_filename: str,
    csv_flag: Optional[bool] = True,
    mutex: Optional[Lock] = None,
):
    if csv_flag is True and len(data_list) > 1:
        csv_file = Path(csv_filepath)
        if not csv_file.exists():
            if mutex is not None:
                mutex.acquire()
            csv_file.mkdir(parents=True, exist_ok=True)
            if mutex is not None:
                mutex.release()
        Console.info("Writing outputs to {}.csv ...".format(csv_filename))
        file = csv_file / "{}.csv".format(csv_filename)
        covariance_file = csv_file / "{}_cov.csv".format(csv_filename)
        fileout = None
        fileout_cov = None
        if len(data_list) > 0:
            fileout = file.open("w")
            # Write headers
            str_to_write = data_list[0].get_csv_header()
            fileout.write(str_to_write)
            if hasattr(data_list[0], "covariance") and hasattr(
                data_list[0], "get_csv_header_cov"
            ):
                if data_list[0].covariance is not None:
                    fileout_cov = covariance_file.open("w")
                    str_to_write_cov = data_list[0].get_csv_header_cov()
                    fileout_cov.write(str_to_write_cov)

        # Write one csv row per item in the data list
        for i in range(len(data_list)):
            try:
                str_to_write = data_list[i].to_csv_row()
                if fileout_cov is not None:
                    str_to_write_cov = data_list[i].to_csv_cov_row()
                    fileout_cov.write(str_to_write_cov)
                fileout.write(str_to_write)
            except IndexError:
                Console.error(
                    "There is something wrong with camera filenames and "
                    "indexing for the file",
                    csv_filename,
                )
                Console.quit("Check write_csv function.")
        fileout.close()
        if fileout_cov is not None:
            fileout_cov.close()
        Console.info("... done writing {}.csv.".format(csv_filename))
    else:
        Console.warn("Empty data list {}".format(str(csv_filename)))
def print_report(self):
    Console.info("EKF measurements report:")
    for key in self.measurements:
        Console.info(
            "\t",
            key,
            "measurements:",
            self.measurements[key].dropped,
            "/",
            self.measurements[key].total,
            "dropped",
        )
def check_pattern(config):
    if config["pattern"] in ("Circles", "circles"):
        pattern = Patterns.Circles
    elif config["pattern"] in ("ACircles", "acircles"):
        pattern = Patterns.ACircles
    elif config["pattern"] in ("Chessboard", "chessboard"):
        pattern = Patterns.Chessboard
    else:
        Console.quit(
            "The available patterns are: Circles, Chessboard or ACircles.",
            "Please check you did not misspell the pattern type.",
        )
    return pattern
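# Example usage (a hedged sketch with a minimal hypothetical config dict):
#
#     pattern = check_pattern({"pattern": "chessboard"})   # Patterns.Chessboard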
def set_loader(self, loader_name):
    if loader_name == "xviii":
        self._loader = xviii.loader
        self._loader_name = "xviii"
    elif loader_name == "default":
        self._loader = default.loader
        self._loader_name = "default"
    elif loader_name == "rosbag":
        self._loader = rosbag.loader
        self._loader_name = "rosbag"
    else:
        Console.quit("The loader type", loader_name, "is not implemented.")
    Console.info("Loader set to", loader_name)
def __call__(self, img_file):
    if self.bit_depth is None:
        Console.quit("Set the bit_depth in the loader first.")
    if self._loader_name == "rosbag":
        return self._loader(
            img_file,
            self.topic,
            self.bagfile_list,
            self.tz_offset_s,
            self.bit_depth,
        )
    return self._loader(img_file, src_bit=self.bit_depth)
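# Example usage (a hedged sketch; `loader` stands for an instance of this
# class with bit_depth already set, and the image filename is hypothetical):
#
#     loader.set_loader("default")
#     img = loader("image_0001.tif")   # dispatches to the "default" backend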
def line_is_valid(self, line, line_split):
    start_or_heading = (
        line[0] == PhinsHeaders.START or line[0] == PhinsHeaders.HEADING
    )
    if len(line_split) == 2 and start_or_heading:
        # Verify the checksum, as a lot of broken packets are found in
        # phins data
        check_sum = str(line_split[1])
        # Extract the payload between $ and * as per the phins manual
        string_to_check = ",".join(line)
        string_to_check = string_to_check[1:]
        string_sum = 0
        for i in range(len(string_to_check)):
            string_sum ^= ord(string_to_check[i])
        if str(hex(string_sum)[2:].zfill(2).upper()) == check_sum.upper():
            return True
        Console.warn("Broken packet: " + str(line))
        Console.warn(
            "Check sum calculated " + str(hex(string_sum).zfill(2).upper())
        )
        Console.warn("Does not match that provided " + str(check_sum.upper()))
        Console.warn("Ignore and move on")
    return False
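# A hedged worked example of the NMEA-style XOR checksum computed above: for
# a hypothetical payload "PIXSE,ATITUD,1.0,2.0" (the text between "$" and
# "*"), XOR the ordinal of every character and compare the two-digit
# uppercase hex result against the transmitted checksum:
#
#     payload = "PIXSE,ATITUD,1.0,2.0"
#     csum = 0
#     for c in payload:
#         csum ^= ord(c)
#     computed = hex(csum)[2:].zfill(2).upper()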
def call_process(args):
    """Perform processing on source images using correction parameters
    generated in parse and outputs corrected images

    Parameters
    ----------
    args : parse_args object
        User provided arguments for path of source images
    """
    path = Path(args.path).resolve()
    time_string = time.strftime("%Y%m%d_%H%M%S", time.localtime())
    Console.set_logging_file(
        get_processed_folder(path)
        / ("log/" + time_string + "_correct_images_process.log")
    )
    correct_config, camerasystem = load_configuration_and_camera_system(
        path, args.suffix
    )

    for camera in camerasystem.cameras:
        Console.info("Processing for camera", camera.name)
        if len(camera.image_list) == 0:
            Console.info("No images found for the camera at the path provided...")
            continue
        corrector = Corrector(args.force, args.suffix, camera, correct_config, path)
        if corrector.camera_found:
            corrector.process()

    Console.info("Process completed for all cameras...")
def parse_NOC_polpred(mission, vehicle, category, ftype, outpath):
    # Parser metadata
    sensor_string = "autosub"
    output_format = ftype

    data_list = []
    if category == Category.TIDE:
        filepath = mission.tide.filepath
        timezone = mission.tide.timezone
        timeoffset = mission.tide.timeoffset
        timezone_offset = read_timezone(timezone)
        tide = Tide(mission.tide.std_offset)
        tide.sensor_string = sensor_string
        path = get_raw_folder(outpath / ".." / filepath)
        file_list = get_file_list(path)

        Console.info("... parsing NOC tide data")

        # Data format sample:
        #   Date        Time    Level   Speed   Direc'n
        #                         m      m/s      deg
        #   6/ 9/2019   00:00    0.74    0.14     51
        #   6/ 9/2019   01:00    0.58    0.15     56
        for file in file_list:
            with file.open("r", errors="ignore") as tide_file:
                # Skip the 6 header lines of the file
                for line in tide_file.readlines()[6:]:
                    dd = int(line[0:2])
                    mm = int(line[3:5])
                    yyyy = int(line[6:10])
                    hour = int(line[12:14])
                    mins = int(line[15:17])
                    # Current models only provide resolution in minutes
                    secs = 0
                    msec = 0
                    epoch_time = date_time_to_epoch(
                        yyyy, mm, dd, hour, mins, secs, timezone_offset
                    )
                    epoch_timestamp = epoch_time + msec / 1000 + timeoffset
                    tide.epoch_timestamp = epoch_timestamp
                    tide.height = float(line[22:28])
                    tide.height_std = tide.height * tide.height_std_factor

                    data = tide.export(output_format)
                    data_list.append(data)
    return data_list
def get_camera_idx(self):
    idx = [
        i
        for i, camera_config in enumerate(self.cameraconfigs)
        if camera_config.camera_name == self.camera_name
    ]
    if len(idx) > 0:
        return idx[0]
    Console.warn(
        "The camera",
        self.camera_name,
        "could not be found in the correct_images.yaml",
    )
    return None
def __init__(self, filename=None):
    """Construct the configuration object from a correct_images.yaml file

    Parameters
    ----------
    filename : Path
        path to the correct_images.yaml file
    """
    if filename is None:
        return
    with filename.open("r") as stream:
        data = yaml.safe_load(stream)
    self.color_correction = None
    self.camerarescale = None

    if "version" not in data:
        Console.error(
            "It seems you are using an old correct_images.yaml.",
            "You will have to delete it and run this software again.",
        )
        Console.error("Delete the file with:")
        Console.error("    rm", filename)
        Console.quit("Wrong correct_images.yaml format")
    self.version = data["version"]

    valid_methods = ["manual_balance", "colour_correction"]

    self.method = data["method"]
    if self.method not in valid_methods:
        Console.quit(
            "The method requested (",
            self.method,
            ") is not supported or implemented. The valid methods",
            "are:",
            " ".join(m for m in valid_methods),
        )
    if "colour_correction" in data:
        self.color_correction = ColourCorrection(data["colour_correction"])
    node = data["cameras"]
    self.configs = CameraConfigs(node)
    node = data["output_settings"]
    self.output_settings = OutputSettings(node)
    if "rescale" in data:
        self.camerarescale = CameraRescale(data["rescale"])
def find_navigation_csv(
    base, distance_path="json_renav_*", solution="ekf", camera="Cam51707923"
):
    base_processed = get_processed_folder(base)
    json_list = list(base_processed.glob(distance_path))
    if len(json_list) == 0:
        Console.quit("No navigation solution could be found at", base_processed)
    nav_csv_filepath = json_list[0] / ("csv/" + solution)
    nav_csv_filepath = nav_csv_filepath / (
        "auv_" + solution + "_" + camera + ".csv"
    )
    if not nav_csv_filepath.exists():
        Console.quit("No navigation solution could be found at", nav_csv_filepath)
    return nav_csv_filepath
def laser(self):
    if "laser_calibration" not in self.calibration_config["cameras"][0]:
        Console.quit(
            'There is no field "laser_calibration" for the first',
            "camera in the calibration.yaml",
        )
    if "laser_calibration" not in self.calibration_config["cameras"][1]:
        Console.quit(
            'There is no field "laser_calibration" for the second',
            "camera in the calibration.yaml",
        )
    c0 = self.calibration_config["cameras"][0]
    c1 = self.calibration_config["cameras"][1]
    self.laser_imp(c1, c0)
    if len(self.calibration_config["cameras"]) > 2:
        c1 = self.calibration_config["cameras"][2]
        self.laser_imp(c0, c1)
def find_image_path(
    base,
    correct_images="attenuation_correction/developed_*",
    correction="altitude_corrected/m30_std10",
):
    correct_images_list = list(base.glob(correct_images))
    if len(correct_images_list) == 0:
        Console.quit("No correct_images solution could be found at", base)
    image_folder = correct_images_list[0] / correction
    if not image_folder.exists():
        Console.quit("No correct_images solution could be found at", image_folder)
    return image_folder
def parse(self):
    # Parse phins data
    Console.info("... parsing phins standard data")
    data_list = []
    path = get_raw_folder(self.outpath / ".." / self.filepath / self.filename)
    with path.open("r", encoding="utf-8", errors="ignore") as filein:
        for complete_line in filein.readlines():
            # Split the packet payload from its trailing checksum
            line_and_md5 = complete_line.strip().split("*")
            line = line_and_md5[0].strip().split(",")
            if not self.line_is_valid(line, line_and_md5):
                continue
            header = line[1]
            data = self.process_line(header, line)
            if data is not None:
                data_list.append(data)
    return data_list
def epoch_timestamp_from_phins(self, line):
    epoch_timestamp = None
    time_string = str(line[2])
    if len(time_string) == 10:
        hour = int(time_string[0:2])
        mins = int(time_string[2:4])
        try:
            secs = int(time_string[4:6])
            # phins sometimes returns 60s...
            if secs < 60:
                msec = int(time_string[7:10])
                epoch_timestamp = self.get(hour, mins, secs, msec)
        except Exception as exc:
            Console.warn(
                "Badly formatted packet (PHINS TIME): "
                + time_string
                + " Exception: "
                + str(exc)
            )
    else:
        Console.warn("Badly formatted packet (PHINS TIME): " + str(line))
    return epoch_timestamp
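# Worked example (format inferred from the slicing above, i.e. "HHMMSS.mmm"):
# the time string "123456.789" decodes to hour=12, mins=34, secs=56 and
# msec=789; index 6 (the decimal separator) is deliberately skipped.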
def rosbag_topic_worker(
    bagfile_list, wanted_topic, data_object, data_list, output_format, output_dir
):
    """Process a topic from a rosbag, calling the `from_ros` method of an object

    Parameters
    ----------
    bagfile_list : list
        list of paths to rosbags
    wanted_topic : str
        Wanted topic
    data_object : object
        Object that implements a `from_ros(msg, msg_type, output_dir)` method
    data_list : list
        List of data output
    output_format : str
        Output format
    output_dir : str
        Output directory

    Returns
    -------
    list
        Processed data list
    """
    if wanted_topic is None:
        Console.quit("The topic to parse from the bagfile is not specified")
    for bagfile in bagfile_list:
        bag = rosbag.Bag(bagfile, "r")
        for topic, msg, _ in bag.read_messages(topics=[wanted_topic]):
            if topic == wanted_topic:
                func = getattr(data_object, "from_ros")
                type_str = str(type(msg))
                # The rosbag library does not store a clean message type,
                # so we need to build it ourselves from a dirty string
                msg_type = type_str.split(".")[1][1:-2].replace("__", "/")
                func(msg, msg_type, output_dir)
                if data_object.valid():
                    data = data_object.export(output_format)
                    data_list.append(data)
    return data_list
def parse(self, line):
    """Parses a line of the ACFR stereo pose data file

    Parameters
    ----------
    line : a string that contains a line of the document
        The string should contain 15 items separated by spaces.
        According to ACFR format, the items should be:
        1) Pose identifier - integer value
        2) Timestamp - in seconds
        3) Latitude - in degrees
        4) Longitude - in degrees
        5) X position (North) - in meters, relative to local nav frame
        6) Y position (East) - in meters, relative to local nav frame
        7) Z position (Depth) - in meters, relative to local nav frame
        8) X-axis Euler angle - in radians, relative to local nav frame
        9) Y-axis Euler angle - in radians, relative to local nav frame
        10) Z-axis Euler angle - in radians, relative to local nav frame
        11) Left image name
        12) Right image name
        13) Vehicle altitude - in meters
        14) Approx. bounding image radius - in meters
        15) Likely trajectory cross-over point - 1 for true, 0 for false
    """
    parts = line.split()
    if len(parts) != 15:
        Console.error("The line passed to ACFR stereo pose parser is malformed.")
    self.id = int(parts[0])
    self.stamp = float(parts[1])
    self.latitude = float(parts[2])
    self.longitude = float(parts[3])
    self.x_north = float(parts[4])
    self.y_east = float(parts[5])
    self.z_depth = float(parts[6])
    self.x_euler_angle = math.degrees(float(parts[7]))
    self.y_euler_angle = math.degrees(float(parts[8]))
    self.z_euler_angle = math.degrees(float(parts[9]))
    self.left_image_name = str(parts[10])
    self.right_image_name = str(parts[11])
    self.altitude = float(parts[12])
    self.bounding_image_radius = float(parts[13])
    self.crossover_likelihood = int(parts[14])
def load_memmap_from_numpyfilelist(filepath, numpyfilelist: list):
    """Generate memmaps from numpy arrays

    Parameters
    ----------
    filepath : Path
        path to output memmap folder
    numpyfilelist : list
        list of paths to numpy files

    Returns
    -------
    Path, numpy.ndarray
        memmap_path and memmap_handle
    """
    image = np.load(str(numpyfilelist[0]))
    list_shape = [len(numpyfilelist)]
    list_shape = list_shape + list(image.shape)

    filename_map = "memmap_" + str(uuid.uuid4()) + ".map"
    memmap_path = Path(filepath) / filename_map
    memmap_handle = np.memmap(
        filename=memmap_path,
        mode="w+",
        shape=tuple(list_shape),
        dtype=np.float32,
    )
    Console.info("Loading memmaps from numpy files...")

    def memmap_loader(numpyfilelist, memmap_handle, idx):
        memmap_handle[idx, ...] = np.load(numpyfilelist[idx])

    with tqdm_joblib(tqdm(desc="numpy images to memmap", total=len(numpyfilelist))):
        joblib.Parallel(n_jobs=-2, verbose=0)(
            joblib.delayed(memmap_loader)(numpyfilelist, memmap_handle, idx)
            for idx in range(len(numpyfilelist))
        )
    return memmap_path, memmap_handle
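# Example usage (a hedged sketch; the folder and file names are hypothetical,
# and all .npy files are assumed to share one shape, as the code above
# requires):
#
#     npy_files = sorted(Path("images_npy").glob("*.npy"))
#     mmap_path, mmap = load_memmap_from_numpyfilelist(Path("/tmp"), npy_files)
#     mean_image = mmap.mean(axis=0)   # behaves like a float32 ndarray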
def clamp_rotation(self, rotation):
    if abs(rotation) > 3 * math.pi:
        Console.warn(
            "Absolute value of angle (",
            rotation,
            ") > 3*pi. "
            "This is not supposed to happen. This tends to happen when "
            "there are no sensor readings for long periods of time, "
            "either because there aren't any in this span of time, or due "
            "to the Mahalanobis Distance threshold being exceeded "
            "repeatedly. Check the covariance matrices and the sensor "
            "uncertainties used for the EKF (they probably need to be "
            "bigger), or, as a workaround, use a larger Mahalanobis "
            "Distance threshold.",
        )
    while rotation > math.pi:
        rotation -= 2 * math.pi
    while rotation < -math.pi:
        rotation += 2 * math.pi
    return rotation
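# Worked example of the wrapping behaviour: clamp_rotation(4.0) returns
# 4.0 - 2*pi (about -2.283 rad) and clamp_rotation(-3.5) returns -3.5 + 2*pi
# (about 2.783 rad); both results lie in (-pi, pi].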