    def __init__(self, stereo_camera_model, config, overwrite=False):
        self.data = []

        self.sc = stereo_camera_model
        self.camera_name = self.sc.left.name
        self.config = config
        self.overwrite = overwrite

        if config.get("filter") is not None:
            Console.quit(
                "The node 'filter' is no longer used in calibration.yaml. "
                "'stratify' is used instead."
            )
        detection = config.get("detection", {})
        stratify = config.get("stratify", {})
        ransac = config.get("ransac", {})
        # uncertainty_generation = config.get("uncertainty_generation", {})

        self.k = detection.get("window_size", 5)
        self.min_greenness_ratio = detection.get("min_greenness_ratio", 0.01)
        self.num_columns = detection.get("num_columns", 1024)
        self.remap = detection.get("remap", True)
        self.start_row = detection.get("start_row", 0)
        self.end_row = detection.get("end_row", -1)
        self.start_row_b = detection.get("start_row_b", 0)
        self.end_row_b = detection.get("end_row_b", -1)
        self.two_lasers = detection.get("two_lasers", False)

        self.min_z_m = stratify.get("min_z_m", 1)
        self.max_z_m = stratify.get("max_z_m", 20)
        self.number_of_bins = stratify.get("number_of_bins", 5)
        self.max_points_per_bin = stratify.get("max_bin_elements", 300)

        self.max_point_cloud_size = ransac.get("max_cloud_size", 10000)
        self.mdt = ransac.get("min_distance_threshold", 0.002)
        self.ssp = ransac.get("sample_size_ratio", 0.8)
        self.gip = ransac.get("goal_inliers_ratio", 0.999)
        self.max_iterations = ransac.get("max_iterations", 5000)

        self.left_maps = cv2.initUndistortRectifyMap(
            self.sc.left.K,
            self.sc.left.d,
            self.sc.left.R,
            self.sc.left.P,
            (self.sc.left.image_width, self.sc.left.image_height),
            cv2.CV_32FC1,
        )
        self.right_maps = cv2.initUndistortRectifyMap(
            self.sc.right.K,
            self.sc.right.d,
            self.sc.right.R,
            self.sc.right.P,
            (self.sc.right.image_width, self.sc.right.image_height),
            cv2.CV_32FC1,
        )

        self.inliers_1 = None
        self.triples = []
        self.uncertainty_planes = []
        self.in_front_or_behind = []
def call_rescale(args):
    path = Path(args.path).resolve()

    time_string = time.strftime("%Y%m%d_%H%M%S", time.localtime())
    Console.set_logging_file(
        get_processed_folder(path) /
        ("log/" + time_string + "_correct_images_rescale.log"))

    correct_config, camerasystem = load_configuration_and_camera_system(
        path, args.suffix)

    # install freeimage plugins if not installed
    imageio.plugins.freeimage.download()

    if correct_config.camerarescale is None:
        Console.error("Camera rescale configuration not found")
        Console.error(
            "Please populate the correct_images.yaml file with a rescale configuration"
        )
        Console.quit("Malformed correct_images.yaml file")

    # obtain parameters for rescale from correct_config
    rescale_cameras = correct_config.camerarescale.rescale_cameras

    for camera in rescale_cameras:
        corrections.rescale_camera(path, camerasystem, camera)
    Console.info("Rescaling completed for all cameras ...")
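# Hedged usage sketch: call_rescale() expects an argparse-style namespace with
# "path" and "suffix" attributes, as read above; the dive path is a placeholder.
from argparse import Namespace
call_rescale(Namespace(path="/data/raw/2020/dive_01", suffix=""))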
Example #3
    def get_altitude_and_depth_maps(self):
        """Generate distance matrix numpy files and save them"""
        # read altitude / depth map depending on distance_metric
        if self.distance_metric == "uniform":
            Console.info("Null distance matrix created")
            self.depth_map_list = []
            return
        elif self.distance_metric == "altitude":
            Console.info("Null distance matrix created")
            self.depth_map_list = []
            return
        elif self.distance_metric == "depth_map":
            path_depth = self.path_processed / "depth_map"
            if not path_depth.exists():
                Console.quit("Depth maps not found...")
            else:
                Console.info("Path to depth maps found...")
                depth_map_list = list(path_depth.glob("*.npy"))
                self.depth_map_list = [
                    Path(item)
                    for item in depth_map_list
                    for image_path in self.camera_image_list
                    if Path(image_path).stem in Path(item).stem
                ]

                if len(self.camera_image_list) != len(self.depth_map_list):
                    Console.quit(
                        "The number of images does not coincide with the ",
                        "number of depth maps.",
                    )
def parse_interlacer(outpath, filename):
    data = []
    value = []
    data_ordered = []

    filepath = outpath / filename

    try:
        with filepath.open("r") as json_file:
            data = json.load(json_file)
            for i, data_packet in enumerate(data):
                value.append(str(float(data_packet["epoch_timestamp"])))

    except ValueError:
        Console.quit("Error: no data in JSON file")

    # sort data in order of epoch_timestamp
    sorted_index, sorted_items = sort_values(value)

    # store interlaced data in order of time
    for i in range(len(data)):
        data_ordered.append((data[sorted_index[i]]))

    # write out interlaced json file
    with filepath.open("w") as fileout:
        json.dump(data_ordered, fileout, indent=2)
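# Hedged usage sketch: re-order an existing nav_standard.json in place by
# epoch_timestamp (the dive path below is a placeholder).
from pathlib import Path
parse_interlacer(Path("/data/processed/2020/dive_01/nav"), "nav_standard.json")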
Example #5
def memmap_loader(
    image_list,
    memmap_handle,
    idx,
    loader=default.loader,
    new_width=None,
    new_height=None,
):
    if image_list[idx] is None:
        Console.error("Image at idx", idx, "is None")
        Console.error(
            "Please check your navigation CSV for any missing values")
        Console.quit("Image list is malformed")
    np_im = loader(image_list[idx]).astype(np.float32)

    dimensions = np_im.shape

    if new_height is not None and new_width is not None:
        same_dimensions = (new_width == dimensions[1]) and (
            new_height == dimensions[0])
        if not same_dimensions:
            # cv2.resize takes the interpolation flag as a keyword argument;
            # the third positional argument would be the dst buffer.
            im2 = cv2.resize(np_im, (new_width, new_height),
                             interpolation=cv2.INTER_CUBIC)
            memmap_handle[idx, ...] = im2
        else:
            memmap_handle[idx, ...] = np_im
    else:
        memmap_handle[idx, ...] = np_im
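# Hedged usage sketch: fill slot 0 of a float32 memmap with the first image,
# resized to a common size. File names, image size and channel count are
# placeholders and must match what the configured loader actually returns.
import numpy as np
image_list = ["image_0001.png", "image_0002.png"]
memmap_handle = np.memmap("images.dat", dtype=np.float32, mode="w+",
                          shape=(len(image_list), 1024, 1280, 3))
memmap_loader(image_list, memmap_handle, 0, new_width=1280, new_height=1024)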
    def __init__(self, node):
        """__init__ is the constructor function

        Parameters
        -----------
        node : cdict
            dictionary object for an entry in correct_images.yaml file
        """
        valid_distance_metrics = ["uniform", "altitude", "depth_map"]
        self.distance_metric = node["distance_metric"]

        if self.distance_metric == "none":
            self.distance_metric = "uniform"
            Console.warn("Distance metric is set to none. This is deprecated.",
                "Please use uniform instead.")
        if self.distance_metric not in valid_distance_metrics:
            Console.error("Distance metric not valid. Please use one of the following:",
                valid_distance_metrics)
            Console.quit("Invalid distance metric: {}".format(self.distance_metric))

        self.metric_path = node["metric_path"]
        self.altitude_max = node["altitude_filter"]["max_m"]
        self.altitude_min = node["altitude_filter"]["min_m"]
        self.smoothing = node["smoothing"]
        self.window_size = node["window_size"]
        self.outlier_reject = node["curve_fitting_outlier_rejection"]
def debayer(image: np.ndarray, pattern: str) -> np.ndarray:
    """Perform debayering of input image

    Parameters
    -----------
    image : numpy.ndarray
        image data to be debayered
    pattern : string
        bayer pattern

    Returns
    -------
    numpy.ndarray
        Debayered image
    """

    # Make use of 16 bit debayering
    image16_float = image.astype(np.float32) * (2**16 - 1)
    image16 = image16_float.clip(0, 2**16 - 1).astype(np.uint16)

    corrected_rgb_img = None
    if pattern == "rggb" or pattern == "RGGB":
        corrected_rgb_img = cv2.cvtColor(image16, cv2.COLOR_BAYER_BG2RGB_EA)
    elif pattern == "grbg" or pattern == "GRBG":
        corrected_rgb_img = cv2.cvtColor(image16, cv2.COLOR_BAYER_GB2RGB_EA)
    elif pattern == "bggr" or pattern == "BGGR":
        corrected_rgb_img = cv2.cvtColor(image16, cv2.COLOR_BAYER_RG2RGB_EA)
    elif pattern == "gbrg" or pattern == "GBRG":
        corrected_rgb_img = cv2.cvtColor(image16, cv2.COLOR_BAYER_GR2RGB_EA)
    else:
        Console.quit("Bayer pattern not supported (", pattern, ")")

    # Scale down to unitary
    corrected_rgb_img = corrected_rgb_img.astype(np.float32) * (2**(-16))
    return corrected_rgb_img
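# Hedged usage sketch: debayer a synthetic RGGB mosaic. The function assumes the
# input is already scaled to the unit range [0, 1], as the 16-bit conversion
# above implies; the 8x8 size is a placeholder.
import numpy as np
mosaic = np.random.rand(8, 8).astype(np.float32)
rgb = debayer(mosaic, "RGGB")
print(rgb.shape, rgb.dtype)  # (8, 8, 3) float32, still in [0, 1]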
Example #8
def write_csv(
    csv_filepath: Path,
    data_list: Union[List[BodyVelocity], List[Orientation], List[Depth],
                     List[Altitude], List[Usbl], List[Tide], List[Other],
                     List[Camera], List[SyncedOrientationBodyVelocity], ],
    csv_filename: str,
    csv_flag: Optional[bool] = True,
    mutex: Optional[Lock] = None,
):
    if csv_flag is True and len(data_list) > 1:
        csv_file = Path(csv_filepath)

        if not csv_file.exists():
            if mutex is not None:
                mutex.acquire()
            csv_file.mkdir(parents=True, exist_ok=True)
            if mutex is not None:
                mutex.release()

        Console.info("Writing outputs to {}.csv ...".format(csv_filename))
        file = csv_file / "{}.csv".format(csv_filename)
        covariance_file = csv_file / "{}_cov.csv".format(csv_filename)

        fileout = None
        fileout_cov = None
        if len(data_list) > 0:
            fileout = file.open("w")
            # write headers
            str_to_write = data_list[0].get_csv_header()
            fileout.write(str_to_write)
            if hasattr(data_list[0], "covariance") and hasattr(
                    data_list[0], "get_csv_header_cov"):
                if data_list[0].covariance is not None:
                    fileout_cov = covariance_file.open("w")
                    str_to_write_cov = data_list[0].get_csv_header_cov()
                    fileout_cov.write(str_to_write_cov)

            # Loop for each line in csv
            for i in range(len(data_list)):
                try:
                    str_to_write = data_list[i].to_csv_row()
                    if fileout_cov is not None:
                        str_to_write_cov = data_list[i].to_csv_cov_row()
                        fileout_cov.write(str_to_write_cov)
                    fileout.write(str_to_write)
                except IndexError:
                    Console.error(
                        "There is something wrong with camera filenames and "
                        "indexing for the file",
                        csv_filename,
                    )
                    Console.quit("Check write_csv function.")
            fileout.close()
            if fileout_cov is not None:
                fileout_cov.close()
            Console.info("... done writing {}.csv.".format(csv_filename))
        else:
            Console.warn("Empty data list {}".format(str(csv_filename)))
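# Hedged usage sketch: write a small list of Depth samples (any of the types in
# the Union above works) to ./csv/depth/depth.csv, creating the folder on first
# use. The two default-constructed Depth objects are placeholders; in practice
# the list comes from one of the parsers.
depth_list = [Depth(), Depth()]
write_csv("./csv/depth", depth_list, "depth", csv_flag=True)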
Example #9
    def set_loader(self, loader_name):
        if loader_name == "xviii":
            self._loader = xviii.loader
            self._loader_name = "xviii"
        elif loader_name == "default":
            self._loader = default.loader
            self._loader_name = "default"
        elif loader_name == "rosbag":
            self._loader = rosbag.loader
            self._loader_name = "rosbag"
        else:
            Console.quit("The loader type", loader_name, "is not implemented.")
        Console.info("Loader set to", loader_name)
def check_pattern(config):
    if config["pattern"] == "Circles" or config["pattern"] == "circles":
        pattern = Patterns.Circles
    elif config["pattern"] == "ACircles" or config["pattern"] == "acircles":
        pattern = Patterns.ACircles
    elif config["pattern"] == "Chessboard" or config["pattern"] == "chessboard":
        pattern = Patterns.Chessboard
    else:
        Console.quit(
            "The available patterns are: Circles, Chessboard or ACircles.",
            "Please check you did not misspell the pattern type.",
        )
    return pattern
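# Hedged usage sketch: resolve the calibration pattern from a calibration.yaml
# node; the dictionary values are placeholders and Patterns is the enum used by
# the calibrators above.
config = {"pattern": "acircles", "rows": 7, "cols": 9, "size": 0.05}
pattern = check_pattern(config)  # -> Patterns.ACircles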
Example #11
    def __call__(self, img_file):
        if self.bit_depth is not None:
            if self._loader_name == "rosbag":
                return self._loader(
                    img_file,
                    self.topic,
                    self.bagfile_list,
                    self.tz_offset_s,
                    self.bit_depth,
                )
            else:
                return self._loader(img_file, src_bit=self.bit_depth)
        else:
            Console.quit("Set the bit_depth in the loader first.")
Example #12
    def __init__(self, filename=None):

        """__init__ is the constructor function

        Parameters
        -----------
        filename : Path
            path to the correct_images.yaml file
        """

        if filename is None:
            return
        with filename.open("r") as stream:
            data = yaml.safe_load(stream)

        self.color_correction = None
        self.camerarescale = None

        if "version" not in data:
            Console.error(
                "It seems you are using an old correct_images.yaml.",
                "You will have to delete it and run this software again.",
            )
            Console.error("Delete the file with:")
            Console.error("    rm ", filename)
            Console.quit("Wrong correct_images.yaml format")
        self.version = data["version"]

        valid_methods = ["manual_balance", "colour_correction"]
        self.method = data["method"]

        if self.method not in valid_methods:
            Console.quit(
                "The method requested (",
                self.method,
                ") is not supported or implemented. The valid methods",
                "are:",
                " ".join(m for m in valid_methods),
            )

        if "colour_correction" in data:
            self.color_correction = ColourCorrection(data["colour_correction"])
        node = data["cameras"]
        self.configs = CameraConfigs(node)
        node = data["output_settings"]
        self.output_settings = OutputSettings(node)
        if "rescale" in data:
            self.camerarescale = CameraRescale(data["rescale"])
Example #13
def find_navigation_csv(base,
                        distance_path="json_renav_*",
                        solution="ekf",
                        camera="Cam51707923"):
    base_processed = get_processed_folder(base)
    json_list = list(base_processed.glob(distance_path))
    if len(json_list) == 0:
        Console.quit("No navigation solution could be found at",
                     base_processed)
    nav_csv_filepath = json_list[0] / ("csv/" + solution)
    nav_csv_filepath = nav_csv_filepath / (
        "auv_" + solution + "_" + camera + ".csv")  # noqa
    if not nav_csv_filepath.exists():
        Console.quit("No navigation solution could be found at",
                     nav_csv_filepath)
    return nav_csv_filepath
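# Hedged usage sketch: locate the EKF navigation CSV for the default camera
# name; the dive path is a placeholder.
from pathlib import Path
nav_csv = find_navigation_csv(Path("/data/raw/2020/dive_01"))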
def calibrate_mono(
    name,
    filepaths,
    extension,
    config,
    output_file,
    fo,
    foa,
    target_width=None,
    target_height=None,
):
    if not check_dirs_exist(filepaths):
        filepaths = get_raw_folders(filepaths)
    Console.info("Looking for {} calibration images in {}".format(
        extension, str(filepaths)))
    image_list = collect_image_files(filepaths, extension)
    Console.info("Found " + str(len(image_list)) + " images.")
    if len(image_list) < 8:
        Console.quit("Too few images. Try to get more.")

    mc = MonoCalibrator(
        boards=[
            ChessboardInfo(config["rows"], config["cols"], config["size"])
        ],
        pattern=check_pattern(config),
        invert=config["invert"],
        name=name,
        target_width=target_width,
        target_height=target_height,
    )
    try:
        image_list_file = output_file.with_suffix(".json")
        if foa or not image_list_file.exists():
            mc.cal(image_list)
            with image_list_file.open("w") as f:
                Console.info("Writing JSON to '{}'".format(image_list_file))
                json.dump(mc.json, f)
        else:
            mc.cal_from_json(image_list_file, image_list)
        mc.report()
        Console.info("Writing calibration to '{}'".format(output_file))
        with output_file.open("w") as f:
            f.write(mc.yaml())
    except CalibrationException:
        Console.error("The calibration pattern was not found in the images.")
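# Hedged usage sketch: calibrate a single camera from a folder of chessboard
# images. Paths, board geometry and the glob pattern are placeholders; the
# config keys mirror those read by check_pattern and MonoCalibrator above.
from pathlib import Path
calibrate_mono(
    "cam_left",
    [Path("/data/raw/2020/dive_01/calibration/cam_left")],
    "*.png",
    {"rows": 6, "cols": 9, "size": 0.1, "pattern": "chessboard", "invert": False},
    Path("mono_cam_left.yaml"),
    fo=False,
    foa=False,
)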
Example #15
def merge_json_files(json_file_list):
    # Check that all origins are the same
    origin_lat = None
    origin_lon = None
    data_list = []
    for fn in json_file_list:
        filepath = Path(fn)
        data = []
        with filepath.open("r") as json_file:
            data = json.load(json_file)
        # Origins are by default at the top of the json file
        lat = data[0]["data"][0]["latitude"]
        lon = data[0]["data"][0]["longitude"]
        if origin_lat is None:
            origin_lat = lat
            origin_lon = lon
            data_list.append(data[0])
        elif origin_lat != lat or origin_lon != lon:
            Console.error(
                "The datasets you want to merge do not belong to the same",
                "origin.",
            )
            Console.error(
                "Change the origins to be identical and parse them again.")
            Console.quit("Invalid origins for merging datasets.")

        # Get dive name
        # json_file_list =   .../DIVE_NAME/nav/nav_standard.json
        dive_prefix = filepath.parents[1].name + "/"

        # Prepend the dive name to all filenames
        # And skip the origin
        for item in data[1:]:
            if "camera1" in item:
                item["camera1"][0]["filename"] = (
                    dive_prefix + item["camera1"][0]["filename"])
            if "camera2" in item:
                item["camera2"][0]["filename"] = (
                    dive_prefix + item["camera2"][0]["filename"])
            if "camera3" in item:
                item["camera3"][0]["filename"] = (
                    dive_prefix + item["camera3"][0]["filename"])
            if item["category"] == "laser":
                item["filename"] = dive_prefix + item["filename"]
            data_list.append(item)

    return data_list
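# Hedged usage sketch: merge the parsed navigation of two dives that share the
# same origin (dive paths are placeholders).
merged = merge_json_files([
    "/data/processed/2020/dive_01/nav/nav_standard.json",
    "/data/processed/2020/dive_02/nav/nav_standard.json",
])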
    def laser(self):
        if "laser_calibration" not in self.calibration_config["cameras"][0]:
            Console.quit(
                'There is no field "laser_calibration" for the first',
                "camera in the calibration.yaml",
            )
        if "laser_calibration" not in self.calibration_config["cameras"][1]:
            Console.quit(
                'There is no field "laser_calibration" for the second',
                "camera in the calibration.yaml",
            )
        c0 = self.calibration_config["cameras"][0]
        c1 = self.calibration_config["cameras"][1]
        self.laser_imp(c1, c0)
        if len(self.calibration_config["cameras"]) > 2:
            c1 = self.calibration_config["cameras"][2]
            self.laser_imp(c0, c1)
Example #17
def find_image_path(
    base,
    correct_images="attenuation_correction/developed_*",
    correction="altitude_corrected/m30_std10",
):
    correct_images_list = list(base.glob(correct_images))
    if len(correct_images_list) == 0:
        Console.quit("No correct_images solution could be found at", base)

    image_folder = correct_images_list[0] / correction
    if not image_folder.exists():
        Console.quit(
            "No correct_images solution could be found at",
            image_folder,
        )
    return image_folder
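# Hedged usage sketch: locate a corrected-image folder under a processed dive
# using the default glob patterns above; the base path is a placeholder.
from pathlib import Path
image_folder = find_image_path(
    Path("/data/processed/2020/dive_01/image/Cam51707923"))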
def rosbag_topic_worker(bagfile_list, wanted_topic, data_object, data_list,
                        output_format, output_dir):
    """Process a topic from a rosbag calling a method from an object

    Parameters
    ----------
    bagfile_list : list
        list of paths to rosbags
    wanted_topic : str
        Wanted topic
    data_object : object
        Object that implements `from_ros(msg, msg_type, output_dir)`, which is
        called for every message read from the topic
    data_list : list
        List of data output
    output_format : str
        Output format
    output_dir: str
        Output directory

    Returns
    -------
    list
        Processed data list
    """
    if wanted_topic is None:
        Console.quit("No topic specified for the rosbag worker")
    for bagfile in bagfile_list:
        bag = rosbag.Bag(bagfile, "r")
        for topic, msg, _ in bag.read_messages(topics=[wanted_topic]):
            if topic == wanted_topic:
                func = getattr(data_object, "from_ros")
                type_str = str(type(msg))
                # rosbag library does not store a clean message type,
                # so we need to make it ourselves from a dirty string
                msg_type = type_str.split(".")[1][1:-2].replace("__", "/")
                func(msg, msg_type, output_dir)
                if data_object.valid():
                    data = data_object.export(output_format)
                    data_list.append(data)
    return data_list
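# Hedged usage sketch: extract every depth message from a single bag. The topic
# name and bag path are placeholders; Depth is assumed to implement from_ros(),
# valid() and export(), as the docstring above describes.
depth_list = rosbag_topic_worker(
    ["dive_01.bag"], "/sensors/depth", Depth(), [], "oplab", "./out")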
    def __init__(self, filepath, force_overwite=False, overwrite_all=False):
        filepath = Path(filepath).resolve()
        self.filepath = get_raw_folder(filepath)
        self.fo = force_overwite
        self.foa = overwrite_all

        if self.foa:
            self.fo = True

        self.configuration_path = (get_config_folder(self.filepath.parent) /
                                   "calibration")
        calibration_config_file = self.configuration_path / "calibration.yaml"
        if calibration_config_file.exists():
            Console.info("Loading existing calibration.yaml at {}".format(
                calibration_config_file))
        else:
            root = Path(__file__).parents[1]
            default_file = root / "auv_cal/default_yaml" / "default_calibration.yaml"
            Console.warn("Cannot find {}, generating default from {}".format(
                calibration_config_file, default_file))
            # save localisation yaml to processed directory
            default_file.copy(calibration_config_file)

            Console.warn("Edit the file at: \n\t" +
                         str(calibration_config_file))
            Console.warn(
                "Try to use relative paths to the calibration datasets")
            Console.quit(
                "Modify the file calibration.yaml and run this code again.")

        with calibration_config_file.open("r") as stream:
            self.calibration_config = yaml.safe_load(stream)

        # Create the calibration folder at the same level as the dives
        self.output_path = get_processed_folder(
            self.filepath.parent) / "calibration"
        if not self.output_path.exists():
            self.output_path.mkdir(parents=True)
        if not self.configuration_path.exists():
            self.configuration_path.mkdir(parents=True)
    def mono(self):
        for c in self.calibration_config["cameras"]:
            cam_name = c["name"]
            # Find if the calibration file exists
            calibration_file = self.output_path / str("mono_" + cam_name +
                                                      ".yaml")
            Console.info("Looking for a calibration file at " +
                         str(calibration_file))
            if calibration_file.exists() and not self.fo:
                Console.warn(
                    "The camera " + c["name"] +
                    " has already been calibrated. If you want to overwrite " +
                    "the JSON, use the -F flag.")
            else:
                Console.info(
                    "The camera is not calibrated, running mono calibration..."
                )
                filepaths = build_filepath(
                    get_processed_folder(self.filepath),
                    c["camera_calibration"]["path"],
                )

                if "glob_pattern" not in c["camera_calibration"]:
                    Console.error(
                        "Could not find the key glob_pattern for the camera ",
                        c["name"],
                    )
                    Console.quit("glob_pattern expected in calibration.yaml")

                calibrate_mono(
                    cam_name,
                    filepaths,
                    str(c["camera_calibration"]["glob_pattern"]),
                    self.calibration_config["camera_calibration"],
                    calibration_file,
                    self.fo,
                    self.foa,
                )
Example #21
def parse_single(filepath, force_overwrite):
    # initiate data and processing flags
    filepath = Path(filepath).resolve()
    filepath = get_raw_folder(filepath)

    if not force_overwrite:
        existing_files = check_output_files_exist(
            get_processed_folder(filepath))
        if existing_files:
            msg = ("It looks like this dataset has already been parsed.\n" +
                   "The following file(s) already exist:\n" + existing_files +
                   "If you would like auv_nav to overwrite the existing " +
                   "files, rerun it with the flag -F.\n" +
                   "Example:   auv_nav parse -F PATH")
            Console.warn(msg)
            Console.warn("Dataset skipped")
            return

    ftype = "oplab"

    # load mission.yaml config file

    mission_file = filepath / "mission.yaml"
    vehicle_file = filepath / "vehicle.yaml"
    mission_file = get_raw_folder(mission_file)
    vehicle_file = get_raw_folder(vehicle_file)
    Console.info("Loading mission.yaml at", mission_file)
    mission = Mission(mission_file)

    Console.info("Loading vehicle.yaml at", vehicle_file)
    vehicle = Vehicle(vehicle_file)

    # copy mission.yaml and vehicle.yaml to processed folder for process step
    mission_processed = get_processed_folder(mission_file)
    vehicle_processed = get_processed_folder(vehicle_file)

    # Write mission with metadata (username, date and hostname)
    mission.write(mission_processed)
    # mission_file.copy(mission_processed)
    vehicle.write(vehicle_processed)

    # check for recognised formats and create nav file
    outpath = get_processed_folder(filepath)
    outpath = outpath / "nav"
    filename = "nav_standard.json"

    # make file path if not exist
    if not outpath.is_dir():
        try:
            outpath.mkdir()
        except Exception as e:
            print("Warning:", e)

    Console.info("Loading raw data...")

    if multiprocessing.cpu_count() < 4:
        cpu_to_use = 1
    else:
        cpu_to_use = multiprocessing.cpu_count() - 2

    try:
        pool = multiprocessing.Pool(cpu_to_use)
    except AttributeError as e:
        print(
            "Error: ",
            e,
            "\n===============\nThis error is known to happen when running "
            "the code more than once from the same console in Spyder. Please "
            "run the code from a new console to prevent this error from "
            "happening. You may close the current console.\n==============",
        )
    pool_list = []

    # read in, parse data and write data
    if not mission.image.empty():
        if mission.image.format == "acfr_standard" or mission.image.format == "unagi":
            pool_list.append(
                pool.apply_async(
                    parse_acfr_images,
                    [mission, vehicle, "images", ftype, outpath],
                ))
        elif mission.image.format == "seaxerocks_3":
            pool_list.append(
                pool.apply_async(
                    parse_seaxerocks_images,
                    [mission, vehicle, "images", ftype, outpath],
                ))
        elif (mission.image.format == "biocam"
              or mission.image.format == "biocam4000_15c"):
            pool_list.append(
                pool.apply_async(
                    parse_biocam_images,
                    [mission, vehicle, "images", ftype, outpath],
                ))
        elif mission.image.format == "ntnu_stereo":
            pool_list.append(
                pool.apply_async(
                    parse_ntnu_stereo_images,
                    [mission, vehicle, "images", ftype, outpath],
                ))
        elif mission.image.format == "rosbag_extracted_images":
            pool_list.append(
                pool.apply_async(
                    parse_rosbag_extracted_images,
                    [mission, vehicle, "images", ftype, outpath],
                ))
        elif mission.image.format == "rosbag":
            pool_list.append(
                pool.apply_async(
                    parse_rosbag,
                    [mission, vehicle, "images", ftype, outpath],
                ))
        else:
            Console.quit("Mission image format", mission.image.format,
                         "not supported.")
    if not mission.usbl.empty():
        if mission.usbl.format == "gaps":
            pool_list.append(
                pool.apply_async(parse_gaps,
                                 [mission, vehicle, "usbl", ftype, outpath]))
        elif mission.usbl.format == "usbl_dump":
            pool_list.append(
                pool.apply_async(parse_usbl_dump,
                                 [mission, vehicle, "usbl", ftype, outpath]))
        elif mission.usbl.format == "NOC_nmea":
            pool_list.append(
                pool.apply_async(parse_NOC_nmea,
                                 [mission, vehicle, "usbl", ftype, outpath]))
        elif mission.usbl.format == "eiva_navipac":
            pool_list.append(
                pool.apply_async(
                    parse_eiva_navipac,
                    [mission, vehicle, "usbl", ftype, outpath],
                ))
        elif mission.usbl.format == "rosbag":
            pool_list.append(
                pool.apply_async(
                    parse_rosbag,
                    [mission, vehicle, "usbl", ftype, outpath],
                ))
        else:
            Console.quit("Mission usbl format", mission.usbl.format,
                         "not supported.")

    if not mission.velocity.empty():
        if mission.velocity.format == "phins":
            pool_list.append(
                pool.apply_async(
                    parse_phins,
                    [mission, vehicle, "velocity", ftype, outpath]))
        elif mission.velocity.format == "ae2000":
            pool_list.append(
                pool.apply_async(
                    parse_ae2000,
                    [mission, vehicle, "velocity", ftype, outpath],
                ))
        elif mission.velocity.format == "alr":
            pool_list.append(
                pool.apply_async(
                    parse_alr,
                    [mission, vehicle, "velocity", ftype, outpath],
                ))
        elif mission.velocity.format == "autosub":
            pool_list.append(
                pool.apply_async(
                    parse_autosub,
                    [mission, vehicle, "velocity", ftype, outpath],
                ))
        elif mission.velocity.format == "rdi":
            pool_list.append(
                pool.apply_async(
                    parse_rdi, [mission, vehicle, "velocity", ftype, outpath]))
        elif mission.velocity.format == "ntnu_dvl":
            pool_list.append(
                pool.apply_async(
                    parse_ntnu_dvl,
                    [mission, vehicle, "velocity", ftype, outpath],
                ))
        elif mission.velocity.format == "rosbag":
            pool_list.append(
                pool.apply_async(
                    parse_rosbag,
                    [mission, vehicle, "velocity", ftype, outpath],
                ))
        else:
            Console.quit(
                "Mission velocity format",
                mission.velocity.format,
                "not supported.",
            )

    if not mission.orientation.empty():
        if mission.orientation.format == "phins":
            pool_list.append(
                pool.apply_async(
                    parse_phins,
                    [mission, vehicle, "orientation", ftype, outpath],
                ))
        elif mission.orientation.format == "ae2000":
            pool_list.append(
                pool.apply_async(
                    parse_ae2000,
                    [mission, vehicle, "orientation", ftype, outpath],
                ))
        elif mission.orientation.format == "alr":
            pool_list.append(
                pool.apply_async(
                    parse_alr,
                    [mission, vehicle, "orientation", ftype, outpath],
                ))
        elif mission.orientation.format == "autosub":
            pool_list.append(
                pool.apply_async(
                    parse_autosub,
                    [mission, vehicle, "orientation", ftype, outpath],
                ))
        elif mission.orientation.format == "rdi":
            pool_list.append(
                pool.apply_async(
                    parse_rdi,
                    [mission, vehicle, "orientation", ftype, outpath],
                ))
        elif mission.orientation.format == "eiva_navipac":
            pool_list.append(
                pool.apply_async(
                    parse_eiva_navipac,
                    [mission, vehicle, "orientation", ftype, outpath],
                ))
        elif mission.orientation.format == "rosbag":
            pool_list.append(
                pool.apply_async(
                    parse_rosbag,
                    [mission, vehicle, "orientation", ftype, outpath],
                ))
        else:
            Console.quit(
                "Mission orientation format",
                mission.orientation.format,
                "not supported.",
            )

    if not mission.depth.empty():
        if mission.depth.format == "phins":
            pool_list.append(
                pool.apply_async(parse_phins,
                                 [mission, vehicle, "depth", ftype, outpath]))
        elif mission.depth.format == "ae2000":
            pool_list.append(
                pool.apply_async(parse_ae2000,
                                 [mission, vehicle, "depth", ftype, outpath]))
        elif mission.depth.format == "alr":
            pool_list.append(
                pool.apply_async(parse_alr,
                                 [mission, vehicle, "depth", ftype, outpath]))
        elif mission.depth.format == "autosub":
            pool_list.append(
                pool.apply_async(parse_autosub,
                                 [mission, vehicle, "depth", ftype, outpath]))
        elif mission.depth.format == "gaps":
            pool_list.append(
                pool.apply_async(parse_gaps,
                                 [mission, vehicle, "depth", ftype, outpath]))
        elif mission.depth.format == "eiva_navipac":
            pool_list.append(
                pool.apply_async(
                    parse_eiva_navipac,
                    [mission, vehicle, "depth", ftype, outpath],
                ))
        elif mission.depth.format == "rosbag":
            pool_list.append(
                pool.apply_async(
                    parse_rosbag,
                    [mission, vehicle, "depth", ftype, outpath],
                ))
        else:
            Console.quit("Mission depth format", mission.depth.format,
                         "not supported.")

    if not mission.altitude.empty():
        if mission.altitude.format == "phins":
            pool_list.append(
                pool.apply_async(
                    parse_phins,
                    [mission, vehicle, "altitude", ftype, outpath]))
        elif mission.altitude.format == "ae2000":
            pool_list.append(
                pool.apply_async(
                    parse_ae2000,
                    [mission, vehicle, "altitude", ftype, outpath],
                ))
        elif mission.altitude.format == "alr":
            pool_list.append(
                pool.apply_async(
                    parse_alr,
                    [mission, vehicle, "altitude", ftype, outpath],
                ))
        elif mission.altitude.format == "autosub":
            pool_list.append(
                pool.apply_async(
                    parse_autosub,
                    [mission, vehicle, "altitude", ftype, outpath],
                ))
        elif mission.altitude.format == "rdi":
            pool_list.append(
                pool.apply_async(
                    parse_rdi, [mission, vehicle, "altitude", ftype, outpath]))
        elif mission.altitude.format == "ntnu_dvl":
            pool_list.append(
                pool.apply_async(
                    parse_ntnu_dvl,
                    [mission, vehicle, "altitude", ftype, outpath],
                ))
        elif mission.altitude.format == "rosbag":
            pool_list.append(
                pool.apply_async(
                    parse_rosbag,
                    [mission, vehicle, "altitude", ftype, outpath],
                ))
        else:
            Console.quit(
                "Mission altitude format",
                mission.altitude.format,
                "not supported.",
            )

    if not mission.tide.empty():
        if mission.tide.format == "NOC_polpred":
            tide_list = parse_NOC_polpred(mission, vehicle, "tide", ftype,
                                          outpath)
        else:
            Console.quit("Mission tide format", mission.tide.format,
                         "not supported.")
    else:
        tide_list = None

    pool.close()
    pool.join()

    Console.info("...done loading raw data.")
    Console.info("Compile data list...")

    data_list = [[{
        "epoch_timestamp": 0.0,
        "class": "origin",
        "category": "origin",
        "data": [{
            "latitude": mission.origin.latitude,
            "longitude": mission.origin.longitude,
            "crs": mission.origin.crs,
            "date": mission.origin.date,
        }],
    }]]

    # Set the flag in advance so we are not comparing formats for every i in pool_list
    if mission.image.format == "biocam":
        correcting_timestamps = True
    else:
        correcting_timestamps = False
    for i in pool_list:
        results = i.get()
        # If current retrieved data is DEPTH
        # and if TIDE data is available
        if len(results) < 1:
            continue
        if results[0] is None:
            Console.warn(
                "Some results are empty. Please check whether this is correct or not"
            )
            continue
        if correcting_timestamps:
            if results[0]["category"] == "image":
                Console.info("Correction of BioCam cpu timestamps...")
                results = correct_timestamps(results)
        if (results[0]["category"] == Category.DEPTH
                or results[0]["category"] == Category.USBL):
            if not mission.tide.empty():
                # proceed to tidal correction
                Console.info("Tidal correction of depth vector...")
                # Offset depth to account for tides
                j = 0
                for k in range(len(results)):
                    while (j < len(tide_list)
                           and tide_list[j]["epoch_timestamp"] <
                           results[k]["epoch_timestamp"]):
                        j = j + 1

                    if j >= 1:
                        _result = interpolate(
                            results[k]["epoch_timestamp"],
                            tide_list[j - 1]["epoch_timestamp"],
                            tide_list[j]["epoch_timestamp"],
                            tide_list[j - 1]["data"][0]["height"],
                            tide_list[j]["data"][0]["height"],
                        )
                        if results[0]["category"] == Category.DEPTH:
                            results[k]["data"][0]["depth"] = (
                                results[k]["data"][0]["depth"] - _result)
                        elif results[0]["category"] == Category.USBL:
                            results[k]["data_target"][4]["depth"] = (
                                results[k]["data_target"][4]["depth"] -
                                _result)
        data_list.append(results)

    Console.info("...done compiling data list.")

    Console.info("Writing to output file...")
    data_list_temp = []
    for i in data_list:
        data_list_temp += i

    # create file (overwrite if exists)
    nav_file = outpath / filename
    with nav_file.open("w") as fileout:
        json.dump(data_list_temp, fileout, indent=2)
    Console.info("...done writing to output file.")

    del data_list_temp
    del data_list

    # interlace the data based on timestamps
    Console.info("Interlacing data...")
    parse_interlacer(outpath, filename)
    Console.info("...done interlacing data. Output saved to",
                 outpath / filename)
    plot_parse_data(outpath, ftype)
    Console.info("Complete parse data")
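# Hedged usage sketch: parse a raw dive folder, overwriting any previous
# outputs (the dive path is a placeholder).
parse_single("/data/raw/2020/dive_01", force_overwrite=True)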
Example #22
def acfr_to_oplab(args):
    csv_filepath = Path(args.output_folder)
    force_overwite = args.force
    if not args.vehicle_pose and not args.stereo_pose:
        Console.error("Please provide at least one file")
        Console.quit("Missing command line arguments")
    if args.vehicle_pose:
        Console.info("Vehicle pose provided! Converting it to DR CSV...")
        vpp = AcfrVehiclePoseParser(args.vehicle_pose)
        dr = vpp.get_dead_reckoning()
        csv_filename = "auv_acfr_centre.csv"

        if (csv_filepath / csv_filename).exists() and not force_overwite:
            Console.error(
                "The DR file already exists. Use -F to force overwrite it")
            Console.quit("Default behaviour: not to overwrite results")
        else:
            write_csv(csv_filepath, dr, csv_filename, csv_flag=True)
            Console.info("Output vehicle pose written to",
                         csv_filepath / csv_filename)
    if args.stereo_pose:
        Console.info("Stereo pose provided! Converting it to camera CSVs...")
        spp = AcfrStereoPoseParser(args.stereo_pose)
        cam1, cam2 = spp.get_cameras()
        if (csv_filepath / "auv_dr_LC.csv").exists() and not force_overwite:
            Console.error(
                "The camera files already exist. Use -F to force overwrite them"
            )
            Console.quit("Default behaviour: not to overwrite results")
        else:
            write_csv(csv_filepath, cam1, "auv_acfr_LC", csv_flag=True)
            write_csv(csv_filepath, cam2, "auv_acfr_RC", csv_flag=True)
            Console.info("Output camera files written at", csv_filepath)

        if args.dive_folder is None:
            return

        # If the user provides a dive path, we can interpolate the ACFR navigation
        # to the laser timestamps
        path_processed = get_processed_folder(args.dive_folder)
        nav_standard_file = path_processed / "nav" / "nav_standard.json"
        nav_standard_file = get_processed_folder(nav_standard_file)
        Console.info("Loading json file {}".format(nav_standard_file))

        start_datetime = ""
        finish_datetime = ""

        file3 = csv_filepath / "auv_acfr_laser.csv"
        if file3.exists() and not force_overwite:
            Console.error(
                "The laser file already exists. Use -F to force overwrite it")
            Console.quit("Default behaviour: not to overwrite results")

        with nav_standard_file.open("r") as nav_standard:
            parsed_json_data = json.load(nav_standard)

            if start_datetime == "":
                epoch_start_time = epoch_from_json(parsed_json_data[1])
                start_datetime = epoch_to_datetime(epoch_start_time)
            else:
                epoch_start_time = string_to_epoch(start_datetime)
            if finish_datetime == "":
                epoch_finish_time = epoch_from_json(parsed_json_data[-1])
                finish_datetime = epoch_to_datetime(epoch_finish_time)
            else:
                epoch_finish_time = string_to_epoch(finish_datetime)

            Console.info("Interpolating laser to ACFR stereo pose data...")
            fileout3 = file3.open("w")
            fileout3.write(cam1[0].get_csv_header())
            for i in range(len(parsed_json_data)):
                Console.progress(i, len(parsed_json_data))
                epoch_timestamp = parsed_json_data[i]["epoch_timestamp"]
                if (epoch_timestamp >= epoch_start_time
                        and epoch_timestamp <= epoch_finish_time):
                    if "laser" in parsed_json_data[i]["category"]:
                        filename = parsed_json_data[i]["filename"]
                        c3_interp = interpolate_camera(epoch_timestamp, cam1,
                                                       filename)
                        fileout3.write(c3_interp.to_csv_row())
            fileout3.close()
            Console.info("Done! Laser file available at", str(file3))
Example #23
def hybis_to_oplab(args):
    if args.navigation_file is None:
        Console.error("Please provide a navigation file")
        Console.quit("Missing command line arguments")
    if args.image_path is None:
        Console.error("Please provide an image path")
        Console.quit("Missing command line arguments")
    if args.reference_lat is None:
        Console.error("Please provide a reference_lat")
        Console.quit("Missing command line arguments")
    if args.reference_lon is None:
        Console.error("Please provide a reference_lon")
        Console.quit("Missing command line arguments")
    if args.output_folder is None:
        Console.error("Please provide an output path")
        Console.quit("Missing command line arguments")
    parser = HybisParser(
        args.navigation_file,
        args.image_path,
        args.reference_lat,
        args.reference_lon,
    )
    force_overwite = args.force
    filepath = get_processed_folder(args.output_folder)
    start_datetime = epoch_to_datetime(parser.start_epoch)
    finish_datetime = epoch_to_datetime(parser.finish_epoch)

    # make path for processed outputs
    json_filename = ("json_renav_" + start_datetime[0:8] + "_" +
                     start_datetime[8:14] + "_" + finish_datetime[0:8] + "_" +
                     finish_datetime[8:14])
    renavpath = filepath / json_filename
    if not renavpath.is_dir():
        try:
            renavpath.mkdir()
        except Exception as e:
            print("Warning:", e)
    elif renavpath.is_dir() and not force_overwite:
        # Check if dataset has already been processed
        Console.error(
            "It looks like this dataset has already been processed for the",
            "specified time span.",
        )
        Console.error(
            "The following directory already exist: {}".format(renavpath))
        Console.error(
            "To overwrite the contents of this directory rerun auv_nav with",
            "the flag -F.",
        )
        Console.error("Example:   auv_nav process -F PATH")
        Console.quit("auv_nav process would overwrite json_renav files")

    output_dr_centre_path = renavpath / "csv" / "dead_reckoning"
    if not output_dr_centre_path.exists():
        output_dr_centre_path.mkdir(parents=True)
    camera_name = "hybis_camera"
    output_dr_centre_path = output_dr_centre_path / ("auv_dr_" + camera_name +
                                                     ".csv")
    parser.write(output_dr_centre_path)
    Console.info("Output written to", output_dr_centre_path)
Example #24
def oplab_to_acfr(args):
    if not args.dive_folder:
        Console.error("No dive folder provided. Exiting...")
        Console.quit("Missing command line arguments")
    if not args.output_folder:
        Console.error("No output folder provided. Exiting...")
        Console.quit("Missing command line arguments")

    output_folder = get_processed_folder(args.output_folder)
    vf = output_folder / "vehicle_pose_est.data"
    sf = output_folder / "stereo_pose_est.data"
    if (vf.exists() or sf.exists()) and not args.force:
        Console.error(
            "Output folder already exists. Use -F to overwrite. Exiting...")
        Console.quit("Default behaviour: not to overwrite results")

    Console.info("Loading mission.yaml")
    path_processed = get_processed_folder(args.dive_folder)
    mission_file = path_processed / "mission.yaml"
    mission = Mission(mission_file)

    vehicle_file = path_processed / "vehicle.yaml"
    vehicle = Vehicle(vehicle_file)

    origin_lat = mission.origin.latitude
    origin_lon = mission.origin.longitude

    json_list = list(path_processed.glob("json_*"))
    if len(json_list) == 0:
        Console.quit(
            "No navigation solution could be found. Please run ",
            "auv_nav parse and process first",
        )
    navigation_path = path_processed / json_list[0]
    # Try if ekf exists:
    full_navigation_path = navigation_path / "csv" / "ekf"

    vpw = AcfrVehiclePoseWriter(vf, origin_lat, origin_lon)
    vehicle_navigation_file = full_navigation_path / "auv_ekf_centre.csv"
    dataframe = pd.read_csv(vehicle_navigation_file)
    dr_list = []
    for _, row in dataframe.iterrows():
        sb = SyncedOrientationBodyVelocity()
        sb.from_df(row)
        dr_list.append(sb)
    vpw.write(dr_list)
    Console.info("Vehicle pose written to", vf)

    if not mission.image.cameras:
        Console.warn("No cameras found in the mission file.")
        return
    if len(mission.image.cameras) == 1:
        Console.error("Only one camera found in the mission file. Exiting...")
        Console.quit("ACFR stereo pose est requires two cameras.")

    camera0_name = mission.image.cameras[0].name
    camera1_name = mission.image.cameras[1].name

    camera0_nav_file = full_navigation_path / ("auv_ekf_" + camera0_name +
                                               ".csv")
    dataframe = pd.read_csv(camera0_nav_file)
    cam1_list = []
    for _, row in dataframe.iterrows():
        sb = Camera()
        sb.from_df(row)
        cam1_list.append(sb)

    camera1_nav_file = full_navigation_path / ("auv_ekf_" + camera1_name +
                                               ".csv")

    dataframe = pd.read_csv(camera1_nav_file)
    cam2_list = []
    for _, row in dataframe.iterrows():
        sb = Camera()
        sb.from_df(row)
        cam2_list.append(sb)

    spw = AcfrStereoPoseWriter(sf, origin_lat, origin_lon)

    spw.write(cam1_list, cam2_list)
    Console.info("Stereo pose written to", sf)

    Console.info("Generating combined.RAW")
    nav_standard_file = path_processed / "nav" / "nav_standard.json"
    nav_standard_file = get_processed_folder(nav_standard_file)
    Console.info("Loading json file {}".format(nav_standard_file))

    with nav_standard_file.open("r") as nav_standard:
        parsed_json_data = json.load(nav_standard)

    start_datetime = ""
    finish_datetime = ""

    # setup start and finish date time
    if start_datetime == "":
        epoch_start_time = epoch_from_json(parsed_json_data[1])
        start_datetime = epoch_to_datetime(epoch_start_time)
    else:
        epoch_start_time = string_to_epoch(start_datetime)
    if finish_datetime == "":
        epoch_finish_time = epoch_from_json(parsed_json_data[-1])
        finish_datetime = epoch_to_datetime(epoch_finish_time)
    else:
        epoch_finish_time = string_to_epoch(finish_datetime)

    sensors_std = {
        "usbl": {"model": "sensor"},
        "dvl": {"model": "sensor"},
        "depth": {"model": "sensor"},
        "orientation": {"model": "sensor"},
    }

    exporter = AcfrCombinedRawWriter(mission, vehicle, output_folder)

    # read in data from json file
    # i here is the number of the data packet
    for i in range(len(parsed_json_data)):
        Console.progress(i, len(parsed_json_data))
        epoch_timestamp = parsed_json_data[i]["epoch_timestamp"]
        if epoch_timestamp >= epoch_start_time and epoch_timestamp <= epoch_finish_time:
            if "velocity" in parsed_json_data[i]["category"]:
                if "body" in parsed_json_data[i]["frame"]:
                    # to check for corrupted data point which have inertial
                    # frame data values
                    if "epoch_timestamp_dvl" in parsed_json_data[i]:
                        # confirm time stamps of dvl are aligned with main
                        # clock (within a second)
                        if (abs(parsed_json_data[i]["epoch_timestamp"] -
                                parsed_json_data[i]["epoch_timestamp_dvl"])
                            ) < 1.0:
                            velocity_body = BodyVelocity()
                            velocity_body.from_json(parsed_json_data[i],
                                                    sensors_std["dvl"])
                            exporter.add(velocity_body)
                if "inertial" in parsed_json_data[i]["frame"]:
                    velocity_inertial = InertialVelocity()
                    velocity_inertial.from_json(parsed_json_data[i])
                    exporter.add(velocity_inertial)

            if "orientation" in parsed_json_data[i]["category"]:
                orientation = Orientation()
                orientation.from_json(parsed_json_data[i],
                                      sensors_std["orientation"])
                exporter.add(orientation)

            if "depth" in parsed_json_data[i]["category"]:
                depth = Depth()
                depth.from_json(parsed_json_data[i], sensors_std["depth"])
                exporter.add(depth)

            if "altitude" in parsed_json_data[i]["category"]:
                altitude = Altitude()
                altitude.from_json(parsed_json_data[i])
                exporter.add(altitude)

            if "usbl" in parsed_json_data[i]["category"]:
                usbl = Usbl()
                usbl.from_json(parsed_json_data[i], sensors_std["usbl"])
                exporter.add(usbl)

            if "image" in parsed_json_data[i]["category"]:
                camera1 = Camera()
                # LC
                camera1.from_json(parsed_json_data[i], "camera1")
                exporter.add(camera1)
                camera2 = Camera()
                camera2.from_json(parsed_json_data[i], "camera2")
                exporter.add(camera2)
Example #25
    def generate_attenuation_correction_parameters(self):
        """Generates image stats and attenuation coefficients and saves the
        parameters for process"""

        if len(self.altitude_list) < 3:
            Console.quit(
                "Insufficient number of images to compute attenuation ",
                "parameters...",
            )

        # create empty matrices to store image correction parameters
        self.image_raw_mean = np.empty(
            (self.image_channels, self.image_height, self.image_width),
            dtype=np.float32,
        )
        self.image_raw_std = np.empty(
            (self.image_channels, self.image_height, self.image_width),
            dtype=np.float32,
        )
        self.image_attenuation_parameters = np.empty(
            (self.image_channels, self.image_height, self.image_width, 3),
            dtype=np.float32,
        )
        self.image_corrected_mean = np.empty(
            (self.image_channels, self.image_height, self.image_width),
            dtype=np.float32,
        )
        self.image_corrected_std = np.empty(
            (self.image_channels, self.image_height, self.image_width),
            dtype=np.float32,
        )
        self.correction_gains = np.empty(
            (self.image_channels, self.image_height, self.image_width),
            dtype=np.float32,
        )

        image_size_gb = (
            self.image_channels
            * self.image_height
            * self.image_width
            * 4.0
            / (1024.0 ** 3)
        )
        max_bin_size_gb = 50.0
        max_bin_size = int(max_bin_size_gb / image_size_gb)
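        # Worked example (illustrative image size only, not from the dataset):
        # a 3-channel 1280 x 1024 float32 image occupies 3 * 1024 * 1280 * 4
        # bytes ~= 0.0146 GB, so max_bin_size = int(50.0 / 0.0146) ~= 3413
        # images can be accumulated per altitude bin within the memory budget.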

        self.bin_band = 0.1
        hist_bins = np.arange(self.altitude_min, self.altitude_max, self.bin_band)
        # Watch out: need to subtract 1 to get the correct number of bins
        # because the last bin is not included in the range
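        # For example, np.arange(2.0, 6.0, 0.1) returns 40 edges, giving
        # len(hist_bins) - 1 = 39 bins; np.digitize(x, hist_bins) - 1 below
        # maps a distance x to the index of the bin whose lower edge it falls on.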

        images_fn, images_map = open_memmap(
            shape=(
                len(hist_bins) - 1,
                self.image_height * self.image_width,
                self.image_channels,
            ),
            dtype=np.float32,
        )

        distances_fn, distances_map = open_memmap(
            shape=(len(hist_bins) - 1, self.image_height * self.image_width),
            dtype=np.float32,
        )

        distance_vector = None

        if self.depth_map_list and self.distance_metric == "depth_map":
            Console.info("Computing depth map histogram with", hist_bins.size, " bins")

            distance_vector = np.zeros((len(self.depth_map_list), 1))
            for i, dm_file in enumerate(self.depth_map_list):
                dm_np = depth_map.loader(dm_file, self.image_width, self.image_height)
                distance_vector[i] = dm_np.mean(axis=1)

        elif self.altitude_list and self.distance_metric == "altitude":
            Console.info("Computing altitude histogram with", hist_bins.size, " bins")
            distance_vector = np.array(self.altitude_list)

        if distance_vector is not None:
            idxs = np.digitize(distance_vector, hist_bins) - 1

            # Display histogram in console
            for idx_bin in range(hist_bins.size - 1):
                tmp_idxs = np.where(idxs == idx_bin)[0]
                Console.info(
                    "  Bin",
                    format(idx_bin, "02d"),
                    "(",
                    round(hist_bins[idx_bin], 1),
                    "m < x <",
                    round(hist_bins[idx_bin + 1], 1),
                    "m):",
                    len(tmp_idxs),
                    "images",
                )

            with tqdm_joblib(
                tqdm(desc="Computing altitude histogram", total=hist_bins.size - 1,)
            ):
                joblib.Parallel(n_jobs=-2, verbose=0)(
                    joblib.delayed(self.compute_distance_bin)(
                        idxs,
                        idx_bin,
                        images_map,
                        distances_map,
                        max_bin_size,
                        max_bin_size_gb,
                        distance_vector,
                    )
                    for idx_bin in range(hist_bins.size - 1)
                )

            # calculate attenuation parameters per channel
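            # The trailing dimension of 3 in image_attenuation_parameters holds
            # the per-pixel curve parameters fitted against distance (commonly
            # an exponential model I(d) = a * exp(b * d) + c in this kind of
            # attenuation correction); see
            # corrections.calculate_attenuation_parameters for the actual fit.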
            self.image_attenuation_parameters = corrections.calculate_attenuation_parameters(
                images_map,
                distances_map,
                self.image_height,
                self.image_width,
                self.image_channels,
            )

            # delete memmap handles
            del images_map
            os.remove(images_fn)
            del distances_map
            os.remove(distances_fn)

            # Save attenuation parameter results.
            np.save(
                self.attenuation_params_filepath, self.image_attenuation_parameters,
            )

            corrections.save_attenuation_plots(
                self.attenuation_parameters_folder,
                attn=self.image_attenuation_parameters,
            )

            # compute correction gains per channel
            target_altitude = distance_vector.mean()
            Console.info(
                "Computing correction gains for target altitude", target_altitude,
            )
            self.correction_gains = corrections.calculate_correction_gains(
                target_altitude,
                self.image_attenuation_parameters,
                self.image_height,
                self.image_width,
                self.image_channels,
            )
            Console.warn("Saving correction gains")
            # Save correction gains
            np.save(self.correction_gains_filepath, self.correction_gains)

            corrections.save_attenuation_plots(
                self.attenuation_parameters_folder, gains=self.correction_gains,
            )

            # Useful if fails, to reload precomputed numpyfiles.
            # TODO: offer as a new step.
            # self.image_attenuation_parameters = np.load(
            #   self.attenuation_params_filepath)
            # self.correction_gains = np.load(self.correction_gains_filepath)

            # apply gains to images
            Console.info("Applying attenuation corrections to images...")
            image_properties = [
                self.image_height,
                self.image_width,
                self.image_channels,
            ]
            runner = RunningMeanStd(image_properties)

            memmap_filename, memmap_handle = open_memmap(
                shape=(
                    len(self.camera_image_list),
                    self.image_height,
                    self.image_width,
                    self.image_channels,
                ),
                dtype=np.float32,
            )

            # DEBUG: can be removed
            # Console.error("depth_map_list size", len(self.depth_map_list))
            # Console.error("camera_image_list size", len(self.camera_image_list))
            ###################################################################################################
            for i in trange(len(self.camera_image_list)):
                # Load the image
                img = self.loader(self.camera_image_list[i])

                # Load the distance matrix
                if not self.depth_map_list:
                    # TODO: Show the depth_map creation
                    # if self.depth_map_list is None:
                    # Generate matrices on the fly
                    distance = distance_vector[i]
                    distance_mtx = np.empty((self.image_height, self.image_width))
                    distance_mtx.fill(distance)
                else:
                    distance_mtx = depth_map.loader(
                        self.depth_map_list[i], self.image_width, self.image_height,
                    )
                # TODO: Show the size of the produced distance_mtx
                # Correct the image
                corrected_img = corrections.attenuation_correct(
                    img,
                    distance_mtx,
                    self.image_attenuation_parameters,
                    self.correction_gains,
                )
                # TODO: Inspect the corrected image after attenuation correction
                # Before calling compute, let's show the corrected_img dimensions
                # Console.error("corrected_img.shape", corrected_img.shape)
                runner.compute(corrected_img)
                memmap_handle[i] = corrected_img.reshape(
                    self.image_height, self.image_width, self.image_channels
                )

            image_corrected_mean = runner.mean.reshape(
                self.image_height, self.image_width, self.image_channels
            )
            image_corrected_std = runner.std.reshape(
                self.image_height, self.image_width, self.image_channels
            )

            # save parameters for process
            np.save(
                self.corrected_mean_filepath, image_corrected_mean
            )  # TODO: make member
            np.save(
                self.corrected_std_filepath, image_corrected_std
            )  # TODO: make member

            corrections.save_attenuation_plots(
                self.attenuation_parameters_folder,
                img_mean=image_corrected_mean,
                img_std=image_corrected_std,
            )

        else:
            Console.info(
                "No altitude or depth maps available.", "Computing raw mean and std.",
            )

            image_raw_mean, image_raw_std = running_mean_std(
                self.camera_image_list, self.loader
            )
            np.save(self.raw_mean_filepath, image_raw_mean)
            np.save(self.raw_std_filepath, image_raw_std)

            # image_raw_mean = np.load(self.raw_mean_filepath)
            # image_raw_std = np.load(self.raw_std_filepath)

            ch = image_raw_mean.shape[0]
            if ch == 3:
                image_raw_mean = image_raw_mean.transpose((1, 2, 0))
                image_raw_std = image_raw_std.transpose((1, 2, 0))

            imageio.imwrite(
                Path(self.attenuation_parameters_folder) / "image_raw_mean.png",
                image_raw_mean,
            )
            imageio.imwrite(
                Path(self.attenuation_parameters_folder) / "image_raw_std.png",
                image_raw_std,
            )

            corrections.save_attenuation_plots(
                self.attenuation_parameters_folder,
                img_mean=image_raw_mean,
                img_std=image_raw_std,
            )

        Console.info("Correction parameters saved...")
Example #26
    def __new__(
        self,
        usbl_data,
        dvl_imu_data,
        N,
        sensors_std,
        dvl_noise_sigma_factor,
        imu_noise_sigma_factor,
        usbl_noise_sigma_factor,
        measurement_update_flag=True,
    ):
        # self.dvl_noise_sigma_factor = dvl_noise_sigma_factor
        # self.imu_noise_sigma_factor = imu_noise_sigma_factor
        # self.usbl_noise_sigma_factor = usbl_noise_sigma_factor
        """
        def eval(r, p):
            sum = 0.0
            for i in range(len(p)):  # calculate mean error
                dx = (
                    p[i].eastings[-1] - r.eastings[-1] + (world_size / 2.0)
                ) % world_size - (world_size / 2.0)
                dy = (
                    p[i].northings[-1] - r.northings[-1] + (world_size / 2.0)
                ) % world_size - (world_size / 2.0)
                err = math.sqrt(dx * dx + dy * dy)
                sum += err
            return sum / float(len(p))
        """

        # ========== Start Noise models ========== #
        def usbl_noise(usbl_datapoint):  # measurement noise
            # distance = usbl_datapoint.distance_to_ship # lateral_distance,bearing = latlon_to_metres(usbl_datapoint.latitude, usbl_datapoint.longitude, usbl_datapoint.latitude_ship, usbl_datapoint.longitude_ship)  # noqa
            # distance = math.sqrt(lateral_distance**2 + usbl_datapoint.depth**2) # noqa
            # error = usbl_noise_sigma_factor*(usbl_noise_std_offset + usbl_noise_std_factor*distance) # 5 is for the differential GPS, and the distance std factor 0.01 is used as 0.006 is too small and unrealistic # This is moved to parse_gaps and parse_usbl_dump # noqa
            if usbl_datapoint.northings_std != 0:
                error = usbl_datapoint.northings_std * usbl_noise_sigma_factor
            else:
                usbl_noise_std_offset = 5
                usbl_noise_std_factor = 0.01
                distance = math.sqrt(usbl_datapoint.northings**2 +
                                     usbl_datapoint.eastings**2 +
                                     usbl_datapoint.depth**2)
                error = usbl_noise_sigma_factor * (
                    usbl_noise_std_offset + usbl_noise_std_factor * distance)
            return error
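        # usbl_noise in short: if the fix reports its own northings std, scale
        # it by usbl_noise_sigma_factor; otherwise fall back to a linear range
        # model, error = factor * (5 m + 0.01 * slant_range), with the slant
        # range taken from the datapoint's northings/eastings/depth.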

        def dvl_noise(dvl_imu_datapoint, mode="estimate"):  # sensor1 noise
            # Vinnay's dvl_noise model: velocity_std = (-0.0125*((velocity)**2)+0.2*(velocity)+0.2125)/100) assuming noise of x_velocity = y_velocity = z_velocity # noqa
            velocity_std_factor = 0.001  # from catalogue rdi whn1200/600. # should read this in from somewhere else, e.g. json # noqa
            velocity_std_offset = 0.002  # 0.02 # 0.2 #from catalogue rdi whn1200/600. # should read this in from somewhere else # noqa
            x_velocity_std = (
                abs(dvl_imu_datapoint.x_velocity) * velocity_std_factor +
                velocity_std_offset
            )  # (-0.0125*((dvl_imu_datapoint.x_velocity)**2)+0.2*(dvl_imu_datapoint.x_velocity)+0.2125)/100 # noqa
            y_velocity_std = (
                abs(dvl_imu_datapoint.y_velocity) * velocity_std_factor +
                velocity_std_offset
            )  # (-0.0125*((dvl_imu_datapoint.y_velocity)**2)+0.2*(dvl_imu_datapoint.y_velocity)+0.2125)/100 # noqa
            z_velocity_std = (
                abs(dvl_imu_datapoint.z_velocity) * velocity_std_factor +
                velocity_std_offset
            )  # (-0.0125*((dvl_imu_datapoint.z_velocity)**2)+0.2*(dvl_imu_datapoint.z_velocity)+0.2125)/100 # noqa
            if mode == "estimate":
                x_velocity_estimate = random.gauss(
                    dvl_imu_datapoint.x_velocity,
                    dvl_noise_sigma_factor * x_velocity_std,
                )
                y_velocity_estimate = random.gauss(
                    dvl_imu_datapoint.y_velocity,
                    dvl_noise_sigma_factor * y_velocity_std,
                )
                z_velocity_estimate = random.gauss(
                    dvl_imu_datapoint.z_velocity,
                    dvl_noise_sigma_factor * z_velocity_std,
                )
                return (
                    x_velocity_estimate,
                    y_velocity_estimate,
                    z_velocity_estimate,
                )
            elif mode == "std":
                return max([x_velocity_std, y_velocity_std, z_velocity_std])
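        # dvl_noise in short: the per-axis velocity std grows linearly with
        # speed, std = 0.001 * |v| + 0.002 m/s (catalogue values). In
        # "estimate" mode a Gaussian sample around the measured velocity is
        # returned; in "std" mode the largest per-axis std is returned.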

        def imu_noise(
            previous_dvlimu_data_point,
            current_dvlimu_data_point,
            particle_list_data,
        ):  # sensor2 noise
            imu_noise = (
                0.003 * imu_noise_sigma_factor
            )  # each time_step + 0.003. assuming noise of roll = pitch = yaw
            if particle_list_data == 0:  # for initiation
                roll_estimate = random.gauss(current_dvlimu_data_point.roll,
                                             imu_noise)
                pitch_estimate = random.gauss(current_dvlimu_data_point.pitch,
                                              imu_noise)
                yaw_estimate = random.gauss(current_dvlimu_data_point.yaw,
                                            imu_noise)
            else:  # for propagation
                roll_estimate = particle_list_data.roll[-1] + random.gauss(
                    current_dvlimu_data_point.roll -
                    previous_dvlimu_data_point.roll,
                    imu_noise,
                )
                pitch_estimate = particle_list_data.pitch[-1] + random.gauss(
                    current_dvlimu_data_point.pitch -
                    previous_dvlimu_data_point.pitch,
                    imu_noise,
                )
                yaw_estimate = particle_list_data.yaw[-1] + random.gauss(
                    current_dvlimu_data_point.yaw -
                    previous_dvlimu_data_point.yaw,
                    imu_noise,
                )
            if yaw_estimate < 0:
                yaw_estimate += 360
            elif yaw_estimate > 360:
                yaw_estimate -= 360
            return roll_estimate, pitch_estimate, yaw_estimate

        # ========== End Noise models ========== #

        def initialize_particles(N, usbl_datapoint, dvl_imu_datapoint,
                                 init_dvl_imu_datapoint):
            particles = []
            northings_estimate = (usbl_datapoint.northings -
                                  dvl_imu_datapoint.northings +
                                  init_dvl_imu_datapoint.northings)
            eastings_estimate = (usbl_datapoint.eastings -
                                 dvl_imu_datapoint.eastings +
                                 init_dvl_imu_datapoint.eastings)
            for i in range(N):
                temp_particle = Particle()

                roll_estimate, pitch_estimate, yaw_estimate = imu_noise(
                    0, init_dvl_imu_datapoint, 0)
                (
                    x_velocity_estimate,
                    y_velocity_estimate,
                    z_velocity_estimate,
                ) = dvl_noise(init_dvl_imu_datapoint)

                usbl_uncertainty = usbl_noise(usbl_datapoint)
                # usbl_uncertainty = 0

                temp_particle.set(
                    random.gauss(eastings_estimate, usbl_uncertainty),
                    random.gauss(northings_estimate, usbl_uncertainty),
                    init_dvl_imu_datapoint.epoch_timestamp,
                    x_velocity_estimate,
                    y_velocity_estimate,
                    z_velocity_estimate,
                    roll_estimate,
                    pitch_estimate,
                    yaw_estimate,
                    init_dvl_imu_datapoint.altitude,
                    init_dvl_imu_datapoint.depth,
                )
                temp_particle.set_weight(1)
                particles.append(temp_particle)

            # Normalize weights
            weights_list = []
            for i in particles:
                weights_list.append(i.weight)
            normalized_weights = normalize_weights(weights_list)
            for index, particle_ in enumerate(particles):
                particle_.weight = normalized_weights[index]

            return particles

        def normalize_weights(weights_list):
            normalized_weights = []
            for i in weights_list:
                normalized_weights.append(i / sum(weights_list))
            return normalized_weights

        def propagate_particles(particles, previous_data_point,
                                current_data_point):
            for i in particles:
                # Propagation error model
                time_difference = (current_data_point.epoch_timestamp -
                                   previous_data_point.epoch_timestamp)

                roll_estimate, pitch_estimate, yaw_estimate = imu_noise(
                    previous_data_point, current_data_point, i)
                (
                    x_velocity_estimate,
                    y_velocity_estimate,
                    z_velocity_estimate,
                ) = dvl_noise(current_data_point)

                [
                    north_velocity_estimate,
                    east_velocity_estimate,
                    down_velocity_estimate,
                ] = body_to_inertial(
                    roll_estimate,
                    pitch_estimate,
                    yaw_estimate,
                    x_velocity_estimate,
                    y_velocity_estimate,
                    z_velocity_estimate,
                )
                [
                    previous_north_velocity_estimate,
                    previous_east_velocity_estimate,
                    previous_down_velocity_estimate,
                ] = body_to_inertial(
                    i.roll[-1],
                    i.pitch[-1],
                    i.yaw[-1],
                    i.x_velocity[-1],
                    i.y_velocity[-1],
                    i.z_velocity[-1],
                )
                # DR motion model
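                # Trapezoidal integration: the displacement over the time step
                # is 0.5 * dt * (v_now + v_previous) in the inertial frame,
                # added onto the particle's last northing/easting.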
                northing_estimate = (0.5 * time_difference *
                                     (north_velocity_estimate +
                                      previous_north_velocity_estimate) +
                                     i.northings[-1])
                easting_estimate = (
                    0.5 * time_difference *
                    (east_velocity_estimate + previous_east_velocity_estimate)
                    + i.eastings[-1])
                i.set(
                    easting_estimate,
                    northing_estimate,
                    current_data_point.epoch_timestamp,
                    x_velocity_estimate,
                    y_velocity_estimate,
                    z_velocity_estimate,
                    roll_estimate,
                    pitch_estimate,
                    yaw_estimate,
                    current_data_point.altitude,
                    current_data_point.depth,
                )

        def measurement_update(
            N,
            usbl_measurement,
            particles_list,
            resample_flag=True
        ):  # updates weights of particles and resamples them # USBL uncertainty follow the readings (0.06/100* depth)! assuming noise of northing = easting # noqa

            # Update weights (particle weighting)
            for i in particles_list[-1]:
                weight = i.measurement_prob(usbl_measurement,
                                            usbl_noise(usbl_measurement))
                # weights.append(weight)
                # i.weight.append(weight)
                i.weight = i.weight * weight
            # Normalize weights # this should be in particles...
            weights_list = []
            for i in particles_list[-1]:
                weights_list.append(i.weight)
            normalized_weights = normalize_weights(weights_list)
            for index, particle_ in enumerate(particles_list[-1]):
                particle_.weight = normalized_weights[index]

            # calculate Neff
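            # Effective sample size N_eff = 1 / sum(w_i^2); with normalized
            # weights it ranges from 1 (degenerate) to N (uniform). Resampling
            # is triggered below when N_eff drops under half the particle count.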
            weights_list = []
            for i in particles_list[-1]:
                weights_list.append(i.weight)
            effectiveParticleSize = 1 / sum([i**2 for i in weights_list])

            if effectiveParticleSize < len(particles_list[-1]) / 2:
                resample_flag = True
            else:
                resample_flag = False

            if resample_flag:
                # resampling wheel
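                # Stochastic "resampling wheel": starting from a random index,
                # advance around the weight circle by random steps of up to
                # 2 * max(weight), so particles are drawn with probability
                # proportional to their weights at O(N) cost.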
                temp_particles = []
                index = int(random.random() * N)
                beta = 0.0
                mw = max(weights_list)
                for i in range(N):
                    beta += random.random() * 2.0 * mw
                    while beta > weights_list[index]:
                        beta -= weights_list[index]
                        index = (index + 1) % N
                    temp_particle = Particle()
                    temp_particle.parentID = "{}-{}".format(
                        len(particles_list) - 1, index)
                    particles_list[-1][index].childIDList.append(
                        "{}-{}".format(len(particles_list),
                                       len(temp_particles)))
                    temp_particle.set(
                        particles_list[-1][index].eastings[-1],
                        particles_list[-1][index].northings[-1],
                        particles_list[-1][index].timestamps[-1],
                        particles_list[-1][index].x_velocity[-1],
                        particles_list[-1][index].y_velocity[-1],
                        particles_list[-1][index].z_velocity[-1],
                        particles_list[-1][index].roll[-1],
                        particles_list[-1][index].pitch[-1],
                        particles_list[-1][index].yaw[-1],
                        particles_list[-1][index].altitude[-1],
                        particles_list[-1][index].depth[-1],
                    )
                    temp_particle.set_weight(
                        1 / N)  # particles_list[-1][index].weight)
                    # temp_particle.set_error(usbl_measurement) # maybe can remove this? # noqa
                    temp_particles.append(temp_particle)
                return (True, temp_particles)
            else:
                return (False, particles_list)

        def extract_trajectory(final_particle):
            northings_trajectory = final_particle.northings
            eastings_trajectory = final_particle.eastings
            timestamp_list = final_particle.timestamps
            roll_list = final_particle.roll
            pitch_list = final_particle.pitch
            yaw_list = final_particle.yaw
            altitude_list = final_particle.altitude
            depth_list = final_particle.depth

            parentID = final_particle.parentID
            while parentID != "":
                particle_list = int(parentID.split("-")[0])
                element_list = int(parentID.split("-")[1])

                northings_trajectory = (
                    particles_list[particle_list][element_list].northings[:-1]
                    + northings_trajectory)
                eastings_trajectory = (
                    particles_list[particle_list][element_list].eastings[:-1] +
                    eastings_trajectory)
                timestamp_list = (
                    particles_list[particle_list][element_list].timestamps[:-1]
                    + timestamp_list)
                roll_list = (
                    particles_list[particle_list][element_list].roll[:-1] +
                    roll_list)
                pitch_list = (
                    particles_list[particle_list][element_list].pitch[:-1] +
                    pitch_list)
                yaw_list = (
                    particles_list[particle_list][element_list].yaw[:-1] +
                    yaw_list)
                altitude_list = (
                    particles_list[particle_list][element_list].altitude[:-1] +
                    altitude_list)
                depth_list = (
                    particles_list[particle_list][element_list].depth[:-1] +
                    depth_list)

                parentID = particles_list[particle_list][element_list].parentID
            return (
                northings_trajectory,
                eastings_trajectory,
                timestamp_list,
                roll_list,
                pitch_list,
                yaw_list,
                altitude_list,
                depth_list,
            )

        def mean_trajectory(particles):
            x_list_ = []
            y_list_ = []
            for i in particles:
                x_list_.append(i.weight * i.eastings[-1])
                y_list_.append(i.weight * i.northings[-1])
            x = sum(x_list_)
            y = sum(y_list_)
            return x, y

        x_list = []
        y_list = []

        particles_list = []
        usbl_datapoints = []

        # print('Initializing particles around first point of dead reckoning solution offset by averaged usbl readings') # noqa
        # Interpolate dvl_imu_data to usbl_data to initializing particles at first appropriate usbl timestamp. # noqa
        # if dvl_imu_data[dvl_imu_data_index].epoch_timestamp > usbl_data[usbl_data_index].epoch_timestamp: # noqa
        #     while dvl_imu_data[dvl_imu_data_index].epoch_timestamp > usbl_data[usbl_data_index].epoch_timestamp: # noqa
        #         usbl_data_index += 1
        # interpolate usbl_data to dvl_imu_data to initialize particles
        usbl_data_index = 0
        dvl_imu_data_index = 0
        if (usbl_data[usbl_data_index].epoch_timestamp >
                dvl_imu_data[dvl_imu_data_index].epoch_timestamp):
            while (usbl_data[usbl_data_index].epoch_timestamp >
                   dvl_imu_data[dvl_imu_data_index].epoch_timestamp):
                dvl_imu_data_index += 1
        if (dvl_imu_data[dvl_imu_data_index].epoch_timestamp ==
                usbl_data[usbl_data_index].epoch_timestamp):
            particles = initialize_particles(
                N,
                usbl_data[usbl_data_index],
                dvl_imu_data[dvl_imu_data_index],
                dvl_imu_data[0],
            )  # *For now assume eastings_std = northings_std, usbl_data[usbl_data_index].eastings_std) # noqa
            usbl_data_index += 1
            dvl_imu_data_index += 1
        elif (usbl_data[usbl_data_index].epoch_timestamp <
              dvl_imu_data[dvl_imu_data_index].epoch_timestamp):
            while (usbl_data[usbl_data_index + 1].epoch_timestamp <
                   dvl_imu_data[dvl_imu_data_index].epoch_timestamp):
                if len(usbl_data) - 2 == usbl_data_index:
                    Console.warn(
                        "USBL data does not span to DVL data. Is your data right?"  # noqa
                    )
                    break
                usbl_data_index += 1
            # interpolated_data = interpolate_data(usbl_data[usbl_data_index].epoch_timestamp, dvl_imu_data[dvl_imu_data_index], dvl_imu_data[dvl_imu_data_index+1]) # noqa
            interpolated_data = interpolate_usbl(
                dvl_imu_data[dvl_imu_data_index].epoch_timestamp,
                usbl_data[usbl_data_index],
                usbl_data[usbl_data_index + 1],
            )
            # dvl_imu_data.insert(dvl_imu_data_index+1, interpolated_data)
            usbl_data.insert(usbl_data_index + 1, interpolated_data)
            particles = initialize_particles(
                N,
                usbl_data[usbl_data_index],
                dvl_imu_data[dvl_imu_data_index + 1],
                dvl_imu_data[0],
            )  # *For now assume eastings_std = northings_std, usbl_data[usbl_data_index].eastings_std) # noqa
            usbl_data_index += 1
            dvl_imu_data_index += 1
        else:
            Console.quit(
                "Check dvl_imu_data and usbl_data in particle_filter.py.")
        usbl_datapoints.append(usbl_data[usbl_data_index - 1])
        particles_list.append(particles)
        # Force to start at DR init
        dvl_imu_data_index = 0

        x_, y_ = mean_trajectory(particles)
        x_list.append(x_)
        y_list.append(y_)

        max_uncertainty = 0
        usbl_uncertainty_list = []
        n = 0
        if measurement_update_flag is True:  # perform resampling
            last_usbl_flag = False
            while dvl_imu_data[dvl_imu_data_index] != dvl_imu_data[-1]:
                Console.progress(dvl_imu_data_index, len(dvl_imu_data))
                time_difference = (
                    dvl_imu_data[dvl_imu_data_index + 1].epoch_timestamp -
                    dvl_imu_data[dvl_imu_data_index].epoch_timestamp)
                max_uncertainty += (
                    dvl_noise(dvl_imu_data[dvl_imu_data_index], mode="std") *
                    time_difference)  # * 2

                if (dvl_imu_data[dvl_imu_data_index + 1].epoch_timestamp <
                        usbl_data[usbl_data_index].epoch_timestamp):
                    propagate_particles(
                        particles_list[-1],
                        dvl_imu_data[dvl_imu_data_index],
                        dvl_imu_data[dvl_imu_data_index + 1],
                    )
                    dvl_imu_data_index += 1
                else:
                    if not last_usbl_flag:
                        # interpolate, insert, propagate, resample measurement_update, add new particles to list, check and assign parent id, check parents that have no children and delete it (skip this step for now) ### # noqa
                        interpolated_data = interpolate_dvl(
                            usbl_data[usbl_data_index].epoch_timestamp,
                            dvl_imu_data[dvl_imu_data_index],
                            dvl_imu_data[dvl_imu_data_index + 1],
                        )
                        dvl_imu_data.insert(dvl_imu_data_index + 1,
                                            interpolated_data)
                        propagate_particles(
                            particles_list[-1],
                            dvl_imu_data[dvl_imu_data_index],
                            dvl_imu_data[dvl_imu_data_index + 1],
                        )

                        usbl_uncertainty_list.append(
                            usbl_noise(usbl_data[usbl_data_index]))

                        n += 1
                        resampled, new_particles = measurement_update(
                            N, usbl_data[usbl_data_index], particles_list)

                        if resampled:
                            particles_list.append(new_particles)
                            usbl_datapoints.append(usbl_data[usbl_data_index])
                            # reset usbl_uncertainty_list
                            usbl_uncertainty_list = []
                        else:
                            particles_list = new_particles

                        if usbl_data[usbl_data_index] == usbl_data[-1]:
                            last_usbl_flag = True
                            dvl_imu_data_index += 1
                        else:
                            usbl_data_index += 1
                            dvl_imu_data_index += 1
                    else:
                        propagate_particles(
                            particles_list[-1],
                            dvl_imu_data[dvl_imu_data_index],
                            dvl_imu_data[dvl_imu_data_index + 1],
                        )
                        dvl_imu_data_index += 1

                x_, y_ = mean_trajectory(particles_list[-1])
                x_list.append(x_)
                y_list.append(y_)

            # print (max_uncertainty)

            # select particle trajectory with largest overall weight
            # particles_weight_list = []
            particles_error_list = []
            for i in range(len(particles_list[-1])):
                parentID = particles_list[-1][i].parentID
                particles_error_list.append([])
                if len(particles_list[-1][i].error) != 0:
                    particles_error_list[-1] += particles_list[-1][i].error
                while parentID != "":
                    particle_list = int(parentID.split("-")[0])
                    element_list = int(parentID.split("-")[1])
                    parentID = particles_list[particle_list][
                        element_list].parentID
                    particles_error_list[-1] += particles_list[particle_list][
                        element_list].error
            for i in range(len(particles_error_list)):
                particles_error_list[i] = sum(particles_error_list[i])
            selected_particle = particles_list[-1][particles_error_list.index(
                min(particles_error_list))]
            (
                northings_trajectory,
                eastings_trajectory,
                timestamp_list,
                roll_list,
                pitch_list,
                yaw_list,
                altitude_list,
                depth_list,
            ) = extract_trajectory(selected_particle)
        else:  # do not perform resampling, only propagate
            while dvl_imu_data[dvl_imu_data_index] != dvl_imu_data[-1]:
                propagate_particles(
                    particles_list[-1],
                    dvl_imu_data[dvl_imu_data_index],
                    dvl_imu_data[dvl_imu_data_index + 1],
                )
                dvl_imu_data_index += 1

            ## select particle trajectory with least average error (maybe assign weights without resampling and compare total or average weight? actually doesn't really matter because path won't be used anyway, main purpose of this is to see the std plot) # noqa
            particles_error_list = []
            for i in range(len(particles_list[-1])):
                parentID = particles_list[-1][i].parentID
                particles_error_list.append([])
                particles_error_list[-1].append(particles_list[-1][i].error)
                while parentID != "":
                    particle_list = int(parentID.split("-")[0])
                    element_list = int(parentID.split("-")[1])
                    parentID = particles_list[particle_list][
                        element_list].parentID
                    particles_error_list[-1].append(
                        particles_list[particle_list][element_list].error)
            for i in range(len(particles_error_list)):
                particles_error_list[i] = sum(particles_error_list[i]) / len(
                    particles_error_list[i])
            selected_particle = particles_list[-1][particles_error_list.index(
                min(particles_error_list))]
            (
                northings_trajectory,
                eastings_trajectory,
                timestamp_list,
                roll_list,
                pitch_list,
                yaw_list,
                altitude_list,
                depth_list,
            ) = extract_trajectory(selected_particle)

        # calculate northings std, eastings std, yaw std of particles
        northings_std = []
        eastings_std = []
        yaw_std = []

        arr_northings = []
        arr_eastings = []
        arr_yaw = []
        for i in particles_list[0]:
            arr_northings.append([])
            arr_eastings.append([])
            arr_yaw.append([])
        for i in range(len(particles_list)):
            for j in range(len(particles_list[i])):
                if i != len(particles_list) - 1:
                    arr_northings[j] += particles_list[i][j].northings[:-1]
                    arr_eastings[j] += particles_list[i][j].eastings[:-1]
                    arr_yaw[j] += particles_list[i][j].yaw[:-1]
                else:
                    arr_northings[j] += particles_list[i][j].northings
                    arr_eastings[j] += particles_list[i][j].eastings
                    arr_yaw[j] += particles_list[i][j].yaw
        arr_northings = numpy.array(arr_northings)
        arr_eastings = numpy.array(arr_eastings)
        arr_yaw = numpy.array(arr_yaw)

        for i in numpy.std(arr_northings, axis=0):
            northings_std.append(i)
        for i in numpy.std(arr_eastings, axis=0):
            eastings_std.append(i)
        # yaw_std step check for different extreme values around 0 and 360. not sure if this method below is robust. # noqa
        arr_std_yaw = numpy.std(arr_yaw, axis=0)
        arr_yaw_change = []
        for i in range(len(arr_std_yaw)):
            if (
                    arr_std_yaw[i] > 30
            ):  # if std is more than 30 deg, means there's two extreme values, so minus 360 for anything above 180 deg. # noqa
                arr_yaw_change.append(i)
            # yaw_std.append(i)
        for i in arr_yaw:
            for j in arr_yaw_change:
                if i[j] > 180:
                    i[j] -= 360
        arr_std_yaw = numpy.std(arr_yaw, axis=0)
        for i in arr_std_yaw:
            yaw_std.append(i)
        # numpy.mean(arr, axis=0)

        pf_fusion_dvl_list = []
        for i in range(len(timestamp_list)):
            pf_fusion_dvl = SyncedOrientationBodyVelocity()
            pf_fusion_dvl.epoch_timestamp = timestamp_list[i]
            pf_fusion_dvl.northings = northings_trajectory[i]
            pf_fusion_dvl.eastings = eastings_trajectory[i]
            pf_fusion_dvl.depth = depth_list[i]
            pf_fusion_dvl.roll = roll_list[i]
            pf_fusion_dvl.pitch = pitch_list[i]
            pf_fusion_dvl.yaw = yaw_list[i]
            pf_fusion_dvl.altitude = altitude_list[i]
            pf_fusion_dvl_list.append(pf_fusion_dvl)

        # plt.scatter(x_list, y_list)

        return (
            pf_fusion_dvl_list,
            usbl_datapoints,
            particles_list,
            northings_std,
            eastings_std,
            yaw_std,
        )
def load_configuration_and_camera_system(path, suffix=None):
    """Generate correct_config and camera system objects from input config
    yaml files

    Parameters
    ----------
    path : Path
        User provided Path of source images
    suffix : str, optional
        Suffix appended to the correct_images configuration filename
        (correct_images_<suffix>.yaml); when empty or None, the default
        correct_images.yaml is used
    """

    Console.warn("Parsing multipaths with suffix:", suffix)

    # resolve paths to raw, processed and config folders
    path_raw_folder = get_raw_folder(path)
    path_config_folder = get_config_folder(path)

    # resolve path to mission.yaml
    path_mission = path_raw_folder / "mission.yaml"

    # find mission and correct_images yaml files
    if path_mission.exists():
        Console.info("File mission.yaml found at", path_mission)
    else:
        Console.quit("File mission.yaml file not found at", path_raw_folder)

    # load mission parameters
    mission = Mission(path_mission)

    # resolve path to camera.yaml file
    camera_yaml_raw_path = path_raw_folder / "camera.yaml"
    camera_yaml_config_path = path_config_folder / "camera.yaml"

    camera_yaml_path = None
    default_file_path_correct_config = None

    if not camera_yaml_raw_path.exists(
    ) and not camera_yaml_config_path.exists():
        Console.info(
            "No camera.yaml file found in /raw or /config folder... ",
            "Using default camera.yaml file...",
        )
        # find out default yaml paths
        root = Path(__file__).resolve().parents[1]

        acfr_std_camera_file = "auv_nav/default_yaml/ts1/SSK17-01/camera.yaml"
        sx3_camera_file = "auv_nav/default_yaml/ae2000/YK17-23C/camera.yaml"
        biocam_camera_file = "auv_nav/default_yaml/as6/DY109/camera.yaml"
        biocam4000_15c_camera_file = "auv_nav/default_yaml/alr/jc220/camera.yaml"
        hybis_camera_file = "auv_nav/default_yaml/hybis/camera.yaml"
        ntnu_camera_file = "auv_nav/default_yaml/ntnu_stereo/tautra21/camera.yaml"
        rosbag_camera_file = "auv_nav/default_yaml/rosbag/grassmap/camera.yaml"

        acfr_std_correct_config_file = (
            "correct_images/default_yaml/acfr/correct_images.yaml")
        sx3_std_correct_config_file = (
            "correct_images/default_yaml/sx3/correct_images.yaml")
        biocam_std_correct_config_file = (
            "correct_images/default_yaml/biocam/correct_images.yaml")
        biocam4000_15c_std_correct_config_file = (
            "correct_images/default_yaml/biocam4000_15c/correct_images.yaml")
        hybis_std_correct_config_file = (
            "correct_images/default_yaml/hybis/correct_images.yaml")
        ntnu_std_correct_config_file = (
            "correct_images/default_yaml/ntnu_stereo/correct_images.yaml")
        rosbag_std_correct_config_file = (
            "correct_images/default_yaml/rosbag/correct_images.yaml")

        Console.info("Image format:", mission.image.format)

        if mission.image.format == "acfr_standard":
            camera_yaml_path = root / acfr_std_camera_file
            default_file_path_correct_config = root / acfr_std_correct_config_file
        elif mission.image.format == "seaxerocks_3":
            camera_yaml_path = root / sx3_camera_file
            default_file_path_correct_config = root / sx3_std_correct_config_file
        elif mission.image.format == "biocam":
            camera_yaml_path = root / biocam_camera_file
            default_file_path_correct_config = root / biocam_std_correct_config_file
        elif mission.image.format == "biocam4000_15c":
            camera_yaml_path = root / biocam4000_15c_camera_file
            default_file_path_correct_config = (
                root / biocam4000_15c_std_correct_config_file)
        elif mission.image.format == "hybis":
            camera_yaml_path = root / hybis_camera_file
            default_file_path_correct_config = root / hybis_std_correct_config_file
        elif mission.image.format == "ntnu_stereo":
            camera_yaml_path = root / ntnu_camera_file
            default_file_path_correct_config = root / ntnu_std_correct_config_file
        elif mission.image.format == "rosbag":
            camera_yaml_path = root / rosbag_camera_file
            default_file_path_correct_config = root / rosbag_std_correct_config_file
            camera_yaml_path.copy(camera_yaml_config_path)
            Console.info(
                "Copied camera.yaml file to config folder. Please edit it.")
            Console.info("The file is located at", camera_yaml_config_path)
        else:
            Console.quit(
                "Image format in mission.yaml does not match any known camera system.",
                "Provide a correct camera.yaml in the /raw folder...",
            )
    elif camera_yaml_raw_path.exists(
    ) and not camera_yaml_config_path.exists():
        Console.info("Found camera.yaml file in /raw folder")
        camera_yaml_path = camera_yaml_raw_path
    elif not camera_yaml_raw_path.exists() and camera_yaml_config_path.exists(
    ):
        Console.info("Found camera.yaml file in /config folder")
        camera_yaml_path = camera_yaml_config_path
    elif camera_yaml_raw_path.exists() and camera_yaml_config_path.exists():
        Console.info("Found camera.yaml both in /raw and in /config folder")
        Console.info("Using camera.yaml from /config folder")
        camera_yaml_path = camera_yaml_config_path
    else:
        Console.quit(
            "rosbag image type requires a camera.yaml file in /config folder",
            "Please provide camera.yaml file in /config folder",
        )

    # instantiate the camera system and setup cameras from mission and
    # config files / auv_nav
    camera_system = CameraSystem(camera_yaml_path, path_raw_folder)
    if camera_system.camera_system != mission.image.format:
        Console.quit(
            "Image system in camera.yaml does not match with mission.yaml",
            "Provide correct camera.yaml in /raw folder",
        )
    # check for correct_config yaml path
    path_correct_images = None
    if suffix == "" or suffix is None:
        path_correct_images = path_config_folder / "correct_images.yaml"
    else:
        path_correct_images = path_config_folder / ("correct_images_" +
                                                    suffix + ".yaml")
    if path_correct_images.exists():
        Console.info(
            "Configuration file correct_images.yaml found at",
            path_correct_images,
        )
    else:
        default_file_path_correct_config.copy(path_correct_images)
        Console.warn(
            "Configuration file not found, copying a default one at",
            path_correct_images,
        )

    # load parameters from correct_config.yaml
    correct_config = CorrectConfig(path_correct_images)
    return correct_config, camera_system
def rescale(
    image_array: np.ndarray,
    interpolate_method: str,
    target_pixel_size_m: float,
    altitude: float,
    f_x: float,
    f_y: float,
    maintain_pixels: bool,
) -> np.ndarray:
    image_shape = image_array.shape
    image_height = image_shape[0]
    image_width = image_shape[1]
    pixel_height = altitude / f_y
    pixel_width = altitude / f_x
    vertical_rescale = pixel_height / target_pixel_size_m
    horizontal_rescale = pixel_width / target_pixel_size_m
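    # Worked example (illustrative numbers): at 4 m altitude with f_x = 1600 px,
    # pixel_width = 4 / 1600 = 0.0025 m; for a 5 mm target pixel size,
    # horizontal_rescale = 0.0025 / 0.005 = 0.5, i.e. the image is downscaled.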

    method = None
    if interpolate_method == "bicubic":
        method = Image.BICUBIC
    elif interpolate_method == "bilinear":
        method = Image.BILINEAR
    elif interpolate_method == "nearest_neighbour":
        method = Image.NEAREST
    elif interpolate_method == "lanczos":
        method = Image.LANCZOS
    else:
        Console.error("The requested rescaling method is not implemented.")
        Console.error("Valid methods are: ")
        Console.error("  * bicubic")
        Console.error("  * bilinear")
        Console.error("  * nearest_neighbour")
        Console.error("  * lanczos")
        Console.quit("Rescaling method not implemented.")

    image_rgb = Image.fromarray(image_array, "RGB")

    if maintain_pixels:
        if vertical_rescale < 1 or horizontal_rescale < 1:
            size = (
                int(image_width * horizontal_rescale),
                int(image_height * vertical_rescale),
            )
            image_rgb = image_rgb.resize(size, resample=method)
            size = (image_width, image_height)
            image_rgb = image_rgb.resize(size, resample=method)
        else:
            crop_width = int((1 / horizontal_rescale) * image_width)
            crop_height = int((1 / vertical_rescale) * image_height)

            # find crop box dimensions
            box_left = int((image_width - crop_width) / 2)
            box_upper = int((image_height - crop_height) / 2)
            box_right = image_width - box_left
            box_lower = image_height - box_upper

            # crop the image to the center
            box = (box_left, box_upper, box_right, box_lower)
            cropped_image = image_rgb.crop(box)

            # resize the cropped image to the size of original image
            size = (image_width, image_height)
            image_rgb = cropped_image.resize(size, resample=method)
    else:
        size = (
            int(image_width * horizontal_rescale),
            int(image_height * vertical_rescale),
        )
        image_rgb = image_rgb.resize(size, resample=method)

    image = np.array(image_rgb, dtype=np.uint8)
    return image
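# A minimal usage sketch for rescale() (hypothetical values, not part of the
# original module): downscale a synthetic RGB frame to a 5 mm/pixel target
# while keeping the original pixel count.
#
#   import numpy as np
#   frame = np.zeros((1024, 1280, 3), dtype=np.uint8)
#   out = rescale(frame, "bilinear", target_pixel_size_m=0.005, altitude=4.0,
#                 f_x=1600.0, f_y=1600.0, maintain_pixels=True)
#   assert out.shape == frame.shape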
def rescale_camera(path, camera_system, camera):
    name = camera.camera_name
    distance_path = camera.distance_path
    interpolate_method = camera.interpolate_method
    image_path = camera.path
    target_pixel_size = camera.target_pixel_size
    maintain_pixels = bool(camera.maintain_pixels)
    output_folder = camera.output_folder

    idx = [
        i for i, cam in enumerate(camera_system.cameras)
        if cam.name == name
    ]

    if len(idx) > 0:
        Console.info("Camera found in camera.yaml file...")
    else:
        Console.warn(
            "Camera not found in camera.yaml file. Please provide a relevant "
            "camera.yaml file...")
        return False

    # obtain images to be rescaled
    path_processed = get_processed_folder(path)
    image_path = path_processed / image_path

    # obtain path to distance / altitude values
    full_distance_path = path_processed / distance_path
    full_distance_path = full_distance_path / "csv" / "ekf"
    distance_file = "auv_ekf_" + name + ".csv"
    distance_path = full_distance_path / distance_file

    # obtain focal lengths from calibration file
    camera_params_folder = path_processed / "calibration"
    camera_params_filename = "mono_" + name + ".yaml"
    camera_params_file_path = camera_params_folder / camera_params_filename

    if not camera_params_file_path.exists():
        Console.quit("Calibration file not found...")
    else:
        Console.info("Calibration file found...")

    monocam = MonoCamera(camera_params_file_path)
    focal_length_x = monocam.K[0, 0]
    focal_length_y = monocam.K[1, 1]

    # create output path
    output_directory = path_processed / output_folder
    if not output_directory.exists():
        output_directory.mkdir(parents=True)

    # call rescale function
    dataframe = pd.read_csv(Path(distance_path))
    imagenames_list = [
        filename for filename in os.listdir(image_path)
        if filename[-4:] == ".jpg" or filename[-4:] == ".png"
        or filename[-4:] == ".tif"
    ]
    Console.info("Distance values loaded...")
    rescale_images(
        imagenames_list,
        image_path,
        interpolate_method,
        target_pixel_size,
        dataframe,
        output_directory,
        focal_length_x,
        focal_length_y,
        maintain_pixels,
    )
    return True
Example #30
def parse_ntnu_dvl(mission, vehicle, category, output_format, outpath):
    # Get your data from a file using mission paths, for example
    filepath = mission.velocity.filepath
    log_file_path = get_raw_folder(outpath / ".." / filepath)

    category_str = None
    if category == Category.VELOCITY:
        category_str = "velocity"
    elif category == Category.ALTITUDE:
        category_str = "altitude"

    velocity_std_factor = mission.velocity.std_factor
    velocity_std_offset = mission.velocity.std_offset
    heading_offset = vehicle.dvl.yaw
    body_velocity = BodyVelocity(
        velocity_std_factor,
        velocity_std_offset,
        heading_offset,
    )
    altitude = Altitude(altitude_std_factor=mission.altitude.std_factor)

    data_list = []

    num_entries = 0
    num_valid_entries = 0

    # For each log file
    for log_file in log_file_path.glob("*.log"):
        # Open the log file
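        # Note: header_list (the DVL log column names) is assumed to be defined
        # at module level in the original parser; it is not shown in this excerpt.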
        df = pd.read_csv(log_file,
                         sep="\t",
                         skiprows=(0, 1),
                         names=header_list)

        if category == Category.VELOCITY:
            Console.info("... parsing velocity")
            # For each row in the file
            for index, row in df.iterrows():
                body_velocity.from_ntnu_dvl(str(log_file.name), row)
                num_entries += 1
                if body_velocity.valid():
                    # DVL provides -32 when no bottom lock
                    data = body_velocity.export(output_format)
                    num_valid_entries += 1
                    data_list.append(data)
        elif category == Category.ALTITUDE:
            Console.info("... parsing altitude")
            # For each row in the file
            for index, row in df.iterrows():
                num_entries += 1
                altitude.from_ntnu_dvl(str(log_file.name), row)
                if altitude.valid():
                    num_valid_entries += 1
                    data = altitude.export(output_format)
                    data_list.append(data)

        elif category == Category.ORIENTATION:
            Console.quit("NTNU DVL parser has no ORIENTATION available")
        elif category == Category.DEPTH:
            Console.quit("NTNU DVL parser has no DEPTH available")
        elif category == Category.USBL:
            Console.quit("NTNU DVL parser has no USBL available")
        else:
            Console.quit("NTNU DVL parser has no category", category,
                         "available")

    Console.info(
        "... parsed",
        num_entries,
        "entries, of which",
        num_valid_entries,
        "are valid for category",
        category_str,
    )

    return data_list